Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c501.c4
-rw-r--r--drivers/net/3c503.c4
-rw-r--r--drivers/net/3c507.c6
-rw-r--r--drivers/net/3c515.c2
-rw-r--r--drivers/net/3c527.c6
-rw-r--r--drivers/net/8139too.c3
-rw-r--r--drivers/net/82596.c2
-rw-r--r--drivers/net/Kconfig359
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/Space.c5
-rw-r--r--drivers/net/appletalk/Kconfig1
-rw-r--r--drivers/net/ariadne.c5
-rw-r--r--drivers/net/arm/am79c961a.c9
-rw-r--r--drivers/net/arm/ixp4xx_eth.c4
-rw-r--r--drivers/net/arm/ks8695net.c290
-rw-r--r--drivers/net/arm/w90p910_ether.c2
-rw-r--r--drivers/net/at1700.c8
-rw-r--r--drivers/net/atarilance.c2
-rw-r--r--drivers/net/atl1c/atl1c.h4
-rw-r--r--drivers/net/atl1c/atl1c_hw.c15
-rw-r--r--drivers/net/atl1c/atl1c_hw.h43
-rw-r--r--drivers/net/atl1c/atl1c_main.c47
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c12
-rw-r--r--drivers/net/atl1e/atl1e_hw.c34
-rw-r--r--drivers/net/atl1e/atl1e_hw.h111
-rw-r--r--drivers/net/atl1e/atl1e_main.c12
-rw-r--r--drivers/net/atlx/atl1.c89
-rw-r--r--drivers/net/atlx/atl2.c6
-rw-r--r--drivers/net/au1000_eth.c2
-rw-r--r--drivers/net/ax88796.c810
-rw-r--r--drivers/net/b44.c11
-rw-r--r--drivers/net/bcm63xx_enet.c2
-rw-r--r--drivers/net/benet/be.h88
-rw-r--r--drivers/net/benet/be_cmds.c351
-rw-r--r--drivers/net/benet/be_cmds.h136
-rw-r--r--drivers/net/benet/be_ethtool.c91
-rw-r--r--drivers/net/benet/be_hw.h149
-rw-r--r--drivers/net/benet/be_main.c804
-rw-r--r--drivers/net/bfin_mac.c83
-rw-r--r--drivers/net/bfin_mac.h11
-rw-r--r--drivers/net/bna/bfa_defs.h22
-rw-r--r--drivers/net/bna/bfa_defs_mfg_comm.h22
-rw-r--r--drivers/net/bna/bfa_ioc.c1219
-rw-r--r--drivers/net/bna/bfa_ioc.h49
-rw-r--r--drivers/net/bna/bfa_ioc_ct.c102
-rw-r--r--drivers/net/bna/bfi_ctreg.h41
-rw-r--r--drivers/net/bna/bna.h6
-rw-r--r--drivers/net/bna/bna_ctrl.c377
-rw-r--r--drivers/net/bna/bna_txrx.c44
-rw-r--r--drivers/net/bna/bna_types.h11
-rw-r--r--drivers/net/bna/bnad.c525
-rw-r--r--drivers/net/bna/bnad.h33
-rw-r--r--drivers/net/bna/bnad_ethtool.c9
-rw-r--r--drivers/net/bnx2.c152
-rw-r--r--drivers/net/bnx2.h9
-rw-r--r--drivers/net/bnx2x/Makefile2
-rw-r--r--drivers/net/bnx2x/bnx2x.h222
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c338
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h108
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c2145
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.h197
-rw-r--r--drivers/net/bnx2x/bnx2x_dump.h988
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c448
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h445
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h220
-rw-r--r--drivers/net/bnx2x/bnx2x_init_ops.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c3245
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h86
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c1407
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h133
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c22
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/bonding/Makefile5
-rw-r--r--drivers/net/bonding/bond_3ad.c47
-rw-r--r--drivers/net/bonding/bond_3ad.h3
-rw-r--r--drivers/net/bonding/bond_alb.c42
-rw-r--r--drivers/net/bonding/bond_alb.h38
-rw-r--r--drivers/net/bonding/bond_debugfs.c146
-rw-r--r--drivers/net/bonding/bond_ipv6.c7
-rw-r--r--drivers/net/bonding/bond_main.c723
-rw-r--r--drivers/net/bonding/bond_procfs.c275
-rw-r--r--drivers/net/bonding/bond_sysfs.c23
-rw-r--r--drivers/net/bonding/bonding.h137
-rw-r--r--drivers/net/caif/Makefile4
-rw-r--r--drivers/net/caif/caif_shm_u5500.c4
-rw-r--r--drivers/net/caif/caif_shmcore.c2
-rw-r--r--drivers/net/can/Kconfig25
-rw-r--r--drivers/net/can/Makefile3
-rw-r--r--drivers/net/can/at91_can.c138
-rw-r--r--drivers/net/can/c_can/Kconfig15
-rw-r--r--drivers/net/can/c_can/Makefile8
-rw-r--r--drivers/net/can/c_can/c_can.c1158
-rw-r--r--drivers/net/can/c_can/c_can.h86
-rw-r--r--drivers/net/can/c_can/c_can_platform.c215
-rw-r--r--drivers/net/can/janz-ican3.c11
-rw-r--r--drivers/net/can/mcp251x.c2
-rw-r--r--drivers/net/can/mscan/Kconfig2
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c15
-rw-r--r--drivers/net/can/mscan/mscan.c2
-rw-r--r--drivers/net/can/pch_can.c1355
-rw-r--r--drivers/net/can/sja1000/plx_pci.c2
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c17
-rw-r--r--drivers/net/can/slcan.c756
-rw-r--r--drivers/net/can/softing/Kconfig30
-rw-r--r--drivers/net/can/softing/Makefile6
-rw-r--r--drivers/net/can/softing/softing.h167
-rw-r--r--drivers/net/can/softing/softing_cs.c360
-rw-r--r--drivers/net/can/softing/softing_fw.c691
-rw-r--r--drivers/net/can/softing/softing_main.c894
-rw-r--r--drivers/net/can/softing/softing_platform.h40
-rw-r--r--drivers/net/can/usb/esd_usb2.c6
-rw-r--r--drivers/net/cassini.c22
-rw-r--r--drivers/net/cassini.h3
-rw-r--r--drivers/net/chelsio/my3126.c2
-rw-r--r--drivers/net/chelsio/sge.c10
-rw-r--r--drivers/net/chelsio/subr.c2
-rw-r--r--drivers/net/cnic.c860
-rw-r--r--drivers/net/cnic.h29
-rw-r--r--drivers/net/cnic_defs.h2095
-rw-r--r--drivers/net/cnic_if.h30
-rw-r--r--drivers/net/cris/eth_v10.c34
-rw-r--r--drivers/net/cs89x0.c19
-rw-r--r--drivers/net/cxgb3/ael1002.c24
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c6
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c11
-rw-r--r--drivers/net/cxgb3/mc5.c2
-rw-r--r--drivers/net/cxgb3/t3_hw.c4
-rw-r--r--drivers/net/cxgb4/cxgb4.h4
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c144
-rw-r--r--drivers/net/cxgb4/sge.c22
-rw-r--r--drivers/net/cxgb4/t4_hw.c95
-rw-r--r--drivers/net/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/cxgb4/t4fw_api.h1
-rw-r--r--drivers/net/cxgb4vf/adapter.h2
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c142
-rw-r--r--drivers/net/cxgb4vf/sge.c9
-rw-r--r--drivers/net/cxgb4vf/t4vf_hw.c20
-rw-r--r--drivers/net/davinci_emac.c4
-rw-r--r--drivers/net/depca.c8
-rw-r--r--drivers/net/dl2k.c4
-rw-r--r--drivers/net/dm9000.c18
-rw-r--r--drivers/net/dnet.c3
-rw-r--r--drivers/net/e1000/e1000_hw.c350
-rw-r--r--drivers/net/e1000/e1000_hw.h62
-rw-r--r--drivers/net/e1000/e1000_main.c65
-rw-r--r--drivers/net/e1000/e1000_osdep.h20
-rw-r--r--drivers/net/e1000/e1000_param.c13
-rw-r--r--drivers/net/e1000e/82571.c270
-rw-r--r--drivers/net/e1000e/Makefile2
-rw-r--r--drivers/net/e1000e/defines.h12
-rw-r--r--drivers/net/e1000e/e1000.h14
-rw-r--r--drivers/net/e1000e/es2lan.c14
-rw-r--r--drivers/net/e1000e/ethtool.c263
-rw-r--r--drivers/net/e1000e/hw.h10
-rw-r--r--drivers/net/e1000e/ich8lan.c152
-rw-r--r--drivers/net/e1000e/lib.c168
-rw-r--r--drivers/net/e1000e/netdev.c568
-rw-r--r--drivers/net/e1000e/param.c8
-rw-r--r--drivers/net/e1000e/phy.c104
-rw-r--r--drivers/net/e2100.c2
-rw-r--r--drivers/net/eepro.c13
-rw-r--r--drivers/net/eexpress.c2
-rw-r--r--drivers/net/ehea/ehea.h17
-rw-r--r--drivers/net/ehea/ehea_ethtool.c34
-rw-r--r--drivers/net/ehea/ehea_main.c446
-rw-r--r--drivers/net/ehea/ehea_phyp.c40
-rw-r--r--drivers/net/ehea/ehea_qmr.c89
-rw-r--r--drivers/net/enc28j60.c2
-rw-r--r--drivers/net/enic/Makefile2
-rw-r--r--drivers/net/enic/enic.h15
-rw-r--r--drivers/net/enic/enic_dev.c221
-rw-r--r--drivers/net/enic/enic_dev.h41
-rw-r--r--drivers/net/enic/enic_main.c522
-rw-r--r--drivers/net/enic/enic_res.h1
-rw-r--r--drivers/net/enic/vnic_dev.c26
-rw-r--r--drivers/net/enic/vnic_dev.h8
-rw-r--r--drivers/net/enic/vnic_devcmd.h38
-rw-r--r--drivers/net/enic/vnic_rq.h5
-rw-r--r--drivers/net/enic/vnic_vic.h31
-rw-r--r--drivers/net/epic100.c4
-rw-r--r--drivers/net/eql.c10
-rw-r--r--drivers/net/ethoc.c154
-rw-r--r--drivers/net/fec.c775
-rw-r--r--drivers/net/fec.h5
-rw-r--r--drivers/net/fec_mpc52xx.c32
-rw-r--r--drivers/net/fec_mpc52xx.h2
-rw-r--r--drivers/net/fec_mpc52xx_phy.c5
-rw-r--r--drivers/net/forcedeth.c1176
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c17
-rw-r--r--drivers/net/fs_enet/mii-bitbang.c9
-rw-r--r--drivers/net/fs_enet/mii-fec.c15
-rw-r--r--drivers/net/fsl_pq_mdio.c9
-rw-r--r--drivers/net/ftmac100.c1198
-rw-r--r--drivers/net/ftmac100.h180
-rw-r--r--drivers/net/gianfar.c35
-rw-r--r--drivers/net/gianfar.h10
-rw-r--r--drivers/net/greth.c229
-rw-r--r--drivers/net/greth.h2
-rw-r--r--drivers/net/hamachi.c4
-rw-r--r--drivers/net/hamradio/bpqether.c5
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hp.c6
-rw-r--r--drivers/net/ibm_newemac/core.c15
-rw-r--r--drivers/net/ibm_newemac/mal.c9
-rw-r--r--drivers/net/ibm_newemac/rgmii.c9
-rw-r--r--drivers/net/ibm_newemac/tah.c9
-rw-r--r--drivers/net/ibm_newemac/zmii.c9
-rw-r--r--drivers/net/ibmveth.c7
-rw-r--r--drivers/net/ifb.c48
-rw-r--r--drivers/net/igb/e1000_82575.c333
-rw-r--r--drivers/net/igb/e1000_82575.h6
-rw-r--r--drivers/net/igb/e1000_defines.h59
-rw-r--r--drivers/net/igb/e1000_hw.h15
-rw-r--r--drivers/net/igb/e1000_mbx.c38
-rw-r--r--drivers/net/igb/e1000_nvm.c157
-rw-r--r--drivers/net/igb/e1000_nvm.h3
-rw-r--r--drivers/net/igb/e1000_phy.c11
-rw-r--r--drivers/net/igb/e1000_regs.h28
-rw-r--r--drivers/net/igb/igb.h9
-rw-r--r--drivers/net/igb/igb_ethtool.c30
-rw-r--r--drivers/net/igb/igb_main.c332
-rw-r--r--drivers/net/igbvf/Makefile2
-rw-r--r--drivers/net/igbvf/defines.h2
-rw-r--r--drivers/net/igbvf/ethtool.c15
-rw-r--r--drivers/net/igbvf/igbvf.h7
-rw-r--r--drivers/net/igbvf/mbx.c2
-rw-r--r--drivers/net/igbvf/mbx.h2
-rw-r--r--drivers/net/igbvf/netdev.c96
-rw-r--r--drivers/net/igbvf/regs.h2
-rw-r--r--drivers/net/igbvf/vf.c8
-rw-r--r--drivers/net/igbvf/vf.h4
-rw-r--r--drivers/net/ipg.c4
-rw-r--r--drivers/net/irda/act200l-sir.c2
-rw-r--r--drivers/net/irda/bfin_sir.h2
-rw-r--r--drivers/net/irda/donauboe.c4
-rw-r--r--drivers/net/irda/donauboe.h2
-rw-r--r--drivers/net/irda/irtty-sir.c2
-rw-r--r--drivers/net/irda/mcs7780.c2
-rw-r--r--drivers/net/irda/sh_irda.c14
-rw-r--r--drivers/net/irda/smsc-ircc2.c2
-rw-r--r--drivers/net/iseries_veth.c27
-rw-r--r--drivers/net/ixgb/ixgb.h2
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c39
-rw-r--r--drivers/net/ixgb/ixgb_main.c109
-rw-r--r--drivers/net/ixgb/ixgb_param.c21
-rw-r--r--drivers/net/ixgbe/Makefile2
-rw-r--r--drivers/net/ixgbe/ixgbe.h157
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c158
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c1111
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c1206
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h18
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c177
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h15
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c150
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h25
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c188
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h29
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c482
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c496
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c171
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c2573
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c29
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h6
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c614
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h12
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c173
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h201
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c737
-rw-r--r--drivers/net/ixgbevf/Makefile2
-rw-r--r--drivers/net/ixgbevf/defines.h5
-rw-r--r--drivers/net/ixgbevf/ethtool.c22
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h7
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c123
-rw-r--r--drivers/net/ixgbevf/mbx.c2
-rw-r--r--drivers/net/ixgbevf/mbx.h2
-rw-r--r--drivers/net/ixgbevf/regs.h4
-rw-r--r--drivers/net/ixgbevf/vf.c8
-rw-r--r--drivers/net/ixgbevf/vf.h3
-rw-r--r--drivers/net/jme.c326
-rw-r--r--drivers/net/jme.h87
-rw-r--r--drivers/net/ks8851.c33
-rw-r--r--drivers/net/ksz884x.c22
-rw-r--r--drivers/net/lance.c2
-rw-r--r--drivers/net/lib82596.c2
-rw-r--r--drivers/net/lib8390.c24
-rw-r--r--drivers/net/ll_temac_main.c13
-rw-r--r--drivers/net/loopback.c9
-rw-r--r--drivers/net/macb.c2
-rw-r--r--drivers/net/macvlan.c127
-rw-r--r--drivers/net/macvtap.c26
-rw-r--r--drivers/net/mii.c14
-rw-r--r--drivers/net/mlx4/alloc.c3
-rw-r--r--drivers/net/mlx4/catas.c6
-rw-r--r--drivers/net/mlx4/en_main.c3
-rw-r--r--drivers/net/mlx4/en_netdev.c3
-rw-r--r--drivers/net/mlx4/fw.c4
-rw-r--r--drivers/net/mlx4/main.c17
-rw-r--r--drivers/net/mlx4/mcg.c23
-rw-r--r--drivers/net/mv643xx_eth.c83
-rw-r--r--drivers/net/myri10ge/myri10ge.c12
-rw-r--r--drivers/net/myri_sbus.c8
-rw-r--r--drivers/net/ne-h8300.c12
-rw-r--r--drivers/net/netconsole.c8
-rw-r--r--drivers/net/netxen/netxen_nic.h11
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c15
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c84
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c21
-rw-r--r--drivers/net/netxen/netxen_nic_init.c7
-rw-r--r--drivers/net/netxen/netxen_nic_main.c15
-rw-r--r--drivers/net/ni52.c4
-rw-r--r--drivers/net/ni65.c4
-rw-r--r--drivers/net/niu.c76
-rw-r--r--drivers/net/ns83820.c5
-rw-r--r--drivers/net/pch_gbe/pch_gbe.h2
-rw-r--r--drivers/net/pch_gbe/pch_gbe_ethtool.c19
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c132
-rw-r--r--drivers/net/pcmcia/axnet_cs.c25
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c1
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c2
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c2
-rw-r--r--drivers/net/phy/Kconfig3
-rw-r--r--drivers/net/phy/icplus.c59
-rw-r--r--drivers/net/phy/mdio-gpio.c9
-rw-r--r--drivers/net/phy/micrel.c24
-rw-r--r--drivers/net/phy/phy.c12
-rw-r--r--drivers/net/ppp_async.c10
-rw-r--r--drivers/net/ppp_deflate.c9
-rw-r--r--drivers/net/ppp_generic.c178
-rw-r--r--drivers/net/ppp_mppe.c7
-rw-r--r--drivers/net/ppp_synctty.c3
-rw-r--r--drivers/net/pppoe.c2
-rw-r--r--drivers/net/pptp.c50
-rw-r--r--drivers/net/pxa168_eth.c9
-rw-r--r--drivers/net/qla3xxx.c20
-rw-r--r--drivers/net/qlcnic/qlcnic.h68
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c28
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c159
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h27
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c91
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c186
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c114
-rw-r--r--drivers/net/qlge/qlge.h5
-rw-r--r--drivers/net/qlge/qlge_dbg.c21
-rw-r--r--drivers/net/qlge/qlge_ethtool.c19
-rw-r--r--drivers/net/qlge/qlge_main.c22
-rw-r--r--drivers/net/qlge/qlge_mpi.c14
-rw-r--r--drivers/net/r6040.c117
-rw-r--r--drivers/net/r8169.c2127
-rw-r--r--drivers/net/s2io.c81
-rw-r--r--drivers/net/s2io.h9
-rw-r--r--drivers/net/sc92031.c3
-rw-r--r--drivers/net/sfc/efx.c173
-rw-r--r--drivers/net/sfc/efx.h26
-rw-r--r--drivers/net/sfc/ethtool.c227
-rw-r--r--drivers/net/sfc/falcon.c230
-rw-r--r--drivers/net/sfc/falcon_boards.c122
-rw-r--r--drivers/net/sfc/falcon_xmac.c16
-rw-r--r--drivers/net/sfc/filter.c372
-rw-r--r--drivers/net/sfc/filter.h149
-rw-r--r--drivers/net/sfc/io.h160
-rw-r--r--drivers/net/sfc/mcdi.c35
-rw-r--r--drivers/net/sfc/mcdi.h4
-rw-r--r--drivers/net/sfc/mcdi_mac.c2
-rw-r--r--drivers/net/sfc/mcdi_pcol.h2
-rw-r--r--drivers/net/sfc/mcdi_phy.c3
-rw-r--r--drivers/net/sfc/mdio_10g.c35
-rw-r--r--drivers/net/sfc/mdio_10g.h5
-rw-r--r--drivers/net/sfc/mtd.c100
-rw-r--r--drivers/net/sfc/net_driver.h182
-rw-r--r--drivers/net/sfc/nic.c169
-rw-r--r--drivers/net/sfc/nic.h21
-rw-r--r--drivers/net/sfc/phy.h2
-rw-r--r--drivers/net/sfc/qt202x_phy.c8
-rw-r--r--drivers/net/sfc/regs.h8
-rw-r--r--drivers/net/sfc/rx.c172
-rw-r--r--drivers/net/sfc/selftest.c4
-rw-r--r--drivers/net/sfc/selftest.h2
-rw-r--r--drivers/net/sfc/siena.c34
-rw-r--r--drivers/net/sfc/spi.h7
-rw-r--r--drivers/net/sfc/tenxpress.c6
-rw-r--r--drivers/net/sfc/tx.c214
-rw-r--r--drivers/net/sfc/txc43128_phy.c4
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sh_eth.c744
-rw-r--r--drivers/net/sh_eth.h655
-rw-r--r--drivers/net/sis190.c3
-rw-r--r--drivers/net/sis900.c7
-rw-r--r--drivers/net/skfp/Makefile2
-rw-r--r--drivers/net/skfp/skfddi.c2
-rw-r--r--drivers/net/skfp/smt.c4
-rw-r--r--drivers/net/skge.c57
-rw-r--r--drivers/net/sky2.c302
-rw-r--r--drivers/net/sky2.h48
-rw-r--r--drivers/net/smc-ultra.c8
-rw-r--r--drivers/net/smc91x.c13
-rw-r--r--drivers/net/smc91x.h62
-rw-r--r--drivers/net/smsc911x.c9
-rw-r--r--drivers/net/starfire.c2
-rw-r--r--drivers/net/stmmac/stmmac.h40
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c16
-rw-r--r--drivers/net/stmmac/stmmac_main.c275
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c8
-rw-r--r--drivers/net/sunbmac.c9
-rw-r--r--drivers/net/sundance.c27
-rw-r--r--drivers/net/sungem.c72
-rw-r--r--drivers/net/sungem.h3
-rw-r--r--drivers/net/sunhme.c16
-rw-r--r--drivers/net/sunlance.c18
-rw-r--r--drivers/net/sunqe.c8
-rw-r--r--drivers/net/tehuti.c10
-rw-r--r--drivers/net/tg3.c778
-rw-r--r--drivers/net/tg3.h56
-rw-r--r--drivers/net/tile/tilepro.c971
-rw-r--r--drivers/net/tlan.c3840
-rw-r--r--drivers/net/tlan.h192
-rw-r--r--drivers/net/tokenring/ibmtr.c5
-rw-r--r--drivers/net/tulip/de2104x.c18
-rw-r--r--drivers/net/tulip/dmfe.c6
-rw-r--r--drivers/net/tulip/tulip_core.c15
-rw-r--r--drivers/net/tun.c89
-rw-r--r--drivers/net/typhoon.c4
-rw-r--r--drivers/net/ucc_geth.c11
-rw-r--r--drivers/net/usb/Kconfig19
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix.c4
-rw-r--r--drivers/net/usb/cdc-phonet.c10
-rw-r--r--drivers/net/usb/cdc_ether.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c1287
-rw-r--r--drivers/net/usb/dm9601.c4
-rw-r--r--drivers/net/usb/hso.c66
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/kaweth.c1
-rw-r--r--drivers/net/usb/mcs7830.c14
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/sierra_net.c5
-rw-r--r--drivers/net/usb/smsc95xx.c7
-rw-r--r--drivers/net/usb/usbnet.c52
-rw-r--r--drivers/net/veth.c16
-rw-r--r--drivers/net/via-rhine.c326
-rw-r--r--drivers/net/via-velocity.c11
-rw-r--r--drivers/net/via-velocity.h8
-rw-r--r--drivers/net/virtio_net.c29
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c1054
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c406
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h74
-rw-r--r--drivers/net/vxge/vxge-config.c3620
-rw-r--r--drivers/net/vxge/vxge-config.h179
-rw-r--r--drivers/net/vxge/vxge-ethtool.c112
-rw-r--r--drivers/net/vxge/vxge-main.c1297
-rw-r--r--drivers/net/vxge/vxge-main.h111
-rw-r--r--drivers/net/vxge/vxge-reg.h33
-rw-r--r--drivers/net/vxge/vxge-traffic.c891
-rw-r--r--drivers/net/vxge/vxge-traffic.h65
-rw-r--r--drivers/net/vxge/vxge-version.h33
-rw-r--r--drivers/net/wan/dscc4.c8
-rw-r--r--drivers/net/wan/hd64572.c5
-rw-r--r--drivers/net/wan/lmc/Makefile2
-rw-r--r--drivers/net/wan/pc300_tty.c9
-rw-r--r--drivers/net/wd.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c98
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h23
-rw-r--r--drivers/net/wimax/i2400m/sdio.c1
-rw-r--r--drivers/net/wimax/i2400m/usb.c3
-rw-r--r--drivers/net/wireless/Kconfig2
-rw-r--r--drivers/net/wireless/Makefile4
-rw-r--r--drivers/net/wireless/adm8211.c4
-rw-r--r--drivers/net/wireless/airo.c20
-rw-r--r--drivers/net/wireless/at76c50x-usb.c10
-rw-r--r--drivers/net/wireless/at76c50x-usb.h2
-rw-r--r--drivers/net/wireless/ath/ar9170/Kconfig4
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h2
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c8
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath.h113
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig29
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c220
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c40
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h328
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c35
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c1750
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h24
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c54
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c54
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h12
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c24
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h18
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c180
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c149
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h30
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c409
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c11
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c824
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c328
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c569
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c787
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c712
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h52
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c1221
-rw-r--r--drivers/net/wireless/ath/ath5k/rfbuffer.h1169
-rw-r--r--drivers/net/wireless/ath/ath5k/sysfs.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/trace.h107
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c107
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c305
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c238
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c144
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h106
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c605
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c3225
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h82
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c379
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c123
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c166
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c369
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h80
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h2086
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h219
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c205
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c68
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c553
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h36
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c257
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h92
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c339
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c334
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c412
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c195
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c319
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h101
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c184
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c343
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c169
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1059
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c136
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h22
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c375
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h90
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c228
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c279
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1262
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c128
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c99
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c283
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h58
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c719
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c1087
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h8
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c18
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h14
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h28
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h32
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c56
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c31
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c27
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.h24
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c90
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c60
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h8
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h20
-rw-r--r--drivers/net/wireless/ath/debug.c15
-rw-r--r--drivers/net/wireless/ath/debug.h90
-rw-r--r--drivers/net/wireless/ath/key.c42
-rw-r--r--drivers/net/wireless/ath/main.c20
-rw-r--r--drivers/net/wireless/ath/regd.c15
-rw-r--r--drivers/net/wireless/ath/regd.h1
-rw-r--r--drivers/net/wireless/atmel.c6
-rw-r--r--drivers/net/wireless/b43/Kconfig13
-rw-r--r--drivers/net/wireless/b43/Makefile8
-rw-r--r--drivers/net/wireless/b43/b43.h21
-rw-r--r--drivers/net/wireless/b43/dma.c5
-rw-r--r--drivers/net/wireless/b43/main.c74
-rw-r--r--drivers/net/wireless/b43/phy_common.c22
-rw-r--r--drivers/net/wireless/b43/phy_common.h8
-rw-r--r--drivers/net/wireless/b43/phy_g.c2
-rw-r--r--drivers/net/wireless/b43/phy_n.c797
-rw-r--r--drivers/net/wireless/b43/phy_n.h2
-rw-r--r--drivers/net/wireless/b43/radio_2055.c502
-rw-r--r--drivers/net/wireless/b43/radio_2056.c9062
-rw-r--r--drivers/net/wireless/b43/radio_2056.h1084
-rw-r--r--drivers/net/wireless/b43/rfkill.c19
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c1437
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h52
-rw-r--r--drivers/net/wireless/b43/xmit.c75
-rw-r--r--drivers/net/wireless/b43/xmit.h6
-rw-r--r--drivers/net/wireless/b43legacy/main.c52
-rw-r--r--drivers/net/wireless/b43legacy/phy.c2
-rw-r--r--drivers/net/wireless/b43legacy/rfkill.c2
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_config.h4
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c15
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c72
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c203
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.h4
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig116
-rw-r--r--drivers/net/wireless/iwlegacy/Makefile25
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c)11
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h)4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-fh.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-fh.h)5
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-hw.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-hw.h)9
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-led.c)31
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945-led.h)2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945-rs.c)41
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c (renamed from drivers/net/wireless/iwlwifi/iwl-3945.c)357
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.h (renamed from drivers/net/wireless/iwlwifi/iwl-3945.h)20
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-calib.c967
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-calib.h75
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c774
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h59
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c154
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-hw.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965-hw.h)26
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.c74
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.h33
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c1260
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c2870
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rx.c)177
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-sta.c721
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c1369
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-ucode.c166
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c (renamed from drivers/net/wireless/iwlwifi/iwl-4965.c)1027
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.h282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-commands.h3405
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c2674
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h646
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-csr.h422
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debug.h198
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debugfs.c1467
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h1426
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.c45
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.h270
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c561
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.h344
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-fh.h513
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c271
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-helpers.h181
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-io.h545
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.c188
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.h56
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-legacy-rs.h456
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.c165
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.h55
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-prph.h523
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-rx.c302
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-scan.c625
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-spectrum.h92
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.c816
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.h148
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c660
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c (renamed from drivers/net/wireless/iwlwifi/iwl3945-base.c)636
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c3632
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig129
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c101
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c560
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c146
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c510
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c239
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c376
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ict.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c669
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c67
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c691
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c181
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c77
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1150
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h106
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h156
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c1119
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h162
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c159
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h113
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h117
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h51
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c201
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c192
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c925
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c68
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c75
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c142
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c6
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c7
-rw-r--r--drivers/net/wireless/libertas/cfg.c15
-rw-r--r--drivers/net/wireless/libertas/cmd.c18
-rw-r--r--drivers/net/wireless/libertas/dev.h3
-rw-r--r--drivers/net/wireless/libertas/host.h2
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c1
-rw-r--r--drivers/net/wireless/libertas/if_spi.c369
-rw-r--r--drivers/net/wireless/libertas/if_usb.c13
-rw-r--r--drivers/net/wireless/libertas/main.c82
-rw-r--r--drivers/net/wireless/libertas/mesh.c11
-rw-r--r--drivers/net/wireless/libertas/rx.c4
-rw-r--r--drivers/net/wireless/libertas_tf/main.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c13
-rw-r--r--drivers/net/wireless/mwl8k.c1184
-rw-r--r--drivers/net/wireless/orinoco/main.c18
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c14
-rw-r--r--drivers/net/wireless/orinoco/scan.c13
-rw-r--r--drivers/net/wireless/orinoco/scan.h1
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c14
-rw-r--r--drivers/net/wireless/orinoco/wext.c12
-rw-r--r--drivers/net/wireless/p54/Kconfig5
-rw-r--r--drivers/net/wireless/p54/eeprom.c211
-rw-r--r--drivers/net/wireless/p54/eeprom.h7
-rw-r--r--drivers/net/wireless/p54/fwio.c21
-rw-r--r--drivers/net/wireless/p54/lmac.h3
-rw-r--r--drivers/net/wireless/p54/main.c61
-rw-r--r--drivers/net/wireless/p54/p54.h7
-rw-r--r--drivers/net/wireless/p54/p54pci.c14
-rw-r--r--drivers/net/wireless/p54/p54spi_eeprom.h9
-rw-r--r--drivers/net/wireless/p54/p54usb.c9
-rw-r--r--drivers/net/wireless/p54/txrx.c21
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c6
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c2
-rw-r--r--drivers/net/wireless/ray_cs.c18
-rw-r--r--drivers/net/wireless/rndis_wlan.c405
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig84
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c323
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c325
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c115
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h357
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c1130
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c460
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c373
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h181
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c14
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c170
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c31
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h36
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c136
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c22
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c444
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h105
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c305
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c420
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h64
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c181
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h38
-rw-r--r--drivers/net/wireless/rtl818x/Makefile9
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/Makefile5
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c (renamed from drivers/net/wireless/rtl818x/rtl8180_dev.c)18
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/grf5101.c (renamed from drivers/net/wireless/rtl818x/rtl8180_grf5101.c)2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/grf5101.h (renamed from drivers/net/wireless/rtl818x/rtl8180_grf5101.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/max2820.c (renamed from drivers/net/wireless/rtl818x/rtl8180_max2820.c)2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/max2820.h (renamed from drivers/net/wireless/rtl818x/rtl8180_max2820.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8180.h (renamed from drivers/net/wireless/rtl818x/rtl8180.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225.c (renamed from drivers/net/wireless/rtl818x/rtl8180_rtl8225.c)2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225.h (renamed from drivers/net/wireless/rtl818x/rtl8180_rtl8225.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/sa2400.c (renamed from drivers/net/wireless/rtl818x/rtl8180_sa2400.c)2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/sa2400.h (renamed from drivers/net/wireless/rtl818x/rtl8180_sa2400.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/Makefile5
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c (renamed from drivers/net/wireless/rtl818x/rtl8187_dev.c)179
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/leds.c (renamed from drivers/net/wireless/rtl818x/rtl8187_leds.c)2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/leds.h (renamed from drivers/net/wireless/rtl818x/rtl8187_leds.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rfkill.c (renamed from drivers/net/wireless/rtl818x/rtl8187_rfkill.c)2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rfkill.h (renamed from drivers/net/wireless/rtl818x/rtl8187_rfkill.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8187.h (renamed from drivers/net/wireless/rtl818x/rtl8187.h)4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8225.c (renamed from drivers/net/wireless/rtl818x/rtl8187_rtl8225.c)24
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8225.h (renamed from drivers/net/wireless/rtl818x/rtl8187_rtl8225.h)0
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig33
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile26
-rw-r--r--drivers/net/wireless/rtlwifi/base.c953
-rw-r--r--drivers/net/wireless/rtlwifi/base.h97
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c291
-rw-r--r--drivers/net/wireless/rtlwifi/cam.h53
-rw-r--r--drivers/net/wireless/rtlwifi/core.c1033
-rw-r--r--drivers/net/wireless/rtlwifi/core.h42
-rw-r--r--drivers/net/wireless/rtlwifi/debug.c50
-rw-r--r--drivers/net/wireless/rtlwifi/debug.h213
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c1171
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.h121
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c1880
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h302
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c493
-rw-r--r--drivers/net/wireless/rtlwifi/ps.h43
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c329
-rw-r--r--drivers/net/wireless/rtlwifi/rc.h40
-rw-r--r--drivers/net/wireless/rtlwifi/regd.c400
-rw-r--r--drivers/net/wireless/rtlwifi/regd.h61
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/Makefile9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c1398
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h204
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c774
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h98
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/main.c39
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c2042
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h246
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/Makefile13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h401
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.c113
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.h197
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c2148
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.h68
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/led.c144
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/led.h41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c627
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.h254
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h2130
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.c523
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.h47
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c292
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.h51
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/table.c1224
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/table.h58
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c1066
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h740
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/Makefile14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/def.h62
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.c113
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.h32
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c2504
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.h116
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/led.c142
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/led.h37
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c1144
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.h180
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c607
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.h36
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/reg.h30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c493
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.h47
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c336
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.h53
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/table.c1888
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/table.h71
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c687
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.h430
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c1035
-rw-r--r--drivers/net/wireless/rtlwifi/usb.h164
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h1858
-rw-r--r--drivers/net/wireless/wl1251/acx.c53
-rw-r--r--drivers/net/wireless/wl1251/acx.h76
-rw-r--r--drivers/net/wireless/wl1251/boot.c1
-rw-r--r--drivers/net/wireless/wl1251/event.c18
-rw-r--r--drivers/net/wireless/wl1251/main.c40
-rw-r--r--drivers/net/wireless/wl1251/ps.c52
-rw-r--r--drivers/net/wireless/wl1251/rx.c51
-rw-r--r--drivers/net/wireless/wl1251/sdio.c103
-rw-r--r--drivers/net/wireless/wl1251/spi.c9
-rw-r--r--drivers/net/wireless/wl1251/tx.c74
-rw-r--r--drivers/net/wireless/wl1251/wl1251.h10
-rw-r--r--drivers/net/wireless/wl1251/wl12xx_80211.h3
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig64
-rw-r--r--drivers/net/wireless/wl12xx/Makefile22
-rw-r--r--drivers/net/wireless/wl12xx/acx.c (renamed from drivers/net/wireless/wl12xx/wl1271_acx.c)358
-rw-r--r--drivers/net/wireless/wl12xx/acx.h (renamed from drivers/net/wireless/wl12xx/wl1271_acx.h)253
-rw-r--r--drivers/net/wireless/wl12xx/boot.c (renamed from drivers/net/wireless/wl12xx/wl1271_boot.c)76
-rw-r--r--drivers/net/wireless/wl12xx/boot.h (renamed from drivers/net/wireless/wl12xx/wl1271_boot.h)8
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c (renamed from drivers/net/wireless/wl12xx/wl1271_cmd.c)392
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h (renamed from drivers/net/wireless/wl12xx/wl1271_cmd.h)219
-rw-r--r--drivers/net/wireless/wl12xx/conf.h (renamed from drivers/net/wireless/wl12xx/wl1271_conf.h)129
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c469
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.h (renamed from drivers/net/wireless/wl12xx/wl1271_debugfs.h)6
-rw-r--r--drivers/net/wireless/wl12xx/event.c (renamed from drivers/net/wireless/wl12xx/wl1271_event.c)35
-rw-r--r--drivers/net/wireless/wl12xx/event.h (renamed from drivers/net/wireless/wl12xx/wl1271_event.h)14
-rw-r--r--drivers/net/wireless/wl12xx/ini.h (renamed from drivers/net/wireless/wl12xx/wl1271_ini.h)4
-rw-r--r--drivers/net/wireless/wl12xx/init.c (renamed from drivers/net/wireless/wl12xx/wl1271_init.c)423
-rw-r--r--drivers/net/wireless/wl12xx/init.h (renamed from drivers/net/wireless/wl12xx/wl1271_init.h)8
-rw-r--r--drivers/net/wireless/wl12xx/io.c (renamed from drivers/net/wireless/wl12xx/wl1271_io.c)5
-rw-r--r--drivers/net/wireless/wl12xx/io.h (renamed from drivers/net/wireless/wl12xx/wl1271_io.h)7
-rw-r--r--drivers/net/wireless/wl12xx/main.c (renamed from drivers/net/wireless/wl12xx/wl1271_main.c)1790
-rw-r--r--drivers/net/wireless/wl12xx/ps.c (renamed from drivers/net/wireless/wl12xx/wl1271_ps.c)96
-rw-r--r--drivers/net/wireless/wl12xx/ps.h (renamed from drivers/net/wireless/wl12xx/wl1271_ps.h)12
-rw-r--r--drivers/net/wireless/wl12xx/reg.h (renamed from drivers/net/wireless/wl12xx/wl1271_reg.h)0
-rw-r--r--drivers/net/wireless/wl12xx/rx.c (renamed from drivers/net/wireless/wl12xx/wl1271_rx.c)75
-rw-r--r--drivers/net/wireless/wl12xx/rx.h (renamed from drivers/net/wireless/wl12xx/wl1271_rx.h)23
-rw-r--r--drivers/net/wireless/wl12xx/scan.c (renamed from drivers/net/wireless/wl12xx/wl1271_scan.c)33
-rw-r--r--drivers/net/wireless/wl12xx/scan.h (renamed from drivers/net/wireless/wl12xx/wl1271_scan.h)6
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c (renamed from drivers/net/wireless/wl12xx/wl1271_sdio.c)47
-rw-r--r--drivers/net/wireless/wl12xx/sdio_test.c520
-rw-r--r--drivers/net/wireless/wl12xx/spi.c (renamed from drivers/net/wireless/wl12xx/wl1271_spi.c)28
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c (renamed from drivers/net/wireless/wl12xx/wl1271_testmode.c)18
-rw-r--r--drivers/net/wireless/wl12xx/testmode.h (renamed from drivers/net/wireless/wl12xx/wl1271_testmode.h)4
-rw-r--r--drivers/net/wireless/wl12xx/tx.c762
-rw-r--r--drivers/net/wireless/wl12xx/tx.h (renamed from drivers/net/wireless/wl12xx/wl1271_tx.h)22
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c583
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c437
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h (renamed from drivers/net/wireless/wl12xx/wl1271.h)361
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h25
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zd1201.c3
-rw-r--r--drivers/net/wireless/zd1211rw/Makefile4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c172
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h5
-rw-r--r--drivers/net/wireless/zd1211rw/zd_def.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c456
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h24
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c599
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h37
-rw-r--r--drivers/net/xen-netback/Makefile3
-rw-r--r--drivers/net/xen-netback/common.h161
-rw-r--r--drivers/net/xen-netback/interface.c424
-rw-r--r--drivers/net/xen-netback/netback.c1745
-rw-r--r--drivers/net/xen-netback/xenbus.c490
-rw-r--r--drivers/net/xen-netfront.c122
-rw-r--r--drivers/net/xilinx_emaclite.c46
-rw-r--r--drivers/net/yellowfin.c4
-rw-r--r--drivers/net/znet.c2
950 files changed, 162816 insertions, 45158 deletions
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 1776ab61b05f..9e1c03eb97ae 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -158,8 +158,8 @@ static int mem_start;
struct net_device * __init el1_probe(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- static unsigned ports[] = { 0x280, 0x300, 0};
- unsigned *port;
+ static const unsigned ports[] = { 0x280, 0x300, 0};
+ const unsigned *port;
int err = 0;
if (!dev)
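The probe tables in this hunk and the 3c503/3c507 hunks below are now const-qualified, so both the table and the pointer that walks it are read-only. A minimal standalone sketch of that idiom (hypothetical code, not taken from this patch):

#include <stdio.h>

/* Hypothetical sketch: a const-qualified probe table walked with a
 * matching const pointer, as the 3c501/3c503/3c507 changes now do.
 * 'const' lets the compiler place the table in read-only data and
 * rejects accidental writes through 'port'. */
static const unsigned ports[] = { 0x280, 0x300, 0 };

int main(void)
{
	const unsigned *port;

	for (port = ports; *port; port++)
		printf("would probe I/O base 0x%x\n", *port);
	return 0;
}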
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 4777a1cbcd8d..d84f6e8903a5 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -392,8 +392,8 @@ el2_open(struct net_device *dev)
int retval;
if (dev->irq < 2) {
- int irqlist[] = {5, 9, 3, 4, 0};
- int *irqp = irqlist;
+ static const int irqlist[] = {5, 9, 3, 4, 0};
+ const int *irqp = irqlist;
outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
do {
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index ea9b7a098c9b..1e945551c144 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -201,7 +201,7 @@ struct net_local {
#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
#define RX_BUF_END (dev->mem_end - dev->mem_start)
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
/*
That's it: only 86 bytes to set up the beast, including every extra
@@ -311,8 +311,8 @@ static int mem_start;
struct net_device * __init el16_probe(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- static unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0};
- unsigned *port;
+ static const unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0};
+ const unsigned *port;
int err = -ENODEV;
if (!dev)
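The TX_TIMEOUT value here (and the matching changes in 3c515.c and 82596.c below) feeds dev->watchdog_timeo, which the networking core counts in jiffies. A literal 5 therefore meant five timer ticks — 50 ms at HZ=100 but only 5 ms at HZ=1000 — while HZ/20 is 50 ms at any tick rate. A small illustration of the arithmetic (hypothetical HZ values, not part of the patch):

#include <stdio.h>

/* Hypothetical demo: how a jiffy-count timeout scales with the tick
 * rate.  In the kernel HZ comes from CONFIG_HZ; here it is a plain
 * parameter so the old and new macros can be compared in milliseconds. */
static void show(unsigned hz)
{
	unsigned old_timeout = 5;        /* old: fixed jiffy count */
	unsigned new_timeout = hz / 20;  /* new: fixed 50 ms */

	printf("HZ=%u: old=%u ms, new=%u ms\n",
	       hz, old_timeout * 1000 / hz, new_timeout * 1000 / hz);
}

int main(void)
{
	show(100);
	show(1000);
	return 0;
}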
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index cdf7226a7c43..d2bb4b254c57 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -98,7 +98,7 @@ static int rx_nocopy, rx_copy, queued_packet;
#define WAIT_TX_AVAIL 200
/* Operational parameter that usually are not changed. */
-#define TX_TIMEOUT 40 /* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((4*HZ)/10) /* Time in jiffies before concluding Tx hung */
/* The size here is somewhat misleading: the Corkscrew also uses the ISA
aliased registers at <base>+0x400.
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 013b7c396663..8c094bae8bf3 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -317,13 +317,13 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
u8 POS;
u32 base;
struct mc32_local *lp = netdev_priv(dev);
- static u16 mca_io_bases[]={
+ static const u16 mca_io_bases[] = {
0x7280,0x7290,
0x7680,0x7690,
0x7A80,0x7A90,
0x7E80,0x7E90
};
- static u32 mca_mem_bases[]={
+ static const u32 mca_mem_bases[] = {
0x00C0000,
0x00C4000,
0x00C8000,
@@ -333,7 +333,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
0x00D8000,
0x00DC000
};
- static char *failures[]={
+ static const char * const failures[] = {
"Processor instruction",
"Processor data bus",
"Processor data bus",
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index f5166dccd8df..98517a373473 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1092,10 +1092,11 @@ err_out:
static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
+ struct rtl8139_private *tp = netdev_priv(dev);
assert (dev != NULL);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&tp->thread);
unregister_netdev (dev);
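The rtl8139 remove path stops flushing the shared global workqueue and instead cancels its own delayed work item, which also stops the work from re-arming itself after the flush. A rough sketch of that pattern with the generic workqueue API (names are illustrative, not the actual 8139too code):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_priv {
	struct delayed_work thread;	/* periodic housekeeping work */
};

static void my_work_fn(struct work_struct *work)
{
	struct my_priv *tp = container_of(work, struct my_priv, thread.work);

	/* ... do the periodic work, then re-arm ... */
	schedule_delayed_work(&tp->thread, HZ);
}

static void my_start(struct my_priv *tp)
{
	INIT_DELAYED_WORK(&tp->thread, my_work_fn);
	schedule_delayed_work(&tp->thread, HZ);
}

static void my_stop(struct my_priv *tp)
{
	/* Waits for a running instance and blocks re-arming, unlike a
	 * plain flush of the shared workqueue. */
	cancel_delayed_work_sync(&tp->thread);
}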
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index e2c9c5b949f9..be1f1970c842 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -191,7 +191,7 @@ enum commands {
#define RX_SUSPEND 0x0030
#define RX_ABORT 0x0040
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
struct i596_reg {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4f1755bddf6b..dc280bc8eba2 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -238,8 +238,8 @@ source "drivers/net/arm/Kconfig"
config AX88796
tristate "ASIX AX88796 NE2000 clone support"
depends on ARM || MIPS || SUPERH
- select CRC32
- select MII
+ select PHYLIB
+ select MDIO_BITBANG
help
AX88796 driver, using platform bus to provide
chip detection and resources
@@ -1498,7 +1498,7 @@ config FORCEDETH
config CS89x0
tristate "CS89x0 support"
depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
- || ARCH_IXDP2X01 || MACH_MX31ADS)
+ || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440)
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the
@@ -1512,7 +1512,7 @@ config CS89x0
config CS89x0_NONISA_IRQ
def_bool y
depends on CS89x0 != n
- depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS
+ depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
config TC35815
tristate "TOSHIBA TC35815 Ethernet support"
@@ -1533,7 +1533,7 @@ config E100
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
- to identify the adapter.
+ to identify the adapter.
For the latest Intel PRO/100 network driver for Linux, see:
@@ -1786,17 +1786,17 @@ config KS8842
tristate "Micrel KSZ8841/42 with generic bus interface"
depends on HAS_IOMEM && DMA_ENGINE
help
- This platform driver is for KSZ8841(1-port) / KS8842(2-port)
- ethernet switch chip (managed, VLAN, QoS) from Micrel or
- Timberdale(FPGA).
+ This platform driver is for KSZ8841(1-port) / KS8842(2-port)
+ ethernet switch chip (managed, VLAN, QoS) from Micrel or
+ Timberdale(FPGA).
config KS8851
- tristate "Micrel KS8851 SPI"
- depends on SPI
- select MII
+ tristate "Micrel KS8851 SPI"
+ depends on SPI
+ select MII
select CRC32
- help
- SPI driver for Micrel KS8851 SPI attached network chip.
+ help
+ SPI driver for Micrel KS8851 SPI attached network chip.
config KS8851_MLL
tristate "Micrel KS8851 MLL"
@@ -1944,19 +1944,13 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
- MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
+ IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC
+ default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM
select PHYLIB
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
-config FEC2
- bool "Second FEC ethernet controller (on some ColdFire CPUs)"
- depends on FEC
- help
- Say Y here if you want to use the second built-in 10/100 Fast
- ethernet controller on some Motorola ColdFire processors.
-
config FEC_MPC52xx
tristate "MPC52xx FEC driver"
depends on PPC_MPC52xx && PPC_BESTCOMM
@@ -2014,6 +2008,15 @@ config BCM63XX_ENET
This driver supports the ethernet MACs in the Broadcom 63xx
MIPS chipset family (BCM63XX).
+config FTMAC100
+ tristate "Faraday FTMAC100 10/100 Ethernet support"
+ depends on ARM
+ select MII
+ help
+ This driver supports the FTMAC100 10/100 Ethernet controller
+ from Faraday. It is used on Faraday A320, Andes AG101 and some
+ other ARM/NDS32 SoC's.
+
source "drivers/net/fs_enet/Kconfig"
source "drivers/net/octeon/Kconfig"
@@ -2105,7 +2108,9 @@ config E1000
config E1000E
tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
+ select CRC32
depends on PCI && (!SPARC32 || BROKEN)
+ select CRC32
---help---
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -2133,25 +2138,25 @@ config IP1000
will be called ipg. This is recommended.
config IGB
- tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
- depends on PCI
- ---help---
- This driver supports Intel(R) 82575/82576 gigabit ethernet family of
- adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
+ tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) 82575/82576 gigabit ethernet family of
+ adapters. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
- For general information and support, go to the Intel support
- website at:
+ For general information and support, go to the Intel support
+ website at:
- <http://support.intel.com>
+ <http://support.intel.com>
- More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.txt>.
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000.txt>.
- To compile this driver as a module, choose M here. The module
- will be called igb.
+ To compile this driver as a module, choose M here. The module
+ will be called igb.
config IGB_DCA
bool "Direct Cache Access (DCA) Support"
@@ -2163,25 +2168,25 @@ config IGB_DCA
is used, with the intent of lessening the impact of cache misses.
config IGBVF
- tristate "Intel(R) 82576 Virtual Function Ethernet support"
- depends on PCI
- ---help---
- This driver supports Intel(R) 82576 virtual functions. For more
- information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
+ tristate "Intel(R) 82576 Virtual Function Ethernet support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) 82576 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
- For general information and support, go to the Intel support
- website at:
+ For general information and support, go to the Intel support
+ website at:
- <http://support.intel.com>
+ <http://support.intel.com>
- More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.txt>.
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/e1000.txt>.
- To compile this driver as a module, choose M here. The module
- will be called igbvf.
+ To compile this driver as a module, choose M here. The module
+ will be called igbvf.
source "drivers/net/ixp2000/Kconfig"
@@ -2233,6 +2238,7 @@ config YELLOWFIN
config R8169
tristate "Realtek 8169 gigabit ethernet support"
depends on PCI
+ select FW_LOADER
select CRC32
select MII
---help---
@@ -2241,15 +2247,6 @@ config R8169
To compile this driver as a module, choose M here: the module
will be called r8169. This is recommended.
-config R8169_VLAN
- bool "VLAN support"
- depends on R8169 && VLAN_8021Q
- ---help---
- Say Y here for the r8169 driver to support the functions required
- by the kernel 802.1Q code.
-
- If in doubt, say Y.
-
config SB1250_MAC
tristate "SB1250 Gigabit Ethernet support"
depends on SIBYTE_SB1xxx_SOC
@@ -2300,14 +2297,14 @@ config SKGE
will be called skge. This is recommended.
config SKGE_DEBUG
- bool "Debugging interface"
- depends on SKGE && DEBUG_FS
- help
- This option adds the ability to dump driver state for debugging.
- The file /sys/kernel/debug/skge/ethX displays the state of the internal
- transmit and receive rings.
+ bool "Debugging interface"
+ depends on SKGE && DEBUG_FS
+ help
+ This option adds the ability to dump driver state for debugging.
+ The file /sys/kernel/debug/skge/ethX displays the state of the internal
+ transmit and receive rings.
- If unsure, say N.
+ If unsure, say N.
config SKY2
tristate "SysKonnect Yukon2 support"
@@ -2326,14 +2323,14 @@ config SKY2
will be called sky2. This is recommended.
config SKY2_DEBUG
- bool "Debugging interface"
- depends on SKY2 && DEBUG_FS
- help
- This option adds the ability to dump driver state for debugging.
- The file /sys/kernel/debug/sky2/ethX displays the state of the internal
- transmit and receive rings.
+ bool "Debugging interface"
+ depends on SKY2 && DEBUG_FS
+ help
+ This option adds the ability to dump driver state for debugging.
+ The file /sys/kernel/debug/sky2/ethX displays the state of the internal
+ transmit and receive rings.
- If unsure, say N.
+ If unsure, say N.
config VIA_VELOCITY
tristate "VIA Velocity support"
@@ -2389,12 +2386,12 @@ config SPIDER_NET
Cell Processor-Based Blades from IBM.
config TSI108_ETH
- tristate "Tundra TSI108 gigabit Ethernet support"
- depends on TSI108_BRIDGE
- help
- This driver supports Tundra TSI108 gigabit Ethernet ports.
- To compile this driver as a module, choose M here: the module
- will be called tsi108_eth.
+ tristate "Tundra TSI108 gigabit Ethernet support"
+ depends on TSI108_BRIDGE
+ help
+ This driver supports Tundra TSI108 gigabit Ethernet ports.
+ To compile this driver as a module, choose M here: the module
+ will be called tsi108_eth.
config GELIC_NET
tristate "PS3 Gigabit Ethernet driver"
@@ -2573,41 +2570,36 @@ config MDIO
tristate
config CHELSIO_T1
- tristate "Chelsio 10Gb Ethernet support"
- depends on PCI
+ tristate "Chelsio 10Gb Ethernet support"
+ depends on PCI
select CRC32
select MDIO
- help
- This driver supports Chelsio gigabit and 10-gigabit
- Ethernet cards. More information about adapter features and
+ help
+ This driver supports Chelsio gigabit and 10-gigabit
+ Ethernet cards. More information about adapter features and
performance tuning is in <file:Documentation/networking/cxgb.txt>.
- For general information about Chelsio and our products, visit
- our website at <http://www.chelsio.com>.
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
- For customer support, please visit our customer support page at
- <http://www.chelsio.com/support.html>.
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
- Please send feedback to <linux-bugs@chelsio.com>.
+ Please send feedback to <linux-bugs@chelsio.com>.
- To compile this driver as a module, choose M here: the module
- will be called cxgb.
+ To compile this driver as a module, choose M here: the module
+ will be called cxgb.
config CHELSIO_T1_1G
- bool "Chelsio gigabit Ethernet support"
- depends on CHELSIO_T1
- help
- Enables support for Chelsio's gigabit Ethernet PCI cards. If you
- are using only 10G cards say 'N' here.
-
-config CHELSIO_T3_DEPENDS
- tristate
- depends on PCI && INET
- default y
+ bool "Chelsio gigabit Ethernet support"
+ depends on CHELSIO_T1
+ help
+ Enables support for Chelsio's gigabit Ethernet PCI cards. If you
+ are using only 10G cards say 'N' here.
config CHELSIO_T3
tristate "Chelsio Communications T3 10Gb Ethernet support"
- depends on CHELSIO_T3_DEPENDS
+ depends on PCI && INET
select FW_LOADER
select MDIO
help
@@ -2625,14 +2617,9 @@ config CHELSIO_T3
To compile this driver as a module, choose M here: the module
will be called cxgb3.
-config CHELSIO_T4_DEPENDS
- tristate
- depends on PCI && INET
- default y
-
config CHELSIO_T4
tristate "Chelsio Communications T4 Ethernet support"
- depends on CHELSIO_T4_DEPENDS
+ depends on PCI
select FW_LOADER
select MDIO
help
@@ -2650,14 +2637,9 @@ config CHELSIO_T4
To compile this driver as a module choose M here; the module
will be called cxgb4.
-config CHELSIO_T4VF_DEPENDS
- tristate
- depends on PCI && INET
- default y
-
config CHELSIO_T4VF
tristate "Chelsio Communications T4 Virtual Function Ethernet support"
- depends on CHELSIO_T4VF_DEPENDS
+ depends on PCI
help
This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
adapters with PCI-E SR-IOV Virtual Functions.
@@ -2728,26 +2710,26 @@ config IXGBE_DCB
If unsure, say N.
config IXGBEVF
- tristate "Intel(R) 82599 Virtual Function Ethernet support"
- depends on PCI_MSI
- ---help---
- This driver supports Intel(R) 82599 virtual functions. For more
- information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
+ tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) 82599 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
- <http://support.intel.com/support/network/sb/CS-008441.htm>
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
- For general information and support, go to the Intel support
- website at:
+ For general information and support, go to the Intel support
+ website at:
- <http://support.intel.com>
+ <http://support.intel.com>
- More specific information on configuring the driver is in
- <file:Documentation/networking/ixgbevf.txt>.
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgbevf.txt>.
- To compile this driver as a module, choose M here. The module
- will be called ixgbevf. MSI-X interrupt support is required
- for this driver to work correctly.
+ To compile this driver as a module, choose M here. The module
+ will be called ixgbevf. MSI-X interrupt support is required
+ for this driver to work correctly.
config IXGB
tristate "Intel(R) PRO/10GbE support"
@@ -2772,29 +2754,38 @@ config IXGB
will be called ixgb.
config S2IO
- tristate "S2IO 10Gbe XFrame NIC"
+ tristate "Exar Xframe 10Gb Ethernet Adapter"
depends on PCI
---help---
- This driver supports the 10Gbe XFrame NIC of S2IO.
+ This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+
More specific information on configuring the driver is in
<file:Documentation/networking/s2io.txt>.
+ To compile this driver as a module, choose M here. The module
+ will be called s2io.
+
config VXGE
- tristate "Neterion X3100 Series 10GbE PCIe Server Adapter"
+ tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
depends on PCI && INET
---help---
- This driver supports Neterion Inc's X3100 Series 10 GbE PCIe
+ This driver supports Exar Corp's X3100 Series 10 GbE PCIe
I/O Virtualized Server Adapter.
+
More specific information on configuring the driver is in
<file:Documentation/networking/vxge.txt>.
+ To compile this driver as a module, choose M here. The module
+ will be called vxge.
+
config VXGE_DEBUG_TRACE_ALL
bool "Enabling All Debug trace statments in driver"
default n
depends on VXGE
---help---
Say Y here if you want to enabling all the debug trace statements in
- driver. By default only few debug trace statements are enabled.
+ the vxge driver. By default only a few debug trace statements are
+ enabled.
config MYRI10GE
tristate "Myricom Myri-10G Ethernet support"
@@ -2861,7 +2852,7 @@ config MLX4_CORE
default n
config MLX4_DEBUG
- bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+ bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
depends on MLX4_CORE
default y
---help---
@@ -2906,18 +2897,18 @@ config QLGE
will be called qlge.
config BNA
- tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
- depends on PCI
- ---help---
- This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
- cards.
- To compile this driver as a module, choose M here: the module
- will be called bna.
+ tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
+ depends on PCI
+ ---help---
+ This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
+ cards.
+ To compile this driver as a module, choose M here: the module
+ will be called bna.
- For general information and support, go to the Brocade support
- website at:
+ For general information and support, go to the Brocade support
+ website at:
- <http://support.brocade.com>
+ <http://support.brocade.com>
source "drivers/net/sfc/Kconfig"
@@ -2960,14 +2951,41 @@ config TILE_NET
config XEN_NETDEV_FRONTEND
tristate "Xen network device frontend driver"
depends on XEN
+ select XEN_XENBUS_FRONTEND
default y
help
- The network device frontend driver allows the kernel to
- access network devices exported exported by a virtual
- machine containing a physical network device driver. The
- frontend driver is intended for unprivileged guest domains;
- if you are compiling a kernel for a Xen guest, you almost
- certainly want to enable this.
+ This driver provides support for Xen paravirtual network
+ devices exported by a Xen network driver domain (often
+ domain 0).
+
+ The corresponding Linux backend driver is enabled by the
+ CONFIG_XEN_NETDEV_BACKEND option.
+
+ If you are compiling a kernel for use as Xen guest, you
+ should say Y here. To compile this driver as a module, choose
+ M here: the module will be called xen-netfront.
+
+config XEN_NETDEV_BACKEND
+ tristate "Xen backend network device"
+ depends on XEN_BACKEND
+ help
+ This driver allows the kernel to act as a Xen network driver
+ domain which exports paravirtual network devices to other
+ Xen domains. These devices can be accessed by any operating
+ system that implements a compatible front end.
+
+ The corresponding Linux frontend driver is enabled by the
+ CONFIG_XEN_NETDEV_FRONTEND configuration option.
+
+ The backend driver presents a standard network device
+ endpoint for each paravirtual network device to the driver
+ domain network stack. These can then be bridged or routed
+ etc in order to provide full network connectivity.
+
+ If you are compiling a kernel to run in a Xen network driver
+ domain (often this is domain 0) you should say Y here. To
+ compile this driver as a module, chose M here: the module
+ compile this driver as a module, choose M here: the module
config ISERIES_VETH
tristate "iSeries Virtual Ethernet driver support"
@@ -3239,18 +3257,18 @@ config PPP_BSDCOMP
modules once you have said "make modules". If unsure, say N.
config PPP_MPPE
- tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
- depends on PPP && EXPERIMENTAL
- select CRYPTO
- select CRYPTO_SHA1
- select CRYPTO_ARC4
- select CRYPTO_ECB
- ---help---
- Support for the MPPE Encryption protocol, as employed by the
- Microsoft Point-to-Point Tunneling Protocol.
-
- See http://pptpclient.sourceforge.net/ for information on
- configuring PPTP clients and servers to utilize this method.
+ tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
+ depends on PPP && EXPERIMENTAL
+ select CRYPTO
+ select CRYPTO_SHA1
+ select CRYPTO_ARC4
+ select CRYPTO_ECB
+ ---help---
+ Support for the MPPE Encryption protocol, as employed by the
+ Microsoft Point-to-Point Tunneling Protocol.
+
+ See http://pptpclient.sourceforge.net/ for information on
+ configuring PPTP clients and servers to utilize this method.
config PPPOE
tristate "PPP over Ethernet (EXPERIMENTAL)"
@@ -3385,8 +3403,7 @@ config NETCONSOLE
config NETCONSOLE_DYNAMIC
bool "Dynamic reconfiguration of logging targets"
- depends on NETCONSOLE && SYSFS
- select CONFIGFS_FS
+ depends on NETCONSOLE && SYSFS && CONFIGFS_FS
help
This option enables the ability to dynamically reconfigure target
parameters (interface, IP addresses, port numbers, MAC addresses)
@@ -3409,14 +3426,14 @@ config VIRTIO_NET
depends on EXPERIMENTAL && VIRTIO
---help---
This is the virtual network driver for virtio. It can be used with
- lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
+ lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
config VMXNET3
- tristate "VMware VMXNET3 ethernet driver"
- depends on PCI && INET
- help
- This driver supports VMware's vmxnet3 virtual ethernet NIC.
- To compile this driver as a module, choose M here: the
- module will be called vmxnet3.
+ tristate "VMware VMXNET3 ethernet driver"
+ depends on PCI && INET
+ help
+ This driver supports VMware's vmxnet3 virtual ethernet NIC.
+ To compile this driver as a module, choose M here: the
+ module will be called vmxnet3.
endif # NETDEVICES
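
A minimal sketch of why R8169 now selects FW_LOADER above: drivers that load
firmware blobs go through request_firmware()/release_firmware(). The device
handle and firmware path below are made up for illustration; this is not the
r8169 code itself.

        #include <linux/firmware.h>
        #include <linux/device.h>

        static int example_load_fw(struct device *dev)
        {
                const struct firmware *fw;
                int err;

                /* looks the blob up under /lib/firmware (or via udev) */
                err = request_firmware(&fw, "example/nic-phy.fw", dev);
                if (err)
                        return err;

                /* ... push fw->data (fw->size bytes) into the chip ... */

                release_firmware(fw);
                return 0;
        }
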
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b90738d13994..01b604ad155e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -147,6 +147,7 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
obj-$(CONFIG_AX88796) += ax88796.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_FTMAC100) += ftmac100.o
obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
@@ -171,6 +172,7 @@ obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
+obj-$(CONFIG_XEN_NETDEV_BACKEND) += xen-netback/
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_IFB) += ifb.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 9bb405bd664e..068c3563e00f 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -55,8 +55,6 @@ extern struct net_device *eth16i_probe(int unit);
extern struct net_device *i82596_probe(int unit);
extern struct net_device *ewrk3_probe(int unit);
extern struct net_device *el1_probe(int unit);
-extern struct net_device *wavelan_probe(int unit);
-extern struct net_device *arlan_probe(int unit);
extern struct net_device *el16_probe(int unit);
extern struct net_device *elmc_probe(int unit);
extern struct net_device *elplus_probe(int unit);
@@ -68,7 +66,6 @@ extern struct net_device *ni5010_probe(int unit);
extern struct net_device *ni52_probe(int unit);
extern struct net_device *ni65_probe(int unit);
extern struct net_device *sonic_probe(int unit);
-extern struct net_device *SK_init(int unit);
extern struct net_device *seeq8005_probe(int unit);
extern struct net_device *smc_init(int unit);
extern struct net_device *atarilance_probe(int unit);
@@ -76,8 +73,6 @@ extern struct net_device *sun3lance_probe(int unit);
extern struct net_device *sun3_82586_probe(int unit);
extern struct net_device *apne_probe(int unit);
extern struct net_device *cs89x0_probe(int unit);
-extern struct net_device *hplance_probe(int unit);
-extern struct net_device *bagetlance_probe(int unit);
extern struct net_device *mvme147lance_probe(int unit);
extern struct net_device *tc515_probe(int unit);
extern struct net_device *lance_probe(int unit);
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 0b376a990972..f5a89164e779 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -3,7 +3,6 @@
#
config ATALK
tristate "Appletalk protocol support"
- depends on BKL # waiting to be removed from net/appletalk/ddp.c
select LLC
---help---
AppleTalk is the protocol that Apple computers can use to communicate
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index 39214e512452..7ca0eded2561 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -425,11 +425,6 @@ static irqreturn_t ariadne_interrupt(int irq, void *data)
int csr0, boguscnt;
int handled = 0;
- if (dev == NULL) {
- printk(KERN_WARNING "ariadne_interrupt(): irq for unknown device.\n");
- return IRQ_NONE;
- }
-
lance->RAP = CSR0; /* PCnet-ISA Controller Status */
if (!(lance->RDP & INTR)) /* Check if any interrupt has been */
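
The NULL check removed above can never trigger: the pointer the handler
receives is exactly the dev_id registered with request_irq(). A small sketch
of that contract, assuming a shared IRQ (flags and names here are
illustrative, not the ariadne probe code):

        #include <linux/interrupt.h>
        #include <linux/netdevice.h>

        static irqreturn_t example_interrupt(int irq, void *dev_id)
        {
                struct net_device *dev = dev_id; /* same pointer passed below */

                /* ... ack and service the chip ... */
                return IRQ_HANDLED;
        }

        static int example_open(struct net_device *dev)
        {
                /* dev is registered as dev_id, so the handler never sees NULL */
                return request_irq(dev->irq, example_interrupt, IRQF_SHARED,
                                   dev->name, dev);
        }
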
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 62f21106efec..0c9217f48b72 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -340,14 +340,6 @@ am79c961_close(struct net_device *dev)
return 0;
}
-/*
- * Get the current statistics.
- */
-static struct net_device_stats *am79c961_getstats (struct net_device *dev)
-{
- return &dev->stats;
-}
-
static void am79c961_mc_hash(char *addr, unsigned short *hash)
{
if (addr[0] & 0x01) {
@@ -665,7 +657,6 @@ static const struct net_device_ops am79c961_netdev_ops = {
.ndo_open = am79c961_open,
.ndo_stop = am79c961_close,
.ndo_start_xmit = am79c961_sendpacket,
- .ndo_get_stats = am79c961_getstats,
.ndo_set_multicast_list = am79c961_setmulticastlist,
.ndo_tx_timeout = am79c961_timeout,
.ndo_validate_addr = eth_validate_addr,
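
Dropping .ndo_get_stats above is safe because the removed helper only
returned &dev->stats, which is what the core already falls back to when the
hook is NULL. Sketch of the equivalence (hypothetical name):

        #include <linux/netdevice.h>

        /* what the removed hook did ... */
        static struct net_device_stats *redundant_get_stats(struct net_device *dev)
        {
                return &dev->stats;
        }
        /* ... which dev_get_stats() already does on its own when neither
         * ndo_get_stats64 nor ndo_get_stats is provided. */
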
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 6028226a7270..9eb9b98a7ae3 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -1229,8 +1229,10 @@ static int __devinit eth_init_one(struct platform_device *pdev)
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy);
port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
- if ((err = IS_ERR(port->phydev)))
+ if (IS_ERR(port->phydev)) {
+ err = PTR_ERR(port->phydev);
goto err_free_mem;
+ }
port->phydev->irq = PHY_POLL;
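
The fix above adopts the usual error-pointer idiom: phy_connect() returns
either a valid pointer or an errno encoded with ERR_PTR(). The old
assignment stored IS_ERR()'s boolean result (1) in err instead of the real
errno, which is what the hunk corrects. A generic sketch of how such a
return value is meant to be unpacked:

        #include <linux/err.h>

        static int example_unpack(void *ptr)
        {
                if (IS_ERR(ptr))
                        return PTR_ERR(ptr);    /* the real errno, e.g. -ENODEV */
                return 0;                       /* valid pointer */
        }
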
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 54c6d849cf25..aa07657744c3 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -854,12 +854,12 @@ ks8695_set_msglevel(struct net_device *ndev, u32 value)
}
/**
- * ks8695_get_settings - Get device-specific settings.
+ * ks8695_wan_get_settings - Get device-specific settings.
* @ndev: The network device to read settings from
* @cmd: The ethtool structure to read into
*/
static int
-ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
@@ -870,69 +870,50 @@ ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
SUPPORTED_TP | SUPPORTED_MII);
cmd->transceiver = XCVR_INTERNAL;
- /* Port specific extras */
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- cmd->phy_address = 0;
- /* not supported for HPNA */
- cmd->autoneg = AUTONEG_DISABLE;
+ cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+ cmd->port = PORT_MII;
+ cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
+ cmd->phy_address = 0;
- /* BUG: Erm, dtype hpna implies no phy regs */
- /*
- ctrl = readl(KS8695_MISC_VA + KS8695_HMC);
- cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10;
- cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF;
- */
- return -EOPNOTSUPP;
- case KS8695_DTYPE_WAN:
- cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
- cmd->port = PORT_MII;
- cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
- cmd->phy_address = 0;
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+ if ((ctrl & WMC_WAND) == 0) {
+ /* auto-negotiation is enabled */
+ cmd->advertising |= ADVERTISED_Autoneg;
+ if (ctrl & WMC_WANA100F)
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+ if (ctrl & WMC_WANA100H)
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+ if (ctrl & WMC_WANA10F)
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+ if (ctrl & WMC_WANA10H)
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+ if (ctrl & WMC_WANAP)
+ cmd->advertising |= ADVERTISED_Pause;
+ cmd->autoneg = AUTONEG_ENABLE;
+
+ cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
+ cmd->duplex = (ctrl & WMC_WDS) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ } else {
+ /* auto-negotiation is disabled */
+ cmd->autoneg = AUTONEG_DISABLE;
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
- if ((ctrl & WMC_WAND) == 0) {
- /* auto-negotiation is enabled */
- cmd->advertising |= ADVERTISED_Autoneg;
- if (ctrl & WMC_WANA100F)
- cmd->advertising |= ADVERTISED_100baseT_Full;
- if (ctrl & WMC_WANA100H)
- cmd->advertising |= ADVERTISED_100baseT_Half;
- if (ctrl & WMC_WANA10F)
- cmd->advertising |= ADVERTISED_10baseT_Full;
- if (ctrl & WMC_WANA10H)
- cmd->advertising |= ADVERTISED_10baseT_Half;
- if (ctrl & WMC_WANAP)
- cmd->advertising |= ADVERTISED_Pause;
- cmd->autoneg = AUTONEG_ENABLE;
-
- cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
- cmd->duplex = (ctrl & WMC_WDS) ?
- DUPLEX_FULL : DUPLEX_HALF;
- } else {
- /* auto-negotiation is disabled */
- cmd->autoneg = AUTONEG_DISABLE;
-
- cmd->speed = (ctrl & WMC_WANF100) ?
- SPEED_100 : SPEED_10;
- cmd->duplex = (ctrl & WMC_WANFF) ?
- DUPLEX_FULL : DUPLEX_HALF;
- }
- break;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
+ cmd->speed = (ctrl & WMC_WANF100) ?
+ SPEED_100 : SPEED_10;
+ cmd->duplex = (ctrl & WMC_WANFF) ?
+ DUPLEX_FULL : DUPLEX_HALF;
}
return 0;
}
/**
- * ks8695_set_settings - Set device-specific settings.
+ * ks8695_wan_set_settings - Set device-specific settings.
* @ndev: The network device to configure
* @cmd: The settings to configure
*/
static int
-ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
@@ -956,171 +937,85 @@ ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
ADVERTISED_100baseT_Full)) == 0)
return -EINVAL;
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* HPNA does not support auto-negotiation. */
- return -EINVAL;
- case KS8695_DTYPE_WAN:
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
- WMC_WANA10F | WMC_WANA10H);
- if (cmd->advertising & ADVERTISED_100baseT_Full)
- ctrl |= WMC_WANA100F;
- if (cmd->advertising & ADVERTISED_100baseT_Half)
- ctrl |= WMC_WANA100H;
- if (cmd->advertising & ADVERTISED_10baseT_Full)
- ctrl |= WMC_WANA10F;
- if (cmd->advertising & ADVERTISED_10baseT_Half)
- ctrl |= WMC_WANA10H;
-
- /* force a re-negotiation */
- ctrl |= WMC_WANR;
- writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
- break;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
- }
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+ ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
+ WMC_WANA10F | WMC_WANA10H);
+ if (cmd->advertising & ADVERTISED_100baseT_Full)
+ ctrl |= WMC_WANA100F;
+ if (cmd->advertising & ADVERTISED_100baseT_Half)
+ ctrl |= WMC_WANA100H;
+ if (cmd->advertising & ADVERTISED_10baseT_Full)
+ ctrl |= WMC_WANA10F;
+ if (cmd->advertising & ADVERTISED_10baseT_Half)
+ ctrl |= WMC_WANA10H;
+
+ /* force a re-negotiation */
+ ctrl |= WMC_WANR;
+ writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
} else {
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* BUG: dtype_hpna implies no phy registers */
- /*
- ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC);
-
- ctrl &= ~(HMC_HSS | HMC_HDS);
- if (cmd->speed == SPEED_100)
- ctrl |= HMC_HSS;
- if (cmd->duplex == DUPLEX_FULL)
- ctrl |= HMC_HDS;
-
- __raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC);
- */
- return -EOPNOTSUPP;
- case KS8695_DTYPE_WAN:
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- /* disable auto-negotiation */
- ctrl |= WMC_WAND;
- ctrl &= ~(WMC_WANF100 | WMC_WANFF);
-
- if (cmd->speed == SPEED_100)
- ctrl |= WMC_WANF100;
- if (cmd->duplex == DUPLEX_FULL)
- ctrl |= WMC_WANFF;
-
- writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
- break;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
- }
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+ /* disable auto-negotiation */
+ ctrl |= WMC_WAND;
+ ctrl &= ~(WMC_WANF100 | WMC_WANFF);
+
+ if (cmd->speed == SPEED_100)
+ ctrl |= WMC_WANF100;
+ if (cmd->duplex == DUPLEX_FULL)
+ ctrl |= WMC_WANFF;
+
+ writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
}
return 0;
}
/**
- * ks8695_nwayreset - Restart the autonegotiation on the port.
+ * ks8695_wan_nwayreset - Restart the autonegotiation on the port.
* @ndev: The network device to restart autoneotiation on
*/
static int
-ks8695_nwayreset(struct net_device *ndev)
+ks8695_wan_nwayreset(struct net_device *ndev)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* No phy means no autonegotiation on hpna */
- return -EINVAL;
- case KS8695_DTYPE_WAN:
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- if ((ctrl & WMC_WAND) == 0)
- writel(ctrl | WMC_WANR,
- ksp->phyiface_regs + KS8695_WMC);
- else
- /* auto-negotiation not enabled */
- return -EINVAL;
- break;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-/**
- * ks8695_get_link - Retrieve link status of network interface
- * @ndev: The network interface to retrive the link status of.
- */
-static u32
-ks8695_get_link(struct net_device *ndev)
-{
- struct ks8695_priv *ksp = netdev_priv(ndev);
- u32 ctrl;
+ if ((ctrl & WMC_WAND) == 0)
+ writel(ctrl | WMC_WANR,
+ ksp->phyiface_regs + KS8695_WMC);
+ else
+ /* auto-negotiation not enabled */
+ return -EINVAL;
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* HPNA always has link */
- return 1;
- case KS8695_DTYPE_WAN:
- /* WAN we can read the PHY for */
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
- return ctrl & WMC_WLS;
- case KS8695_DTYPE_LAN:
- return -EOPNOTSUPP;
- }
return 0;
}
/**
- * ks8695_get_pause - Retrieve network pause/flow-control advertising
+ * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising
* @ndev: The device to retrieve settings from
* @param: The structure to fill out with the information
*/
static void
-ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
+ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
u32 ctrl;
- switch (ksp->dtype) {
- case KS8695_DTYPE_HPNA:
- /* No phy link on hpna to configure */
- return;
- case KS8695_DTYPE_WAN:
- ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
- /* advertise Pause */
- param->autoneg = (ctrl & WMC_WANAP);
+ ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
- /* current Rx Flow-control */
- ctrl = ks8695_readreg(ksp, KS8695_DRXC);
- param->rx_pause = (ctrl & DRXC_RFCE);
+ /* advertise Pause */
+ param->autoneg = (ctrl & WMC_WANAP);
- /* current Tx Flow-control */
- ctrl = ks8695_readreg(ksp, KS8695_DTXC);
- param->tx_pause = (ctrl & DTXC_TFCE);
- break;
- case KS8695_DTYPE_LAN:
- /* The LAN's "phy" is a direct-attached switch */
- return;
- }
-}
+ /* current Rx Flow-control */
+ ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+ param->rx_pause = (ctrl & DRXC_RFCE);
-/**
- * ks8695_set_pause - Configure pause/flow-control
- * @ndev: The device to configure
- * @param: The pause parameters to set
- *
- * TODO: Implement this
- */
-static int
-ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
-{
- return -EOPNOTSUPP;
+ /* current Tx Flow-control */
+ ctrl = ks8695_readreg(ksp, KS8695_DTXC);
+ param->tx_pause = (ctrl & DTXC_TFCE);
}
/**
@@ -1140,12 +1035,17 @@ ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
static const struct ethtool_ops ks8695_ethtool_ops = {
.get_msglevel = ks8695_get_msglevel,
.set_msglevel = ks8695_set_msglevel,
- .get_settings = ks8695_get_settings,
- .set_settings = ks8695_set_settings,
- .nway_reset = ks8695_nwayreset,
- .get_link = ks8695_get_link,
- .get_pauseparam = ks8695_get_pause,
- .set_pauseparam = ks8695_set_pause,
+ .get_drvinfo = ks8695_get_drvinfo,
+};
+
+static const struct ethtool_ops ks8695_wan_ethtool_ops = {
+ .get_msglevel = ks8695_get_msglevel,
+ .set_msglevel = ks8695_set_msglevel,
+ .get_settings = ks8695_wan_get_settings,
+ .set_settings = ks8695_wan_set_settings,
+ .nway_reset = ks8695_wan_nwayreset,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = ks8695_wan_get_pause,
.get_drvinfo = ks8695_get_drvinfo,
};
@@ -1541,7 +1441,6 @@ ks8695_probe(struct platform_device *pdev)
/* driver system setup */
ndev->netdev_ops = &ks8695_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
@@ -1608,12 +1507,15 @@ ks8695_probe(struct platform_device *pdev)
if (ksp->phyiface_regs && ksp->link_irq == -1) {
ks8695_init_switch(ksp);
ksp->dtype = KS8695_DTYPE_LAN;
+ SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
ks8695_init_wan_phy(ksp);
ksp->dtype = KS8695_DTYPE_WAN;
+ SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
} else {
/* No initialisation since HPNA does not have a PHY */
ksp->dtype = KS8695_DTYPE_HPNA;
+ SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
}
/* And bring up the net_device with the net core */
@@ -1742,7 +1644,7 @@ ks8695_cleanup(void)
module_init(ks8695_init);
module_exit(ks8695_cleanup);
-MODULE_AUTHOR("Simtec Electronics")
+MODULE_AUTHOR("Simtec Electronics");
MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MODULENAME);
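
The ks8695 change above stops multiplexing on ksp->dtype inside every
ethtool handler and instead installs a per-port ethtool_ops at probe time.
A sketch of that pattern, with hypothetical names and handler placement
assumed:

        #include <linux/ethtool.h>
        #include <linux/netdevice.h>

        static const struct ethtool_ops example_wan_ops = {
                .get_link       = ethtool_op_get_link,
                /* WAN-only get/set_settings, nway_reset, pauseparam ... */
        };

        static const struct ethtool_ops example_basic_ops = {
                /* msglevel/drvinfo only: ports without a PHY to steer */
        };

        static void example_pick_ops(struct net_device *ndev, bool is_wan)
        {
                SET_ETHTOOL_OPS(ndev, is_wan ? &example_wan_ops
                                             : &example_basic_ops);
        }
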
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 4545d5a06c24..bfea499a3513 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -117,7 +117,7 @@
#define TX_DESC_SIZE 10
#define MAX_RBUFF_SZ 0x600
#define MAX_TBUFF_SZ 0x600
-#define TX_TIMEOUT 50
+#define TX_TIMEOUT (HZ/2)
#define DELAY 1000
#define CAM0 0x0
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 89876897a6fe..f4744fc89768 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -150,7 +150,7 @@ struct net_local {
#define PORT_OFFSET(o) (o)
-#define TX_TIMEOUT 10
+#define TX_TIMEOUT (HZ/10)
/* Index to functions, as function prototypes. */
@@ -270,9 +270,9 @@ static const struct net_device_ops at1700_netdev_ops = {
static int __init at1700_probe1(struct net_device *dev, int ioaddr)
{
- char fmv_irqmap[4] = {3, 7, 10, 15};
- char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
- char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
+ static const char fmv_irqmap[4] = {3, 7, 10, 15};
+ static const char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
+ static const char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
int slot, ret = -ENODEV;
struct net_local *lp = netdev_priv(dev);
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 8cb27cb7bca1..ce0091eb06f5 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -116,7 +116,7 @@ MODULE_LICENSE("GPL");
#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
-#define TX_TIMEOUT 20
+#define TX_TIMEOUT (HZ/5)
/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
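
The TX_TIMEOUT changes in w90p910_ether.c, at1700.c and atarilance.c above
share one point: watchdog_timeo is measured in jiffies, so a bare constant
scales with CONFIG_HZ while an HZ-based expression does not. A minimal
sketch:

        #include <linux/netdevice.h>

        #define EXAMPLE_TX_TIMEOUT      (HZ / 2)        /* ~500 ms, any HZ */

        static void example_setup(struct net_device *dev)
        {
                /* the netdev watchdog calls ndo_tx_timeout() once a queue
                 * has been stalled for this many jiffies */
                dev->watchdog_timeo = EXAMPLE_TX_TIMEOUT;
        }
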
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 9ab58097fa2e..7cb375e0e29c 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -265,7 +265,7 @@ struct atl1c_recv_ret_status {
__le32 word3;
};
-/* RFD desciptor */
+/* RFD descriptor */
struct atl1c_rx_free_desc {
__le64 buffer_addr;
};
@@ -531,7 +531,7 @@ struct atl1c_rfd_ring {
struct atl1c_buffer *buffer_info;
};
-/* receive return desciptor (rrd) ring */
+/* receive return descriptor (rrd) ring */
struct atl1c_rrd_ring {
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 1bf672009948..23f2ab0f2fa8 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -345,7 +345,7 @@ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
*/
static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
{
- u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK;
+ u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL;
u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
~GIGA_CR_1000T_SPEED_MASK;
@@ -373,7 +373,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
}
if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
- atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0)
+ atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0)
return -1;
return 0;
}
@@ -517,19 +517,18 @@ int atl1c_phy_init(struct atl1c_hw *hw)
"Error Setting up Auto-Negotiation\n");
return ret_val;
}
- mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+ mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
break;
case MEDIA_TYPE_100M_FULL:
- mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX;
+ mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX;
break;
case MEDIA_TYPE_100M_HALF:
- mii_bmcr_data |= BMCR_SPEED_100;
+ mii_bmcr_data |= BMCR_SPEED100;
break;
case MEDIA_TYPE_10M_FULL:
- mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX;
+ mii_bmcr_data |= BMCR_FULLDPLX;
break;
case MEDIA_TYPE_10M_HALF:
- mii_bmcr_data |= BMCR_SPEED_10;
break;
default:
if (netif_msg_link(adapter))
@@ -657,7 +656,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
err = atl1c_phy_setup_adv(hw);
if (err)
return err;
- mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+ mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
}
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index 3dd675979aa1..655fc6c4a8a4 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -736,55 +736,16 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
#define REG_DEBUG_DATA0 0x1900
#define REG_DEBUG_DATA1 0x1904
-/* PHY Control Register */
-#define MII_BMCR 0x00
-#define BMCR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
-#define BMCR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define BMCR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define BMCR_ISOLATE 0x0400 /* Isolate PHY from MII */
-#define BMCR_POWER_DOWN 0x0800 /* Power down */
-#define BMCR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define BMCR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define BMCR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define BMCR_SPEED_MASK 0x2040
-#define BMCR_SPEED_1000 0x0040
-#define BMCR_SPEED_100 0x2000
-#define BMCR_SPEED_10 0x0000
-
-/* PHY Status Register */
-#define MII_BMSR 0x01
-#define BMMSR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
-#define BMSR_JABBER_DETECT 0x0002 /* Jabber Detected */
-#define BMSR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define BMSR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
-#define BMSR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
-#define BMSR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-#define BMSR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define BMSR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
-#define BMSR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
-#define BMSR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
-#define BMSR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
-#define BMSR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
-#define BMSR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
-#define BMMII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
-#define BMMII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
-
-#define MII_PHYSID1 0x02
-#define MII_PHYSID2 0x03
#define L1D_MPW_PHYID1 0xD01C /* V7 */
#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */
#define L1D_MPW_PHYID3 0xD01E /* V8 */
/* Autoneg Advertisement Register */
-#define MII_ADVERTISE 0x04
-#define ADVERTISE_SPEED_MASK 0x01E0
-#define ADVERTISE_DEFAULT_CAP 0x0DE0
+#define ADVERTISE_DEFAULT_CAP \
+ (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)
/* 1000BASE-T Control Register */
-#define MII_GIGA_CR 0x09
#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */
#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */
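
The deleted blocks above duplicated the generic MII register and bit
definitions, so the driver can use <linux/mii.h> directly. Sketch of the
one-to-one mapping for the bits this patch touches (values repeated from
the removed defines for reference):

        #include <linux/mii.h>

        /* old private name          generic <linux/mii.h> name    value   */
        /* BMCR_AUTO_NEG_EN       -> BMCR_ANENABLE                 0x1000  */
        /* BMCR_RESTART_AUTO_NEG  -> BMCR_ANRESTART                0x0200  */
        /* BMCR_SPEED_100         -> BMCR_SPEED100                 0x2000  */
        /* BMCR_FULL_DUPLEX       -> BMCR_FULLDPLX                 0x0100  */
        /* MII_GIGA_CR            -> MII_CTRL1000                  reg 0x09 */

        static u16 example_restart_aneg_bits(void)
        {
                return BMCR_ANENABLE | BMCR_ANRESTART;
        }
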
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 09b099bfab2b..7d9d5067a65c 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -48,6 +48,7 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
/* required last entry */
{ 0 }
};
@@ -702,6 +703,7 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
adapter->wol = 0;
+ device_set_wakeup_enable(&pdev->dev, false);
adapter->link_speed = SPEED_0;
adapter->link_duplex = FULL_DUPLEX;
adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
@@ -1100,10 +1102,10 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
DEVICE_CTRL_MAX_PAYLOAD_MASK;
- hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
+ hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
DEVICE_CTRL_MAX_RREQ_SZ_MASK;
- hw->dmar_block = min(max_pay_load, hw->dmar_block);
+ hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
TXQ_NUM_TPD_BURST_SHIFT;
@@ -2078,7 +2080,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
check_sum:
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
u8 css, cso;
- cso = skb_transport_offset(skb);
+ cso = skb_checksum_start_offset(skb);
if (unlikely(cso & 0x1)) {
if (netif_msg_tx_err(adapter))
@@ -2444,8 +2446,9 @@ static int atl1c_close(struct net_device *netdev)
return 0;
}
-static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
+static int atl1c_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
@@ -2454,7 +2457,6 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
u32 wol_ctrl_data = 0;
u16 mii_intr_status_data = 0;
u32 wufc = adapter->wol;
- int retval = 0;
atl1c_disable_l0s_l1(hw);
if (netif_running(netdev)) {
@@ -2462,9 +2464,6 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
atl1c_down(adapter);
}
netif_device_detach(netdev);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
if (wufc)
if (atl1c_phy_power_saving(hw) != 0)
@@ -2525,12 +2524,8 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
- /* pcie patch */
- device_set_wakeup_enable(&pdev->dev, 1);
-
AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
GPHY_CTRL_EXT_RESET);
- pci_prepare_to_sleep(pdev);
} else {
AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
@@ -2540,25 +2535,17 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
hw->phy_configured = false; /* re-init PHY when resume */
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
}
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
}
-static int atl1c_resume(struct pci_dev *pdev)
+static int atl1c_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
ATL1C_PCIE_PHY_RESET);
@@ -2582,7 +2569,12 @@ static int atl1c_resume(struct pci_dev *pdev)
static void atl1c_shutdown(struct pci_dev *pdev)
{
- atl1c_suspend(pdev, PMSG_SUSPEND);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ atl1c_suspend(&pdev->dev);
+ pci_wake_from_d3(pdev, adapter->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
}
static const struct net_device_ops atl1c_netdev_ops = {
@@ -2726,7 +2718,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
goto err_reset;
}
- device_init_wakeup(&pdev->dev, 1);
/* reset the controller to
* put the device in a known good starting state */
err = atl1c_phy_init(&adapter->hw);
@@ -2886,16 +2877,16 @@ static struct pci_error_handlers atl1c_err_handler = {
.resume = atl1c_io_resume,
};
+static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume);
+
static struct pci_driver atl1c_driver = {
.name = atl1c_driver_name,
.id_table = atl1c_pci_tbl,
.probe = atl1c_probe,
.remove = __devexit_p(atl1c_remove),
- /* Power Managment Hooks */
- .suspend = atl1c_suspend,
- .resume = atl1c_resume,
.shutdown = atl1c_shutdown,
- .err_handler = &atl1c_err_handler
+ .err_handler = &atl1c_err_handler,
+ .driver.pm = &atl1c_pm_ops,
};
/*
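
The atl1c conversion above moves from the legacy pci_driver .suspend/.resume
hooks to dev_pm_ops, letting the PCI core handle config-space save/restore
and power-state transitions. A bare-bones sketch of the pattern (driver
names hypothetical):

        #include <linux/pci.h>
        #include <linux/pm.h>

        static int example_suspend(struct device *dev)
        {
                /* quiesce the NIC; no pci_save_state()/pci_set_power_state() */
                return 0;
        }

        static int example_resume(struct device *dev)
        {
                /* re-init the NIC; config space is restored by the core */
                return 0;
        }

        static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

        static struct pci_driver example_driver = {
                .name           = "example",
                .driver.pm      = &example_pm_ops, /* replaces .suspend/.resume */
        };

The .shutdown path keeps its explicit pci_wake_from_d3()/pci_set_power_state()
calls, as in the hunk above, because shutdown is not covered by dev_pm_ops.
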
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 6943a6c3b948..1209297433b8 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -95,18 +95,18 @@ static int atl1e_set_settings(struct net_device *netdev,
ecmd->advertising = hw->autoneg_advertised |
ADVERTISED_TP | ADVERTISED_Autoneg;
- adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
+ adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
if (hw->autoneg_advertised & ADVERTISE_10_HALF)
- adv4 |= MII_AR_10T_HD_CAPS;
+ adv4 |= ADVERTISE_10HALF;
if (hw->autoneg_advertised & ADVERTISE_10_FULL)
- adv4 |= MII_AR_10T_FD_CAPS;
+ adv4 |= ADVERTISE_10FULL;
if (hw->autoneg_advertised & ADVERTISE_100_HALF)
- adv4 |= MII_AR_100TX_HD_CAPS;
+ adv4 |= ADVERTISE_100HALF;
if (hw->autoneg_advertised & ADVERTISE_100_FULL)
- adv4 |= MII_AR_100TX_FD_CAPS;
+ adv4 |= ADVERTISE_100FULL;
if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
- adv9 |= MII_AT001_CR_1000T_FD_CAPS;
+ adv9 |= ADVERTISE_1000FULL;
if (adv4 != hw->mii_autoneg_adv_reg ||
adv9 != hw->mii_1000t_ctrl_reg) {
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 76cc043def8c..923063d2e5bb 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -318,7 +318,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T control Register (Address 9).
*/
- mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+ mii_autoneg_adv_reg &= ~ADVERTISE_ALL;
mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
/*
@@ -327,44 +327,37 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
*/
switch (hw->media_type) {
case MEDIA_TYPE_AUTO_SENSOR:
- mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
- MII_AR_10T_FD_CAPS |
- MII_AR_100TX_HD_CAPS |
- MII_AR_100TX_FD_CAPS);
- hw->autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_10_FULL |
- ADVERTISE_100_HALF |
- ADVERTISE_100_FULL;
+ mii_autoneg_adv_reg |= ADVERTISE_ALL;
+ hw->autoneg_advertised = ADVERTISE_ALL;
if (hw->nic_type == athr_l1e) {
- mii_1000t_ctrl_reg |=
- MII_AT001_CR_1000T_FD_CAPS;
+ mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
hw->autoneg_advertised |= ADVERTISE_1000_FULL;
}
break;
case MEDIA_TYPE_100M_FULL:
- mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100FULL;
hw->autoneg_advertised = ADVERTISE_100_FULL;
break;
case MEDIA_TYPE_100M_HALF:
- mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100_HALF;
hw->autoneg_advertised = ADVERTISE_100_HALF;
break;
case MEDIA_TYPE_10M_FULL:
- mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10_FULL;
hw->autoneg_advertised = ADVERTISE_10_FULL;
break;
default:
- mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10_HALF;
hw->autoneg_advertised = ADVERTISE_10_HALF;
break;
}
/* flow control fixed to enable all */
- mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+ mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
@@ -374,7 +367,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
return ret_val;
if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
- ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
+ ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000,
mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
@@ -397,7 +390,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
int ret_val;
u16 phy_data;
- phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+ phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
if (ret_val) {
@@ -645,15 +638,14 @@ int atl1e_restart_autoneg(struct atl1e_hw *hw)
return err;
if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
- err = atl1e_write_phy_reg(hw, MII_AT001_CR,
+ err = atl1e_write_phy_reg(hw, MII_CTRL1000,
hw->mii_1000t_ctrl_reg);
if (err)
return err;
}
err = atl1e_write_phy_reg(hw, MII_BMCR,
- MII_CR_RESET | MII_CR_AUTO_NEG_EN |
- MII_CR_RESTART_AUTO_NEG);
+ BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
return err;
}
diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/atl1e/atl1e_hw.h
index 5ea2f4d86cfa..74df16aef793 100644
--- a/drivers/net/atl1e/atl1e_hw.h
+++ b/drivers/net/atl1e/atl1e_hw.h
@@ -629,127 +629,24 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
/***************************** MII definition ***************************************/
/* PHY Common Register */
-#define MII_BMCR 0x00
-#define MII_BMSR 0x01
-#define MII_PHYSID1 0x02
-#define MII_PHYSID2 0x03
-#define MII_ADVERTISE 0x04
-#define MII_LPA 0x05
-#define MII_EXPANSION 0x06
-#define MII_AT001_CR 0x09
-#define MII_AT001_SR 0x0A
-#define MII_AT001_ESR 0x0F
#define MII_AT001_PSCR 0x10
#define MII_AT001_PSSR 0x11
#define MII_INT_CTRL 0x12
#define MII_INT_STATUS 0x13
#define MII_SMARTSPEED 0x14
-#define MII_RERRCOUNTER 0x15
-#define MII_SREVISION 0x16
-#define MII_RESV1 0x17
#define MII_LBRERROR 0x18
-#define MII_PHYADDR 0x19
#define MII_RESV2 0x1a
-#define MII_TPISTATUS 0x1b
-#define MII_NCONFIG 0x1c
#define MII_DBG_ADDR 0x1D
#define MII_DBG_DATA 0x1E
-
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN 0x0800 /* Power down */
-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_MASK 0x2040
-#define MII_CR_SPEED_1000 0x0040
-#define MII_CR_SPEED_100 0x2000
-#define MII_CR_SPEED_10 0x0000
-
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
-#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
-
-/* Link partner ability register. */
-#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
-#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
-#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
-#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
-#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
-#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
-#define MII_LPA_PAUSE 0x0400 /* PAUSE */
-#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
-#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
-#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
-#define MII_LPA_NPAGE 0x8000 /* Next page bit */
-
/* Autoneg Advertisement Register */
-#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
-#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
-#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
-#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
-#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
-#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
-#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
-#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
-#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
-#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
-#define MII_AR_SPEED_MASK 0x01E0
-#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
+#define MII_AR_DEFAULT_CAP_MASK 0
/* 1000BASE-T Control Register */
-#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
-#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
-#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
-/* 0=DTE device */
-#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
-/* 0=Configure PHY as Slave */
-#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
-/* 0=Automatic Master/Slave config */
-#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
-#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
-#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
-#define MII_AT001_CR_1000T_SPEED_MASK 0x0300
-#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300
-
-/* 1000BASE-T Status Register */
-#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
-#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
-#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
-#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
-
-/* Extended Status Register */
-#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
-#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
-#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
-#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+#define MII_AT001_CR_1000T_SPEED_MASK \
+ (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK MII_AT001_CR_1000T_SPEED_MASK
/* AT001 PHY Specific Control Register */
#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index ef6349bf3b33..1ff001a8270c 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -547,8 +547,8 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
+ hw->revision_id = pdev->revision;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
@@ -932,11 +932,11 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
DEVICE_CTRL_MAX_PAYLOAD_MASK;
- hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
+ hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
DEVICE_CTRL_MAX_RREQ_SZ_MASK;
- hw->dmar_block = min(max_pay_load, hw->dmar_block);
+ hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
if (hw->nic_type != athr_l2e_revB)
AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
@@ -1649,7 +1649,7 @@ check_sum:
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
u8 css, cso;
- cso = skb_transport_offset(skb);
+ cso = skb_checksum_start_offset(skb);
if (unlikely(cso & 0x1)) {
netdev_err(adapter->netdev,
"payload offset should not ant event number\n");
@@ -2051,9 +2051,9 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
- mii_advertise_data = MII_AR_10T_HD_CAPS;
+ mii_advertise_data = ADVERTISE_10HALF;
- if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) ||
+ if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
(atl1e_write_phy_reg(hw,
MII_ADVERTISE, mii_advertise_data) != 0) ||
(atl1e_phy_commit(hw)) != 0) {
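
On the min() -> min_t() changes in this file and in atl1c_main.c: the
kernel's min() warns when its operands have different types, so when a
register-derived field meets a u32, min_t() is used to compare both under
one explicit type. Sketch, with operand types assumed for illustration:

        #include <linux/kernel.h>

        static u32 example_clamp(u16 from_register, u32 configured)
        {
                /* min(from_register, configured) would trip the type check;
                 * min_t() casts both sides to u32 first */
                return min_t(u32, from_register, configured);
        }
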
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 53363108994e..67f40b9c16ed 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -950,6 +950,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
adapter->wol = 0;
+ device_set_wakeup_enable(&adapter->pdev->dev, false);
adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
adapter->ict = 50000; /* 100ms */
adapter->link_speed = SPEED_0; /* hardware init */
@@ -2174,7 +2175,7 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
u8 css, cso;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
- css = (u8) (skb->csum_start - skb_headroom(skb));
+ css = skb_checksum_start_offset(skb);
cso = css + (u8) skb->csum_offset;
if (unlikely(css & 0x1)) {
/* L1 hardware requires an even number here */
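
The css line above shows why the new helper is a drop-in replacement:
skb_checksum_start_offset() is the offset from skb->data at which
checksumming starts, the same csum_start minus headroom expression the
driver open-coded. Sketch of the equivalence:

        #include <linux/skbuff.h>

        /* equivalent to what the drivers previously computed by hand */
        static inline int example_checksum_start_offset(const struct sk_buff *skb)
        {
                return skb->csum_start - skb_headroom(skb);
        }
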
@@ -2735,15 +2736,15 @@ static int atl1_close(struct net_device *netdev)
}
#ifdef CONFIG_PM
-static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
+static int atl1_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
u32 ctrl = 0;
u32 wufc = adapter->wol;
u32 val;
- int retval;
u16 speed;
u16 duplex;
@@ -2751,17 +2752,15 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
if (netif_running(netdev))
atl1_down(adapter);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
val = ctrl & BMSR_LSTATUS;
if (val)
wufc &= ~ATLX_WUFC_LNKC;
+ if (!wufc)
+ goto disable_wol;
- if (val && wufc) {
+ if (val) {
val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
if (val) {
if (netif_msg_ifdown(adapter))
@@ -2798,23 +2797,18 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
-
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
- goto exit;
- }
-
- if (!val && wufc) {
+ } else {
ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
ioread32(hw->hw_addr + REG_WOL_CTRL);
iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
ioread32(hw->hw_addr + REG_MAC_CTRL);
hw->phy_configured = false;
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
- goto exit;
}
-disable_wol:
+ return 0;
+
+ disable_wol:
iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
ioread32(hw->hw_addr + REG_WOL_CTRL);
ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
@@ -2822,37 +2816,17 @@ disable_wol:
iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
hw->phy_configured = false;
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-exit:
- if (netif_running(netdev))
- pci_disable_msi(adapter->pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int atl1_resume(struct pci_dev *pdev)
+static int atl1_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1_adapter *adapter = netdev_priv(netdev);
- u32 err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- err = pci_enable_device(pdev);
- if (err) {
- if (netif_msg_ifup(adapter))
- dev_printk(KERN_DEBUG, &pdev->dev,
- "error enabling pci device\n");
- return err;
- }
-
- pci_set_master(pdev);
iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
atl1_reset_hw(&adapter->hw);
@@ -2864,16 +2838,25 @@ static int atl1_resume(struct pci_dev *pdev)
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
+#define ATL1_PM_OPS (&atl1_pm_ops)
+
#else
-#define atl1_suspend NULL
-#define atl1_resume NULL
+
+static int atl1_suspend(struct device *dev) { return 0; }
+
+#define ATL1_PM_OPS NULL
#endif
static void atl1_shutdown(struct pci_dev *pdev)
{
-#ifdef CONFIG_PM
- atl1_suspend(pdev, PMSG_SUSPEND);
-#endif
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ atl1_suspend(&pdev->dev);
+ pci_wake_from_d3(pdev, adapter->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3117,9 +3100,8 @@ static struct pci_driver atl1_driver = {
.id_table = atl1_pci_tbl,
.probe = atl1_probe,
.remove = __devexit_p(atl1_remove),
- .suspend = atl1_suspend,
- .resume = atl1_resume,
- .shutdown = atl1_shutdown
+ .shutdown = atl1_shutdown,
+ .driver.pm = ATL1_PM_OPS,
};
/*
@@ -3409,6 +3391,9 @@ static int atl1_set_wol(struct net_device *netdev,
adapter->wol = 0;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= ATLX_WUFC_MAG;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
@@ -3504,6 +3489,8 @@ static int atl1_set_ringparam(struct net_device *netdev,
struct atl1_rfd_ring rfd_old, rfd_new;
struct atl1_rrd_ring rrd_old, rrd_new;
struct atl1_ring_header rhdr_old, rhdr_new;
+ struct atl1_smb smb;
+ struct atl1_cmb cmb;
int err;
tpd_old = adapter->tpd_ring;
@@ -3544,11 +3531,19 @@ static int atl1_set_ringparam(struct net_device *netdev,
adapter->rrd_ring = rrd_old;
adapter->tpd_ring = tpd_old;
adapter->ring_header = rhdr_old;
+ /*
+ * Save SMB and CMB, since atl1_free_ring_resources
+ * will clear them.
+ */
+ smb = adapter->smb;
+ cmb = adapter->cmb;
atl1_free_ring_resources(adapter);
adapter->rfd_ring = rfd_new;
adapter->rrd_ring = rrd_new;
adapter->tpd_ring = tpd_new;
adapter->ring_header = rhdr_new;
+ adapter->smb = smb;
+ adapter->cmb = cmb;
err = atl1_up(adapter);
if (err)
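The atl1 changes above convert the driver from the legacy pci_driver .suspend/.resume callbacks to dev_pm_ops: the PCI core now saves and restores config space and sets the power state, so the driver callbacks only quiesce and re-arm the hardware, and wake-up is requested through device_set_wakeup_enable()/pci_wake_from_d3() instead of pci_enable_wake() at suspend time. A minimal sketch of the pattern, with foo_* as placeholder names rather than atl1 code:

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	/* device-specific quiesce only; no pci_save_state()/pci_set_power_state() here */
	if (netif_running(netdev))
		foo_down(netdev_priv(netdev));	/* hypothetical helper */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* device-specific re-init only; PCI config space is already restored */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#define FOO_PM_OPS	(&foo_pm_ops)
#else
#define FOO_PM_OPS	NULL
#endif

static struct pci_driver foo_driver = {
	.probe		= foo_probe,
	.remove		= foo_remove,
	.shutdown	= foo_shutdown,
	.driver.pm	= FOO_PM_OPS,
};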
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 35b14bec1207..e637e9f28fd4 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -93,8 +93,8 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
+ hw->revision_id = pdev->revision;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
adapter->wol = 0;
@@ -1504,8 +1504,8 @@ static void __devexit atl2_remove(struct pci_dev *pdev)
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_config_timer);
-
- flush_scheduled_work();
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->link_chg_task);
unregister_netdev(netdev);
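atl2 above (and bcm63xx_enet further down) now cancels the specific work items it owns instead of calling flush_scheduled_work(): cancel_work_sync() both removes a pending item and waits for a running handler, so teardown cannot race with the work and no longer depends on unrelated items sharing the global workqueue. A small illustration, with foo_* as hypothetical names:

#include <linux/workqueue.h>

struct foo_adapter {
	struct work_struct reset_task;
};

static void foo_reset_task(struct work_struct *work)
{
	struct foo_adapter *adapter =
		container_of(work, struct foo_adapter, reset_task);

	foo_reinit_hw(adapter);		/* hypothetical recovery step */
}

/* probe/open:   INIT_WORK(&adapter->reset_task, foo_reset_task);  */
/* error path:   schedule_work(&adapter->reset_task);              */
/* remove/stop:  cancel_work_sync(&adapter->reset_task);           */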
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 53eff9ba6e95..b9debcfb61a0 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -106,8 +106,6 @@ MODULE_VERSION(DRV_VERSION);
* complete immediately.
*/
-struct au1000_private *au_macs[NUM_ETH_INTERFACES];
-
/*
* board-specific configurations
*
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index b6da4cf3694b..e7cb8c8b9776 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -9,7 +9,7 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
-*/
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -17,46 +17,45 @@
#include <linux/isapnp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/phy.h>
#include <linux/eeprom_93cx6.h>
#include <linux/slab.h>
#include <net/ax88796.h>
#include <asm/system.h>
-#include <asm/io.h>
-
-static int phy_debug = 0;
/* Rename the lib8390.c functions to show that they are in this driver */
-#define __ei_open ax_ei_open
-#define __ei_close ax_ei_close
-#define __ei_poll ax_ei_poll
+#define __ei_open ax_ei_open
+#define __ei_close ax_ei_close
+#define __ei_poll ax_ei_poll
#define __ei_start_xmit ax_ei_start_xmit
#define __ei_tx_timeout ax_ei_tx_timeout
-#define __ei_get_stats ax_ei_get_stats
+#define __ei_get_stats ax_ei_get_stats
#define __ei_set_multicast_list ax_ei_set_multicast_list
-#define __ei_interrupt ax_ei_interrupt
+#define __ei_interrupt ax_ei_interrupt
#define ____alloc_ei_netdev ax__alloc_ei_netdev
-#define __NS8390_init ax_NS8390_init
+#define __NS8390_init ax_NS8390_init
/* force unsigned long back to 'void __iomem *' */
#define ax_convert_addr(_a) ((void __force __iomem *)(_a))
-#define ei_inb(_a) readb(ax_convert_addr(_a))
+#define ei_inb(_a) readb(ax_convert_addr(_a))
#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a))
-#define ei_inb_p(_a) ei_inb(_a)
+#define ei_inb_p(_a) ei_inb(_a)
#define ei_outb_p(_v, _a) ei_outb(_v, _a)
/* define EI_SHIFT() to take into account our register offsets */
-#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
+#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
/* Ensure we have our RCR base value */
#define AX88796_PLATFORM
@@ -74,43 +73,46 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
#define NE_DATAPORT EI_SHIFT(0x10)
#define NE1SM_START_PG 0x20 /* First page of TX buffer */
-#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
+#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+#define AX_GPOC_PPDSET BIT(6)
+
/* device private data */
struct ax_device {
- struct timer_list mii_timer;
- spinlock_t mii_lock;
- struct mii_if_info mii;
-
- u32 msg_enable;
- void __iomem *map2;
- struct platform_device *dev;
- struct resource *mem;
- struct resource *mem2;
- struct ax_plat_data *plat;
-
- unsigned char running;
- unsigned char resume_open;
- unsigned int irqflags;
-
- u32 reg_offsets[0x20];
+ struct mii_bus *mii_bus;
+ struct mdiobb_ctrl bb_ctrl;
+ struct phy_device *phy_dev;
+ void __iomem *addr_memr;
+ u8 reg_memr;
+ int link;
+ int speed;
+ int duplex;
+
+ void __iomem *map2;
+ const struct ax_plat_data *plat;
+
+ unsigned char running;
+ unsigned char resume_open;
+ unsigned int irqflags;
+
+ u32 reg_offsets[0x20];
};
static inline struct ax_device *to_ax_dev(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
- return (struct ax_device *)(ei_local+1);
+ return (struct ax_device *)(ei_local + 1);
}
-/* ax_initial_check
+/*
+ * ax_initial_check
*
 * do an initial probe for the card to check whether it exists
* and is functional
*/
-
static int ax_initial_check(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
@@ -122,10 +124,10 @@ static int ax_initial_check(struct net_device *dev)
if (reg0 == 0xFF)
return -ENODEV;
- ei_outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+ ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
regd = ei_inb(ioaddr + 0x0d);
ei_outb(0xff, ioaddr + 0x0d);
- ei_outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+ ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
if (ei_inb(ioaddr + EN0_COUNTER0) != 0) {
ei_outb(reg0, ioaddr);
@@ -136,29 +138,28 @@ static int ax_initial_check(struct net_device *dev)
return 0;
}
-/* Hard reset the card. This used to pause for the same period that a
- 8390 reset command required, but that shouldn't be necessary. */
-
+/*
+ * Hard reset the card. This used to pause for the same period that a
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
static void ax_reset_8390(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
- struct ax_device *ax = to_ax_dev(dev);
unsigned long reset_start_time = jiffies;
void __iomem *addr = (void __iomem *)dev->base_addr;
if (ei_debug > 1)
- dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies);
+ netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
- ei_status.txing = 0;
- ei_status.dmaing = 0;
+ ei_local->txing = 0;
+ ei_local->dmaing = 0;
/* This check _should_not_ be necessary, omit eventually. */
while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
- if (jiffies - reset_start_time > 2*HZ/100) {
- dev_warn(&ax->dev->dev, "%s: %s did not complete.\n",
- __func__, dev->name);
+ if (jiffies - reset_start_time > 2 * HZ / 100) {
+ netdev_warn(dev, "%s: did not complete.\n", __func__);
break;
}
}
@@ -171,70 +172,72 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page)
{
struct ei_device *ei_local = netdev_priv(dev);
- struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
- if (ei_status.dmaing) {
- dev_err(&ax->dev->dev, "%s: DMAing conflict in %s "
+ if (ei_local->dmaing) {
+ netdev_err(dev, "DMAing conflict in %s "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, __func__,
- ei_status.dmaing, ei_status.irqlock);
+ __func__,
+ ei_local->dmaing, ei_local->irqlock);
return;
}
- ei_status.dmaing |= 0x01;
- ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ ei_local->dmaing |= 0x01;
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
ei_outb(0, nic_base + EN0_RCNTHI);
ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */
ei_outb(ring_page, nic_base + EN0_RSARHI);
ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
- if (ei_status.word16)
- readsw(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+ if (ei_local->word16)
+ readsw(nic_base + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr) >> 1);
else
- readsb(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+ readsb(nic_base + NE_DATAPORT, hdr,
+ sizeof(struct e8390_pkt_hdr));
ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
+ ei_local->dmaing &= ~0x01;
le16_to_cpus(&hdr->count);
}
-/* Block input and output, similar to the Crynwr packet driver. If you
- are porting to a new ethercard, look at the packet driver source for hints.
- The NEx000 doesn't share the on-board packet memory -- you have to put
- the packet out through the "remote DMA" dataport using ei_outb. */
-
+/*
+ * Block input and output, similar to the Crynwr packet driver. If
+ * you are porting to a new ethercard, look at the packet driver
+ * source for hints. The NEx000 doesn't share the on-board packet
+ * memory -- you have to put the packet out through the "remote DMA"
+ * dataport using ei_outb.
+ */
static void ax_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
{
struct ei_device *ei_local = netdev_priv(dev);
- struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
char *buf = skb->data;
- if (ei_status.dmaing) {
- dev_err(&ax->dev->dev,
- "%s: DMAing conflict in %s "
+ if (ei_local->dmaing) {
+ netdev_err(dev,
+ "DMAing conflict in %s "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, __func__,
- ei_status.dmaing, ei_status.irqlock);
+ __func__,
+ ei_local->dmaing, ei_local->irqlock);
return;
}
- ei_status.dmaing |= 0x01;
+ ei_local->dmaing |= 0x01;
- ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
ei_outb(count >> 8, nic_base + EN0_RCNTHI);
ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
- if (ei_status.word16) {
+ if (ei_local->word16) {
readsw(nic_base + NE_DATAPORT, buf, count >> 1);
if (count & 0x01)
buf[count-1] = ei_inb(nic_base + NE_DATAPORT);
@@ -243,34 +246,34 @@ static void ax_block_input(struct net_device *dev, int count,
readsb(nic_base + NE_DATAPORT, buf, count);
}
- ei_status.dmaing &= ~1;
+ ei_local->dmaing &= ~1;
}
static void ax_block_output(struct net_device *dev, int count,
const unsigned char *buf, const int start_page)
{
struct ei_device *ei_local = netdev_priv(dev);
- struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
unsigned long dma_start;
- /* Round the count up for word writes. Do we need to do this?
- What effect will an odd byte count have on the 8390?
- I should check someday. */
-
- if (ei_status.word16 && (count & 0x01))
+ /*
+ * Round the count up for word writes. Do we need to do this?
+ * What effect will an odd byte count have on the 8390? I
+ * should check someday.
+ */
+ if (ei_local->word16 && (count & 0x01))
count++;
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
- if (ei_status.dmaing) {
- dev_err(&ax->dev->dev, "%s: DMAing conflict in %s."
+ if (ei_local->dmaing) {
+ netdev_err(dev, "DMAing conflict in %s."
"[DMAstat:%d][irqlock:%d]\n",
- dev->name, __func__,
- ei_status.dmaing, ei_status.irqlock);
+ __func__,
+ ei_local->dmaing, ei_local->irqlock);
return;
}
- ei_status.dmaing |= 0x01;
+ ei_local->dmaing |= 0x01;
/* We should already be in page 0, but to be safe... */
ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
@@ -278,250 +281,170 @@ static void ax_block_output(struct net_device *dev, int count,
/* Now the normal output. */
ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
- ei_outb(count >> 8, nic_base + EN0_RCNTHI);
+ ei_outb(count >> 8, nic_base + EN0_RCNTHI);
ei_outb(0x00, nic_base + EN0_RSARLO);
ei_outb(start_page, nic_base + EN0_RSARHI);
ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
- if (ei_status.word16) {
- writesw(nic_base + NE_DATAPORT, buf, count>>1);
- } else {
+ if (ei_local->word16)
+ writesw(nic_base + NE_DATAPORT, buf, count >> 1);
+ else
writesb(nic_base + NE_DATAPORT, buf, count);
- }
dma_start = jiffies;
while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
- if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
- dev_warn(&ax->dev->dev,
- "%s: timeout waiting for Tx RDC.\n", dev->name);
+ if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
+ netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ax_reset_8390(dev);
- ax_NS8390_init(dev,1);
+ ax_NS8390_init(dev, 1);
break;
}
}
ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
+ ei_local->dmaing &= ~0x01;
}
/* definitions for accessing MII/EEPROM interface */
#define AX_MEMR EI_SHIFT(0x14)
-#define AX_MEMR_MDC (1<<0)
-#define AX_MEMR_MDIR (1<<1)
-#define AX_MEMR_MDI (1<<2)
-#define AX_MEMR_MDO (1<<3)
-#define AX_MEMR_EECS (1<<4)
-#define AX_MEMR_EEI (1<<5)
-#define AX_MEMR_EEO (1<<6)
-#define AX_MEMR_EECLK (1<<7)
-
-/* ax_mii_ei_outbits
- *
- * write the specified set of bits to the phy
-*/
-
-static void
-ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
+#define AX_MEMR_MDC BIT(0)
+#define AX_MEMR_MDIR BIT(1)
+#define AX_MEMR_MDI BIT(2)
+#define AX_MEMR_MDO BIT(3)
+#define AX_MEMR_EECS BIT(4)
+#define AX_MEMR_EEI BIT(5)
+#define AX_MEMR_EEO BIT(6)
+#define AX_MEMR_EECLK BIT(7)
+
+static void ax_handle_link_change(struct net_device *dev)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
- unsigned int memr;
-
- /* clock low, data to output mode */
- memr = ei_inb(memr_addr);
- memr &= ~(AX_MEMR_MDC | AX_MEMR_MDIR);
- ei_outb(memr, memr_addr);
-
- for (len--; len >= 0; len--) {
- if (bits & (1 << len))
- memr |= AX_MEMR_MDO;
- else
- memr &= ~AX_MEMR_MDO;
-
- ei_outb(memr, memr_addr);
-
- /* clock high */
+ struct ax_device *ax = to_ax_dev(dev);
+ struct phy_device *phy_dev = ax->phy_dev;
+ int status_change = 0;
- ei_outb(memr | AX_MEMR_MDC, memr_addr);
- udelay(1);
+ if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
+ (ax->duplex != phy_dev->duplex))) {
- /* clock low */
- ei_outb(memr, memr_addr);
+ ax->speed = phy_dev->speed;
+ ax->duplex = phy_dev->duplex;
+ status_change = 1;
}
- /* leaves the clock line low, mdir input */
- memr |= AX_MEMR_MDIR;
- ei_outb(memr, (void __iomem *)dev->base_addr + AX_MEMR);
-}
-
-/* ax_phy_ei_inbits
- *
- * read a specified number of bits from the phy
-*/
-
-static unsigned int
-ax_phy_ei_inbits(struct net_device *dev, int no)
-{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
- unsigned int memr;
- unsigned int result = 0;
-
- /* clock low, data to input mode */
- memr = ei_inb(memr_addr);
- memr &= ~AX_MEMR_MDC;
- memr |= AX_MEMR_MDIR;
- ei_outb(memr, memr_addr);
-
- for (no--; no >= 0; no--) {
- ei_outb(memr | AX_MEMR_MDC, memr_addr);
-
- udelay(1);
-
- if (ei_inb(memr_addr) & AX_MEMR_MDI)
- result |= (1<<no);
+ if (phy_dev->link != ax->link) {
+ if (!phy_dev->link) {
+ ax->speed = 0;
+ ax->duplex = -1;
+ }
+ ax->link = phy_dev->link;
- ei_outb(memr, memr_addr);
+ status_change = 1;
}
- return result;
-}
-
-/* ax_phy_issueaddr
- *
- * use the low level bit shifting routines to send the address
- * and command to the specified phy
-*/
-
-static void
-ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
-{
- if (phy_debug)
- pr_debug("%s: dev %p, %04x, %04x, %d\n",
- __func__, dev, phy_addr, reg, opc);
-
- ax_mii_ei_outbits(dev, 0x3f, 6); /* pre-amble */
- ax_mii_ei_outbits(dev, 1, 2); /* frame-start */
- ax_mii_ei_outbits(dev, opc, 2); /* op code */
- ax_mii_ei_outbits(dev, phy_addr, 5); /* phy address */
- ax_mii_ei_outbits(dev, reg, 5); /* reg address */
+ if (status_change)
+ phy_print_status(phy_dev);
}
-static int
-ax_phy_read(struct net_device *dev, int phy_addr, int reg)
+static int ax_mii_probe(struct net_device *dev)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
- unsigned long flags;
- unsigned int result;
+ struct ax_device *ax = to_ax_dev(dev);
+ struct phy_device *phy_dev = NULL;
+ int ret;
- spin_lock_irqsave(&ei_local->page_lock, flags);
+ /* find the first phy */
+ phy_dev = phy_find_first(ax->mii_bus);
+ if (!phy_dev) {
+ netdev_err(dev, "no PHY found\n");
+ return -ENODEV;
+ }
- ax_phy_issueaddr(dev, phy_addr, reg, 2);
+ ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return ret;
+ }
- result = ax_phy_ei_inbits(dev, 17);
- result &= ~(3<<16);
+ /* mask with MAC supported features */
+ phy_dev->supported &= PHY_BASIC_FEATURES;
+ phy_dev->advertising = phy_dev->supported;
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ ax->phy_dev = phy_dev;
- if (phy_debug)
- pr_debug("%s: %04x.%04x => read %04x\n", __func__,
- phy_addr, reg, result);
+ netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq);
- return result;
+ return 0;
}
-static void
-ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
+static void ax_phy_switch(struct net_device *dev, int on)
{
- struct ei_device *ei = (struct ei_device *) netdev_priv(dev);
- struct ax_device *ax = to_ax_dev(dev);
- unsigned long flags;
-
- dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
- __func__, dev, phy_addr, reg, value);
-
- spin_lock_irqsave(&ei->page_lock, flags);
-
- ax_phy_issueaddr(dev, phy_addr, reg, 1);
- ax_mii_ei_outbits(dev, 2, 2); /* send TA */
- ax_mii_ei_outbits(dev, value, 16);
-
- spin_unlock_irqrestore(&ei->page_lock, flags);
-}
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
-static void ax_mii_expiry(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct ax_device *ax = to_ax_dev(dev);
- unsigned long flags;
+ u8 reg_gpoc = ax->plat->gpoc_val;
- spin_lock_irqsave(&ax->mii_lock, flags);
- mii_check_media(&ax->mii, netif_msg_link(ax), 0);
- spin_unlock_irqrestore(&ax->mii_lock, flags);
+ if (!!on)
+ reg_gpoc &= ~AX_GPOC_PPDSET;
+ else
+ reg_gpoc |= AX_GPOC_PPDSET;
- if (ax->running) {
- ax->mii_timer.expires = jiffies + HZ*2;
- add_timer(&ax->mii_timer);
- }
+ ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
}
static int ax_open(struct net_device *dev)
{
- struct ax_device *ax = to_ax_dev(dev);
- struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
int ret;
- dev_dbg(&ax->dev->dev, "%s: open\n", dev->name);
+ netdev_dbg(dev, "open\n");
ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
dev->name, dev);
if (ret)
- return ret;
-
- ret = ax_ei_open(dev);
- if (ret) {
- free_irq(dev->irq, dev);
- return ret;
- }
+ goto failed_request_irq;
/* turn the phy on (if turned off) */
+ ax_phy_switch(dev, 1);
- ei_outb(ax->plat->gpoc_val, ei_local->mem + EI_SHIFT(0x17));
- ax->running = 1;
-
- /* start the MII timer */
-
- init_timer(&ax->mii_timer);
+ ret = ax_mii_probe(dev);
+ if (ret)
+ goto failed_mii_probe;
+ phy_start(ax->phy_dev);
- ax->mii_timer.expires = jiffies+1;
- ax->mii_timer.data = (unsigned long) dev;
- ax->mii_timer.function = ax_mii_expiry;
+ ret = ax_ei_open(dev);
+ if (ret)
+ goto failed_ax_ei_open;
- add_timer(&ax->mii_timer);
+ ax->running = 1;
return 0;
+
+ failed_ax_ei_open:
+ phy_disconnect(ax->phy_dev);
+ failed_mii_probe:
+ ax_phy_switch(dev, 0);
+ free_irq(dev->irq, dev);
+ failed_request_irq:
+ return ret;
}
static int ax_close(struct net_device *dev)
{
struct ax_device *ax = to_ax_dev(dev);
- struct ei_device *ei_local = netdev_priv(dev);
- dev_dbg(&ax->dev->dev, "%s: close\n", dev->name);
-
- /* turn the phy off */
-
- ei_outb(ax->plat->gpoc_val | (1<<6),
- ei_local->mem + EI_SHIFT(0x17));
+ netdev_dbg(dev, "close\n");
ax->running = 0;
wmb();
- del_timer_sync(&ax->mii_timer);
ax_ei_close(dev);
+ /* turn the phy off */
+ ax_phy_switch(dev, 0);
+ phy_disconnect(ax->phy_dev);
+
free_irq(dev->irq, dev);
return 0;
}
@@ -529,17 +452,15 @@ static int ax_close(struct net_device *dev)
static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
struct ax_device *ax = to_ax_dev(dev);
- unsigned long flags;
- int rc;
+ struct phy_device *phy_dev = ax->phy_dev;
if (!netif_running(dev))
return -EINVAL;
- spin_lock_irqsave(&ax->mii_lock, flags);
- rc = generic_mii_ioctl(&ax->mii, if_mii(req), cmd, NULL);
- spin_unlock_irqrestore(&ax->mii_lock, flags);
+ if (!phy_dev)
+ return -ENODEV;
- return rc;
+ return phy_mii_ioctl(phy_dev, req, cmd);
}
/* ethtool ops */
@@ -547,56 +468,40 @@ static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
static void ax_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- struct ax_device *ax = to_ax_dev(dev);
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, ax->dev->name);
+ strcpy(info->bus_info, pdev->name);
}
static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct ax_device *ax = to_ax_dev(dev);
- unsigned long flags;
+ struct phy_device *phy_dev = ax->phy_dev;
- spin_lock_irqsave(&ax->mii_lock, flags);
- mii_ethtool_gset(&ax->mii, cmd);
- spin_unlock_irqrestore(&ax->mii_lock, flags);
+ if (!phy_dev)
+ return -ENODEV;
- return 0;
+ return phy_ethtool_gset(phy_dev, cmd);
}
static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct ax_device *ax = to_ax_dev(dev);
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&ax->mii_lock, flags);
- rc = mii_ethtool_sset(&ax->mii, cmd);
- spin_unlock_irqrestore(&ax->mii_lock, flags);
-
- return rc;
-}
+ struct phy_device *phy_dev = ax->phy_dev;
-static int ax_nway_reset(struct net_device *dev)
-{
- struct ax_device *ax = to_ax_dev(dev);
- return mii_nway_restart(&ax->mii);
-}
+ if (!phy_dev)
+ return -ENODEV;
-static u32 ax_get_link(struct net_device *dev)
-{
- struct ax_device *ax = to_ax_dev(dev);
- return mii_link_ok(&ax->mii);
+ return phy_ethtool_sset(phy_dev, cmd);
}
static const struct ethtool_ops ax_ethtool_ops = {
.get_drvinfo = ax_get_drvinfo,
.get_settings = ax_get_settings,
.set_settings = ax_set_settings,
- .nway_reset = ax_nway_reset,
- .get_link = ax_get_link,
+ .get_link = ethtool_op_get_link,
};
#ifdef CONFIG_AX88796_93CX6
@@ -640,37 +545,131 @@ static const struct net_device_ops ax_netdev_ops = {
.ndo_get_stats = ax_ei_get_stats,
.ndo_set_multicast_list = ax_ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ax_ei_poll,
#endif
};
+static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (level)
+ ax->reg_memr |= AX_MEMR_MDC;
+ else
+ ax->reg_memr &= ~AX_MEMR_MDC;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (output)
+ ax->reg_memr &= ~AX_MEMR_MDIR;
+ else
+ ax->reg_memr |= AX_MEMR_MDIR;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+ if (value)
+ ax->reg_memr |= AX_MEMR_MDO;
+ else
+ ax->reg_memr &= ~AX_MEMR_MDO;
+
+ ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
+{
+ struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+ int reg_memr = ei_inb(ax->addr_memr);
+
+ return reg_memr & AX_MEMR_MDI ? 1 : 0;
+}
+
+static struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = ax_bb_mdc,
+ .set_mdio_dir = ax_bb_dir,
+ .set_mdio_data = ax_bb_set_data,
+ .get_mdio_data = ax_bb_get_data,
+};
+
/* setup code */
+static int ax_mii_init(struct net_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev.parent);
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
+ int err, i;
+
+ ax->bb_ctrl.ops = &bb_ops;
+ ax->addr_memr = ei_local->mem + AX_MEMR;
+ ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
+ if (!ax->mii_bus) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ax->mii_bus->name = "ax88796_mii_bus";
+ ax->mii_bus->parent = dev->dev.parent;
+ snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+
+ ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!ax->mii_bus->irq) {
+ err = -ENOMEM;
+ goto out_free_mdio_bitbang;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ ax->mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(ax->mii_bus);
+ if (err)
+ goto out_free_irq;
+
+ return 0;
+
+ out_free_irq:
+ kfree(ax->mii_bus->irq);
+ out_free_mdio_bitbang:
+ free_mdio_bitbang(ax->mii_bus);
+ out:
+ return err;
+}
+
static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
{
void __iomem *ioaddr = ei_local->mem;
struct ax_device *ax = to_ax_dev(dev);
- /* Select page 0*/
- ei_outb(E8390_NODMA+E8390_PAGE0+E8390_STOP, ioaddr + E8390_CMD);
+ /* Select page 0 */
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD);
/* set to byte access */
ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG);
ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17));
}
-/* ax_init_dev
+/*
+ * ax_init_dev
*
* initialise the specified device, taking care to note the MAC
* address it may already have (if configured), ensure
 * the device is ready to be used by lib8390.c and registered with
* the network layer.
*/
-
-static int ax_init_dev(struct net_device *dev, int first_init)
+static int ax_init_dev(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
struct ax_device *ax = to_ax_dev(dev);
@@ -690,23 +689,23 @@ static int ax_init_dev(struct net_device *dev, int first_init)
/* read the mac from the card prom if we need it */
- if (first_init && ax->plat->flags & AXFLG_HAS_EEPROM) {
+ if (ax->plat->flags & AXFLG_HAS_EEPROM) {
unsigned char SA_prom[32];
- for(i = 0; i < sizeof(SA_prom); i+=2) {
+ for (i = 0; i < sizeof(SA_prom); i += 2) {
SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
- SA_prom[i+1] = ei_inb(ioaddr + NE_DATAPORT);
+ SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
}
if (ax->plat->wordlength == 2)
for (i = 0; i < 16; i++)
SA_prom[i] = SA_prom[i+i];
- memcpy(dev->dev_addr, SA_prom, 6);
+ memcpy(dev->dev_addr, SA_prom, 6);
}
#ifdef CONFIG_AX88796_93CX6
- if (first_init && ax->plat->flags & AXFLG_HAS_93CX6) {
+ if (ax->plat->flags & AXFLG_HAS_93CX6) {
unsigned char mac_addr[6];
struct eeprom_93cx6 eeprom;
@@ -719,7 +718,7 @@ static int ax_init_dev(struct net_device *dev, int first_init)
(__le16 __force *)mac_addr,
sizeof(mac_addr) >> 1);
- memcpy(dev->dev_addr, mac_addr, 6);
+ memcpy(dev->dev_addr, mac_addr, 6);
}
#endif
if (ax->plat->wordlength == 2) {
@@ -732,67 +731,56 @@ static int ax_init_dev(struct net_device *dev, int first_init)
stop_page = NE1SM_STOP_PG;
}
- /* load the mac-address from the device if this is the
- * first time we've initialised */
-
- if (first_init) {
- if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
- ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
- ei_local->mem + E8390_CMD); /* 0x61 */
- for (i = 0; i < ETHER_ADDR_LEN; i++)
- dev->dev_addr[i] =
- ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
- }
-
- if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
- ax->plat->mac_addr)
- memcpy(dev->dev_addr, ax->plat->mac_addr,
- ETHER_ADDR_LEN);
+ /* load the mac-address from the device */
+ if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
+ ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
+ ei_local->mem + E8390_CMD); /* 0x61 */
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] =
+ ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
}
+ if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
+ ax->plat->mac_addr)
+ memcpy(dev->dev_addr, ax->plat->mac_addr,
+ ETHER_ADDR_LEN);
+
ax_reset_8390(dev);
- ei_status.name = "AX88796";
- ei_status.tx_start_page = start_page;
- ei_status.stop_page = stop_page;
- ei_status.word16 = (ax->plat->wordlength == 2);
- ei_status.rx_start_page = start_page + TX_PAGES;
+ ei_local->name = "AX88796";
+ ei_local->tx_start_page = start_page;
+ ei_local->stop_page = stop_page;
+ ei_local->word16 = (ax->plat->wordlength == 2);
+ ei_local->rx_start_page = start_page + TX_PAGES;
#ifdef PACKETBUF_MEMSIZE
- /* Allow the packet buffer size to be overridden by know-it-alls. */
- ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+ /* Allow the packet buffer size to be overridden by know-it-alls. */
+ ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE;
#endif
- ei_status.reset_8390 = &ax_reset_8390;
- ei_status.block_input = &ax_block_input;
- ei_status.block_output = &ax_block_output;
- ei_status.get_8390_hdr = &ax_get_8390_hdr;
- ei_status.priv = 0;
-
- dev->netdev_ops = &ax_netdev_ops;
- dev->ethtool_ops = &ax_ethtool_ops;
-
- ax->msg_enable = NETIF_MSG_LINK;
- ax->mii.phy_id_mask = 0x1f;
- ax->mii.reg_num_mask = 0x1f;
- ax->mii.phy_id = 0x10; /* onboard phy */
- ax->mii.force_media = 0;
- ax->mii.full_duplex = 0;
- ax->mii.mdio_read = ax_phy_read;
- ax->mii.mdio_write = ax_phy_write;
- ax->mii.dev = dev;
+ ei_local->reset_8390 = &ax_reset_8390;
+ ei_local->block_input = &ax_block_input;
+ ei_local->block_output = &ax_block_output;
+ ei_local->get_8390_hdr = &ax_get_8390_hdr;
+ ei_local->priv = 0;
- ax_NS8390_init(dev, 0);
+ dev->netdev_ops = &ax_netdev_ops;
+ dev->ethtool_ops = &ax_ethtool_ops;
- if (first_init)
- dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %pM\n",
- ei_status.word16 ? 16:8, dev->irq, dev->base_addr,
- dev->dev_addr);
+ ret = ax_mii_init(dev);
+ if (ret)
+ goto out_irq;
+
+ ax_NS8390_init(dev, 0);
ret = register_netdev(dev);
if (ret)
goto out_irq;
+ netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
+ ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
+ dev->dev_addr);
+
return 0;
out_irq:
@@ -802,24 +790,24 @@ static int ax_init_dev(struct net_device *dev, int first_init)
return ret;
}
-static int ax_remove(struct platform_device *_dev)
+static int ax_remove(struct platform_device *pdev)
{
- struct net_device *dev = platform_get_drvdata(_dev);
- struct ax_device *ax;
-
- ax = to_ax_dev(dev);
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
+ struct resource *mem;
unregister_netdev(dev);
free_irq(dev->irq, dev);
- iounmap(ei_status.mem);
- release_resource(ax->mem);
- kfree(ax->mem);
+ iounmap(ei_local->mem);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
if (ax->map2) {
iounmap(ax->map2);
- release_resource(ax->mem2);
- kfree(ax->mem2);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ release_mem_region(mem->start, resource_size(mem));
}
free_netdev(dev);
@@ -827,19 +815,20 @@ static int ax_remove(struct platform_device *_dev)
return 0;
}
-/* ax_probe
+/*
+ * ax_probe
*
* This is the entry point when the platform device system uses to
- * notify us of a new device to attach to. Allocate memory, find
- * the resources and information passed, and map the necessary registers.
-*/
-
+ * notify us of a new device to attach to. Allocate memory, find the
+ * resources and information passed, and map the necessary registers.
+ */
static int ax_probe(struct platform_device *pdev)
{
struct net_device *dev;
- struct ax_device *ax;
- struct resource *res;
- size_t size;
+ struct ei_device *ei_local;
+ struct ax_device *ax;
+ struct resource *irq, *mem, *mem2;
+ resource_size_t mem_size, mem2_size = 0;
int ret = 0;
dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
@@ -847,120 +836,107 @@ static int ax_probe(struct platform_device *pdev)
return -ENOMEM;
/* ok, let's setup our device */
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ ei_local = netdev_priv(dev);
ax = to_ax_dev(dev);
- memset(ax, 0, sizeof(struct ax_device));
-
- spin_lock_init(&ax->mii_lock);
-
- ax->dev = pdev;
ax->plat = pdev->dev.platform_data;
platform_set_drvdata(pdev, dev);
- ei_status.rxcr_base = ax->plat->rcr_val;
+ ei_local->rxcr_base = ax->plat->rcr_val;
/* find the platform resources */
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
dev_err(&pdev->dev, "no IRQ specified\n");
ret = -ENXIO;
goto exit_mem;
}
- dev->irq = res->start;
- ax->irqflags = res->flags & IRQF_TRIGGER_MASK;
+ dev->irq = irq->start;
+ ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
dev_err(&pdev->dev, "no MEM specified\n");
ret = -ENXIO;
goto exit_mem;
}
- size = (res->end - res->start) + 1;
-
- /* setup the register offsets from either the platform data
- * or by using the size of the resource provided */
+ mem_size = resource_size(mem);
+ /*
+ * setup the register offsets from either the platform data or
+ * by using the size of the resource provided
+ */
if (ax->plat->reg_offsets)
- ei_status.reg_offset = ax->plat->reg_offsets;
+ ei_local->reg_offset = ax->plat->reg_offsets;
else {
- ei_status.reg_offset = ax->reg_offsets;
+ ei_local->reg_offset = ax->reg_offsets;
for (ret = 0; ret < 0x18; ret++)
- ax->reg_offsets[ret] = (size / 0x18) * ret;
+ ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
}
- ax->mem = request_mem_region(res->start, size, pdev->name);
- if (ax->mem == NULL) {
+ if (!request_mem_region(mem->start, mem_size, pdev->name)) {
dev_err(&pdev->dev, "cannot reserve registers\n");
- ret = -ENXIO;
+ ret = -ENXIO;
goto exit_mem;
}
- ei_status.mem = ioremap(res->start, size);
- dev->base_addr = (unsigned long)ei_status.mem;
+ ei_local->mem = ioremap(mem->start, mem_size);
+ dev->base_addr = (unsigned long)ei_local->mem;
- if (ei_status.mem == NULL) {
- dev_err(&pdev->dev, "Cannot ioremap area (%08llx,%08llx)\n",
- (unsigned long long)res->start,
- (unsigned long long)res->end);
+ if (ei_local->mem == NULL) {
+ dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem);
- ret = -ENXIO;
+ ret = -ENXIO;
goto exit_req;
}
/* look for reset area */
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res == NULL) {
+ mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem2) {
if (!ax->plat->reg_offsets) {
for (ret = 0; ret < 0x20; ret++)
- ax->reg_offsets[ret] = (size / 0x20) * ret;
+ ax->reg_offsets[ret] = (mem_size / 0x20) * ret;
}
-
- ax->map2 = NULL;
} else {
- size = (res->end - res->start) + 1;
+ mem2_size = resource_size(mem2);
- ax->mem2 = request_mem_region(res->start, size, pdev->name);
- if (ax->mem2 == NULL) {
+ if (!request_mem_region(mem2->start, mem2_size, pdev->name)) {
dev_err(&pdev->dev, "cannot reserve registers\n");
ret = -ENXIO;
goto exit_mem1;
}
- ax->map2 = ioremap(res->start, size);
- if (ax->map2 == NULL) {
+ ax->map2 = ioremap(mem2->start, mem2_size);
+ if (!ax->map2) {
dev_err(&pdev->dev, "cannot map reset register\n");
ret = -ENXIO;
goto exit_mem2;
}
- ei_status.reg_offset[0x1f] = ax->map2 - ei_status.mem;
+ ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem;
}
/* got resources, now initialise and register device */
-
- ret = ax_init_dev(dev, 1);
+ ret = ax_init_dev(dev);
if (!ret)
return 0;
- if (ax->map2 == NULL)
+ if (!ax->map2)
goto exit_mem1;
iounmap(ax->map2);
exit_mem2:
- release_resource(ax->mem2);
- kfree(ax->mem2);
+ release_mem_region(mem2->start, mem2_size);
exit_mem1:
- iounmap(ei_status.mem);
+ iounmap(ei_local->mem);
exit_req:
- release_resource(ax->mem);
- kfree(ax->mem);
+ release_mem_region(mem->start, mem_size);
exit_mem:
free_netdev(dev);
@@ -974,7 +950,7 @@ static int ax_probe(struct platform_device *pdev)
static int ax_suspend(struct platform_device *dev, pm_message_t state)
{
struct net_device *ndev = platform_get_drvdata(dev);
- struct ax_device *ax = to_ax_dev(ndev);
+ struct ax_device *ax = to_ax_dev(ndev);
ax->resume_open = ax->running;
@@ -987,7 +963,7 @@ static int ax_suspend(struct platform_device *dev, pm_message_t state)
static int ax_resume(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
- struct ax_device *ax = to_ax_dev(ndev);
+ struct ax_device *ax = to_ax_dev(ndev);
ax_initial_setup(ndev, netdev_priv(ndev));
ax_NS8390_init(ndev, ax->resume_open);
@@ -1001,7 +977,7 @@ static int ax_resume(struct platform_device *pdev)
#else
#define ax_suspend NULL
-#define ax_resume NULL
+#define ax_resume NULL
#endif
static struct platform_driver axdrv = {
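The ax88796 rework above replaces the driver's private MII bit-banging, ioctl plumbing and two-second media-poll timer with the generic mdio-bitbang helper plus phylib: the driver only supplies pin accessors, phylib polls the PHY and calls ax_handle_link_change(), and the ethtool/ioctl paths become thin wrappers around phy_ethtool_gset()/phy_ethtool_sset()/phy_mii_ioctl(). In outline, with foo_* standing in for the driver-specific pieces:

static struct mdiobb_ops foo_bb_ops = {
	.owner		= THIS_MODULE,
	.set_mdc	= foo_set_mdc,		/* drive the MDC clock pin  */
	.set_mdio_dir	= foo_set_mdio_dir,	/* switch MDIO pin in/out   */
	.set_mdio_data	= foo_set_mdio_data,	/* drive the MDIO data pin  */
	.get_mdio_data	= foo_get_mdio_data,	/* sample the MDIO data pin */
};

/* setup: build an mii_bus from the bit-bang ops and register it */
bb_ctrl.ops = &foo_bb_ops;
mii_bus = alloc_mdio_bitbang(&bb_ctrl);
err = mdiobus_register(mii_bus);

/* open: attach to the first PHY on the bus and start the state machine */
phydev = phy_find_first(mii_bus);
err = phy_connect_direct(ndev, phydev, foo_link_change, 0,
			 PHY_INTERFACE_MODE_MII);
phy_start(phydev);

/* close: phy_stop()/phy_disconnect(); teardown: mdiobus_unregister() and
 * free_mdio_bitbang() */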
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c6e86315b3f8..2e2b76258ab4 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -381,11 +381,11 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
__b44_set_flow_ctrl(bp, pause_enab);
}
-#ifdef SSB_DRIVER_MIPS
-extern char *nvram_get(char *name);
+#ifdef CONFIG_BCM47XX
+#include <asm/mach-bcm47xx/nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
- const char *str;
+ char buf[20];
u32 val;
int err;
@@ -394,10 +394,9 @@ static void b44_wap54g10_workaround(struct b44 *bp)
* see https://dev.openwrt.org/ticket/146
* check and reset bit "isolate"
*/
- str = nvram_get("boardnum");
- if (!str)
+ if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
return;
- if (simple_strtoul(str, NULL, 0) == 2) {
+ if (simple_strtoul(buf, NULL, 0) == 2) {
err = __b44_readphy(bp, 0, MII_BMCR, &val);
if (err)
goto error;
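The b44 hunk above moves from nvram_get(), which returned a bare char * rather than filling a caller buffer, to nvram_getenv(), which copies the value into caller-supplied storage and returns a negative value when the variable is absent; the caller therefore owns the buffer and checks the return code before parsing, roughly:

char buf[20];

if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
	return;				/* variable not present */
if (simple_strtoul(buf, NULL, 0) == 2)
	foo_apply_workaround();		/* hypothetical follow-up */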
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index ecfef240a303..e94a966af418 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -1097,7 +1097,7 @@ static int bcm_enet_stop(struct net_device *dev)
enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
/* make sure no mib update is scheduled */
- flush_scheduled_work();
+ cancel_work_sync(&priv->mib_update_task);
/* disable dma & mac */
bcm_enet_disable_dma(priv, priv->tx_chan);
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 4594a28b1f66..f803c58b941d 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2010 ServerEngines
+ * Copyright (C) 2005 - 2011 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
- * linux-drivers@serverengines.com
+ * linux-drivers@emulex.com
*
- * ServerEngines
- * 209 N. Fair Oaks Ave
- * Sunnyvale, CA 94085
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
*/
#ifndef BE_H
@@ -33,19 +33,22 @@
#include "be_hw.h"
-#define DRV_VER "2.103.175u"
+#define DRV_VER "4.0.100u"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
-#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
+#define OC_NAME_BE OC_NAME "(be3)"
+#define OC_NAME_LANCER OC_NAME "(Lancer)"
#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
+#define EMULEX_VENDOR_ID 0x10df
#define BE_DEVICE_ID1 0x211
#define BE_DEVICE_ID2 0x221
-#define OC_DEVICE_ID1 0x700
-#define OC_DEVICE_ID2 0x710
+#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
+#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
+#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
static inline char *nic_name(struct pci_dev *pdev)
{
@@ -53,7 +56,9 @@ static inline char *nic_name(struct pci_dev *pdev)
case OC_DEVICE_ID1:
return OC_NAME;
case OC_DEVICE_ID2:
- return OC_NAME1;
+ return OC_NAME_BE;
+ case OC_DEVICE_ID3:
+ return OC_NAME_LANCER;
case BE_DEVICE_ID2:
return BE3_NAME;
default:
@@ -62,7 +67,7 @@ static inline char *nic_name(struct pci_dev *pdev)
}
/* Number of bytes of an RX frame that are copied to skb->data */
-#define BE_HDR_LEN 64
+#define BE_HDR_LEN ((u16) 64)
#define BE_MAX_JUMBO_FRAME_SIZE 9018
#define BE_MIN_MTU 256
@@ -149,6 +154,7 @@ struct be_eq_obj {
u16 min_eqd; /* in usecs */
u16 max_eqd; /* in usecs */
u16 cur_eqd; /* in usecs */
+ u8 msix_vec_idx;
struct napi_struct napi;
};
@@ -205,10 +211,30 @@ struct be_rx_stats {
u32 rx_fps; /* Rx frags per second */
};
+struct be_rx_compl_info {
+ u32 rss_hash;
+ u16 vid;
+ u16 pkt_size;
+ u16 rxq_idx;
+ u16 mac_id;
+ u8 vlanf;
+ u8 num_rcvd;
+ u8 err;
+ u8 ipf;
+ u8 tcpf;
+ u8 udpf;
+ u8 ip_csum;
+ u8 l4_csum;
+ u8 ipv6;
+ u8 vtm;
+ u8 pkt_type;
+};
+
struct be_rx_obj {
struct be_adapter *adapter;
struct be_queue_info q;
struct be_queue_info cq;
+ struct be_rx_compl_info rxcp;
struct be_rx_page_info page_info_tbl[RX_Q_LEN];
struct be_eq_obj rx_eq;
struct be_rx_stats stats;
@@ -217,6 +243,10 @@ struct be_rx_obj {
u32 cache_line_barrier[16];
};
+struct be_drv_stats {
+ u8 be_on_die_temperature;
+};
+
struct be_vf_cfg {
unsigned char vf_mac_addr[ETH_ALEN];
u32 vf_if_handle;
@@ -226,6 +256,7 @@ struct be_vf_cfg {
};
#define BE_INVALID_PMAC_ID 0xffffffff
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -234,7 +265,7 @@ struct be_adapter {
u8 __iomem *db; /* Door Bell */
u8 __iomem *pcicfg; /* PCI config space */
- spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
+ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
* is stored for freeing purpose */
@@ -260,6 +291,9 @@ struct be_adapter {
u32 num_rx_qs;
u32 big_page_size; /* Compounded page size shared by rx wrbs */
+ u8 msix_vec_next_idx;
+ struct be_drv_stats drv_stats;
+
struct vlan_group *vlan_grp;
u16 vlans_added;
u16 max_vlans; /* Number of vlans supported */
@@ -271,6 +305,7 @@ struct be_adapter {
struct be_dma_mem stats_cmd;
/* Work queue used to perform periodic tasks like getting statistics */
struct delayed_work work;
+ u16 work_counter;
/* Ethtool knobs and info */
bool rx_csum; /* BE card must perform rx-checksumming */
@@ -288,7 +323,7 @@ struct be_adapter {
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
bool ue_detected;
- bool stats_ioctl_sent;
+ bool stats_cmd_sent;
int link_speed;
u8 port_type;
u8 transceiver;
@@ -297,10 +332,13 @@ struct be_adapter {
u32 flash_status;
struct completion flash_compl;
+ bool be3_native;
bool sriov_enabled;
struct be_vf_cfg vf_cfg[BE_MAX_VF];
- u8 base_eq_id;
u8 is_virtfn;
+ u32 sli_family;
+ u8 hba_port_num;
+ u16 pvid;
};
#define be_physfn(adapter) (!adapter->is_virtfn)
@@ -309,6 +347,8 @@ struct be_adapter {
#define BE_GEN2 2
#define BE_GEN3 3
+#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3)
+
extern const struct ethtool_ops be_ethtool_ops;
#define tx_stats(adapter) (&adapter->tx_stats)
@@ -416,10 +456,17 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
{
u8 data;
-
- pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
- pci_read_config_byte(adapter->pdev, 0xFE, &data);
- adapter->is_virtfn = (data != 0xAA);
+ u32 sli_intf;
+
+ if (lancer_chip(adapter)) {
+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET,
+ &sli_intf);
+ adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
+ } else {
+ pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
+ pci_read_config_byte(adapter->pdev, 0xFE, &data);
+ adapter->is_virtfn = (data != 0xAA);
+ }
}
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
@@ -431,9 +478,8 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
mac[5] = (u8)(addr & 0xFF);
mac[4] = (u8)((addr >> 8) & 0xFF);
mac[3] = (u8)((addr >> 16) & 0xFF);
- mac[2] = 0xC9;
- mac[1] = 0x00;
- mac[0] = 0x00;
+ /* Use the OUI from the current MAC address */
+ memcpy(mac, adapter->netdev->dev_addr, 3);
}
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
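Two be.h changes above echo through the rest of the benet diff: lancer_chip() keys Lancer-specific paths off the new OC_DEVICE_ID3 PCI id, and the mailbox lock becomes a mutex, so every mailbox user in be_cmds.c switches from spin_lock() to an interruptible mutex acquisition (mailbox commands can sleep, and a signalled task can now back out). The recurring shape in the hunks below is:

if (mutex_lock_interruptible(&adapter->mbox_lock))
	return -1;

/* ... build the wrb and issue the mailbox command; may sleep ... */

mutex_unlock(&adapter->mbox_lock);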
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 36eca1ce75d4..5a4a87e7c5ea 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2010 ServerEngines
+ * Copyright (C) 2005 - 2011 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -8,21 +8,30 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
- * linux-drivers@serverengines.com
+ * linux-drivers@emulex.com
*
- * ServerEngines
- * 209 N. Fair Oaks Ave
- * Sunnyvale, CA 94085
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
*/
#include "be.h"
#include "be_cmds.h"
+/* Must be a power of 2 or else MODULO will BUG_ON */
+static int be_get_temp_freq = 32;
+
static void be_mcc_notify(struct be_adapter *adapter)
{
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
+ if (adapter->eeh_err) {
+ dev_info(&adapter->pdev->dev,
+ "Error in Card Detected! Cannot issue commands\n");
+ return;
+ }
+
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
@@ -75,7 +84,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
be_dws_le_to_cpu(&resp->hw_stats,
sizeof(resp->hw_stats));
netdev_stats_update(adapter);
- adapter->stats_ioctl_sent = false;
+ adapter->stats_cmd_sent = false;
}
} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
(compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
@@ -102,6 +111,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
{
if (evt->valid) {
adapter->vlan_prio_bmap = evt->available_priority_bmap;
+ adapter->recommended_prio &= ~VLAN_PRIO_MASK;
adapter->recommended_prio =
evt->reco_default_priority << VLAN_PRIO_SHIFT;
}
@@ -117,6 +127,16 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
}
}
+/*Grp5 PVID evt*/
+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
+ struct be_async_event_grp5_pvid_state *evt)
+{
+ if (evt->enabled)
+ adapter->pvid = evt->tag;
+ else
+ adapter->pvid = 0;
+}
+
static void be_async_grp5_evt_process(struct be_adapter *adapter,
u32 trailer, struct be_mcc_compl *evt)
{
@@ -134,6 +154,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
be_async_grp5_qos_speed_process(adapter,
(struct be_async_event_grp5_qos_link_speed *)evt);
break;
+ case ASYNC_EVENT_PVID_STATE:
+ be_async_grp5_pvid_state_process(adapter,
+ (struct be_async_event_grp5_pvid_state *)evt);
+ break;
default:
dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
break;
@@ -216,6 +240,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
int i, num, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ if (adapter->eeh_err)
+ return -EIO;
+
for (i = 0; i < mcc_timeout; i++) {
num = be_process_mcc(adapter, &status);
if (num)
@@ -245,6 +272,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
int msecs = 0;
u32 ready;
+ if (adapter->eeh_err) {
+ dev_err(&adapter->pdev->dev,
+ "Error detected in card.Cannot issue commands\n");
+ return -EIO;
+ }
+
do {
ready = ioread32(db);
if (ready == 0xffffffff) {
@@ -323,7 +356,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
- u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
+ u32 sem;
+
+ if (lancer_chip(adapter))
+ sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
+ else
+ sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
@@ -462,7 +500,8 @@ int be_cmd_fw_init(struct be_adapter *adapter)
u8 *wrb;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = (u8 *)wrb_from_mbox(adapter);
*wrb++ = 0xFF;
@@ -476,7 +515,7 @@ int be_cmd_fw_init(struct be_adapter *adapter)
status = be_mbox_notify_wait(adapter);
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -491,7 +530,8 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
if (adapter->eeh_err)
return -EIO;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = (u8 *)wrb_from_mbox(adapter);
*wrb++ = 0xFF;
@@ -505,7 +545,7 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
status = be_mbox_notify_wait(adapter);
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
int be_cmd_eq_create(struct be_adapter *adapter,
@@ -516,7 +556,8 @@ int be_cmd_eq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &eq->dma_mem;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -546,7 +587,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
eq->created = true;
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -558,7 +599,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -583,13 +625,13 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id)
+ u32 if_id, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_add *req;
@@ -610,6 +652,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
+ req->hdr.domain = domain;
req->if_id = cpu_to_le32(if_id);
memcpy(req->mac_address, mac_addr, ETH_ALEN);
@@ -625,7 +668,7 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_del *req;
@@ -646,6 +689,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
+ req->hdr.domain = dom;
req->if_id = cpu_to_le32(if_id);
req->pmac_id = cpu_to_le32(pmac_id);
@@ -667,7 +711,8 @@ int be_cmd_cq_create(struct be_adapter *adapter,
void *ctxt;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -680,16 +725,36 @@ int be_cmd_cq_create(struct be_adapter *adapter,
OPCODE_COMMON_CQ_CREATE, sizeof(*req));
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+ if (lancer_chip(adapter)) {
+ req->hdr.version = 2;
+ req->page_size = 1; /* 1 for 4K */
+ AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
+ coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
+ no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
+ __ilog2_u32(cq->len/256));
+ AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
+ ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
+ ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
+ coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
+ ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
+ __ilog2_u32(cq->len/256));
+ AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_be, solevent,
+ ctxt, sol_evts);
+ AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
+ }
- AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
- AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
- AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
- __ilog2_u32(cq->len/256));
- AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
- AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -701,7 +766,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
cq->created = true;
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -724,7 +789,8 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
void *ctxt;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -737,13 +803,27 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+ if (lancer_chip(adapter)) {
+ req->hdr.version = 1;
+ req->cq_id = cpu_to_le16(cq->id);
+
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
+ ctxt, cq->id);
+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
+ ctxt, 1);
+
+ } else {
+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+ }
- AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
- AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
- req->async_event_bitmap[0] |= 0x00000022;
+ req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -754,7 +834,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
mccq->id = le16_to_cpu(resp->id);
mccq->created = true;
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -769,7 +849,8 @@ int be_cmd_txq_create(struct be_adapter *adapter,
void *ctxt;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -781,6 +862,12 @@ int be_cmd_txq_create(struct be_adapter *adapter,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
sizeof(*req));
+ if (lancer_chip(adapter)) {
+ req->hdr.version = 1;
+ AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
+ adapter->if_handle);
+ }
+
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
req->ulp_num = BE_ULP1_NUM;
req->type = BE_ETH_TX_RING_TYPE_STANDARD;
@@ -801,7 +888,7 @@ int be_cmd_txq_create(struct be_adapter *adapter,
txq->created = true;
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -816,7 +903,8 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -843,7 +931,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
*rss_id = resp->rss_id;
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -862,7 +950,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
if (adapter->eeh_err)
return -EIO;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -899,7 +988,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
status = be_mbox_notify_wait(adapter);
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -915,7 +1004,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
struct be_cmd_req_if_create *req;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -941,12 +1031,12 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
*pmac_id = le32_to_cpu(resp->pmac_id);
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
/* Uses mbox */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
+int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_destroy *req;
@@ -955,7 +1045,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
if (adapter->eeh_err)
return -EIO;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -966,11 +1057,12 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
+ req->hdr.domain = domain;
req->interface_id = cpu_to_le32(interface_id);
status = be_mbox_notify_wait(adapter);
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -986,6 +1078,9 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_sge *sge;
int status = 0;
+ if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
+ be_cmd_get_die_temperature(adapter);
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -1006,7 +1101,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
sge->len = cpu_to_le32(nonemb_cmd->size);
be_mcc_notify(adapter);
- adapter->stats_ioctl_sent = true;
+ adapter->stats_cmd_sent = true;
err:
spin_unlock_bh(&adapter->mcc_lock);
@@ -1053,6 +1148,44 @@ err:
return status;
}
+/* Uses synchronous mcc */
+int be_cmd_get_die_temperature(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_cntl_addnl_attribs *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_cntl_addnl_attribs *resp =
+ embedded_payload(wrb);
+ adapter->drv_stats.be_on_die_temperature =
+ resp->on_die_temperature;
+ }
+ /* If IOCTL fails once, do not bother issuing it again */
+ else
+ be_get_temp_freq = 0;
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
/* Uses Mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
@@ -1060,7 +1193,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
struct be_cmd_req_get_fw_version *req;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -1077,7 +1211,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -1235,7 +1369,7 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
i = 0;
netdev_for_each_mc_addr(ha, netdev)
- memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
+ memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
} else {
req->promiscuous = 1;
}
@@ -1322,7 +1456,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
struct be_cmd_req_query_fw_cfg *req;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -1341,7 +1476,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
*caps = le32_to_cpu(resp->function_caps);
}
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -1352,7 +1487,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
struct be_cmd_req_hdr *req;
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -1365,7 +1501,7 @@ int be_cmd_reset_function(struct be_adapter *adapter)
status = be_mbox_notify_wait(adapter);
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -1376,7 +1512,8 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
u32 myhash[10];
int status;
- spin_lock(&adapter->mbox_lock);
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@@ -1396,7 +1533,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
status = be_mbox_notify_wait(adapter);
- spin_unlock(&adapter->mbox_lock);
+ mutex_unlock(&adapter->mbox_lock);
return status;
}
@@ -1732,6 +1869,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = nonemb_cmd->va;
sge = nonembedded_sgl(wrb);
@@ -1747,6 +1888,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
+err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1809,8 +1951,8 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
OPCODE_COMMON_SET_QOS, sizeof(*req));
req->hdr.domain = domain;
- req->valid_bits = BE_QOS_BITS_NIC;
- req->max_bps_nic = bps;
+ req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
+ req->max_bps_nic = cpu_to_le32(bps);
status = be_mcc_notify_wait(adapter);
@@ -1818,3 +1960,96 @@ err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
+
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_cntl_attribs *req;
+ struct be_cmd_resp_cntl_attribs *resp;
+ struct be_sge *sge;
+ int status;
+ int payload_len = max(sizeof(*req), sizeof(*resp));
+ struct mgmt_controller_attrib *attribs;
+ struct be_dma_mem attribs_cmd;
+
+ memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
+ attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
+ attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
+ &attribs_cmd.dma);
+ if (!attribs_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure\n");
+ return -ENOMEM;
+ }
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = attribs_cmd.va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
+ sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
+ sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(attribs_cmd.size);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ attribs = (struct mgmt_controller_attrib *)(attribs_cmd.va +
+ sizeof(struct be_cmd_resp_hdr));
+ adapter->hba_port_num = attribs->hba_attribs.phy_port;
+ }
+
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
+ attribs_cmd.dma);
+ return status;
+}
+
+/* Uses mbox */
+int be_cmd_check_native_mode(struct be_adapter *adapter)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_func_cap *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
+
+ req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
+ CAPABILITY_BE3_NATIVE_ERX_API);
+ req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
+ adapter->be3_native = le32_to_cpu(resp->cap_flags) &
+ CAPABILITY_BE3_NATIVE_ERX_API;
+ }
+err:
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 8469ff061f30..4f254cfaabe2 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2010 ServerEngines
+ * Copyright (C) 2005 - 2011 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
- * linux-drivers@serverengines.com
+ * linux-drivers@emulex.com
*
- * ServerEngines
- * 209 N. Fair Oaks Ave
- * Sunnyvale, CA 94085
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
*/
/*
@@ -88,6 +88,7 @@ struct be_mcc_compl {
#define ASYNC_EVENT_CODE_GRP_5 0x5
#define ASYNC_EVENT_QOS_SPEED 0x1
#define ASYNC_EVENT_COS_PRIORITY 0x2
+#define ASYNC_EVENT_PVID_STATE 0x3
struct be_async_event_trailer {
u32 code;
};
@@ -134,6 +135,18 @@ struct be_async_event_grp5_cos_priority {
struct be_async_event_trailer trailer;
} __packed;
+/* When the event code of an async trailer is GRP5 and event type is
+ * PVID state, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_pvid_state {
+ u8 enabled;
+ u8 rsvd0;
+ u16 tag;
+ u32 event_tag;
+ u32 rsvd1;
+ struct be_async_event_trailer trailer;
+} __packed;
+
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_compl compl;
@@ -156,6 +169,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_SET_QOS 28
#define OPCODE_COMMON_MCC_CREATE_EXT 90
#define OPCODE_COMMON_SEEPROM_READ 30
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
#define OPCODE_COMMON_NTWK_RX_FILTER 34
#define OPCODE_COMMON_GET_FW_VERSION 35
#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@@ -176,6 +190,8 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
#define OPCODE_COMMON_GET_PHY_DETAILS 102
+#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
#define OPCODE_ETH_RSS_CONFIG 1
#define OPCODE_ETH_ACPI_CONFIG 2
@@ -309,7 +325,7 @@ struct be_cmd_req_pmac_del {
/******************** Create CQ ***************************/
/* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field */
-struct amap_cq_context {
+struct amap_cq_context_be {
u8 cidx[11]; /* dword 0*/
u8 rsvd0; /* dword 0*/
u8 coalescwm[2]; /* dword 0*/
@@ -332,14 +348,32 @@ struct amap_cq_context {
u8 rsvd5[32]; /* dword 3*/
} __packed;
+struct amap_cq_context_lancer {
+ u8 rsvd0[12]; /* dword 0*/
+ u8 coalescwm[2]; /* dword 0*/
+ u8 nodelay; /* dword 0*/
+ u8 rsvd1[12]; /* dword 0*/
+ u8 count[2]; /* dword 0*/
+ u8 valid; /* dword 0*/
+ u8 rsvd2; /* dword 0*/
+ u8 eventable; /* dword 0*/
+ u8 eqid[16]; /* dword 1*/
+ u8 rsvd3[15]; /* dword 1*/
+ u8 armed; /* dword 1*/
+ u8 rsvd4[32]; /* dword 2*/
+ u8 rsvd5[32]; /* dword 3*/
+} __packed;
+
struct be_cmd_req_cq_create {
struct be_cmd_req_hdr hdr;
u16 num_pages;
- u16 rsvd0;
- u8 context[sizeof(struct amap_cq_context) / 8];
+ u8 page_size;
+ u8 rsvd0;
+ u8 context[sizeof(struct amap_cq_context_be) / 8];
struct phys_addr pages[8];
} __packed;
+
struct be_cmd_resp_cq_create {
struct be_cmd_resp_hdr hdr;
u16 cq_id;
@@ -349,7 +383,7 @@ struct be_cmd_resp_cq_create {
/******************** Create MCCQ ***************************/
/* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field */
-struct amap_mcc_context {
+struct amap_mcc_context_be {
u8 con_index[14];
u8 rsvd0[2];
u8 ring_size[4];
@@ -364,12 +398,23 @@ struct amap_mcc_context {
u8 rsvd2[32];
} __packed;
+struct amap_mcc_context_lancer {
+ u8 async_cq_id[16];
+ u8 ring_size[4];
+ u8 rsvd0[12];
+ u8 rsvd1[31];
+ u8 valid;
+ u8 async_cq_valid[1];
+ u8 rsvd2[31];
+ u8 rsvd3[32];
+} __packed;
+
struct be_cmd_req_mcc_create {
struct be_cmd_req_hdr hdr;
u16 num_pages;
- u16 rsvd0;
+ u16 cq_id;
u32 async_event_bitmap[1];
- u8 context[sizeof(struct amap_mcc_context) / 8];
+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
struct phys_addr pages[8];
} __packed;
@@ -386,7 +431,7 @@ struct be_cmd_resp_mcc_create {
/* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field */
struct amap_tx_context {
- u8 rsvd0[16]; /* dword 0 */
+ u8 if_id[16]; /* dword 0 */
u8 tx_ring_size[4]; /* dword 0 */
u8 rsvd1[26]; /* dword 0 */
u8 pci_func_id[8]; /* dword 1 */
@@ -474,7 +519,8 @@ enum be_if_flags {
BE_IF_FLAGS_VLAN = 0x100,
BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
- BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800
+ BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
+ BE_IF_FLAGS_MULTICAST = 0x1000
};
/* An RX interface is an object with one or more MAC addresses and
@@ -590,7 +636,10 @@ struct be_rxf_stats {
u32 rx_drops_invalid_ring; /* dword 145*/
u32 forwarded_packets; /* dword 146*/
u32 rx_drops_mtu; /* dword 147*/
- u32 rsvd0[15];
+ u32 rsvd0[7];
+ u32 port0_jabber_events;
+ u32 port1_jabber_events;
+ u32 rsvd1[6];
};
struct be_erx_stats {
@@ -601,10 +650,16 @@ struct be_erx_stats {
u32 debug_pmem_pbuf_dealloc; /* dword 47*/
};
+struct be_pmem_stats {
+ u32 eth_red_drops;
+ u32 rsvd[4];
+};
+
struct be_hw_stats {
struct be_rxf_stats rxf;
u32 rsvd[48];
struct be_erx_stats erx;
+ struct be_pmem_stats pmem;
};
struct be_cmd_req_get_stats {
@@ -617,6 +672,20 @@ struct be_cmd_resp_get_stats {
struct be_hw_stats hw_stats;
};
+struct be_cmd_req_get_cntl_addnl_attribs {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct be_cmd_resp_get_cntl_addnl_attribs {
+ struct be_cmd_resp_hdr hdr;
+ u16 ipl_file_number;
+ u8 ipl_file_version;
+ u8 rsvd0;
+ u8 on_die_temperature; /* in degrees centigrade */
+ u8 rsvd1[3];
+};
+
struct be_cmd_req_vlan_config {
struct be_cmd_req_hdr hdr;
u8 interface_id;
@@ -964,17 +1033,47 @@ struct be_cmd_resp_set_qos {
u32 rsvd;
};
+/*********************** Controller Attributes ***********************/
+struct be_cmd_req_cntl_attribs {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_cntl_attribs {
+ struct be_cmd_resp_hdr hdr;
+ struct mgmt_controller_attrib attribs;
+};
+
+/*********************** Set driver function ***********************/
+#define CAPABILITY_SW_TIMESTAMPS 2
+#define CAPABILITY_BE3_NATIVE_ERX_API 4
+
+struct be_cmd_req_set_func_cap {
+ struct be_cmd_req_hdr hdr;
+ u32 valid_cap_flags;
+ u32 cap_flags;
+ u8 rsvd[212];
+};
+
+struct be_cmd_resp_set_func_cap {
+ struct be_cmd_resp_hdr hdr;
+ u32 valid_cap_flags;
+ u32 cap_flags;
+ u8 rsvd[212];
+};
+
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
u8 type, bool permanent, u32 if_handle);
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
+ u32 if_id, u32 *pmac_id, u32 domain);
+extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
+ u32 pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u8 *mac, bool pmac_invalid,
u32 *if_handle, u32 *pmac_id, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
+extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+ u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
extern int be_cmd_cq_create(struct be_adapter *adapter,
@@ -1046,4 +1145,7 @@ extern int be_cmd_get_phy_info(struct be_adapter *adapter,
struct be_dma_mem *cmd);
extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
extern void be_detect_dump_ue(struct be_adapter *adapter);
+extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
+extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
+extern int be_cmd_check_native_mode(struct be_adapter *adapter);
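(The "pseudo amap definition" comments above rely on a convention worth spelling out: every bit of a hardware context is declared as one byte of a C struct, so offsetof() gives a field's bit offset and sizeof() its bit width. A hedged sketch of helpers built on that idea; these macro names are illustrative, not the driver's:)

#include <stddef.h>

/* One byte in the pseudo struct == one bit in the real context. */
#define AMAP_BIT_OFFSET(_struct, field)	offsetof(_struct, field)
#define AMAP_BIT_WIDTH(_struct, field)	sizeof(((_struct *)0)->field)

/* e.g. amap_cq_context_be declares "u8 eqid[16]", so the field is 16 bits
 * wide; a set-bits helper masks/shifts the value into dword[bit_offset / 32]
 * of the real 32-bit context words. */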
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 0f46366ecc48..aac248fbd18b 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2010 ServerEngines
+ * Copyright (C) 2005 - 2011 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
- * linux-drivers@serverengines.com
+ * linux-drivers@emulex.com
*
- * ServerEngines
- * 209 N. Fair Oaks Ave
- * Sunnyvale, CA 94085
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
*/
#include "be.h"
@@ -26,7 +26,8 @@ struct be_ethtool_stat {
int offset;
};
-enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
+enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
+ PMEMSTAT, DRVSTAT};
#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
offsetof(_struct, field)
#define NETSTAT_INFO(field) #field, NETSTAT,\
@@ -43,6 +44,11 @@ enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
field)
#define ERXSTAT_INFO(field) #field, ERXSTAT,\
FIELDINFO(struct be_erx_stats, field)
+#define PMEMSTAT_INFO(field) #field, PMEMSTAT,\
+ FIELDINFO(struct be_pmem_stats, field)
+#define DRVSTAT_INFO(field) #field, DRVSTAT,\
+ FIELDINFO(struct be_drv_stats, \
+ field)
static const struct be_ethtool_stat et_stats[] = {
{NETSTAT_INFO(rx_packets)},
@@ -99,7 +105,11 @@ static const struct be_ethtool_stat et_stats[] = {
{MISCSTAT_INFO(rx_drops_too_many_frags)},
{MISCSTAT_INFO(rx_drops_invalid_ring)},
{MISCSTAT_INFO(forwarded_packets)},
- {MISCSTAT_INFO(rx_drops_mtu)}
+ {MISCSTAT_INFO(rx_drops_mtu)},
+ {MISCSTAT_INFO(port0_jabber_events)},
+ {MISCSTAT_INFO(port1_jabber_events)},
+ {PMEMSTAT_INFO(eth_red_drops)},
+ {DRVSTAT_INFO(be_on_die_temperature)}
};
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
@@ -121,7 +131,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
"MAC Loopback test",
"PHY Loopback test",
"External Loopback test",
- "DDR DMA test"
+ "DDR DMA test",
"Link test"
};
@@ -276,6 +286,12 @@ be_get_ethtool_stats(struct net_device *netdev,
case MISCSTAT:
p = &hw_stats->rxf;
break;
+ case PMEMSTAT:
+ p = &hw_stats->pmem;
+ break;
+ case DRVSTAT:
+ p = &adapter->drv_stats;
+ break;
}
p = (u8 *)p + et_stats[i].offset;
@@ -376,8 +392,9 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size,
- &phy_cmd.dma);
+ phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ phy_cmd.size, &phy_cmd.dma,
+ GFP_KERNEL);
if (!phy_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
return -ENOMEM;
@@ -416,8 +433,8 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
adapter->port_type = ecmd->port;
adapter->transceiver = ecmd->transceiver;
adapter->autoneg = ecmd->autoneg;
- pci_free_consistent(adapter->pdev, phy_cmd.size,
- phy_cmd.va, phy_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
+ phy_cmd.dma);
} else {
ecmd->speed = adapter->link_speed;
ecmd->port = adapter->port_type;
@@ -496,7 +513,7 @@ be_phys_id(struct net_device *netdev, u32 data)
int status;
u32 cur;
- be_cmd_get_beacon_state(adapter, adapter->port_num, &cur);
+ be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
if (cur == BEACON_STATE_ENABLED)
return 0;
@@ -504,23 +521,34 @@ be_phys_id(struct net_device *netdev, u32 data)
if (data < 2)
data = 2;
- status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
BEACON_STATE_ENABLED);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(data*HZ);
- status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
BEACON_STATE_DISABLED);
return status;
}
+static bool
+be_is_wol_supported(struct be_adapter *adapter)
+{
+ if (!be_physfn(adapter))
+ return false;
+ else
+ return true;
+}
+
static void
be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
- wol->supported = WAKE_MAGIC;
+ if (be_is_wol_supported(adapter))
+ wol->supported = WAKE_MAGIC;
+
if (adapter->wol)
wol->wolopts = WAKE_MAGIC;
else
@@ -536,7 +564,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- if (wol->wolopts & WAKE_MAGIC)
+ if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
adapter->wol = true;
else
adapter->wol = false;
@@ -549,11 +577,13 @@ be_test_ddr_dma(struct be_adapter *adapter)
{
int ret, i;
struct be_dma_mem ddrdma_cmd;
- u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
+ static const u64 pattern[2] = {
+ 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
+ };
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
- ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
- &ddrdma_cmd.dma);
+ ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
+ &ddrdma_cmd.dma, GFP_KERNEL);
if (!ddrdma_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
return -ENOMEM;
@@ -567,20 +597,20 @@ be_test_ddr_dma(struct be_adapter *adapter)
}
err:
- pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
- ddrdma_cmd.va, ddrdma_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
+ ddrdma_cmd.dma);
return ret;
}
static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
u64 *status)
{
- be_cmd_set_loopback(adapter, adapter->port_num,
+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
loopback_type, 1);
- *status = be_cmd_loopback_test(adapter, adapter->port_num,
+ *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
loopback_type, 1500,
2, 0xabc);
- be_cmd_set_loopback(adapter, adapter->port_num,
+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
BE_NO_LOOPBACK, 1);
return *status;
}
@@ -619,7 +649,8 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
&qos_link_speed) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = -1;
- } else if (mac_speed) {
+ } else if (!mac_speed) {
+ test->flags |= ETH_TEST_FL_FAILED;
data[4] = 1;
}
}
@@ -660,8 +691,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
- eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
- &eeprom_cmd.dma);
+ eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
+ &eeprom_cmd.dma, GFP_KERNEL);
if (!eeprom_cmd.va) {
dev_err(&adapter->pdev->dev,
@@ -675,8 +706,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
}
- pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
- eeprom_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
+ eeprom_cmd.dma);
return status;
}
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index a2ec5df0d733..d4344a06090b 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2010 ServerEngines
+ * Copyright (C) 2005 - 2011 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
- * linux-drivers@serverengines.com
+ * linux-drivers@emulex.com
*
- * ServerEngines
- * 209 N. Fair Oaks Ave
- * Sunnyvale, CA 94085
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
*/
/********* Mailbox door bell *************/
@@ -32,16 +32,30 @@
#define MPU_EP_CONTROL 0
/********** MPU semaphore ******************/
-#define MPU_EP_SEMAPHORE_OFFSET 0xac
-#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
-#define EP_SEMAPHORE_POST_ERR_MASK 0x1
-#define EP_SEMAPHORE_POST_ERR_SHIFT 31
+#define MPU_EP_SEMAPHORE_OFFSET 0xac
+#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
+#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
+#define EP_SEMAPHORE_POST_ERR_MASK 0x1
+#define EP_SEMAPHORE_POST_ERR_SHIFT 31
+
/* MPU semaphore POST stage values */
#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */
#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
+
+/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
+#define SLIPORT_STATUS_OFFSET 0x404
+#define SLIPORT_CONTROL_OFFSET 0x408
+
+#define SLIPORT_STATUS_ERR_MASK 0x80000000
+#define SLIPORT_STATUS_RN_MASK 0x01000000
+#define SLIPORT_STATUS_RDY_MASK 0x00800000
+
+
+#define SLI_PORT_CONTROL_IP_MASK 0x08000000
+
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
@@ -66,6 +80,28 @@
#define PCICFG_UE_STATUS_LOW_MASK 0xA8
#define PCICFG_UE_STATUS_HI_MASK 0xAC
+/******** SLI_INTF ***********************/
+#define SLI_INTF_REG_OFFSET 0x58
+#define SLI_INTF_VALID_MASK 0xE0000000
+#define SLI_INTF_VALID 0xC0000000
+#define SLI_INTF_HINT2_MASK 0x1F000000
+#define SLI_INTF_HINT2_SHIFT 24
+#define SLI_INTF_HINT1_MASK 0x00FF0000
+#define SLI_INTF_HINT1_SHIFT 16
+#define SLI_INTF_FAMILY_MASK 0x00000F00
+#define SLI_INTF_FAMILY_SHIFT 8
+#define SLI_INTF_IF_TYPE_MASK 0x0000F000
+#define SLI_INTF_IF_TYPE_SHIFT 12
+#define SLI_INTF_REV_MASK 0x000000F0
+#define SLI_INTF_REV_SHIFT 4
+#define SLI_INTF_FT_MASK 0x00000001
+
+
+/* SLI family */
+#define BE_SLI_FAMILY 0x0
+#define LANCER_A0_SLI_FAMILY 0xA
+
+
/********* ISR0 Register offset **********/
#define CEV_ISR0_OFFSET 0xC18
#define CEV_ISR_SIZE 4
@@ -73,6 +109,9 @@
/********* Event Q door bell *************/
#define DB_EQ_OFFSET DB_CQ_OFFSET
#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
+#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
+#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
+
/* Clear the interrupt for this eq */
#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
/* Must be 1 */
@@ -85,6 +124,10 @@
/********* Compl Q door bell *************/
#define DB_CQ_OFFSET 0x120
#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
+#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
+ placing at 11-15 */
+
/* Number of event entries processed */
#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
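(The extended ring-id masks above let queue ids wider than the legacy field fit in the doorbell word: bits 9-13 of an EQ id are shifted up by 2 so they land at bits 11-15, and the CQ doorbell does the same with its own mask and a shift of 1. A small standalone sketch of the EQ case; the function name is assumed:)

#include <stdint.h>

#define DB_EQ_RING_ID_MASK		0x1FF	/* qid bits 0-8 */
#define DB_EQ_RING_ID_EXT_MASK		0x3e00	/* qid bits 9-13 */
#define DB_EQ_RING_ID_EXT_MASK_SHIFT	2	/* 9-13 -> 11-15 */

static uint32_t eq_doorbell_qid_bits(uint16_t qid)
{
	uint32_t val = qid & DB_EQ_RING_ID_MASK;

	val |= (qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT;
	return val;	/* e.g. qid 0x300 -> 0x100 | (0x200 << 2) = 0x900 */
}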
@@ -258,10 +301,10 @@ struct be_eth_rx_d {
/* RX Compl Queue Descriptor */
-/* Pseudo amap definition for eth_rx_compl in which each bit of the
- * actual structure is defined as a byte: used to calculate
+/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
+ * each bit of the actual structure is defined as a byte: used to calculate
* offset/shift/mask of each field */
-struct amap_eth_rx_compl {
+struct amap_eth_rx_compl_v0 {
u8 vlan_tag[16]; /* dword 0 */
u8 pktsize[14]; /* dword 0 */
u8 port; /* dword 0 */
@@ -292,10 +335,92 @@ struct amap_eth_rx_compl {
u8 rsshash[32]; /* dword 3 */
} __packed;
+/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
+ * each bit of the actual structure is defined as a byte: used to calculate
+ * offset/shift/mask of each field */
+struct amap_eth_rx_compl_v1 {
+ u8 vlan_tag[16]; /* dword 0 */
+ u8 pktsize[14]; /* dword 0 */
+ u8 vtp; /* dword 0 */
+ u8 ip_opt; /* dword 0 */
+ u8 err; /* dword 1 */
+ u8 rsshp; /* dword 1 */
+ u8 ipf; /* dword 1 */
+ u8 tcpf; /* dword 1 */
+ u8 udpf; /* dword 1 */
+ u8 ipcksm; /* dword 1 */
+ u8 l4_cksm; /* dword 1 */
+ u8 ip_version; /* dword 1 */
+ u8 macdst[7]; /* dword 1 */
+ u8 rsvd0; /* dword 1 */
+ u8 fragndx[10]; /* dword 1 */
+ u8 ct[2]; /* dword 1 */
+ u8 sw; /* dword 1 */
+ u8 numfrags[3]; /* dword 1 */
+ u8 rss_flush; /* dword 2 */
+ u8 cast_enc[2]; /* dword 2 */
+ u8 vtm; /* dword 2 */
+ u8 rss_bank; /* dword 2 */
+ u8 port[2]; /* dword 2 */
+ u8 vntagp; /* dword 2 */
+ u8 header_len[8]; /* dword 2 */
+ u8 header_split[2]; /* dword 2 */
+ u8 rsvd1[13]; /* dword 2 */
+ u8 valid; /* dword 2 */
+ u8 rsshash[32]; /* dword 3 */
+} __packed;
+
struct be_eth_rx_compl {
u32 dw[4];
};
+struct mgmt_hba_attribs {
+ u8 flashrom_version_string[32];
+ u8 manufacturer_name[32];
+ u32 supported_modes;
+ u32 rsvd0[3];
+ u8 ncsi_ver_string[12];
+ u32 default_extended_timeout;
+ u8 controller_model_number[32];
+ u8 controller_description[64];
+ u8 controller_serial_number[32];
+ u8 ip_version_string[32];
+ u8 firmware_version_string[32];
+ u8 bios_version_string[32];
+ u8 redboot_version_string[32];
+ u8 driver_version_string[32];
+ u8 fw_on_flash_version_string[32];
+ u32 functionalities_supported;
+ u16 max_cdblength;
+ u8 asic_revision;
+ u8 generational_guid[16];
+ u8 hba_port_count;
+ u16 default_link_down_timeout;
+ u8 iscsi_ver_min_max;
+ u8 multifunction_device;
+ u8 cache_valid;
+ u8 hba_status;
+ u8 max_domains_supported;
+ u8 phy_port;
+ u32 firmware_post_status;
+ u32 hba_mtu[8];
+ u32 rsvd1[4];
+};
+
+struct mgmt_controller_attrib {
+ struct mgmt_hba_attribs hba_attribs;
+ u16 pci_vendor_id;
+ u16 pci_device_id;
+ u16 pci_sub_vendor_id;
+ u16 pci_sub_system_id;
+ u8 pci_bus_number;
+ u8 pci_device_number;
+ u8 pci_function_number;
+ u8 interface_type;
+ u64 unique_identifier;
+ u32 rsvd0[5];
+};
+
struct controller_id {
u32 vendor;
u32 device;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 93354eee2cfd..a71163f1e34b 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2010 ServerEngines
+ * Copyright (C) 2005 - 2011 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -8,11 +8,11 @@
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
- * linux-drivers@serverengines.com
+ * linux-drivers@emulex.com
*
- * ServerEngines
- * 209 N. Fair Oaks Ave
- * Sunnyvale, CA 94085
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
*/
#include "be.h"
@@ -25,9 +25,9 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
-static unsigned int rx_frag_size = 2048;
+static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
-module_param(rx_frag_size, uint, S_IRUGO);
+module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
@@ -41,6 +41,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -124,8 +125,8 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
if (mem->va)
- pci_free_consistent(adapter->pdev, mem->size,
- mem->va, mem->dma);
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
@@ -137,7 +138,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
if (!mem->va)
return -1;
memset(mem->va, 0, mem->size);
@@ -188,6 +190,8 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
+ DB_EQ_RING_ID_EXT_MASK_SHIFT);
if (adapter->eeh_err)
return;
@@ -205,6 +209,8 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
u32 val = 0;
val |= qid & DB_CQ_RING_ID_MASK;
+ val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
+ DB_CQ_RING_ID_EXT_MASK_SHIFT);
if (adapter->eeh_err)
return;
@@ -230,12 +236,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (!be_physfn(adapter))
goto netdev_addr;
- status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
+ status = be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id, 0);
if (status)
return status;
status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle, &adapter->pmac_id);
+ adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
if (!status)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -307,11 +314,9 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
if (adapter->link_up != link_up) {
adapter->link_speed = -1;
if (link_up) {
- netif_start_queue(netdev);
netif_carrier_on(netdev);
printk(KERN_INFO "%s: Link up\n", netdev->name);
} else {
- netif_stop_queue(netdev);
netif_carrier_off(netdev);
printk(KERN_INFO "%s: Link down\n", netdev->name);
}
@@ -404,7 +409,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
}
/* Determine number of WRB entries needed to xmit data in an skb */
-static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
+static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
+ bool *dummy)
{
int cnt = (skb->len > skb->data_len);
@@ -412,12 +418,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
/* to account for hdr wrb */
cnt++;
- if (cnt & 1) {
+ if (lancer_chip(adapter) || !(cnt & 1)) {
+ *dummy = false;
+ } else {
/* add a dummy to make it an even num */
cnt++;
*dummy = true;
- } else
- *dummy = false;
+ }
BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
return cnt;
}
@@ -443,8 +450,18 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
hdr, skb_shinfo(skb)->gso_size);
- if (skb_is_gso_v6(skb))
+ if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
+ if (lancer_chip(adapter) && adapter->sli_family ==
+ LANCER_A0_SLI_FAMILY) {
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
+ if (is_tcp_pkt(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+ tcpcs, hdr, 1);
+ else if (is_udp_pkt(skb))
+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+ udpcs, hdr, 1);
+ }
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (is_tcp_pkt(skb))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -469,7 +486,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
-static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
bool unmap_single)
{
dma_addr_t dma;
@@ -479,11 +496,10 @@ static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
if (wrb->frag_len) {
if (unmap_single)
- pci_unmap_single(pdev, dma, wrb->frag_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(dev, dma, wrb->frag_len,
+ DMA_TO_DEVICE);
else
- pci_unmap_page(pdev, dma, wrb->frag_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
}
}
@@ -492,7 +508,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
{
dma_addr_t busaddr;
int i, copied = 0;
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = &adapter->pdev->dev;
struct sk_buff *first_skb = skb;
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_wrb *wrb;
@@ -506,9 +522,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
if (skb->len > skb->data_len) {
int len = skb_headlen(skb);
- busaddr = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, busaddr))
+ busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, busaddr))
goto dma_err;
map_single = true;
wrb = queue_head_node(txq);
@@ -521,10 +536,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *frag =
&skb_shinfo(skb)->frags[i];
- busaddr = pci_map_page(pdev, frag->page,
- frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, busaddr))
+ busaddr = dma_map_page(dev, frag->page, frag->page_offset,
+ frag->size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, busaddr))
goto dma_err;
wrb = queue_head_node(txq);
wrb_fill(wrb, busaddr, frag->size);
@@ -548,7 +562,7 @@ dma_err:
txq->head = map_head;
while (copied) {
wrb = queue_head_node(txq);
- unmap_tx_frag(pdev, wrb, map_single);
+ unmap_tx_frag(dev, wrb, map_single);
map_single = false;
copied -= wrb->frag_len;
queue_head_inc(txq);
@@ -566,7 +580,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
u32 start = txq->head;
bool dummy_wrb, stopped = false;
- wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
+ wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
if (copied) {
@@ -728,11 +742,11 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
status = be_cmd_pmac_del(adapter,
adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id);
+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
status = be_cmd_pmac_add(adapter, mac,
adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id);
+ &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
if (status)
dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
@@ -807,7 +821,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
rate = 10000;
adapter->vf_cfg[vf].vf_tx_rate = rate;
- status = be_cmd_set_qos(adapter, rate / 10, vf);
+ status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
if (status)
dev_info(&adapter->pdev->dev,
@@ -837,28 +851,26 @@ static void be_rx_rate_update(struct be_rx_obj *rxo)
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
- u32 pktsize, u16 numfrags, u8 pkt_type)
+ struct be_rx_compl_info *rxcp)
{
struct be_rx_stats *stats = &rxo->stats;
stats->rx_compl++;
- stats->rx_frags += numfrags;
- stats->rx_bytes += pktsize;
+ stats->rx_frags += rxcp->num_rcvd;
+ stats->rx_bytes += rxcp->pkt_size;
stats->rx_pkts++;
- if (pkt_type == BE_MULTICAST_PACKET)
+ if (rxcp->pkt_type == BE_MULTICAST_PACKET)
stats->rx_mcast_pkts++;
+ if (rxcp->err)
+ stats->rxcp_err++;
}
-static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
+static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
- u8 l4_cksm, ipv6, ipcksm;
-
- l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
- ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
- ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
-
- /* Ignore ipcksm for ipv6 pkts */
- return l4_cksm && (ipcksm || ipv6);
+ /* L4 checksum is not reliable for non TCP/UDP packets.
+ * Also ignore ipcksm for ipv6 pkts */
+ return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
+ (rxcp->ip_csum || rxcp->ipv6);
}
static struct be_rx_page_info *
@@ -873,8 +885,9 @@ get_rx_page_info(struct be_adapter *adapter,
BUG_ON(!rx_page_info->page);
if (rx_page_info->last_page_user) {
- pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
- adapter->big_page_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_addr(rx_page_info, bus),
+ adapter->big_page_size, DMA_FROM_DEVICE);
rx_page_info->last_page_user = false;
}
@@ -885,20 +898,17 @@ get_rx_page_info(struct be_adapter *adapter,
/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
struct be_rx_obj *rxo,
- struct be_eth_rx_compl *rxcp)
+ struct be_rx_compl_info *rxcp)
{
struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
- u16 rxq_idx, i, num_rcvd;
-
- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
+ u16 i, num_rcvd = rxcp->num_rcvd;
for (i = 0; i < num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxo, rxq_idx);
+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
- index_inc(&rxq_idx, rxq->len);
+ index_inc(&rxcp->rxq_idx, rxq->len);
}
}
@@ -907,30 +917,23 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
* indicated by rxcp.
*/
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
- struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
- u16 num_rcvd)
+ struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
struct be_queue_info *rxq = &rxo->q;
struct be_rx_page_info *page_info;
- u16 rxq_idx, i, j;
- u32 pktsize, hdr_len, curr_frag_len, size;
+ u16 i, j;
+ u16 hdr_len, curr_frag_len, remaining;
u8 *start;
- u8 pkt_type;
-
- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
- pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
- pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
-
- page_info = get_rx_page_info(adapter, rxo, rxq_idx);
+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
start = page_address(page_info->page) + page_info->page_offset;
prefetch(start);
/* Copy data in the first descriptor of this completion */
- curr_frag_len = min(pktsize, rx_frag_size);
+ curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
/* Copy the header portion into skb_data */
- hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
+ hdr_len = min(BE_HDR_LEN, curr_frag_len);
memcpy(skb->data, start, hdr_len);
skb->len = curr_frag_len;
if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
@@ -949,19 +952,17 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
}
page_info->page = NULL;
- if (pktsize <= rx_frag_size) {
- BUG_ON(num_rcvd != 1);
- goto done;
+ if (rxcp->pkt_size <= rx_frag_size) {
+ BUG_ON(rxcp->num_rcvd != 1);
+ return;
}
/* More frags present for this completion */
- size = pktsize;
- for (i = 1, j = 0; i < num_rcvd; i++) {
- size -= curr_frag_len;
- index_inc(&rxq_idx, rxq->len);
- page_info = get_rx_page_info(adapter, rxo, rxq_idx);
-
- curr_frag_len = min(size, rx_frag_size);
+ index_inc(&rxcp->rxq_idx, rxq->len);
+ remaining = rxcp->pkt_size - curr_frag_len;
+ for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
+ curr_frag_len = min(remaining, rx_frag_size);
/* Coalesce all frags from the same physical page in one slot */
if (page_info->page_offset == 0) {
@@ -980,28 +981,19 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
+ remaining -= curr_frag_len;
+ index_inc(&rxcp->rxq_idx, rxq->len);
page_info->page = NULL;
}
BUG_ON(j > MAX_SKB_FRAGS);
-
-done:
- be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
struct be_rx_obj *rxo,
- struct be_eth_rx_compl *rxcp)
+ struct be_rx_compl_info *rxcp)
{
struct sk_buff *skb;
- u32 vlanf, vid;
- u16 num_rcvd;
- u8 vtm;
-
- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
- /* Is it a flush compl that has no data */
- if (unlikely(num_rcvd == 0))
- return;
skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
if (unlikely(!skb)) {
@@ -1011,7 +1003,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
return;
}
- skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
+ skb_fill_rx_data(adapter, rxo, skb, rxcp);
if (likely(adapter->rx_csum && csum_passed(rxcp)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1021,22 +1013,12 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, adapter->netdev);
- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
-
- /* vlanf could be wrongly set in some cards.
- * ignore if vtm is not set */
- if ((adapter->function_mode & 0x400) && !vtm)
- vlanf = 0;
-
- if (unlikely(vlanf)) {
+ if (unlikely(rxcp->vlanf)) {
if (!adapter->vlan_grp || adapter->vlans_added == 0) {
kfree_skb(skb);
return;
}
- vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
- vid = swab16(vid);
- vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
+ vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
} else {
netif_receive_skb(skb);
}
@@ -1045,32 +1027,14 @@ static void be_rx_compl_process(struct be_adapter *adapter,
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
struct be_rx_obj *rxo,
- struct be_eth_rx_compl *rxcp)
+ struct be_rx_compl_info *rxcp)
{
struct be_rx_page_info *page_info;
struct sk_buff *skb = NULL;
struct be_queue_info *rxq = &rxo->q;
struct be_eq_obj *eq_obj = &rxo->rx_eq;
- u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
- u16 i, rxq_idx = 0, vid, j;
- u8 vtm;
- u8 pkt_type;
-
- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
- /* Is it a flush compl that has no data */
- if (unlikely(num_rcvd == 0))
- return;
-
- pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
- pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
-
- /* vlanf could be wrongly set in some cards.
- * ignore if vtm is not set */
- if ((adapter->function_mode & 0x400) && !vtm)
- vlanf = 0;
+ u16 remaining, curr_frag_len;
+ u16 i, j;
skb = napi_get_frags(&eq_obj->napi);
if (!skb) {
@@ -1078,9 +1042,9 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
return;
}
- remaining = pkt_size;
- for (i = 0, j = -1; i < num_rcvd; i++) {
- page_info = get_rx_page_info(adapter, rxo, rxq_idx);
+ remaining = rxcp->pkt_size;
+ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
curr_frag_len = min(remaining, rx_frag_size);
@@ -1098,69 +1062,125 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
skb_shinfo(skb)->frags[j].size += curr_frag_len;
remaining -= curr_frag_len;
- index_inc(&rxq_idx, rxq->len);
+ index_inc(&rxcp->rxq_idx, rxq->len);
memset(page_info, 0, sizeof(*page_info));
}
BUG_ON(j > MAX_SKB_FRAGS);
skb_shinfo(skb)->nr_frags = j + 1;
- skb->len = pkt_size;
- skb->data_len = pkt_size;
- skb->truesize += pkt_size;
+ skb->len = rxcp->pkt_size;
+ skb->data_len = rxcp->pkt_size;
+ skb->truesize += rxcp->pkt_size;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (likely(!vlanf)) {
+ if (likely(!rxcp->vlanf))
napi_gro_frags(&eq_obj->napi);
- } else {
- vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
- vid = swab16(vid);
+ else
+ vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
+}
+
+static void be_parse_rx_compl_v1(struct be_adapter *adapter,
+ struct be_eth_rx_compl *compl,
+ struct be_rx_compl_info *rxcp)
+{
+ rxcp->pkt_size =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
+ rxcp->ip_csum =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
+ rxcp->l4_csum =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
+ rxcp->ipv6 =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
+ rxcp->rxq_idx =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
+ rxcp->num_rcvd =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
+ rxcp->pkt_type =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
+ rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl);
+}
+
+static void be_parse_rx_compl_v0(struct be_adapter *adapter,
+ struct be_eth_rx_compl *compl,
+ struct be_rx_compl_info *rxcp)
+{
+ rxcp->pkt_size =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
+ rxcp->ip_csum =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
+ rxcp->l4_csum =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
+ rxcp->ipv6 =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
+ rxcp->rxq_idx =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
+ rxcp->num_rcvd =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
+ rxcp->pkt_type =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
+ rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl);
+}
+
+static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
+{
+ struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
+ struct be_rx_compl_info *rxcp = &rxo->rxcp;
+ struct be_adapter *adapter = rxo->adapter;
- if (!adapter->vlan_grp || adapter->vlans_added == 0)
- return;
+ /* For checking the valid bit it is Ok to use either definition as the
+ * valid bit is at the same position in both v0 and v1 Rx compl */
+ if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
+ return NULL;
- vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
- }
+ rmb();
+ be_dws_le_to_cpu(compl, sizeof(*compl));
- be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
-}
+ if (adapter->be3_native)
+ be_parse_rx_compl_v1(adapter, compl, rxcp);
+ else
+ be_parse_rx_compl_v0(adapter, compl, rxcp);
-static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
-{
- struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);
+ /* vlanf could be wrongly set in some cards. ignore if vtm is not set */
+ if ((adapter->function_mode & 0x400) && !rxcp->vtm)
+ rxcp->vlanf = 0;
- if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
- return NULL;
+ if (!lancer_chip(adapter))
+ rxcp->vid = swab16(rxcp->vid);
- rmb();
- be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
+ if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid])
+ rxcp->vlanf = 0;
+
+ /* As the compl has been parsed, reset it; we wont touch it again */
+ compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
queue_tail_inc(&rxo->cq);
return rxcp;
}
-/* To reset the valid bit, we need to reset the whole word as
- * when walking the queue the valid entries are little-endian
- * and invalid entries are host endian
- */
-static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
+static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
- rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
-}
-
-static inline struct page *be_alloc_pages(u32 size)
-{
- gfp_t alloc_flags = GFP_ATOMIC;
u32 order = get_order(size);
+
if (order > 0)
- alloc_flags |= __GFP_COMP;
- return alloc_pages(alloc_flags, order);
+ gfp |= __GFP_COMP;
+ return alloc_pages(gfp, order);
}
/*
* Allocate a page, split it to fragments of size rx_frag_size and post as
* receive buffers to BE
*/
-static void be_post_rx_frags(struct be_rx_obj *rxo)
+static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
@@ -1174,14 +1194,14 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
page_info = &rxo->page_info_tbl[rxq->head];
for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
if (!pagep) {
- pagep = be_alloc_pages(adapter->big_page_size);
+ pagep = be_alloc_pages(adapter->big_page_size, gfp);
if (unlikely(!pagep)) {
rxo->stats.rx_post_fail++;
break;
}
- page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
- adapter->big_page_size,
- PCI_DMA_FROMDEVICE);
+ page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
+ 0, adapter->big_page_size,
+ DMA_FROM_DEVICE);
page_info->page_offset = 0;
} else {
get_page(pagep);
@@ -1254,8 +1274,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
do {
cur_index = txq->tail;
wrb = queue_tail_node(txq);
- unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
- skb_headlen(sent_skb)));
+ unmap_tx_frag(&adapter->pdev->dev, wrb,
+ (unmap_skb_hdr && skb_headlen(sent_skb)));
unmap_skb_hdr = false;
num_wrbs++;
@@ -1323,14 +1343,13 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
struct be_rx_page_info *page_info;
struct be_queue_info *rxq = &rxo->q;
struct be_queue_info *rx_cq = &rxo->cq;
- struct be_eth_rx_compl *rxcp;
+ struct be_rx_compl_info *rxcp;
u16 tail;
/* First cleanup pending rx completions */
while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
be_rx_compl_discard(adapter, rxo, rxcp);
- be_rx_compl_reset(rxcp);
- be_cq_notify(adapter, rx_cq->id, true, 1);
+ be_cq_notify(adapter, rx_cq->id, false, 1);
}
/* Then free posted rx buffer that were not used */
@@ -1381,7 +1400,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
sent_skb = sent_skbs[txq->tail];
end_idx = txq->tail;
index_adv(&end_idx,
- wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
+ wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
+ txq->len);
be_tx_compl_process(adapter, end_idx);
}
}
@@ -1476,7 +1496,9 @@ static int be_tx_queues_create(struct be_adapter *adapter)
/* Ask BE to create Tx Event queue */
if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
goto tx_eq_free;
- adapter->base_eq_id = adapter->tx_eq.q.id;
+
+ adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+
/* Alloc TX eth compl queue */
cq = &adapter->tx_obj.cq;
@@ -1568,6 +1590,8 @@ static int be_rx_queues_create(struct be_adapter *adapter)
if (rc)
goto err;
+ rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+
/* CQ */
cq = &rxo->cq;
rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
@@ -1578,7 +1602,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
if (rc)
goto err;
-
/* Rx Q */
q = &rxo->q;
rc = be_queue_alloc(adapter, q, RX_Q_LEN,
@@ -1611,29 +1634,45 @@ err:
return -1;
}
-/* There are 8 evt ids per func. Retruns the evt id's bit number */
-static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
+static bool event_peek(struct be_eq_obj *eq_obj)
{
- return eq_id - adapter->base_eq_id;
+ struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
+ if (!eqe->evt)
+ return false;
+ else
+ return true;
}
static irqreturn_t be_intx(int irq, void *dev)
{
struct be_adapter *adapter = dev;
struct be_rx_obj *rxo;
- int isr, i;
+ int isr, i, tx = 0 , rx = 0;
+
+ if (lancer_chip(adapter)) {
+ if (event_peek(&adapter->tx_eq))
+ tx = event_handle(adapter, &adapter->tx_eq);
+ for_all_rx_queues(adapter, rxo, i) {
+ if (event_peek(&rxo->rx_eq))
+ rx |= event_handle(adapter, &rxo->rx_eq);
+ }
- isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
- (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
- if (!isr)
- return IRQ_NONE;
+ if (!(tx || rx))
+ return IRQ_NONE;
- if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
- event_handle(adapter, &adapter->tx_eq);
+ } else {
+ isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
+ (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
+ if (!isr)
+ return IRQ_NONE;
- for_all_rx_queues(adapter, rxo, i) {
- if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
- event_handle(adapter, &rxo->rx_eq);
+ if ((1 << adapter->tx_eq.msix_vec_idx & isr))
+ event_handle(adapter, &adapter->tx_eq);
+
+ for_all_rx_queues(adapter, rxo, i) {
+ if ((1 << rxo->rx_eq.msix_vec_idx & isr))
+ event_handle(adapter, &rxo->rx_eq);
+ }
}
return IRQ_HANDLED;
@@ -1658,16 +1697,9 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
return IRQ_HANDLED;
}
-static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
- struct be_eth_rx_compl *rxcp)
+static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
- int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
- int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
-
- if (err)
- rxo->stats.rxcp_err++;
-
- return (tcp_frame && !err) ? true : false;
+ return (rxcp->tcpf && !rxcp->err) ? true : false;
}
static int be_poll_rx(struct napi_struct *napi, int budget)
@@ -1676,7 +1708,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq;
- struct be_eth_rx_compl *rxcp;
+ struct be_rx_compl_info *rxcp;
u32 work_done;
rxo->stats.rx_polls++;
@@ -1685,17 +1717,19 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
if (!rxcp)
break;
- if (do_gro(adapter, rxo, rxcp))
- be_rx_compl_process_gro(adapter, rxo, rxcp);
- else
- be_rx_compl_process(adapter, rxo, rxcp);
-
- be_rx_compl_reset(rxcp);
+ /* Ignore flush completions */
+ if (rxcp->num_rcvd) {
+ if (do_gro(rxcp))
+ be_rx_compl_process_gro(adapter, rxo, rxcp);
+ else
+ be_rx_compl_process(adapter, rxo, rxcp);
+ }
+ be_rx_stats_update(rxo, rxcp);
}
/* Refill the queue */
if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
- be_post_rx_frags(rxo);
+ be_post_rx_frags(rxo, GFP_ATOMIC);
/* All consumed */
if (work_done < budget) {
@@ -1775,6 +1809,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
if (ue_status_lo || ue_status_hi) {
adapter->ue_detected = true;
+ adapter->eeh_err = true;
dev_err(&adapter->pdev->dev, "UE Detected!!\n");
}
@@ -1813,10 +1848,14 @@ static void be_worker(struct work_struct *work)
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
}
+
+ if (!adapter->ue_detected && !lancer_chip(adapter))
+ be_detect_dump_ue(adapter);
+
goto reschedule;
}
- if (!adapter->stats_ioctl_sent)
+ if (!adapter->stats_cmd_sent)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
be_tx_rate_update(adapter);
@@ -1827,11 +1866,10 @@ static void be_worker(struct work_struct *work)
if (rxo->rx_post_starved) {
rxo->rx_post_starved = false;
- be_post_rx_frags(rxo);
+ be_post_rx_frags(rxo, GFP_KERNEL);
}
}
-
- if (!adapter->ue_detected)
+ if (!adapter->ue_detected && !lancer_chip(adapter))
be_detect_dump_ue(adapter);
reschedule:
@@ -1910,10 +1948,10 @@ static void be_sriov_disable(struct be_adapter *adapter)
#endif
}
-static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
+static inline int be_msix_vec_get(struct be_adapter *adapter,
+ struct be_eq_obj *eq_obj)
{
- return adapter->msix_entries[
- be_evt_bit_get(adapter, eq_id)].vector;
+ return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
}
static int be_request_irq(struct be_adapter *adapter,
@@ -1924,14 +1962,14 @@ static int be_request_irq(struct be_adapter *adapter,
int vec;
sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
- vec = be_msix_vec_get(adapter, eq_obj->q.id);
+ vec = be_msix_vec_get(adapter, eq_obj);
return request_irq(vec, handler, 0, eq_obj->desc, context);
}
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
void *context)
{
- int vec = be_msix_vec_get(adapter, eq_obj->q.id);
+ int vec = be_msix_vec_get(adapter, eq_obj);
free_irq(vec, context);
}
@@ -2032,18 +2070,30 @@ static int be_close(struct net_device *netdev)
be_async_mcc_disable(adapter);
- netif_stop_queue(netdev);
netif_carrier_off(netdev);
adapter->link_up = false;
- be_intr_set(adapter, false);
+ if (!lancer_chip(adapter))
+ be_intr_set(adapter, false);
+
+ for_all_rx_queues(adapter, rxo, i)
+ napi_disable(&rxo->rx_eq.napi);
+
+ napi_disable(&tx_eq->napi);
+
+ if (lancer_chip(adapter)) {
+ be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
+ for_all_rx_queues(adapter, rxo, i)
+ be_cq_notify(adapter, rxo->cq.id, false, 0);
+ }
if (adapter->msix_enabled) {
- vec = be_msix_vec_get(adapter, tx_eq->q.id);
+ vec = be_msix_vec_get(adapter, tx_eq);
synchronize_irq(vec);
for_all_rx_queues(adapter, rxo, i) {
- vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
+ vec = be_msix_vec_get(adapter, &rxo->rx_eq);
synchronize_irq(vec);
}
} else {
@@ -2051,11 +2101,6 @@ static int be_close(struct net_device *netdev)
}
be_irq_unregister(adapter);
- for_all_rx_queues(adapter, rxo, i)
- napi_disable(&rxo->rx_eq.napi);
-
- napi_disable(&tx_eq->napi);
-
/* Wait for all pending tx completions to arrive so that
* all tx skbs are freed.
*/
@@ -2075,14 +2120,15 @@ static int be_open(struct net_device *netdev)
u16 link_speed;
for_all_rx_queues(adapter, rxo, i) {
- be_post_rx_frags(rxo);
+ be_post_rx_frags(rxo, GFP_KERNEL);
napi_enable(&rxo->rx_eq.napi);
}
napi_enable(&tx_eq->napi);
be_irq_register(adapter);
- be_intr_set(adapter, true);
+ if (!lancer_chip(adapter))
+ be_intr_set(adapter, true);
/* The evt queues are created in unarmed state; arm them */
for_all_rx_queues(adapter, rxo, i) {
@@ -2126,7 +2172,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
memset(mac, 0, ETH_ALEN);
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
if (cmd.va == NULL)
return -1;
memset(cmd.va, 0, cmd.size);
@@ -2137,8 +2184,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
if (status) {
dev_err(&adapter->pdev->dev,
"Could not enable Wake-on-lan\n");
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
- cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
return status;
}
status = be_cmd_enable_magic_wol(adapter,
@@ -2151,7 +2198,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
}
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2172,7 +2219,8 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
for (vf = 0; vf < num_vfs; vf++) {
status = be_cmd_pmac_add(adapter, mac,
adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id);
+ &adapter->vf_cfg[vf].vf_pmac_id,
+ vf + 1);
if (status)
dev_err(&adapter->pdev->dev,
"Mac address add failed for VF %d\n", vf);
@@ -2192,7 +2240,7 @@ static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
be_cmd_pmac_del(adapter,
adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id);
+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
}
}
@@ -2203,7 +2251,9 @@ static int be_setup(struct be_adapter *adapter)
int status;
u8 mac[ETH_ALEN];
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
if (be_physfn(adapter)) {
cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
@@ -2224,22 +2274,26 @@ static int be_setup(struct be_adapter *adapter)
goto do_none;
if (be_physfn(adapter)) {
- while (vf < num_vfs) {
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
- | BE_IF_FLAGS_BROADCAST;
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- mac, true,
+ if (adapter->sriov_enabled) {
+ while (vf < num_vfs) {
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST;
+ status = be_cmd_if_create(adapter, cap_flags,
+ en_flags, mac, true,
&adapter->vf_cfg[vf].vf_if_handle,
NULL, vf+1);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "Interface Create failed for VF %d\n", vf);
- goto if_destroy;
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Interface Create failed for VF %d\n",
+ vf);
+ goto if_destroy;
+ }
+ adapter->vf_cfg[vf].vf_pmac_id =
+ BE_INVALID_PMAC_ID;
+ vf++;
}
- adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
- vf++;
}
- } else if (!be_physfn(adapter)) {
+ } else {
status = be_cmd_mac_addr_query(adapter, mac,
MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
if (!status) {
@@ -2260,44 +2314,46 @@ static int be_setup(struct be_adapter *adapter)
if (status != 0)
goto rx_qs_destroy;
- if (be_physfn(adapter)) {
- status = be_vf_eth_addr_config(adapter);
- if (status)
- goto mcc_q_destroy;
- }
-
adapter->link_speed = -1;
return 0;
-mcc_q_destroy:
- if (be_physfn(adapter))
- be_vf_eth_addr_rem(adapter);
be_mcc_queues_destroy(adapter);
rx_qs_destroy:
be_rx_queues_destroy(adapter);
tx_qs_destroy:
be_tx_queues_destroy(adapter);
if_destroy:
- for (vf = 0; vf < num_vfs; vf++)
- if (adapter->vf_cfg[vf].vf_if_handle)
- be_cmd_if_destroy(adapter,
- adapter->vf_cfg[vf].vf_if_handle);
- be_cmd_if_destroy(adapter, adapter->if_handle);
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_cfg[vf].vf_if_handle)
+ be_cmd_if_destroy(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ vf + 1);
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
return status;
}
static int be_clear(struct be_adapter *adapter)
{
- if (be_physfn(adapter))
+ int vf;
+
+ if (be_physfn(adapter) && adapter->sriov_enabled)
be_vf_eth_addr_rem(adapter);
be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter);
be_tx_queues_destroy(adapter);
- be_cmd_if_destroy(adapter, adapter->if_handle);
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_cfg[vf].vf_if_handle)
+ be_cmd_if_destroy(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ vf + 1);
+
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
@@ -2343,10 +2399,10 @@ static int be_flash_data(struct be_adapter *adapter,
int num_bytes;
const u8 *p = fw->data;
struct be_cmd_write_flashrom *req = flash_cmd->va;
- struct flash_comp *pflashcomp;
+ const struct flash_comp *pflashcomp;
int num_comp;
- struct flash_comp gen3_flash_types[9] = {
+ static const struct flash_comp gen3_flash_types[9] = {
{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
FLASH_IMAGE_MAX_SIZE_g3},
{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
@@ -2366,7 +2422,7 @@ static int be_flash_data(struct be_adapter *adapter,
{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
FLASH_NCSI_IMAGE_MAX_SIZE_g3}
};
- struct flash_comp gen2_flash_types[8] = {
+ static const struct flash_comp gen2_flash_types[8] = {
{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
FLASH_IMAGE_MAX_SIZE_g2},
{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
@@ -2388,11 +2444,11 @@ static int be_flash_data(struct be_adapter *adapter,
if (adapter->generation == BE_GEN3) {
pflashcomp = gen3_flash_types;
filehdr_size = sizeof(struct flash_file_hdr_g3);
- num_comp = 9;
+ num_comp = ARRAY_SIZE(gen3_flash_types);
} else {
pflashcomp = gen2_flash_types;
filehdr_size = sizeof(struct flash_file_hdr_g2);
- num_comp = 8;
+ num_comp = ARRAY_SIZE(gen2_flash_types);
}
for (i = 0; i < num_comp; i++) {
if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
@@ -2400,8 +2456,8 @@ static int be_flash_data(struct be_adapter *adapter,
continue;
if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
(!be_flash_redboot(adapter, fw->data,
- pflashcomp[i].offset, pflashcomp[i].size,
- filehdr_size)))
+ pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
+ (num_of_images * sizeof(struct image_hdr)))))
continue;
p = fw->data;
p += filehdr_size + pflashcomp[i].offset
@@ -2475,8 +2531,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
- flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
- &flash_cmd.dma);
+ flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+ &flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) {
status = -ENOMEM;
dev_err(&adapter->pdev->dev,
@@ -2505,8 +2561,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
status = -1;
}
- pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
- flash_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
+ flash_cmd.dma);
if (status) {
dev_err(&adapter->pdev->dev, "Firmware load error\n");
goto fw_exit;
@@ -2543,10 +2599,15 @@ static void be_netdev_init(struct net_device *netdev)
int i;
netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_GRO | NETIF_F_TSO6;
- netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
+ netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ if (lancer_chip(adapter))
+ netdev->vlan_features |= NETIF_F_TSO6;
netdev->flags |= IFF_MULTICAST;
@@ -2568,8 +2629,6 @@ static void be_netdev_init(struct net_device *netdev)
netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
-
- netif_stop_queue(netdev);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -2587,6 +2646,15 @@ static int be_map_pci_bars(struct be_adapter *adapter)
u8 __iomem *addr;
int pcicfg_reg, db_reg;
+ if (lancer_chip(adapter)) {
+ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
+ pci_resource_len(adapter->pdev, 0));
+ if (addr == NULL)
+ return -ENOMEM;
+ adapter->db = addr;
+ return 0;
+ }
+
if (be_physfn(adapter)) {
addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
pci_resource_len(adapter->pdev, 2));
@@ -2635,13 +2703,13 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
be_unmap_pci_bars(adapter);
if (mem->va)
- pci_free_consistent(adapter->pdev, mem->size,
- mem->va, mem->dma);
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
mem = &adapter->mc_cmd_mem;
if (mem->va)
- pci_free_consistent(adapter->pdev, mem->size,
- mem->va, mem->dma);
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
}
static int be_ctrl_init(struct be_adapter *adapter)
@@ -2656,8 +2724,10 @@ static int be_ctrl_init(struct be_adapter *adapter)
goto done;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
- mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
- mbox_mem_alloc->size, &mbox_mem_alloc->dma);
+ mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
+ mbox_mem_alloc->size,
+ &mbox_mem_alloc->dma,
+ GFP_KERNEL);
if (!mbox_mem_alloc->va) {
status = -ENOMEM;
goto unmap_pci_bars;
@@ -2669,15 +2739,16 @@ static int be_ctrl_init(struct be_adapter *adapter)
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
- mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
- &mc_cmd_mem->dma);
+ mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
+ mc_cmd_mem->size, &mc_cmd_mem->dma,
+ GFP_KERNEL);
if (mc_cmd_mem->va == NULL) {
status = -ENOMEM;
goto free_mbox;
}
memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
- spin_lock_init(&adapter->mbox_lock);
+ mutex_init(&adapter->mbox_lock);
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
@@ -2686,8 +2757,8 @@ static int be_ctrl_init(struct be_adapter *adapter)
return 0;
free_mbox:
- pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
- mbox_mem_alloc->va, mbox_mem_alloc->dma);
+ dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
+ mbox_mem_alloc->va, mbox_mem_alloc->dma);
unmap_pci_bars:
be_unmap_pci_bars(adapter);
@@ -2701,8 +2772,8 @@ static void be_stats_cleanup(struct be_adapter *adapter)
struct be_dma_mem *cmd = &adapter->stats_cmd;
if (cmd->va)
- pci_free_consistent(adapter->pdev, cmd->size,
- cmd->va, cmd->dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd->size,
+ cmd->va, cmd->dma);
}
static int be_stats_init(struct be_adapter *adapter)
@@ -2710,7 +2781,8 @@ static int be_stats_init(struct be_adapter *adapter)
struct be_dma_mem *cmd = &adapter->stats_cmd;
cmd->size = sizeof(struct be_cmd_req_get_stats);
- cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
+ cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+ GFP_KERNEL);
if (cmd->va == NULL)
return -1;
memset(cmd->va, 0, cmd->size);
@@ -2780,9 +2852,100 @@ static int be_get_config(struct be_adapter *adapter)
else
adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ status = be_cmd_get_cntl_attributes(adapter);
+ if (status)
+ return status;
+
+ be_cmd_check_native_mode(adapter);
return 0;
}
+static int be_dev_family_check(struct be_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u32 sli_intf = 0, if_type;
+
+ switch (pdev->device) {
+ case BE_DEVICE_ID1:
+ case OC_DEVICE_ID1:
+ adapter->generation = BE_GEN2;
+ break;
+ case BE_DEVICE_ID2:
+ case OC_DEVICE_ID2:
+ adapter->generation = BE_GEN3;
+ break;
+ case OC_DEVICE_ID3:
+ pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
+ SLI_INTF_IF_TYPE_SHIFT;
+
+ if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
+ if_type != 0x02) {
+ dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
+ return -EINVAL;
+ }
+ if (num_vfs > 0) {
+ dev_err(&pdev->dev, "VFs not supported\n");
+ return -EINVAL;
+ }
+ adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
+ SLI_INTF_FAMILY_SHIFT);
+ adapter->generation = BE_GEN3;
+ break;
+ default:
+ adapter->generation = 0;
+ }
+ return 0;
+}
+
+static int lancer_wait_ready(struct be_adapter *adapter)
+{
+#define SLIPORT_READY_TIMEOUT 500
+ u32 sliport_status;
+ int status = 0, i;
+
+ for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ if (sliport_status & SLIPORT_STATUS_RDY_MASK)
+ break;
+
+ msleep(20);
+ }
+
+ if (i == SLIPORT_READY_TIMEOUT)
+ status = -1;
+
+ return status;
+}
+
+static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
+{
+ int status;
+ u32 sliport_status, err, reset_needed;
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ err = sliport_status & SLIPORT_STATUS_ERR_MASK;
+ reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
+ if (err && reset_needed) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+
+ /* check adapter has corrected the error */
+ status = lancer_wait_ready(adapter);
+ sliport_status = ioread32(adapter->db +
+ SLIPORT_STATUS_OFFSET);
+ sliport_status &= (SLIPORT_STATUS_ERR_MASK |
+ SLIPORT_STATUS_RN_MASK);
+ if (status || sliport_status)
+ status = -1;
+ } else if (err || reset_needed) {
+ status = -1;
+ }
+ }
+ return status;
+}
+
static int __devinit be_probe(struct pci_dev *pdev,
const struct pci_device_id *pdev_id)
{
@@ -2805,30 +2968,21 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto rel_reg;
}
adapter = netdev_priv(netdev);
-
- switch (pdev->device) {
- case BE_DEVICE_ID1:
- case OC_DEVICE_ID1:
- adapter->generation = BE_GEN2;
- break;
- case BE_DEVICE_ID2:
- case OC_DEVICE_ID2:
- adapter->generation = BE_GEN3;
- break;
- default:
- adapter->generation = 0;
- }
-
adapter->pdev = pdev;
pci_set_drvdata(pdev, adapter);
+
+ status = be_dev_family_check(adapter);
+ if (status)
+ goto free_netdev;
+
adapter->netdev = netdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
- status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!status) {
netdev->features |= NETIF_F_HIGHDMA;
} else {
- status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (status) {
dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
goto free_netdev;
@@ -2841,6 +2995,14 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto free_netdev;
+ if (lancer_chip(adapter)) {
+ status = lancer_test_and_set_rdy_state(adapter);
+ if (status) {
+ dev_err(&pdev->dev, "Adapter in non recoverable error\n");
+ goto free_netdev;
+ }
+ }
+
/* sync up with fw's ready state */
if (be_physfn(adapter)) {
status = be_cmd_POST(adapter);
@@ -2853,11 +3015,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto ctrl_clean;
- if (be_physfn(adapter)) {
- status = be_cmd_reset_function(adapter);
- if (status)
- goto ctrl_clean;
- }
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ goto ctrl_clean;
status = be_stats_init(adapter);
if (status)
@@ -2881,10 +3041,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto unsetup;
netif_carrier_off(netdev);
+ if (be_physfn(adapter) && adapter->sriov_enabled) {
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto unreg_netdev;
+ }
+
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
+unreg_netdev:
+ unregister_netdev(netdev);
unsetup:
be_clear(adapter);
msix_disable:
@@ -2895,7 +3063,7 @@ ctrl_clean:
be_ctrl_cleanup(adapter);
free_netdev:
be_sriov_disable(adapter);
- free_netdev(adapter->netdev);
+ free_netdev(netdev);
pci_set_drvdata(pdev, NULL);
rel_reg:
pci_release_regions(pdev);
@@ -2911,6 +3079,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
+ cancel_delayed_work_sync(&adapter->work);
if (adapter->wol)
be_setup_wol(adapter, true);
@@ -2923,6 +3092,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
be_clear(adapter);
+ be_msix_disable(adapter);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -2944,6 +3114,7 @@ static int be_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, 0);
pci_restore_state(pdev);
+ be_msix_enable(adapter);
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
@@ -2959,6 +3130,8 @@ static int be_resume(struct pci_dev *pdev)
if (adapter->wol)
be_setup_wol(adapter, false);
+
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
}
@@ -2970,6 +3143,9 @@ static void be_shutdown(struct pci_dev *pdev)
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
+ if (netif_running(netdev))
+ cancel_delayed_work_sync(&adapter->work);
+
netif_device_detach(netdev);
be_cmd_reset_function(adapter);
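
For reference, the dma_alloc_coherent()/dma_free_coherent() pairing adopted throughout be_main.c above follows the generic DMA API shape sketched below. The struct and function names (demo_*) are hypothetical illustration only; the dma_* calls and their signatures are the real kernel API.

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	struct demo_dma_buf {
		void *va;	/* CPU virtual address */
		dma_addr_t dma;	/* bus address programmed into the device */
		size_t size;
	};

	static int demo_buf_alloc(struct pci_dev *pdev, struct demo_dma_buf *buf,
				  size_t size, gfp_t gfp)
	{
		buf->size = size;
		/* gfp is GFP_KERNEL in process context, GFP_ATOMIC otherwise */
		buf->va = dma_alloc_coherent(&pdev->dev, size, &buf->dma, gfp);
		return buf->va ? 0 : -ENOMEM;
	}

	static void demo_buf_free(struct pci_dev *pdev, struct demo_dma_buf *buf)
	{
		if (buf->va)
			dma_free_coherent(&pdev->dev, buf->size, buf->va, buf->dma);
	}
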
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index ce1e5e9d06f6..22abfb39d813 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -8,6 +8,11 @@
* Licensed under the GPL-2 or later.
*/
+#define DRV_VERSION "1.1"
+#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -41,12 +46,7 @@
#include "bfin_mac.h"
-#define DRV_NAME "bfin_mac"
-#define DRV_VERSION "1.1"
-#define DRV_AUTHOR "Bryan Wu, Luke Yang"
-#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
-
-MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_AUTHOR("Bryan Wu, Luke Yang");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_ALIAS("platform:bfin_mac");
@@ -189,8 +189,7 @@ static int desc_list_init(void)
/* allocate a new skb for next time receive */
new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
- printk(KERN_NOTICE DRV_NAME
- ": init: low on mem - packet dropped\n");
+ pr_notice("init: low on mem - packet dropped\n");
goto init_error;
}
skb_reserve(new_skb, NET_IP_ALIGN);
@@ -240,7 +239,7 @@ static int desc_list_init(void)
init_error:
desc_list_free();
- printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
+ pr_err("kmalloc failed\n");
return -ENOMEM;
}
@@ -259,8 +258,7 @@ static int bfin_mdio_poll(void)
while ((bfin_read_EMAC_STAADD()) & STABUSY) {
udelay(1);
if (timeout_cnt-- < 0) {
- printk(KERN_ERR DRV_NAME
- ": wait MDC/MDIO transaction to complete timeout\n");
+ pr_err("wait MDC/MDIO transaction to complete timeout\n");
return -ETIMEDOUT;
}
}
@@ -350,9 +348,9 @@ static void bfin_mac_adjust_link(struct net_device *dev)
opmode &= ~RMII_10;
break;
default:
- printk(KERN_WARNING
- "%s: Ack! Speed (%d) is not 10/100!\n",
- DRV_NAME, phydev->speed);
+ netdev_warn(dev,
+ "Ack! Speed (%d) is not 10/100!\n",
+ phydev->speed);
break;
}
bfin_write_EMAC_OPMODE(opmode);
@@ -417,14 +415,13 @@ static int mii_probe(struct net_device *dev, int phy_mode)
/* now we are supposed to have a proper phydev, to attach to... */
if (!phydev) {
- printk(KERN_INFO "%s: Don't found any phy device at all\n",
- dev->name);
+ netdev_err(dev, "no phy device found\n");
return -ENODEV;
}
if (phy_mode != PHY_INTERFACE_MODE_RMII &&
phy_mode != PHY_INTERFACE_MODE_MII) {
- printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name);
+ netdev_err(dev, "invalid phy interface mode\n");
return -EINVAL;
}
@@ -432,7 +429,7 @@ static int mii_probe(struct net_device *dev, int phy_mode)
0, phy_mode);
if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "could not attach PHY\n");
return PTR_ERR(phydev);
}
@@ -453,11 +450,10 @@ static int mii_probe(struct net_device *dev, int phy_mode)
lp->old_duplex = -1;
lp->phydev = phydev;
- printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
- "@sclk=%dMHz)\n",
- DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
- MDC_CLK, mdc_div, sclk/1000000);
+ pr_info("attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
+ MDC_CLK, mdc_div, sclk/1000000);
return 0;
}
@@ -502,7 +498,7 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
+ strcpy(info->driver, KBUILD_MODNAME);
strcpy(info->version, DRV_VERSION);
strcpy(info->fw_version, "N/A");
strcpy(info->bus_info, dev_name(&dev->dev));
@@ -562,7 +558,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
};
/**************************************************************************/
-void setup_system_regs(struct net_device *dev)
+static void setup_system_regs(struct net_device *dev)
{
struct bfin_mac_local *lp = netdev_priv(dev);
int i;
@@ -592,6 +588,10 @@ void setup_system_regs(struct net_device *dev)
bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
+ /* Set vlan regs to let 1522 bytes long packets pass through */
+ bfin_write_EMAC_VLAN1(lp->vlan1_mask);
+ bfin_write_EMAC_VLAN2(lp->vlan2_mask);
+
/* Initialize the TX DMA channel registers */
bfin_write_DMA2_X_COUNT(0);
bfin_write_DMA2_X_MODIFY(4);
@@ -827,8 +827,7 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
udelay(1);
if (timeout_cnt == 0)
- printk(KERN_ERR DRV_NAME
- ": fails to timestamp the TX packet\n");
+ netdev_err(netdev, "timestamp the TX packet failed\n");
else {
struct skb_shared_hwtstamps shhwtstamps;
u64 ns;
@@ -1083,8 +1082,7 @@ static void bfin_mac_rx(struct net_device *dev)
* we which case we simply drop the packet
*/
if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
- printk(KERN_NOTICE DRV_NAME
- ": rx: receive error - packet dropped\n");
+ netdev_notice(dev, "rx: receive error - packet dropped\n");
dev->stats.rx_dropped++;
goto out;
}
@@ -1094,8 +1092,7 @@ static void bfin_mac_rx(struct net_device *dev)
new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
- printk(KERN_NOTICE DRV_NAME
- ": rx: low on mem - packet dropped\n");
+ netdev_notice(dev, "rx: low on mem - packet dropped\n");
dev->stats.rx_dropped++;
goto out;
}
@@ -1213,7 +1210,7 @@ static int bfin_mac_enable(struct phy_device *phydev)
int ret;
u32 opmode;
- pr_debug("%s: %s\n", DRV_NAME, __func__);
+ pr_debug("%s\n", __func__);
/* Set RX DMA */
bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -1287,19 +1284,12 @@ static void bfin_mac_multicast_hash(struct net_device *dev)
{
u32 emac_hashhi, emac_hashlo;
struct netdev_hw_addr *ha;
- char *addrs;
u32 crc;
emac_hashhi = emac_hashlo = 0;
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
-
- /* skip non-multicast addresses */
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc(ETH_ALEN, addrs);
+ crc = ether_crc(ETH_ALEN, ha->addr);
crc >>= 26;
if (crc & 0x20)
@@ -1323,7 +1313,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
u32 sysctl;
if (dev->flags & IFF_PROMISC) {
- printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
+ netdev_info(dev, "set promisc mode\n");
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= PR;
bfin_write_EMAC_OPMODE(sysctl);
@@ -1393,7 +1383,7 @@ static int bfin_mac_open(struct net_device *dev)
* address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
*/
if (!is_valid_ether_addr(dev->dev_addr)) {
- printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
+ netdev_warn(dev, "no valid ethernet hw addr\n");
return -EINVAL;
}
@@ -1527,6 +1517,9 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
goto out_err_mii_probe;
}
+ lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
+ lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
+
/* Fill in the fields of the device structure with ethernet values. */
ether_setup(ndev);
@@ -1558,7 +1551,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
bfin_mac_hwtstamp_init(ndev);
/* now, print out the card info, in a short format.. */
- dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
+ netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
return 0;
@@ -1650,7 +1643,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
* so set the GPIO pins to Ethernet mode
*/
pin_req = mii_bus_pd->mac_peripherals;
- rc = peripheral_request_list(pin_req, DRV_NAME);
+ rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
if (rc) {
dev_err(&pdev->dev, "Requesting peripherals failed!\n");
return rc;
@@ -1739,7 +1732,7 @@ static struct platform_driver bfin_mac_driver = {
.resume = bfin_mac_resume,
.suspend = bfin_mac_suspend,
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
};
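
For reference, the pr_fmt() override added at the top of bfin_mac.c works as in this minimal sketch (the demo_* names are hypothetical): defining pr_fmt before the first include makes every pr_*() helper prefix its message with the module name.

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>
	#include <linux/module.h>

	static int __init demo_init(void)
	{
		/* expands to printk(KERN_NOTICE KBUILD_MODNAME ": " "low on mem\n") */
		pr_notice("low on mem\n");
		return 0;
	}
	module_init(demo_init);

	MODULE_LICENSE("GPL");
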
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index aed68bed2365..f8559ac9a403 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -17,7 +17,14 @@
#include <linux/etherdevice.h>
#include <linux/bfin_mac.h>
+/*
+ * Disable hardware checksum for bug #5600 if writeback cache is
+ * enabled. Otherwise, corrupted RX packets will be sent up the stack
+ * without an error mark.
+ */
+#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK
#define BFIN_MAC_CSUM_OFFLOAD
+#endif
#define TX_RECLAIM_JIFFIES (HZ / 5)
@@ -68,7 +75,6 @@ struct bfin_mac_local {
*/
struct net_device_stats stats;
- unsigned char Mac[6]; /* MAC address of the board */
spinlock_t lock;
int wol; /* Wake On Lan */
@@ -76,6 +82,9 @@ struct bfin_mac_local {
struct timer_list tx_reclaim_timer;
struct net_device *ndev;
+ /* Data for EMAC_VLAN1 regs */
+ u16 vlan1_mask, vlan2_mask;
+
/* MII and PHY stuffs */
int old_link; /* used by bf537_adjust_link */
int old_speed;
diff --git a/drivers/net/bna/bfa_defs.h b/drivers/net/bna/bfa_defs.h
index 29c1b8de2c2d..2ea0dfe1cedc 100644
--- a/drivers/net/bna/bfa_defs.h
+++ b/drivers/net/bna/bfa_defs.h
@@ -112,16 +112,18 @@ struct bfa_ioc_pci_attr {
* IOC states
*/
enum bfa_ioc_state {
- BFA_IOC_RESET = 1, /*!< IOC is in reset state */
- BFA_IOC_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
- BFA_IOC_HWINIT = 3, /*!< IOC h/w is being initialized */
- BFA_IOC_GETATTR = 4, /*!< IOC is being configured */
- BFA_IOC_OPERATIONAL = 5, /*!< IOC is operational */
- BFA_IOC_INITFAIL = 6, /*!< IOC hardware failure */
- BFA_IOC_HBFAIL = 7, /*!< IOC heart-beat failure */
- BFA_IOC_DISABLING = 8, /*!< IOC is being disabled */
- BFA_IOC_DISABLED = 9, /*!< IOC is disabled */
- BFA_IOC_FWMISMATCH = 10, /*!< IOC f/w different from drivers */
+ BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */
+ BFA_IOC_RESET = 2, /*!< IOC is in reset state */
+ BFA_IOC_SEMWAIT = 3, /*!< Waiting for IOC h/w semaphore */
+ BFA_IOC_HWINIT = 4, /*!< IOC h/w is being initialized */
+ BFA_IOC_GETATTR = 5, /*!< IOC is being configured */
+ BFA_IOC_OPERATIONAL = 6, /*!< IOC is operational */
+ BFA_IOC_INITFAIL = 7, /*!< IOC hardware failure */
+ BFA_IOC_FAIL = 8, /*!< IOC heart-beat failure */
+ BFA_IOC_DISABLING = 9, /*!< IOC is being disabled */
+ BFA_IOC_DISABLED = 10, /*!< IOC is disabled */
+ BFA_IOC_FWMISMATCH = 11, /*!< IOC f/w different from drivers */
+ BFA_IOC_ENABLING = 12, /*!< IOC is being enabled */
};
/**
diff --git a/drivers/net/bna/bfa_defs_mfg_comm.h b/drivers/net/bna/bfa_defs_mfg_comm.h
index 987978fcb3fe..fdd677618361 100644
--- a/drivers/net/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/bna/bfa_defs_mfg_comm.h
@@ -95,28 +95,6 @@ enum {
(type) == BFA_MFG_TYPE_CNA10P1 || \
bfa_mfg_is_mezz(type)))
-/**
- * Check if the card having old wwn/mac handling
- */
-#define bfa_mfg_is_old_wwn_mac_model(type) (( \
- (type) == BFA_MFG_TYPE_FC8P2 || \
- (type) == BFA_MFG_TYPE_FC8P1 || \
- (type) == BFA_MFG_TYPE_FC4P2 || \
- (type) == BFA_MFG_TYPE_FC4P1 || \
- (type) == BFA_MFG_TYPE_CNA10P2 || \
- (type) == BFA_MFG_TYPE_CNA10P1 || \
- (type) == BFA_MFG_TYPE_JAYHAWK || \
- (type) == BFA_MFG_TYPE_WANCHESE))
-
-#define bfa_mfg_increment_wwn_mac(m, i) \
-do { \
- u32 t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2]; \
- t += (i); \
- (m)[0] = (t >> 16) & 0xFF; \
- (m)[1] = (t >> 8) & 0xFF; \
- (m)[2] = t & 0xFF; \
-} while (0)
-
#define bfa_mfg_adapter_prop_init_flash(card_type, prop) \
do { \
switch ((card_type)) { \
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index e94e5aa97515..34933cb9569f 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -26,25 +26,6 @@
* IOC local definitions
*/
-#define bfa_ioc_timer_start(__ioc) \
- mod_timer(&(__ioc)->ioc_timer, jiffies + \
- msecs_to_jiffies(BFA_IOC_TOV))
-#define bfa_ioc_timer_stop(__ioc) del_timer(&(__ioc)->ioc_timer)
-
-#define bfa_ioc_recovery_timer_start(__ioc) \
- mod_timer(&(__ioc)->ioc_timer, jiffies + \
- msecs_to_jiffies(BFA_IOC_TOV_RECOVER))
-
-#define bfa_sem_timer_start(__ioc) \
- mod_timer(&(__ioc)->sem_timer, jiffies + \
- msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
-#define bfa_sem_timer_stop(__ioc) del_timer(&(__ioc)->sem_timer)
-
-#define bfa_hb_timer_start(__ioc) \
- mod_timer(&(__ioc)->hb_timer, jiffies + \
- msecs_to_jiffies(BFA_IOC_HB_TOV))
-#define bfa_hb_timer_stop(__ioc) del_timer(&(__ioc)->hb_timer)
-
/**
* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
*/
@@ -55,11 +36,16 @@
((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
-#define bfa_ioc_notify_hbfail(__ioc) \
- ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
-
-#define bfa_ioc_is_optrom(__ioc) \
- (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
+#define bfa_ioc_notify_fail(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_join(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
+#define bfa_ioc_sync_leave(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
+#define bfa_ioc_sync_ack(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
+#define bfa_ioc_sync_complete(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_mbox_cmd_pending(__ioc) \
(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -85,6 +71,12 @@ static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
+static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
@@ -101,72 +93,173 @@ static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
-static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
/**
- * IOC state machine events
+ * IOC state machine definitions/declarations
*/
enum ioc_event {
- IOC_E_ENABLE = 1, /*!< IOC enable request */
- IOC_E_DISABLE = 2, /*!< IOC disable request */
- IOC_E_TIMEOUT = 3, /*!< f/w response timeout */
- IOC_E_FWREADY = 4, /*!< f/w initialization done */
- IOC_E_FWRSP_GETATTR = 5, /*!< IOC get attribute response */
- IOC_E_FWRSP_ENABLE = 6, /*!< enable f/w response */
- IOC_E_FWRSP_DISABLE = 7, /*!< disable f/w response */
- IOC_E_HBFAIL = 8, /*!< heartbeat failure */
- IOC_E_HWERROR = 9, /*!< hardware error interrupt */
- IOC_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
- IOC_E_DETACH = 11, /*!< driver detach cleanup */
+ IOC_E_RESET = 1, /*!< IOC reset request */
+ IOC_E_ENABLE = 2, /*!< IOC enable request */
+ IOC_E_DISABLE = 3, /*!< IOC disable request */
+ IOC_E_DETACH = 4, /*!< driver detach cleanup */
+ IOC_E_ENABLED = 5, /*!< f/w enabled */
+ IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
+ IOC_E_DISABLED = 7, /*!< f/w disabled */
+ IOC_E_INITFAILED = 8, /*!< failure notice by iocpf sm */
+ IOC_E_PFAILED = 9, /*!< failure notice by iocpf sm */
+ IOC_E_HBFAIL = 10, /*!< heartbeat failure */
+ IOC_E_HWERROR = 11, /*!< hardware error interrupt */
+ IOC_E_TIMEOUT = 12, /*!< timeout */
};
+bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
static struct bfa_sm_table ioc_sm_table[] = {
+ {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
- {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
- {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
- {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
- {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
- {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
+ {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
- {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
- {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
+ {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
+ {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
/**
+ * IOCPF state machine definitions/declarations
+ */
+
+/*
+ * Forward declarations for iocpf state machine
+ */
+static void bfa_iocpf_enable(struct bfa_ioc *ioc);
+static void bfa_iocpf_disable(struct bfa_ioc *ioc);
+static void bfa_iocpf_fail(struct bfa_ioc *ioc);
+static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
+static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
+static void bfa_iocpf_stop(struct bfa_ioc *ioc);
+
+/**
+ * IOCPF state machine events
+ */
+enum iocpf_event {
+ IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
+ IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
+ IOCPF_E_STOP = 3, /*!< stop on driver detach */
+ IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
+ IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */
+ IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */
+ IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */
+ IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */
+ IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */
+ IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
+ IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */
+};
+
+/**
+ * IOCPF states
+ */
+enum bfa_iocpf_state {
+ BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
+ BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
+ BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */
+ BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */
+ BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */
+ BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */
+ BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */
+ BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */
+ BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */
+};
+
+bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
+ enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
+ enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
+
+static struct bfa_sm_table iocpf_sm_table[] = {
+ {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
+ {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
+ {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
+ {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
+ {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
+ {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
+ {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
+ {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
+ {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
+ {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
+ {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
+ {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
+ {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
+ {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
+};
+
+/**
+ * IOC State Machine
+ */
+
+/**
+ * Beginning state. IOC uninit state.
+ */
+static void
+bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
+{
+}
+
+/**
+ * IOC is in uninit state.
+ */
+static void
+bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_RESET:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
* Reset entry actions -- initialize state machine
*/
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
- ioc->retry_count = 0;
- ioc->auto_recover = bfa_nw_auto_recover;
+ bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
/**
- * Beginning state. IOC is in reset state.
+ * IOC is in reset state.
*/
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
switch (event) {
case IOC_E_ENABLE:
- bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
break;
case IOC_E_DISABLE:
@@ -174,6 +267,7 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
break;
case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
break;
default:
@@ -181,42 +275,43 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * Semaphore should be acquired for version check.
- */
static void
-bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
+bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
- bfa_ioc_hw_sem_get(ioc);
+ bfa_iocpf_enable(ioc);
}
/**
- * Awaiting h/w semaphore to continue with version check.
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
*/
static void
-bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
+bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
switch (event) {
- case IOC_E_SEMLOCKED:
- if (bfa_ioc_firmware_lock(ioc)) {
- ioc->retry_count = 0;
- bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
- } else {
- bfa_nw_ioc_hw_sem_release(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
- }
+ case IOC_E_ENABLED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ break;
+
+ case IOC_E_PFAILED:
+ /* !!! fall through !!! */
+ case IOC_E_HWERROR:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+ if (event != IOC_E_PFAILED)
+ bfa_iocpf_initfail(ioc);
break;
case IOC_E_DISABLE:
- bfa_ioc_disable_comp(ioc);
- /* fall through */
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
case IOC_E_DETACH:
- bfa_ioc_hw_sem_get_cancel(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_iocpf_stop(ioc);
break;
- case IOC_E_FWREADY:
+ case IOC_E_ENABLE:
break;
default:
@@ -225,41 +320,85 @@ bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
}
/**
- * Notify enable completion callback and generate mismatch AEN.
+ * Semaphore should be acquired for version check.
*/
static void
-bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
+bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
- /**
- * Provide enable completion callback and AEN notification only once.
- */
- if (ioc->retry_count == 0)
- ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
- ioc->retry_count++;
- bfa_ioc_timer_start(ioc);
+ mod_timer(&ioc->ioc_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_TOV));
+ bfa_ioc_send_getattr(ioc);
}
/**
- * Awaiting firmware version match.
+ * IOC configuration in progress. Timer is active.
*/
static void
-bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
+bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
switch (event) {
+ case IOC_E_FWRSP_GETATTR:
+ del_timer(&ioc->ioc_timer);
+ bfa_ioc_check_attr_wwns(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+ break;
+
+ case IOC_E_PFAILED:
+ case IOC_E_HWERROR:
+ del_timer(&ioc->ioc_timer);
+ /* fall through */
case IOC_E_TIMEOUT:
- bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+ if (event != IOC_E_PFAILED)
+ bfa_iocpf_getattrfail(ioc);
break;
case IOC_E_DISABLE:
- bfa_ioc_disable_comp(ioc);
- /* fall through */
+ del_timer(&ioc->ioc_timer);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
- case IOC_E_DETACH:
- bfa_ioc_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ case IOC_E_ENABLE:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
+{
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
+ bfa_ioc_hb_monitor(ioc);
+}
+
+static void
+bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
+{
+ switch (event) {
+ case IOC_E_ENABLE:
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_ioc_hb_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
- case IOC_E_FWREADY:
+ case IOC_E_PFAILED:
+ case IOC_E_HWERROR:
+ bfa_ioc_hb_stop(ioc);
+ /* !!! fall through !!! */
+ case IOC_E_HBFAIL:
+ bfa_ioc_fail_notify(ioc);
+ if (ioc->iocpf.auto_recover)
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+ else
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+
+ if (event != IOC_E_PFAILED)
+ bfa_iocpf_fail(ioc);
break;
default:
@@ -267,30 +406,61 @@ bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
}
}
+static void
+bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
+{
+ bfa_iocpf_disable(ioc);
+}
+
/**
- * Request for semaphore.
+ * IOC is being disabled
*/
static void
-bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
+bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
- bfa_ioc_hw_sem_get(ioc);
+ switch (event) {
+ case IOC_E_DISABLED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_HWERROR:
+ /*
+ * No state change. Will move to disabled state
+ * after iocpf sm completes failure processing and
+ * moves to disabled state.
+ */
+ bfa_iocpf_fail(ioc);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
}
/**
- * Awaiting semaphore for h/w initialzation.
+ * IOC disable completion entry.
*/
static void
-bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
+bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
+{
+ bfa_ioc_disable_comp(ioc);
+}
+
+static void
+bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
switch (event) {
- case IOC_E_SEMLOCKED:
- ioc->retry_count = 0;
- bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ case IOC_E_ENABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
break;
case IOC_E_DISABLE:
- bfa_ioc_hw_sem_get_cancel(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_iocpf_stop(ioc);
break;
default:
@@ -299,46 +469,45 @@ bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
}
static void
-bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
+bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
- bfa_ioc_timer_start(ioc);
- bfa_ioc_reset(ioc, false);
}
/**
- * @brief
- * Hardware is being initialized. Interrupts are enabled.
- * Holding hardware semaphore lock.
+ * Hardware initialization retry.
*/
static void
-bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
+bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
switch (event) {
- case IOC_E_FWREADY:
- bfa_ioc_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+ case IOC_E_ENABLED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
break;
+ case IOC_E_PFAILED:
case IOC_E_HWERROR:
- bfa_ioc_timer_stop(ioc);
- /* fall through */
+ /**
+ * Initialization retry failed.
+ */
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ if (event != IOC_E_PFAILED)
+ bfa_iocpf_initfail(ioc);
+ break;
- case IOC_E_TIMEOUT:
- ioc->retry_count++;
- if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
- bfa_ioc_timer_start(ioc);
- bfa_ioc_reset(ioc, true);
- break;
- }
+ case IOC_E_INITFAILED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ break;
- bfa_nw_ioc_hw_sem_release(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ case IOC_E_ENABLE:
break;
case IOC_E_DISABLE:
- bfa_nw_ioc_hw_sem_release(ioc);
- bfa_ioc_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_iocpf_stop(ioc);
break;
default:
@@ -347,51 +516,248 @@ bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
}
static void
-bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
+bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
- bfa_ioc_timer_start(ioc);
- bfa_ioc_send_enable(ioc);
}
/**
- * Host IOC function is being enabled, awaiting response from firmware.
- * Semaphore is acquired.
+ * IOC failure.
*/
static void
-bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
+bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
switch (event) {
- case IOC_E_FWRSP_ENABLE:
- bfa_ioc_timer_stop(ioc);
- bfa_nw_ioc_hw_sem_release(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ case IOC_E_ENABLE:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_iocpf_stop(ioc);
break;
case IOC_E_HWERROR:
- bfa_ioc_timer_stop(ioc);
- /* fall through */
+ /* HB failure notification, ignore. */
+ break;
- case IOC_E_TIMEOUT:
- ioc->retry_count++;
- if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
- writel(BFI_IOC_UNINIT,
- ioc->ioc_regs.ioc_fwstate);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
- break;
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * IOCPF State Machine
+ */
+
+/**
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
+{
+ iocpf->retry_count = 0;
+ iocpf->auto_recover = bfa_nw_auto_recover;
+}
+
+/**
+ * Beginning state. IOC is in reset state.
+ */
+static void
+bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ switch (event) {
+ case IOCPF_E_ENABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
+ break;
+
+ case IOCPF_E_STOP:
+ break;
+
+ default:
+ bfa_sm_fault(iocpf->ioc, event);
+ }
+}
+
+/**
+ * Semaphore should be acquired for version check.
+ */
+static void
+bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
+{
+ bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * Awaiting h/w semaphore to continue with version check.
+ */
+static void
+bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
+ if (bfa_ioc_firmware_lock(ioc)) {
+ if (bfa_ioc_sync_complete(ioc)) {
+ iocpf->retry_count = 0;
+ bfa_ioc_sync_join(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+ } else {
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ mod_timer(&ioc->sem_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
+ }
+ } else {
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
}
+ break;
- bfa_nw_ioc_hw_sem_release(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ case IOCPF_E_DISABLE:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ bfa_ioc_pf_disabled(ioc);
break;
- case IOC_E_DISABLE:
- bfa_ioc_timer_stop(ioc);
+ case IOCPF_E_STOP:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Notify enable completion callback
+ */
+static void
+bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
+{
+ /* Call only the first time sm enters fwmismatch state. */
+ if (iocpf->retry_count == 0)
+ bfa_ioc_pf_fwmismatch(iocpf->ioc);
+
+ iocpf->retry_count++;
+ mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_TOV));
+}
+
+/**
+ * Awaiting firmware version match.
+ */
+static void
+bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_TIMEOUT:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
+ break;
+
+ case IOCPF_E_DISABLE:
+ del_timer(&ioc->iocpf_timer);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ bfa_ioc_pf_disabled(ioc);
+ break;
+
+ case IOCPF_E_STOP:
+ del_timer(&ioc->iocpf_timer);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * Request for semaphore.
+ */
+static void
+bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
+{
+ bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * Awaiting semaphore for h/w initialization.
+ */
+static void
+bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
+ if (bfa_ioc_sync_complete(ioc)) {
+ bfa_ioc_sync_join(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+ } else {
+ bfa_nw_ioc_hw_sem_release(ioc);
+ mod_timer(&ioc->sem_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
+ }
+ break;
+
+ case IOCPF_E_DISABLE:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+static void
+bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
+{
+ mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_TOV));
+ bfa_ioc_reset(iocpf->ioc, 0);
+}
+
+/**
+ * Hardware is being initialized. Interrupts are enabled.
+ * Holding hardware semaphore lock.
+ */
+static void
+bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_FWREADY:
+ del_timer(&ioc->iocpf_timer);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
+ break;
+
+ case IOCPF_E_INITFAIL:
+ del_timer(&ioc->iocpf_timer);
+ /*
+ * !!! fall through !!!
+ */
+
+ case IOCPF_E_TIMEOUT:
bfa_nw_ioc_hw_sem_release(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ if (event == IOCPF_E_TIMEOUT)
+ bfa_ioc_pf_failed(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
break;
- case IOC_E_FWREADY:
- bfa_ioc_send_enable(ioc);
+ case IOCPF_E_DISABLE:
+ del_timer(&ioc->iocpf_timer);
+ bfa_ioc_sync_leave(ioc);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
default:
@@ -400,37 +766,49 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
}
static void
-bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
+bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
- bfa_ioc_timer_start(ioc);
- bfa_ioc_send_getattr(ioc);
+ mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_TOV));
+ bfa_ioc_send_enable(iocpf->ioc);
}
/**
- * @brief
- * IOC configuration in progress. Timer is active.
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
*/
static void
-bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
+bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
switch (event) {
- case IOC_E_FWRSP_GETATTR:
- bfa_ioc_timer_stop(ioc);
- bfa_ioc_check_attr_wwns(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+ case IOCPF_E_FWRSP_ENABLE:
+ del_timer(&ioc->iocpf_timer);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
break;
- case IOC_E_HWERROR:
- bfa_ioc_timer_stop(ioc);
- /* fall through */
+ case IOCPF_E_INITFAIL:
+ del_timer(&ioc->iocpf_timer);
+ /*
+ * !!! fall through !!!
+ */
+ case IOCPF_E_TIMEOUT:
+ bfa_nw_ioc_hw_sem_release(ioc);
+ if (event == IOCPF_E_TIMEOUT)
+ bfa_ioc_pf_failed(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+ break;
- case IOC_E_TIMEOUT:
- bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ case IOCPF_E_DISABLE:
+ del_timer(&ioc->iocpf_timer);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
break;
- case IOC_E_DISABLE:
- bfa_ioc_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ case IOCPF_E_FWREADY:
+ bfa_ioc_send_enable(ioc);
break;
default:
@@ -438,36 +816,42 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
}
}
+static bool
+bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
static void
-bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
+bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
- ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
- bfa_ioc_hb_monitor(ioc);
+ bfa_ioc_pf_enabled(iocpf->ioc);
}
static void
-bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
+bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
switch (event) {
- case IOC_E_ENABLE:
+ case IOCPF_E_DISABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
break;
- case IOC_E_DISABLE:
- bfa_ioc_hb_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ case IOCPF_E_GETATTRFAIL:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
break;
- case IOC_E_HWERROR:
- case IOC_E_FWREADY:
- /**
- * Hard error or IOC recovery by other function.
- * Treat it same as heartbeat failure.
- */
- bfa_ioc_hb_stop(ioc);
- /* !!! fall through !!! */
+ case IOCPF_E_FAIL:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
+ break;
- case IOC_E_HBFAIL:
- bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
+ case IOCPF_E_FWREADY:
+ bfa_ioc_pf_failed(ioc);
+ if (bfa_nw_ioc_is_operational(ioc))
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
+ else
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
break;
default:
@@ -476,33 +860,40 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
}
static void
-bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
+bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
- bfa_ioc_timer_start(ioc);
- bfa_ioc_send_disable(ioc);
+ mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
+ msecs_to_jiffies(BFA_IOC_TOV));
+ bfa_ioc_send_disable(iocpf->ioc);
}
/**
* IOC is being disabled
*/
static void
-bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
+bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
switch (event) {
- case IOC_E_FWRSP_DISABLE:
- bfa_ioc_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ case IOCPF_E_FWRSP_DISABLE:
+ case IOCPF_E_FWREADY:
+ del_timer(&ioc->iocpf_timer);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
- case IOC_E_HWERROR:
- bfa_ioc_timer_stop(ioc);
+ case IOCPF_E_FAIL:
+ del_timer(&ioc->iocpf_timer);
/*
* !!! fall through !!!
*/
- case IOC_E_TIMEOUT:
+ case IOCPF_E_TIMEOUT:
writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ break;
+
+ case IOCPF_E_FWRSP_ENABLE:
break;
default:
@@ -510,33 +901,58 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * IOC disable completion entry.
- */
static void
-bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
+bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
- bfa_ioc_disable_comp(ioc);
+ bfa_ioc_hw_sem_get(iocpf->ioc);
}
+/**
+ * IOC hb ack request is being removed.
+ */
static void
-bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
+bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
switch (event) {
- case IOC_E_ENABLE:
- bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ case IOCPF_E_SEMLOCKED:
+ bfa_ioc_sync_leave(ioc);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
- case IOC_E_DISABLE:
- ioc->cbfn->disable_cbfn(ioc->bfa);
+ case IOCPF_E_FAIL:
break;
- case IOC_E_FWREADY:
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
+/**
+ * IOC disable completion entry.
+ */
+static void
+bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
+{
+ bfa_ioc_pf_disabled(iocpf->ioc);
+}
+
+static void
+bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
+ switch (event) {
+ case IOCPF_E_ENABLE:
+ iocpf->retry_count = 0;
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
break;
- case IOC_E_DETACH:
+ case IOCPF_E_STOP:
bfa_ioc_firmware_unlock(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
default:
@@ -545,33 +961,50 @@ bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
}
static void
-bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
+bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
- ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
- bfa_ioc_timer_start(ioc);
+ bfa_ioc_hw_sem_get(iocpf->ioc);
}
/**
- * @brief
* Hardware initialization failed.
*/
static void
-bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
+bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
+ struct bfa_ioc *ioc = iocpf->ioc;
+
switch (event) {
- case IOC_E_DISABLE:
- bfa_ioc_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ case IOCPF_E_SEMLOCKED:
+ bfa_ioc_notify_fail(ioc);
+ bfa_ioc_sync_ack(ioc);
+ iocpf->retry_count++;
+ if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
+ bfa_ioc_sync_leave(ioc);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+ } else {
+ if (bfa_ioc_sync_complete(ioc))
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+ else {
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+ }
+ }
break;
- case IOC_E_DETACH:
- bfa_ioc_timer_stop(ioc);
+ case IOCPF_E_DISABLE:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ break;
+
+ case IOCPF_E_STOP:
+ bfa_ioc_hw_sem_get_cancel(ioc);
bfa_ioc_firmware_unlock(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
break;
- case IOC_E_TIMEOUT:
- bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ case IOCPF_E_FAIL:
break;
default:
@@ -580,80 +1013,108 @@ bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
}
static void
-bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
+bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
- struct list_head *qe;
- struct bfa_ioc_hbfail_notify *notify;
+ bfa_ioc_pf_initfailed(iocpf->ioc);
+}
- /**
- * Mark IOC as failed in hardware and stop firmware.
- */
- bfa_ioc_lpu_stop(ioc);
- writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+/**
+ * Hardware initialization failed.
+ */
+static void
+bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ struct bfa_ioc *ioc = iocpf->ioc;
- /**
- * Notify other functions on HB failure.
- */
- bfa_ioc_notify_hbfail(ioc);
+ switch (event) {
+ case IOCPF_E_DISABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+ break;
- /**
- * Notify driver and common modules registered for notification.
- */
- ioc->cbfn->hbfail_cbfn(ioc->bfa);
- list_for_each(qe, &ioc->hb_notify_q) {
- notify = (struct bfa_ioc_hbfail_notify *) qe;
- notify->cbfn(notify->cbarg);
+ case IOCPF_E_STOP:
+ bfa_ioc_firmware_unlock(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
}
+}
+static void
+bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
+{
/**
- * Flush any queued up mailbox requests.
+ * Mark IOC as failed in hardware and stop firmware.
*/
- bfa_ioc_mbox_hbfail(ioc);
+ bfa_ioc_lpu_stop(iocpf->ioc);
/**
- * Trigger auto-recovery after a delay.
+ * Flush any queued up mailbox requests.
*/
- if (ioc->auto_recover)
- mod_timer(&ioc->ioc_timer, jiffies +
- msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
+ bfa_ioc_mbox_hbfail(iocpf->ioc);
+ bfa_ioc_hw_sem_get(iocpf->ioc);
}
/**
- * @brief
- * IOC heartbeat failure.
+ * IOC is in failed state.
*/
static void
-bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
+bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
- switch (event) {
+ struct bfa_ioc *ioc = iocpf->ioc;
- case IOC_E_ENABLE:
- ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ switch (event) {
+ case IOCPF_E_SEMLOCKED:
+ iocpf->retry_count = 0;
+ bfa_ioc_sync_ack(ioc);
+ bfa_ioc_notify_fail(ioc);
+ if (!iocpf->auto_recover) {
+ bfa_ioc_sync_leave(ioc);
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ } else {
+ if (bfa_ioc_sync_complete(ioc))
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+ else {
+ bfa_nw_ioc_hw_sem_release(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+ }
+ }
break;
- case IOC_E_DISABLE:
- if (ioc->auto_recover)
- bfa_ioc_timer_stop(ioc);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ case IOCPF_E_DISABLE:
+ bfa_ioc_hw_sem_get_cancel(ioc);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
- case IOC_E_TIMEOUT:
- bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+ case IOCPF_E_FAIL:
break;
- case IOC_E_FWREADY:
- /**
- * Recovery is already initiated by other function.
- */
- break;
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
- case IOC_E_HWERROR:
- /*
- * HB failure notification, ignore.
- */
+static void
+bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
+{
+}
+
+/**
+ * @brief
+ * IOC is in failed state.
+ */
+static void
+bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
+{
+ switch (event) {
+ case IOCPF_E_DISABLE:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
+
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(iocpf->ioc, event);
}
}
@@ -678,14 +1139,6 @@ bfa_ioc_disable_comp(struct bfa_ioc *ioc)
}
}
-void
-bfa_nw_ioc_sem_timeout(void *ioc_arg)
-{
- struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
-
- bfa_ioc_hw_sem_get(ioc);
-}
-
bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
@@ -725,7 +1178,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
*/
r32 = readl(ioc->ioc_regs.ioc_sem_reg);
if (r32 == 0) {
- bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
return;
}
@@ -865,12 +1318,6 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
{
struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
- /**
- * If bios/efi boot (flash based) -- return true
- */
- if (bfa_ioc_is_optrom(ioc))
- return true;
-
bfa_nw_ioc_fwver_get(ioc, &fwhdr);
drv_fwhdr = (struct bfi_ioc_image_hdr *)
bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
@@ -934,20 +1381,15 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
/**
* If IOC function is disabled and firmware version is same,
* just re-enable IOC.
- *
- * If option rom, IOC must not be in operational state. With
- * convergence, IOC will be in operational state when 2nd driver
- * is loaded.
*/
- if (ioc_fwstate == BFI_IOC_DISABLED ||
- (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
+ if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
/**
* When using MSI-X any pending firmware ready event should
* be flushed. Otherwise MSI-X interrupts are not delivered.
*/
bfa_ioc_msgflush(ioc);
ioc->cbfn->reset_cbfn(ioc->bfa);
- bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
return;
}
@@ -1033,7 +1475,6 @@ bfa_nw_ioc_hb_check(void *cbarg)
hb_count = readl(ioc->ioc_regs.heartbeat);
if (ioc->hb_count == hb_count) {
- pr_crit("Firmware heartbeat failure at %d", hb_count);
bfa_ioc_recover(ioc);
return;
} else {
@@ -1078,11 +1519,6 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
*/
bfa_ioc_lmem_init(ioc);
- /**
- * Flash based firmware boot
- */
- if (bfa_ioc_is_optrom(ioc))
- boot_type = BFI_BOOT_TYPE_FLASH;
fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
@@ -1209,6 +1645,55 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
bfa_q_deq(&mod->cmd_q, &cmd);
}
+static void
+bfa_ioc_fail_notify(struct bfa_ioc *ioc)
+{
+ struct list_head *qe;
+ struct bfa_ioc_hbfail_notify *notify;
+
+ /**
+ * Notify driver and common modules registered for notification.
+ */
+ ioc->cbfn->hbfail_cbfn(ioc->bfa);
+ list_for_each(qe, &ioc->hb_notify_q) {
+ notify = (struct bfa_ioc_hbfail_notify *) qe;
+ notify->cbfn(notify->cbarg);
+ }
+}
+
+static void
+bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_ENABLED);
+}
+
+static void
+bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_DISABLED);
+}
+
+static void
+bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_INITFAILED);
+}
+
+static void
+bfa_ioc_pf_failed(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(ioc, IOC_E_PFAILED);
+}
+
+static void
+bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
+{
+ /**
+ * Provide enable completion callback and AEN notification.
+ */
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+}
+
/**
* IOC public
*/
@@ -1304,6 +1789,7 @@ static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
union bfi_ioc_i2h_msg_u *msg;
+ struct bfa_iocpf *iocpf = &ioc->iocpf;
msg = (union bfi_ioc_i2h_msg_u *) m;
@@ -1314,15 +1800,15 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
break;
case BFI_IOC_I2H_READY_EVENT:
- bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+ bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
break;
case BFI_IOC_I2H_ENABLE_REPLY:
- bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
+ bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
break;
case BFI_IOC_I2H_DISABLE_REPLY:
- bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
+ bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
break;
case BFI_IOC_I2H_GETATTR_REPLY:
@@ -1348,11 +1834,13 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
ioc->fcmode = false;
ioc->pllinit = false;
ioc->dbg_fwsave_once = true;
+ ioc->iocpf.ioc = ioc;
bfa_ioc_mbox_attach(ioc);
INIT_LIST_HEAD(&ioc->hb_notify_q);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/**
@@ -1657,7 +2145,40 @@ bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
- return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+ enum bfa_iocpf_state iocpf_st;
+ enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+
+ if (ioc_st == BFA_IOC_ENABLING ||
+ ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
+
+ iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+
+ switch (iocpf_st) {
+ case BFA_IOCPF_SEMWAIT:
+ ioc_st = BFA_IOC_SEMWAIT;
+ break;
+
+ case BFA_IOCPF_HWINIT:
+ ioc_st = BFA_IOC_HWINIT;
+ break;
+
+ case BFA_IOCPF_FWMISMATCH:
+ ioc_st = BFA_IOC_FWMISMATCH;
+ break;
+
+ case BFA_IOCPF_FAIL:
+ ioc_st = BFA_IOC_FAIL;
+ break;
+
+ case BFA_IOCPF_INITFAIL:
+ ioc_st = BFA_IOC_INITFAIL;
+ break;
+
+ default:
+ break;
+ }
+ }
+ return ioc_st;
}
void
@@ -1689,28 +2210,7 @@ bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
- /*
- * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
- */
- if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
- return bfa_ioc_get_mfg_mac(ioc);
- else
- return ioc->attr->mac;
-}
-
-static mac_t
-bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
-{
- mac_t m;
-
- m = ioc->attr->mfg_mac;
- if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
- m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
- else
- bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
- bfa_ioc_pcifn(ioc));
-
- return m;
+ return ioc->attr->mac;
}
/**
@@ -1719,8 +2219,13 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
- bfa_ioc_stats(ioc, ioc_hbfails);
- bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
+ u16 bdf;
+
+ bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 |
+ ioc->pcidev.device_id);
+
+ pr_crit("Firmware heartbeat failure at %d", bdf);
+ BUG_ON(1);
}
static void
@@ -1728,5 +2233,61 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
{
if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
return;
+}
+
+/**
+ * @dg hal_iocpf_pvt BFA IOC PF private functions
+ * @{
+ */
+
+static void
+bfa_iocpf_enable(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
+}
+
+static void
+bfa_iocpf_disable(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
+}
+
+static void
+bfa_iocpf_fail(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
+}
+
+static void
+bfa_iocpf_initfail(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
+}
+
+static void
+bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
+}
+
+static void
+bfa_iocpf_stop(struct bfa_ioc *ioc)
+{
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+}
+void
+bfa_nw_iocpf_timeout(void *ioc_arg)
+{
+ struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
+}
+
+void
+bfa_nw_iocpf_sem_timeout(void *ioc_arg)
+{
+ struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+
+ bfa_ioc_hw_sem_get(ioc);
}
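
The IOC and IOCPF handlers above are plain function-pointer state machines driven through the bfa_fsm_set_state()/bfa_fsm_send_event() helpers, and the two machines interact only through the small notifier shims (bfa_ioc_pf_enabled(), bfa_iocpf_enable(), ...) that turn an event on one machine into an event on the other. A minimal sketch of that pattern follows; the names (struct fsm, fsm_fn, st_idle) are invented here for illustration and are not taken from the driver:

struct fsm;
typedef void (*fsm_fn)(struct fsm *f, int event);

struct fsm {
	fsm_fn state;				/* current state handler */
};

/* Entering a state stores its handler and runs the state's entry action. */
#define fsm_set_state(f, s)	do { (f)->state = (s); s##_entry(f); } while (0)
/* Sending an event simply invokes the current state's handler. */
#define fsm_send_event(f, ev)	((f)->state((f), (ev)))

/* Example state: a handler function plus its matching entry hook. */
static void st_idle(struct fsm *f, int event);
static void st_idle_entry(struct fsm *f) { /* one-time work on entering the state */ }
static void st_idle(struct fsm *f, int event) { /* dispatch on 'event', possibly fsm_set_state() */ }
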
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
index a73d84ec808c..e4974bc24ef6 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/bna/bfa_ioc.h
@@ -26,16 +26,7 @@
#define BFA_IOC_TOV 3000 /* msecs */
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
-#define BFA_IOC_HWINIT_MAX 2
-#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
-
-/**
- * Generic Scatter Gather Element used by driver
- */
-struct bfa_sge {
- u32 sg_len;
- void *sg_addr;
-};
+#define BFA_IOC_HWINIT_MAX 5
/**
* PCI device information required by IOC
@@ -65,19 +56,6 @@ struct bfa_dma {
#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
/**
- * @brief BFA dma address assignment macro
- */
-#define bfa_dma_addr_set(dma_addr, pa) \
- __bfa_dma_addr_set(&dma_addr, (u64)pa)
-
-static inline void
-__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
-{
- dma_addr->a32.addr_lo = (u32) pa;
- dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa));
-}
-
-/**
* @brief BFA dma address assignment macro. (big endian format)
*/
#define bfa_dma_be_addr_set(dma_addr, pa) \
@@ -105,8 +83,11 @@ struct bfa_ioc_regs {
void __iomem *host_page_num_fn;
void __iomem *heartbeat;
void __iomem *ioc_fwstate;
+ void __iomem *alt_ioc_fwstate;
void __iomem *ll_halt;
+ void __iomem *alt_ll_halt;
void __iomem *err_set;
+ void __iomem *ioc_fail_sync;
void __iomem *shirq_isr_next;
void __iomem *shirq_msk_next;
void __iomem *smem_page_start;
@@ -165,16 +146,22 @@ struct bfa_ioc_hbfail_notify {
(__notify)->cbarg = (__cbarg); \
} while (0)
+struct bfa_iocpf {
+ bfa_fsm_t fsm;
+ struct bfa_ioc *ioc;
+ u32 retry_count;
+ bool auto_recover;
+};
+
struct bfa_ioc {
bfa_fsm_t fsm;
struct bfa *bfa;
struct bfa_pcidev pcidev;
- struct bfa_timer_mod *timer_mod;
struct timer_list ioc_timer;
+ struct timer_list iocpf_timer;
struct timer_list sem_timer;
struct timer_list hb_timer;
u32 hb_count;
- u32 retry_count;
struct list_head hb_notify_q;
void *dbg_fwsave;
int dbg_fwsave_len;
@@ -182,7 +169,6 @@ struct bfa_ioc {
enum bfi_mclass ioc_mc;
struct bfa_ioc_regs ioc_regs;
struct bfa_ioc_drv_stats stats;
- bool auto_recover;
bool fcmode;
bool ctdev;
bool cna;
@@ -195,6 +181,7 @@ struct bfa_ioc {
struct bfa_ioc_cbfn *cbfn;
struct bfa_ioc_mbox_mod mbox_mod;
struct bfa_ioc_hwif *ioc_hwif;
+ struct bfa_iocpf iocpf;
};
struct bfa_ioc_hwif {
@@ -205,8 +192,12 @@ struct bfa_ioc_hwif {
void (*ioc_map_port) (struct bfa_ioc *ioc);
void (*ioc_isr_mode_set) (struct bfa_ioc *ioc,
bool msix);
- void (*ioc_notify_hbfail) (struct bfa_ioc *ioc);
+ void (*ioc_notify_fail) (struct bfa_ioc *ioc);
void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
+ void (*ioc_sync_join) (struct bfa_ioc *ioc);
+ void (*ioc_sync_leave) (struct bfa_ioc *ioc);
+ void (*ioc_sync_ack) (struct bfa_ioc *ioc);
+ bool (*ioc_sync_complete) (struct bfa_ioc *ioc);
};
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -271,7 +262,6 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
-
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
struct bfa_ioc_hbfail_notify *notify);
@@ -289,7 +279,8 @@ mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
*/
void bfa_nw_ioc_timeout(void *ioc);
void bfa_nw_ioc_hb_check(void *ioc);
-void bfa_nw_ioc_sem_timeout(void *ioc);
+void bfa_nw_iocpf_timeout(void *ioc);
+void bfa_nw_iocpf_sem_timeout(void *ioc);
/*
* F/W Image Size & Chunk
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
index 121cfd6d48b1..469997c4ffd1 100644
--- a/drivers/net/bna/bfa_ioc_ct.c
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -22,6 +22,15 @@
#include "bfi_ctreg.h"
#include "bfa_defs.h"
+#define bfa_ioc_ct_sync_pos(__ioc) \
+ ((u32) (1 << bfa_ioc_pcifn(__ioc)))
+#define BFA_IOC_SYNC_REQD_SH 16
+#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
+#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
+#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
+#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
+ (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
+
/*
* forward declarations
*/
@@ -30,8 +39,12 @@ static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
-static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
+static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
static struct bfa_ioc_hwif nw_hwif_ct;
@@ -48,8 +61,12 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
- nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
+ nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+ nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
+ nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
+ nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
+ nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
ioc->ioc_hwif = &nw_hwif_ct;
}
@@ -86,6 +103,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
if (usecnt == 0) {
writel(1, ioc->ioc_regs.ioc_usage_reg);
bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
return true;
}
@@ -149,12 +167,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
* Notify other functions on HB failure.
*/
static void
-bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc)
+bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
if (ioc->cna) {
writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+ writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
/* Wait for halt to take effect */
readl(ioc->ioc_regs.ll_halt);
+ readl(ioc->ioc_regs.alt_ll_halt);
} else {
writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
readl(ioc->ioc_regs.err_set);
@@ -206,15 +226,19 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
if (ioc->port_id == 0) {
ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
} else {
ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+ ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
}
/*
@@ -232,6 +256,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+ ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
/**
* sram memory access
@@ -317,6 +342,77 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
bfa_nw_ioc_hw_sem_release(ioc);
}
+/**
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
+
+ writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
+ bfa_ioc_ct_sync_pos(ioc);
+
+ writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+
+ writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static bool
+bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+ u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
+ u32 tmp_ackd;
+
+ if (sync_ackd == 0)
+ return true;
+
+ /**
+ * The check below is to see whether any other PCI fn
+ * has reinitialized the ASIC (reset sync_ackd bits)
+ * and failed again while this IOC was waiting for hw
+ * semaphore (in bfa_iocpf_sm_semwait()).
+ */
+ tmp_ackd = sync_ackd;
+ if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
+ !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
+ sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
+
+ if (sync_reqd == sync_ackd) {
+ writel(bfa_ioc_ct_clear_sync_ackd(r32),
+ ioc->ioc_regs.ioc_fail_sync);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
+ return true;
+ }
+
+ /**
+ * If another PCI fn reinitialized and failed again while
+ * this IOC was waiting for hw sem, the sync_ackd bit for
+ * this IOC needs to be set again to allow reinitialization.
+ */
+ if (tmp_ackd != sync_ackd)
+ writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
+
+ return false;
+}
+
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
{
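
The sync_join/leave/ack/complete helpers added above coordinate failure recovery between the PCI functions through the new ioc_fail_sync scratch register (HOST_SEM5_INFO_REG). A minimal sketch of the bit layout implied by the macros at the top of this file, with one bit per PCI function in each half-word; the identifiers below are invented for illustration only:

#include <stdbool.h>
#include <stdint.h>

#define SYNC_REQD_SH	16		/* "sync required" half starts at bit 16 */

/* Bit owned by PCI function 'pcifn' in the "sync acked" half (bits 15..0). */
static inline uint32_t sync_ack_bit(unsigned int pcifn)
{
	return 1u << pcifn;
}

/* Matching bit in the "sync required" half (bits 31..16), set by sync_join(). */
static inline uint32_t sync_reqd_bit(unsigned int pcifn)
{
	return sync_ack_bit(pcifn) << SYNC_REQD_SH;
}

/*
 * Reinitialization may proceed either when no function has acked yet
 * (first function to hit the failure) or when every function that
 * joined the failure sync has acked -- the same test that
 * bfa_ioc_ct_sync_complete() performs on the live register value.
 */
static bool sync_complete(uint32_t r32)
{
	uint32_t reqd = r32 >> SYNC_REQD_SH;
	uint32_t ackd = r32 & 0x0000ffffu;

	return ackd == 0 || reqd == ackd;
}
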
diff --git a/drivers/net/bna/bfi_ctreg.h b/drivers/net/bna/bfi_ctreg.h
index 404ea351d4a1..5130d7918660 100644
--- a/drivers/net/bna/bfi_ctreg.h
+++ b/drivers/net/bna/bfi_ctreg.h
@@ -535,6 +535,7 @@ enum {
#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
#define CPE_DEPTH_Q(__n) \
(CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
@@ -552,22 +553,30 @@ enum {
(RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
#define RME_CI_PTR_Q(__n) \
(RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
-#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
- * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
-#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
- * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
-#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
- * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
-#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
- * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
-#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
- * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
-#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
- * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
-#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
- * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
-#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
- * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
+#define HQM_QSET_RXQ_DRBL_P0(__n) \
+ (HQM_QSET0_RXQ_DRBL_P0 + (__n) * \
+ (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
+#define HQM_QSET_TXQ_DRBL_P0(__n) \
+ (HQM_QSET0_TXQ_DRBL_P0 + (__n) * \
+ (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
+#define HQM_QSET_IB_DRBL_1_P0(__n) \
+ (HQM_QSET0_IB_DRBL_1_P0 + (__n) * \
+ (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
+#define HQM_QSET_IB_DRBL_2_P0(__n) \
+ (HQM_QSET0_IB_DRBL_2_P0 + (__n) * \
+ (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
+#define HQM_QSET_RXQ_DRBL_P1(__n) \
+ (HQM_QSET0_RXQ_DRBL_P1 + (__n) * \
+ (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
+#define HQM_QSET_TXQ_DRBL_P1(__n) \
+ (HQM_QSET0_TXQ_DRBL_P1 + (__n) * \
+ (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
+#define HQM_QSET_IB_DRBL_1_P1(__n) \
+ (HQM_QSET0_IB_DRBL_1_P1 + (__n) * \
+ (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
+#define HQM_QSET_IB_DRBL_2_P1(__n) \
+ (HQM_QSET0_IB_DRBL_2_P1 + (__n) * \
+ (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
index df6676bbc84e..a287f89b0289 100644
--- a/drivers/net/bna/bna.h
+++ b/drivers/net/bna/bna.h
@@ -32,8 +32,6 @@ extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
/* Log string size */
#define BNA_MESSAGE_SIZE 256
-#define bna_device_timer(_dev) bfa_timer_beat(&((_dev)->timer_mod))
-
/* MBOX API for PORT, TX, RX */
#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \
do { \
@@ -390,8 +388,8 @@ void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
/* API for RX */
int bna_port_mtu_get(struct bna_port *port);
-void bna_llport_admin_up(struct bna_llport *llport);
-void bna_llport_admin_down(struct bna_llport *llport);
+void bna_llport_rx_started(struct bna_llport *llport);
+void bna_llport_rx_stopped(struct bna_llport *llport);
/* API for BNAD */
void bna_port_enable(struct bna_port *port);
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
index 07b26598546e..e1527472b961 100644
--- a/drivers/net/bna/bna_ctrl.c
+++ b/drivers/net/bna/bna_ctrl.c
@@ -59,14 +59,70 @@ bna_port_cb_link_down(struct bna_port *port, int status)
port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
}
+static inline int
+llport_can_be_up(struct bna_llport *llport)
+{
+ int ready = 0;
+ if (llport->type == BNA_PORT_T_REGULAR)
+ ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
+ (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
+ (llport->flags & BNA_LLPORT_F_PORT_ENABLED));
+ else
+ ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
+ (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
+ !(llport->flags & BNA_LLPORT_F_PORT_ENABLED));
+ return ready;
+}
+
+#define llport_is_up llport_can_be_up
+
+enum bna_llport_event {
+ LLPORT_E_START = 1,
+ LLPORT_E_STOP = 2,
+ LLPORT_E_FAIL = 3,
+ LLPORT_E_UP = 4,
+ LLPORT_E_DOWN = 5,
+ LLPORT_E_FWRESP_UP_OK = 6,
+ LLPORT_E_FWRESP_UP_FAIL = 7,
+ LLPORT_E_FWRESP_DOWN = 8
+};
+
+static void
+bna_llport_cb_port_enabled(struct bna_llport *llport)
+{
+ llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
+
+ if (llport_can_be_up(llport))
+ bfa_fsm_send_event(llport, LLPORT_E_UP);
+}
+
+static void
+bna_llport_cb_port_disabled(struct bna_llport *llport)
+{
+ int llport_up = llport_is_up(llport);
+
+ llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
+
+ if (llport_up)
+ bfa_fsm_send_event(llport, LLPORT_E_DOWN);
+}
+
/**
* MBOX
*/
static int
bna_is_aen(u8 msg_id)
{
- return msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
- msg_id == BFI_LL_I2H_LINK_UP_AEN;
+ switch (msg_id) {
+ case BFI_LL_I2H_LINK_DOWN_AEN:
+ case BFI_LL_I2H_LINK_UP_AEN:
+ case BFI_LL_I2H_PORT_ENABLE_AEN:
+ case BFI_LL_I2H_PORT_DISABLE_AEN:
+ return 1;
+
+ default:
+ return 0;
+ }
}
static void
@@ -81,6 +137,12 @@ bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
case BFI_LL_I2H_LINK_DOWN_AEN:
bna_port_cb_link_down(&bna->port, aen->reason);
break;
+ case BFI_LL_I2H_PORT_ENABLE_AEN:
+ bna_llport_cb_port_enabled(&bna->port.llport);
+ break;
+ case BFI_LL_I2H_PORT_DISABLE_AEN:
+ bna_llport_cb_port_disabled(&bna->port.llport);
+ break;
default:
break;
}
@@ -251,16 +313,6 @@ static void bna_llport_start(struct bna_llport *llport);
static void bna_llport_stop(struct bna_llport *llport);
static void bna_llport_fail(struct bna_llport *llport);
-enum bna_llport_event {
- LLPORT_E_START = 1,
- LLPORT_E_STOP = 2,
- LLPORT_E_FAIL = 3,
- LLPORT_E_UP = 4,
- LLPORT_E_DOWN = 5,
- LLPORT_E_FWRESP_UP = 6,
- LLPORT_E_FWRESP_DOWN = 7
-};
-
enum bna_llport_state {
BNA_LLPORT_STOPPED = 1,
BNA_LLPORT_DOWN = 2,
@@ -320,7 +372,7 @@ bna_llport_sm_stopped(struct bna_llport *llport,
/* No-op */
break;
- case LLPORT_E_FWRESP_UP:
+ case LLPORT_E_FWRESP_UP_OK:
case LLPORT_E_FWRESP_DOWN:
/**
* These events are received due to flushing of mbox when
@@ -366,6 +418,7 @@ bna_llport_sm_down(struct bna_llport *llport,
static void
bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
{
+ BUG_ON(!llport_can_be_up(llport));
/**
* NOTE: Do not call bna_fw_llport_up() here. That will over step
* mbox due to down_resp_wait -> up_resp_wait transition on event
@@ -390,10 +443,14 @@ bna_llport_sm_up_resp_wait(struct bna_llport *llport,
bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
break;
- case LLPORT_E_FWRESP_UP:
+ case LLPORT_E_FWRESP_UP_OK:
bfa_fsm_set_state(llport, bna_llport_sm_up);
break;
+ case LLPORT_E_FWRESP_UP_FAIL:
+ bfa_fsm_set_state(llport, bna_llport_sm_down);
+ break;
+
case LLPORT_E_FWRESP_DOWN:
/* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
bna_fw_llport_up(llport);
@@ -431,11 +488,12 @@ bna_llport_sm_down_resp_wait(struct bna_llport *llport,
bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
break;
- case LLPORT_E_FWRESP_UP:
+ case LLPORT_E_FWRESP_UP_OK:
/* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
bna_fw_llport_down(llport);
break;
+ case LLPORT_E_FWRESP_UP_FAIL:
case LLPORT_E_FWRESP_DOWN:
bfa_fsm_set_state(llport, bna_llport_sm_down);
break;
@@ -496,11 +554,12 @@ bna_llport_sm_last_resp_wait(struct bna_llport *llport,
/* No-op */
break;
- case LLPORT_E_FWRESP_UP:
+ case LLPORT_E_FWRESP_UP_OK:
/* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */
bna_fw_llport_down(llport);
break;
+ case LLPORT_E_FWRESP_UP_FAIL:
case LLPORT_E_FWRESP_DOWN:
bfa_fsm_set_state(llport, bna_llport_sm_stopped);
break;
@@ -541,7 +600,14 @@ bna_fw_cb_llport_up(void *arg, int status)
struct bna_llport *llport = (struct bna_llport *)arg;
bfa_q_qe_init(&llport->mbox_qe.qe);
- bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP);
+ if (status == BFI_LL_CMD_FAIL) {
+ if (llport->type == BNA_PORT_T_REGULAR)
+ llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
+ else
+ llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
+ bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL);
+ } else
+ bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK);
}
static void
@@ -588,13 +654,14 @@ bna_port_cb_llport_stopped(struct bna_port *port,
static void
bna_llport_init(struct bna_llport *llport, struct bna *bna)
{
- llport->flags |= BNA_LLPORT_F_ENABLED;
+ llport->flags |= BNA_LLPORT_F_ADMIN_UP;
+ llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
llport->type = BNA_PORT_T_REGULAR;
llport->bna = bna;
llport->link_status = BNA_LINK_DOWN;
- llport->admin_up_count = 0;
+ llport->rx_started_count = 0;
llport->stop_cbfn = NULL;
@@ -606,7 +673,8 @@ bna_llport_init(struct bna_llport *llport, struct bna *bna)
static void
bna_llport_uninit(struct bna_llport *llport)
{
- llport->flags &= ~BNA_LLPORT_F_ENABLED;
+ llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
+ llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
llport->bna = NULL;
}
@@ -628,6 +696,8 @@ bna_llport_stop(struct bna_llport *llport)
static void
bna_llport_fail(struct bna_llport *llport)
{
+ /* Reset the physical port status to enabled */
+ llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
bfa_fsm_send_event(llport, LLPORT_E_FAIL);
}
@@ -638,25 +708,31 @@ bna_llport_state_get(struct bna_llport *llport)
}
void
-bna_llport_admin_up(struct bna_llport *llport)
+bna_llport_rx_started(struct bna_llport *llport)
{
- llport->admin_up_count++;
+ llport->rx_started_count++;
- if (llport->admin_up_count == 1) {
- llport->flags |= BNA_LLPORT_F_RX_ENABLED;
- if (llport->flags & BNA_LLPORT_F_ENABLED)
+ if (llport->rx_started_count == 1) {
+
+ llport->flags |= BNA_LLPORT_F_RX_STARTED;
+
+ if (llport_can_be_up(llport))
bfa_fsm_send_event(llport, LLPORT_E_UP);
}
}
void
-bna_llport_admin_down(struct bna_llport *llport)
+bna_llport_rx_stopped(struct bna_llport *llport)
{
- llport->admin_up_count--;
+ int llport_up = llport_is_up(llport);
+
+ llport->rx_started_count--;
- if (llport->admin_up_count == 0) {
- llport->flags &= ~BNA_LLPORT_F_RX_ENABLED;
- if (llport->flags & BNA_LLPORT_F_ENABLED)
+ if (llport->rx_started_count == 0) {
+
+ llport->flags &= ~BNA_LLPORT_F_RX_STARTED;
+
+ if (llport_up)
bfa_fsm_send_event(llport, LLPORT_E_DOWN);
}
}
@@ -2056,37 +2132,6 @@ rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
-static void
-__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
-{
- struct bna_rx_fndb_ram *rx_fndb_ram;
- u32 ctrl_flags;
- int i;
-
- rx_fndb_ram = (struct bna_rx_fndb_ram *)
- BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva,
- RX_FNDB_RAM_BASE_OFFSET);
-
- for (i = 0; i < BFI_MAX_RXF; i++) {
- if (status == BNA_STATUS_T_ENABLED) {
- if (i == rxf->rxf_id)
- continue;
-
- ctrl_flags =
- readl(&rx_fndb_ram[i].control_flags);
- ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
- writel(ctrl_flags,
- &rx_fndb_ram[i].control_flags);
- } else {
- ctrl_flags =
- readl(&rx_fndb_ram[i].control_flags);
- ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
- writel(ctrl_flags,
- &rx_fndb_ram[i].control_flags);
- }
- }
-}
-
int
rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
{
@@ -2153,46 +2198,6 @@ rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
}
int
-rxf_process_packet_filter_default(struct bna_rxf *rxf)
-{
- struct bna *bna = rxf->rx->bna;
-
- /* Enable/disable default mode */
- if (is_default_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- /* move default configuration from pending -> active */
- default_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active |= BNA_RXMODE_DEFAULT;
-
- /* Disable VLAN filter to allow all VLANs */
- __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
- /* Redirect all other RxF vlan filtering to this one */
- __rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED);
- rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
- BNA_STATUS_T_ENABLED);
- return 1;
- } else if (is_default_disable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- /* move default configuration from pending -> active */
- default_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
- bna->rxf_default_id = BFI_MAX_RXF;
-
- /* Revert VLAN filter */
- __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
- /* Stop RxF vlan filter table redirection */
- __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
- rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
- BNA_STATUS_T_DISABLED);
- return 1;
- }
-
- return 0;
-}
-
-int
rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
{
/* Enable/disable allmulti mode */
@@ -2289,48 +2294,6 @@ rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
}
int
-rxf_clear_packet_filter_default(struct bna_rxf *rxf)
-{
- struct bna *bna = rxf->rx->bna;
-
- /* 8. Execute pending default mode disable command */
- if (is_default_disable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- /* move default configuration from pending -> active */
- default_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
- bna->rxf_default_id = BFI_MAX_RXF;
-
- /* Revert VLAN filter */
- __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
- /* Stop RxF vlan filter table redirection */
- __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
- rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
- BNA_STATUS_T_DISABLED);
- return 1;
- }
-
- /* 9. Clear active default mode; move it to pending enable */
- if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
- /* move default configuration from active -> pending */
- default_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
-
- /* Revert VLAN filter */
- __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
- /* Stop RxF vlan filter table redirection */
- __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
- rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
- BNA_STATUS_T_DISABLED);
- return 1;
- }
-
- return 0;
-}
-
-int
rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
{
/* 10. Execute pending allmulti mode disable command */
@@ -2405,28 +2368,6 @@ rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
}
void
-rxf_reset_packet_filter_default(struct bna_rxf *rxf)
-{
- struct bna *bna = rxf->rx->bna;
-
- /* 8. Clear pending default mode disable */
- if (is_default_disable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- default_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
- bna->rxf_default_id = BFI_MAX_RXF;
- }
-
- /* 9. Move default mode config from active -> pending */
- if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
- default_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
- }
-}
-
-void
rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
{
/* 10. Clear pending allmulti mode disable */
@@ -2523,76 +2464,6 @@ rxf_promisc_disable(struct bna_rxf *rxf)
* 1 = need h/w change
*/
static int
-rxf_default_enable(struct bna_rxf *rxf)
-{
- struct bna *bna = rxf->rx->bna;
- int ret = 0;
-
- /* There can not be any pending disable command */
-
- /* Do nothing if pending enable or already enabled */
- if (is_default_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask) ||
- (rxf->rxmode_active & BNA_RXMODE_DEFAULT)) {
- /* Schedule enable */
- } else {
- /* Default mode should not be active in the system */
- default_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- bna->rxf_default_id = rxf->rxf_id;
- ret = 1;
- }
-
- return ret;
-}
-
-/**
- * Should only be called by bna_rxf_mode_set.
- * Helps deciding if h/w configuration is needed or not.
- * Returns:
- * 0 = no h/w change
- * 1 = need h/w change
- */
-static int
-rxf_default_disable(struct bna_rxf *rxf)
-{
- struct bna *bna = rxf->rx->bna;
- int ret = 0;
-
- /* There can not be any pending disable */
-
- /* Turn off pending enable command , if any */
- if (is_default_enable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask)) {
- /* Promisc mode should not be active */
- /* system default state should be pending */
- default_inactive(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- /* Remove the default state from the system */
- bna->rxf_default_id = BFI_MAX_RXF;
-
- /* Schedule disable */
- } else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
- /* Default mode should be active in the system */
- default_disable(rxf->rxmode_pending,
- rxf->rxmode_pending_bitmask);
- ret = 1;
-
- /* Do nothing if already disabled */
- } else {
- }
-
- return ret;
-}
-
-/**
- * Should only be called by bna_rxf_mode_set.
- * Helps deciding if h/w configuration is needed or not.
- * Returns:
- * 0 = no h/w change
- * 1 = need h/w change
- */
-static int
rxf_allmulti_enable(struct bna_rxf *rxf)
{
int ret = 0;
@@ -2654,38 +2525,13 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
struct bna_rxf *rxf = &rx->rxf;
int need_hw_config = 0;
- /* Error checks */
+ /* Process the commands */
if (is_promisc_enable(new_mode, bitmask)) {
/* If promisc mode is already enabled elsewhere in the system */
if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
(rx->bna->rxf_promisc_id != rxf->rxf_id))
goto err_return;
-
- /* If default mode is already enabled in the system */
- if (rx->bna->rxf_default_id != BFI_MAX_RXF)
- goto err_return;
-
- /* Trying to enable promiscuous and default mode together */
- if (is_default_enable(new_mode, bitmask))
- goto err_return;
- }
-
- if (is_default_enable(new_mode, bitmask)) {
- /* If default mode is already enabled elsewhere in the system */
- if ((rx->bna->rxf_default_id != BFI_MAX_RXF) &&
- (rx->bna->rxf_default_id != rxf->rxf_id)) {
- goto err_return;
- }
-
- /* If promiscuous mode is already enabled in the system */
- if (rx->bna->rxf_promisc_id != BFI_MAX_RXF)
- goto err_return;
- }
-
- /* Process the commands */
-
- if (is_promisc_enable(new_mode, bitmask)) {
if (rxf_promisc_enable(rxf))
need_hw_config = 1;
} else if (is_promisc_disable(new_mode, bitmask)) {
@@ -2693,14 +2539,6 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
need_hw_config = 1;
}
- if (is_default_enable(new_mode, bitmask)) {
- if (rxf_default_enable(rxf))
- need_hw_config = 1;
- } else if (is_default_disable(new_mode, bitmask)) {
- if (rxf_default_disable(rxf))
- need_hw_config = 1;
- }
-
if (is_allmulti_enable(new_mode, bitmask)) {
if (rxf_allmulti_enable(rxf))
need_hw_config = 1;
@@ -3126,7 +2964,6 @@ bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
- bna->rxf_default_id = BFI_MAX_RXF;
bna->rxf_promisc_id = BFI_MAX_RXF;
/* Mbox q element for posting stat request to f/w */
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
index ad93fdb0f427..58c7664040dc 100644
--- a/drivers/net/bna/bna_txrx.c
+++ b/drivers/net/bna/bna_txrx.c
@@ -1226,8 +1226,7 @@ rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
/* Apply the VLAN filter */
if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
- if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) &&
- !(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
+ if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
}
@@ -1276,9 +1275,6 @@ rxf_process_packet_filter(struct bna_rxf *rxf)
if (rxf_process_packet_filter_promisc(rxf))
return 1;
- if (rxf_process_packet_filter_default(rxf))
- return 1;
-
if (rxf_process_packet_filter_allmulti(rxf))
return 1;
@@ -1340,9 +1336,6 @@ rxf_clear_packet_filter(struct bna_rxf *rxf)
if (rxf_clear_packet_filter_promisc(rxf))
return 1;
- if (rxf_clear_packet_filter_default(rxf))
- return 1;
-
if (rxf_clear_packet_filter_allmulti(rxf))
return 1;
@@ -1389,8 +1382,6 @@ rxf_reset_packet_filter(struct bna_rxf *rxf)
rxf_reset_packet_filter_promisc(rxf);
- rxf_reset_packet_filter_default(rxf);
-
rxf_reset_packet_filter_allmulti(rxf);
}
@@ -1441,12 +1432,16 @@ bna_rxf_init(struct bna_rxf *rxf,
memset(rxf->vlan_filter_table, 0,
(sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
+ /* Set up VLAN 0 for pure priority tagged packets */
+ rxf->vlan_filter_table[0] |= 1;
+
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}
static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
+ struct bna *bna = rxf->rx->bna;
struct bna_mac *mac;
bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
@@ -1473,6 +1468,27 @@ bna_rxf_uninit(struct bna_rxf *rxf)
bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
}
+ /* Turn off pending promisc mode */
+ if (is_promisc_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ /* system promisc state should be pending */
+ BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id));
+ promisc_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ bna->rxf_promisc_id = BFI_MAX_RXF;
+ }
+ /* Promisc mode should not be active */
+ BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);
+
+ /* Turn off pending all-multi mode */
+ if (is_allmulti_enable(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask)) {
+ allmulti_inactive(rxf->rxmode_pending,
+ rxf->rxmode_pending_bitmask);
+ }
+ /* Allmulti mode should not be active */
+ BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);
+
rxf->rx = NULL;
}
@@ -1947,7 +1963,7 @@ bna_rx_sm_started_entry(struct bna_rx *rx)
bna_ib_ack(&rxp->cq.ib->door_bell, 0);
}
- bna_llport_admin_up(&rx->bna->port.llport);
+ bna_llport_rx_started(&rx->bna->port.llport);
}
void
@@ -1955,13 +1971,13 @@ bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
case RX_E_FAIL:
- bna_llport_admin_down(&rx->bna->port.llport);
+ bna_llport_rx_stopped(&rx->bna->port.llport);
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
rx_ib_fail(rx);
bna_rxf_fail(&rx->rxf);
break;
case RX_E_STOP:
- bna_llport_admin_down(&rx->bna->port.llport);
+ bna_llport_rx_stopped(&rx->bna->port.llport);
bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
break;
default:
@@ -3373,7 +3389,7 @@ __bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
- (txq->priority & 0x3));
+ (txq->priority & 0x7));
txq_cfg.wvc_n_cquota_n_rquota =
((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
(BFI_TX_MAX_WRR_QUOTA & 0xfff));
diff --git a/drivers/net/bna/bna_types.h b/drivers/net/bna/bna_types.h
index 6877310f6ef4..b9c134f7ad31 100644
--- a/drivers/net/bna/bna_types.h
+++ b/drivers/net/bna/bna_types.h
@@ -165,8 +165,7 @@ enum bna_rxp_type {
enum bna_rxmode {
BNA_RXMODE_PROMISC = 1,
- BNA_RXMODE_DEFAULT = 2,
- BNA_RXMODE_ALLMULTI = 4
+ BNA_RXMODE_ALLMULTI = 2
};
enum bna_rx_event {
@@ -249,8 +248,9 @@ enum bna_link_status {
};
enum bna_llport_flags {
- BNA_LLPORT_F_ENABLED = 1,
- BNA_LLPORT_F_RX_ENABLED = 2
+ BNA_LLPORT_F_ADMIN_UP = 1,
+ BNA_LLPORT_F_PORT_ENABLED = 2,
+ BNA_LLPORT_F_RX_STARTED = 4
};
enum bna_port_flags {
@@ -405,7 +405,7 @@ struct bna_llport {
enum bna_link_status link_status;
- int admin_up_count;
+ int rx_started_count;
void (*stop_cbfn)(struct bna_port *, enum bna_cb_status);
@@ -1117,7 +1117,6 @@ struct bna {
struct bna_rit_mod rit_mod;
- int rxf_default_id;
int rxf_promisc_id;
struct bna_mbox_qe mbox_qe;
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 7e839b9cec22..9f356d5d0f33 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -70,6 +70,8 @@ do { \
(sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
} while (0)
+#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
+
/*
* Reinitialize completions in CQ, once Rx is taken down
*/
@@ -107,7 +109,7 @@ static void
bnad_free_all_txbufs(struct bnad *bnad,
struct bna_tcb *tcb)
{
- u16 unmap_cons;
+ u32 unmap_cons;
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb = NULL;
@@ -124,22 +126,25 @@ bnad_free_all_txbufs(struct bnad *bnad,
}
unmap_array[unmap_cons].skb = NULL;
- pci_unmap_single(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr), skb_headlen(skb),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
+
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ if (++unmap_cons >= unmap_q->q_depth)
+ break;
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
- unmap_cons++;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- pci_unmap_page(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_page(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
skb_shinfo(skb)->frags[i].size,
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
0);
- unmap_cons++;
+ if (++unmap_cons >= unmap_q->q_depth)
+ break;
}
dev_kfree_skb_any(skb);
}
@@ -167,11 +172,11 @@ bnad_free_txbufs(struct bnad *bnad,
/*
* Just return if TX is stopped. This check is useful
* when bnad_free_txbufs() runs out of a tasklet scheduled
- * before bnad_cb_tx_cleanup() cleared BNAD_RF_TX_STARTED bit
+ * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
* but this routine runs actually after the cleanup has been
* executed.
*/
- if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+ if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
return 0;
updated_hw_cons = *(tcb->hw_consumer_index);
@@ -194,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad,
sent_bytes += skb->len;
wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
- pci_unmap_single(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr), skb_headlen(skb),
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
prefetch(&unmap_array[unmap_cons + 1]);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
prefetch(&unmap_array[unmap_cons + 1]);
- pci_unmap_page(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_page(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
skb_shinfo(skb)->frags[i].size,
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
0);
BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
}
@@ -239,7 +244,7 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
struct bnad *bnad = (struct bnad *)bnad_ptr;
struct bna_tcb *tcb;
- u32 acked;
+ u32 acked = 0;
int i, j;
for (i = 0; i < bnad->num_tx; i++) {
@@ -252,10 +257,26 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
&tcb->flags))) {
acked = bnad_free_txbufs(bnad, tcb);
- bna_ib_ack(tcb->i_dbell, acked);
+ if (likely(test_bit(BNAD_TXQ_TX_STARTED,
+ &tcb->flags)))
+ bna_ib_ack(tcb->i_dbell, acked);
smp_mb__before_clear_bit();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
}
+ if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
+ &tcb->flags)))
+ continue;
+ if (netif_queue_stopped(bnad->netdev)) {
+ if (acked && netif_carrier_ok(bnad->netdev) &&
+ BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
+ BNAD_NETIF_WAKE_THRESHOLD) {
+ netif_wake_queue(bnad->netdev);
+ /* TODO */
+ /* Counters for individual TxQs? */
+ BNAD_UPDATE_CTR(bnad,
+ netif_queue_wakeup);
+ }
+ }
}
}
}
@@ -264,7 +285,7 @@ static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
struct net_device *netdev = bnad->netdev;
- u32 sent;
+ u32 sent = 0;
if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
return 0;
@@ -275,12 +296,15 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
netif_carrier_ok(netdev) &&
BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
BNAD_NETIF_WAKE_THRESHOLD) {
- netif_wake_queue(netdev);
- BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
+ netif_wake_queue(netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
}
+ }
+
+ if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
bna_ib_ack(tcb->i_dbell, sent);
- } else
- bna_ib_ack(tcb->i_dbell, 0);
smp_mb__before_clear_bit();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
@@ -313,25 +337,27 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
}
static void
-bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
struct bnad_unmap_q *unmap_q;
+ struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
+ int unmap_cons;
unmap_q = rcb->unmap_q;
- while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
- skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
- BUG_ON(!(skb));
- unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
- pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
- unmap_array[unmap_q->consumer_index],
- dma_addr), rcb->rxq->buffer_size +
- NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
+ unmap_array = unmap_q->unmap_array;
+ for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
+ skb = unmap_array[unmap_cons].skb;
+ if (!skb)
+ continue;
+ unmap_array[unmap_cons].skb = NULL;
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr),
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
dev_kfree_skb(skb);
- BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
- BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
}
-
bnad_reset_rcb(bnad, rcb);
}
@@ -368,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
skb->dev = bnad->netdev;
skb_reserve(skb, NET_IP_ALIGN);
unmap_array[unmap_prod].skb = skb;
- dma_addr = pci_map_single(bnad->pcidev, skb->data,
- rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+ dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -385,41 +412,9 @@ finishing:
unmap_q->producer_index = unmap_prod;
rcb->producer_index = unmap_prod;
smp_mb();
- bna_rxq_prod_indx_doorbell(rcb);
- }
-}
-
-/*
- * Locking is required in the enable path
- * because it is called from a napi poll
- * context, where the bna_lock is not held
- * unlike the IRQ context.
- */
-static void
-bnad_enable_txrx_irqs(struct bnad *bnad)
-{
- struct bna_tcb *tcb;
- struct bna_ccb *ccb;
- int i, j;
- unsigned long flags;
-
- spin_lock_irqsave(&bnad->bna_lock, flags);
- for (i = 0; i < bnad->num_tx; i++) {
- for (j = 0; j < bnad->num_txq_per_tx; j++) {
- tcb = bnad->tx_info[i].tcb[j];
- bna_ib_coalescing_timer_set(tcb->i_dbell,
- tcb->txq->ib->ib_config.coalescing_timeo);
- bna_ib_ack(tcb->i_dbell, 0);
- }
- }
-
- for (i = 0; i < bnad->num_rx; i++) {
- for (j = 0; j < bnad->num_rxp_per_rx; j++) {
- ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
- bnad_enable_rx_irq_unsafe(ccb);
- }
+ if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
+ bna_rxq_prod_indx_doorbell(rcb);
}
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static inline void
@@ -443,11 +438,15 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
struct bna_rcb *rcb = NULL;
unsigned int wi_range, packets = 0, wis = 0;
struct bnad_unmap_q *unmap_q;
+ struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
- u32 flags;
+ u32 flags, unmap_cons;
u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
+ if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
+ return 0;
+
prefetch(bnad->netdev);
BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
wi_range);
@@ -462,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb = ccb->rcb[1];
unmap_q = rcb->unmap_q;
+ unmap_array = unmap_q->unmap_array;
+ unmap_cons = unmap_q->consumer_index;
- skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+ skb = unmap_array[unmap_cons].skb;
BUG_ON(!(skb));
- unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
- pci_unmap_single(bnad->pcidev,
- pci_unmap_addr(&unmap_q->
- unmap_array[unmap_q->
- consumer_index],
+ unmap_array[unmap_cons].skb = NULL;
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
- rcb->rxq->buffer_size,
- PCI_DMA_FROMDEVICE);
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
/* Should be more efficient ? Performance ? */
@@ -544,12 +543,15 @@ next:
BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
if (likely(ccb)) {
- bna_ib_ack(ccb->i_dbell, packets);
+ if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+ bna_ib_ack(ccb->i_dbell, packets);
bnad_refill_rxq(bnad, ccb->rcb[0]);
if (ccb->rcb[1])
bnad_refill_rxq(bnad, ccb->rcb[1]);
- } else
- bna_ib_ack(ccb->i_dbell, 0);
+ } else {
+ if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+ bna_ib_ack(ccb->i_dbell, 0);
+ }
return packets;
}
@@ -557,6 +559,9 @@ next:
static void
bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
+ if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+ return;
+
bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
bna_ib_ack(ccb->i_dbell, 0);
}
@@ -566,7 +571,8 @@ bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
unsigned long flags;
- spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
+ /* Because of polling context */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
bnad_enable_rx_irq_unsafe(ccb);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -575,9 +581,11 @@ static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
- if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
+ struct napi_struct *napi = &rx_ctrl->napi;
+
+ if (likely(napi_schedule_prep(napi))) {
bnad_disable_rx_irq(bnad, ccb);
- __napi_schedule((&rx_ctrl->napi));
+ __napi_schedule(napi);
}
BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
}
@@ -602,12 +610,11 @@ bnad_msix_mbox_handler(int irq, void *data)
{
u32 intr_status;
unsigned long flags;
- struct net_device *netdev = data;
- struct bnad *bnad;
+ struct bnad *bnad = (struct bnad *)data;
- bnad = netdev_priv(netdev);
+ if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+ return IRQ_HANDLED;
- /* BNA_ISR_GET(bnad); Inc Ref count */
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_intr_status_get(&bnad->bna, intr_status);
@@ -617,7 +624,6 @@ bnad_msix_mbox_handler(int irq, void *data)
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- /* BNAD_ISR_PUT(bnad); Dec Ref count */
return IRQ_HANDLED;
}
@@ -627,8 +633,7 @@ bnad_isr(int irq, void *data)
int i, j;
u32 intr_status;
unsigned long flags;
- struct net_device *netdev = data;
- struct bnad *bnad = netdev_priv(netdev);
+ struct bnad *bnad = (struct bnad *)data;
struct bnad_rx_info *rx_info;
struct bnad_rx_ctrl *rx_ctrl;
@@ -642,16 +647,21 @@ bnad_isr(int irq, void *data)
spin_lock_irqsave(&bnad->bna_lock, flags);
- if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+ if (BNA_IS_MBOX_ERR_INTR(intr_status))
bna_mbox_handler(&bnad->bna, intr_status);
- if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
- goto done;
- }
- }
+
spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (!BNA_IS_INTX_DATA_INTR(intr_status))
+ return IRQ_HANDLED;
+
/* Process data interrupts */
+ /* Tx processing */
+ for (i = 0; i < bnad->num_tx; i++) {
+ for (j = 0; j < bnad->num_txq_per_tx; j++)
+ bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+ }
+ /* Rx processing */
for (i = 0; i < bnad->num_rx; i++) {
rx_info = &bnad->rx_info[i];
if (!rx_info->rx)
@@ -663,7 +673,6 @@ bnad_isr(int irq, void *data)
rx_ctrl->ccb);
}
}
-done:
return IRQ_HANDLED;
}
@@ -674,11 +683,7 @@ done:
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
- int irq = BNAD_GET_MBOX_IRQ(bnad);
-
- if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
- if (bnad->cfg_flags & BNAD_CF_MSIX)
- enable_irq(irq);
+ clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}
@@ -690,14 +695,19 @@ bnad_enable_mbox_irq(struct bnad *bnad)
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
- int irq = BNAD_GET_MBOX_IRQ(bnad);
+ set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+ BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+}
- if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
- if (bnad->cfg_flags & BNAD_CF_MSIX)
- disable_irq_nosync(irq);
+static void
+bnad_set_netdev_perm_addr(struct bnad *bnad)
+{
+ struct net_device *netdev = bnad->netdev;
- BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+ memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+ if (is_zero_ether_addr(netdev->dev_addr))
+ memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}
/* Control Path Handlers */
@@ -755,11 +765,14 @@ bnad_cb_port_link_status(struct bnad *bnad,
if (link_up) {
if (!netif_carrier_ok(bnad->netdev)) {
+ struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
+ if (!tcb)
+ return;
pr_warn("bna: %s link up\n",
bnad->netdev->name);
netif_carrier_on(bnad->netdev);
BNAD_UPDATE_CTR(bnad, link_toggle);
- if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
+ if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
/* Force an immediate Transmit Schedule */
pr_info("bna: %s TX_STARTED\n",
bnad->netdev->name);
@@ -807,6 +820,18 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
struct bnad_tx_info *tx_info =
(struct bnad_tx_info *)tcb->txq->tx->priv;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+
+ while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+ cpu_relax();
+
+ bnad_free_all_txbufs(bnad, tcb);
+
+ unmap_q->producer_index = 0;
+ unmap_q->consumer_index = 0;
+
+ smp_mb__before_clear_bit();
+ clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
tx_info->tcb[tcb->id] = NULL;
}
@@ -822,6 +847,12 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
}
static void
+bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
+{
+ bnad_free_all_rxbufs(bnad, rcb);
+}
+
+static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
struct bnad_rx_info *rx_info =
@@ -849,7 +880,7 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
if (tx_info != &bnad->tx_info[0])
return;
- clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
+ clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
netif_stop_queue(bnad->netdev);
pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
}
@@ -857,30 +888,15 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
{
- if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
- return;
-
- if (netif_carrier_ok(bnad->netdev)) {
- pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
- netif_wake_queue(bnad->netdev);
- BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
- }
-}
-
-static void
-bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
-{
- struct bnad_unmap_q *unmap_q;
+ struct bnad_unmap_q *unmap_q = tcb->unmap_q;
- if (!tcb || (!tcb->unmap_q))
+ if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
return;
- unmap_q = tcb->unmap_q;
- if (!unmap_q->unmap_array)
- return;
+ clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
- if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
- return;
+ while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+ cpu_relax();
bnad_free_all_txbufs(bnad, tcb);
@@ -889,21 +905,45 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
smp_mb__before_clear_bit();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+ /*
+ * Workaround for the first device enable failure, where we
+ * get a 0 MAC address. We try to get the MAC address
+ * again here.
+ */
+ if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
+ bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
+ bnad_set_netdev_perm_addr(bnad);
+ }
+
+ set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+
+ if (netif_carrier_ok(bnad->netdev)) {
+ pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
+ netif_wake_queue(bnad->netdev);
+ BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+ }
+}
+
+static void
+bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+{
+ /* Delay only once for the whole Tx Path Shutdown */
+ if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
+ mdelay(BNAD_TXRX_SYNC_MDELAY);
}
static void
bnad_cb_rx_cleanup(struct bnad *bnad,
struct bna_ccb *ccb)
{
- bnad_cq_cmpl_init(bnad, ccb);
-
- bnad_free_rxbufs(bnad, ccb->rcb[0]);
clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
- if (ccb->rcb[1]) {
- bnad_free_rxbufs(bnad, ccb->rcb[1]);
+ if (ccb->rcb[1])
clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
- }
+
+ if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
+ mdelay(BNAD_TXRX_SYNC_MDELAY);
}
static void
@@ -911,6 +951,13 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
{
struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+ clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
+
+ if (rcb == rcb->cq->ccb->rcb[0])
+ bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
+
+ bnad_free_all_rxbufs(bnad, rcb);
+
set_bit(BNAD_RXQ_STARTED, &rcb->flags);
/* Now allocate & post buffers for this RCB */
@@ -973,9 +1020,9 @@ bnad_mem_free(struct bnad *bnad,
if (mem_info->mem_type == BNA_MEM_T_DMA) {
BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
dma_pa);
- pci_free_consistent(bnad->pcidev,
- mem_info->mdl[i].len,
- mem_info->mdl[i].kva, dma_pa);
+ dma_free_coherent(&bnad->pcidev->dev,
+ mem_info->mdl[i].len,
+ mem_info->mdl[i].kva, dma_pa);
} else
kfree(mem_info->mdl[i].kva);
}
@@ -1005,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad,
for (i = 0; i < mem_info->num; i++) {
mem_info->mdl[i].len = mem_info->len;
mem_info->mdl[i].kva =
- pci_alloc_consistent(bnad->pcidev,
- mem_info->len, &dma_pa);
+ dma_alloc_coherent(&bnad->pcidev->dev,
+ mem_info->len, &dma_pa,
+ GFP_KERNEL);
if (mem_info->mdl[i].kva == NULL)
goto err_return;
@@ -1047,7 +1095,7 @@ bnad_mbox_irq_free(struct bnad *bnad,
spin_unlock_irqrestore(&bnad->bna_lock, flags);
irq = BNAD_GET_MBOX_IRQ(bnad);
- free_irq(irq, bnad->netdev);
+ free_irq(irq, bnad);
kfree(intr_info->idl);
}
@@ -1061,7 +1109,7 @@ static int
bnad_mbox_irq_alloc(struct bnad *bnad,
struct bna_intr_info *intr_info)
{
- int err;
+ int err = 0;
unsigned long flags;
u32 irq;
irq_handler_t irq_handler;
@@ -1096,22 +1144,17 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
*/
set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+ BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+
err = request_irq(irq, irq_handler, flags,
- bnad->mbox_irq_name, bnad->netdev);
+ bnad->mbox_irq_name, bnad);
if (err) {
kfree(intr_info->idl);
intr_info->idl = NULL;
- return err;
}
- spin_lock_irqsave(&bnad->bna_lock, flags);
-
- if (bnad->cfg_flags & BNAD_CF_MSIX)
- disable_irq_nosync(irq);
-
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
- return 0;
+ return err;
}
static void
@@ -1388,13 +1431,24 @@ bnad_ioc_hb_check(unsigned long data)
}
static void
-bnad_ioc_sem_timeout(unsigned long data)
+bnad_iocpf_timeout(unsigned long data)
+{
+ struct bnad *bnad = (struct bnad *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_iocpf_sem_timeout(unsigned long data)
{
struct bnad *bnad = (struct bnad *)data;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
+ bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1555,62 +1609,19 @@ poll_exit:
return rcvd;
}
-static int
-bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
-{
- struct bnad_rx_ctrl *rx_ctrl =
- container_of(napi, struct bnad_rx_ctrl, napi);
- struct bna_ccb *ccb;
- struct bnad *bnad;
- int rcvd = 0;
- int i, j;
-
- ccb = rx_ctrl->ccb;
-
- bnad = ccb->bnad;
-
- if (!netif_carrier_ok(bnad->netdev))
- goto poll_exit;
-
- /* Handle Tx Completions, if any */
- for (i = 0; i < bnad->num_tx; i++) {
- for (j = 0; j < bnad->num_txq_per_tx; j++)
- bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
- }
-
- /* Handle Rx Completions */
- rcvd = bnad_poll_cq(bnad, ccb, budget);
- if (rcvd == budget)
- return rcvd;
-poll_exit:
- napi_complete((napi));
-
- BNAD_UPDATE_CTR(bnad, netif_rx_complete);
-
- bnad_enable_txrx_irqs(bnad);
- return rcvd;
-}
-
static void
bnad_napi_enable(struct bnad *bnad, u32 rx_id)
{
- int (*napi_poll) (struct napi_struct *, int);
struct bnad_rx_ctrl *rx_ctrl;
int i;
- unsigned long flags;
-
- spin_lock_irqsave(&bnad->bna_lock, flags);
- if (bnad->cfg_flags & BNAD_CF_MSIX)
- napi_poll = bnad_napi_poll_rx;
- else
- napi_poll = bnad_napi_poll_txrx;
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* Initialize & enable NAPI */
for (i = 0; i < bnad->num_rxp_per_rx; i++) {
rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
+
netif_napi_add(bnad->netdev, &rx_ctrl->napi,
- napi_poll, 64);
+ bnad_napi_poll_rx, 64);
+
napi_enable(&rx_ctrl->napi);
}
}
@@ -1825,6 +1836,7 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
/* Initialize the Rx event handlers */
rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
+ rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
rx_cbfn.rcb_destroy_cbfn = NULL;
rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
@@ -1968,6 +1980,27 @@ bnad_enable_default_bcast(struct bnad *bnad)
return 0;
}
+/* Called with bnad_conf_lock() held */
+static void
+bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
+{
+ u16 vlan_id;
+ unsigned long flags;
+
+ if (!bnad->vlan_grp)
+ return;
+
+ BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
+
+ for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
+ if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
+ continue;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+}
+
/* Statistics utilities */
void
bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
@@ -2152,16 +2185,6 @@ bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
bnad->num_rxp_per_rx = 1;
}
-static void
-bnad_set_netdev_perm_addr(struct bnad *bnad)
-{
- struct net_device *netdev = bnad->netdev;
-
- memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
- if (is_zero_ether_addr(netdev->dev_addr))
- memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
-}
-
/* Enable / disable device */
static void
bnad_device_disable(struct bnad *bnad)
@@ -2353,6 +2376,9 @@ bnad_open(struct net_device *netdev)
/* Enable broadcast */
bnad_enable_default_bcast(bnad);
+ /* Restore VLANs, if any */
+ bnad_restore_vlans(bnad, 0);
+
/* Set the UCAST address */
spin_lock_irqsave(&bnad->bna_lock, flags);
bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
@@ -2433,21 +2459,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+ tx_id = 0;
+
+ tx_info = &bnad->tx_info[tx_id];
+ tcb = tx_info->tcb[tx_id];
+ unmap_q = tcb->unmap_q;
+
/*
* Takes care of the Tx that is scheduled between clearing the flag
* and the netif_stop_queue() call.
*/
- if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
+ if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- tx_id = 0;
-
- tx_info = &bnad->tx_info[tx_id];
- tcb = tx_info->tcb[tx_id];
- unmap_q = tcb->unmap_q;
-
vectors = 1 + skb_shinfo(skb)->nr_frags;
if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
dev_kfree_skb(skb);
@@ -2462,7 +2488,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
tcb->consumer_index &&
!test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
acked = bnad_free_txbufs(bnad, tcb);
- bna_ib_ack(tcb->i_dbell, acked);
+ if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+ bna_ib_ack(tcb->i_dbell, acked);
smp_mb__before_clear_bit();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
} else {
@@ -2579,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
unmap_q->unmap_array[unmap_prod].skb = skb;
BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
txqent->vector[vect_id].length = htons(skb_headlen(skb));
- dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2609,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
txqent->vector[vect_id].length = htons(size);
- dma_addr =
- pci_map_page(bnad->pcidev, frag->page,
- frag->page_offset, size,
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
+ frag->page_offset, size, DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -2624,6 +2649,10 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
tcb->producer_index = txq_prod;
smp_mb();
+
+ if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+ return NETDEV_TX_OK;
+
bna_txq_prod_indx_doorbell(tcb);
if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
@@ -2997,14 +3026,14 @@ bnad_pci_init(struct bnad *bnad,
err = pci_request_regions(pdev, BNAD_NAME);
if (err)
goto disable_device;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
*using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err)
goto release_regions;
}
@@ -3032,7 +3061,7 @@ static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pcidev_id)
{
- bool using_dac;
+ bool using_dac = false;
int err;
struct bnad *bnad;
struct bna *bna;
@@ -3066,7 +3095,7 @@ bnad_pci_probe(struct pci_dev *pdev,
/*
* PCI initialization
* Output : using_dac = 1 for 64 bit DMA
- * = 0 for 32 bit DMA
+ * = 0 for 32 bit DMA
*/
err = bnad_pci_init(bnad, pdev, &using_dac);
if (err)
@@ -3084,6 +3113,9 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Initialize netdev structure, set up ethtool ops */
bnad_netdev_init(bnad, using_dac);
+ /* Set link to down state */
+ netif_carrier_off(netdev);
+
bnad_enable_msix(bnad);
/* Get resource requirement form bna */
@@ -3115,11 +3147,13 @@ bnad_pci_probe(struct pci_dev *pdev,
((unsigned long)bnad));
setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
((unsigned long)bnad));
- setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
+ setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
+ ((unsigned long)bnad));
+ setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
((unsigned long)bnad));
/* Now start the timer before calling IOC */
- mod_timer(&bnad->bna.device.ioc.ioc_timer,
+ mod_timer(&bnad->bna.device.ioc.iocpf_timer,
jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
/*
@@ -3137,11 +3171,6 @@ bnad_pci_probe(struct pci_dev *pdev,
mutex_unlock(&bnad->conf_mutex);
- /*
- * Make sure the link appears down to the stack
- */
- netif_carrier_off(netdev);
-
/* Finally, register with net_device layer */
err = register_netdev(netdev);
if (err) {
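Most of the bnad.c churn above is a mechanical move from the legacy pci_map_*/pci_unmap_*/pci_alloc_consistent wrappers to the generic DMA API, which takes a struct device and, for coherent allocations, an explicit GFP flag. A reduced sketch of the streaming-mapping pattern under those assumptions (hypothetical helpers, not part of the driver):

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map an skb head for device reads; returns 0 or -ENOMEM. */
static int ex_map_tx_head(struct pci_dev *pdev, struct sk_buff *skb,
			  dma_addr_t *mapping)
{
	*mapping = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *mapping))
		return -ENOMEM;
	return 0;
}

/* Undo the mapping once the hardware is done with the buffer. */
static void ex_unmap_tx_head(struct pci_dev *pdev, dma_addr_t mapping,
			     unsigned int len)
{
	dma_unmap_single(&pdev->dev, mapping, len, DMA_TO_DEVICE);
}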
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index ebc3a9078642..a89117fa4970 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -51,6 +51,7 @@
*/
struct bnad_rx_ctrl {
struct bna_ccb *ccb;
+ unsigned long flags;
struct napi_struct napi;
};
@@ -64,7 +65,7 @@ struct bnad_rx_ctrl {
#define BNAD_NAME "bna"
#define BNAD_NAME_LEN 64
-#define BNAD_VERSION "2.3.2.0"
+#define BNAD_VERSION "2.3.2.3"
#define BNAD_MAILBOX_MSIX_VECTORS 1
@@ -82,6 +83,7 @@ struct bnad_rx_ctrl {
/* Bit positions for tcb->flags */
#define BNAD_TXQ_FREE_SENT 0
+#define BNAD_TXQ_TX_STARTED 1
/* Bit positions for rcb->flags */
#define BNAD_RXQ_REFILL 0
@@ -124,6 +126,7 @@ struct bnad_completion {
struct bnad_drv_stats {
u64 netif_queue_stop;
u64 netif_queue_wakeup;
+ u64 netif_queue_stopped;
u64 tso4;
u64 tso6;
u64 tso_err;
@@ -178,7 +181,7 @@ struct bnad_rx_info {
/* Unmap queues for Tx / Rx cleanup */
struct bnad_skb_unmap {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(dma_addr)
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
};
struct bnad_unmap_q {
@@ -199,12 +202,12 @@ struct bnad_unmap_q {
/* Set, tested & cleared using xxx_bit() functions */
/* Values indicated bit positions */
#define BNAD_RF_CEE_RUNNING 1
-#define BNAD_RF_HW_ERROR 2
-#define BNAD_RF_MBOX_IRQ_DISABLED 3
-#define BNAD_RF_TX_STARTED 4
-#define BNAD_RF_RX_STARTED 5
-#define BNAD_RF_DIM_TIMER_RUNNING 6
-#define BNAD_RF_STATS_TIMER_RUNNING 7
+#define BNAD_RF_MBOX_IRQ_DISABLED 2
+#define BNAD_RF_RX_STARTED 3
+#define BNAD_RF_DIM_TIMER_RUNNING 4
+#define BNAD_RF_STATS_TIMER_RUNNING 5
+#define BNAD_RF_TX_SHUTDOWN_DELAYED 6
+#define BNAD_RF_RX_SHUTDOWN_DELAYED 7
struct bnad {
struct net_device *netdev;
@@ -306,8 +309,10 @@ extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
extern void bnad_dim_timer_start(struct bnad *bnad);
/* Statistics */
-extern void bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
-extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
+extern void bnad_netdev_qstats_fill(struct bnad *bnad,
+ struct rtnl_link_stats64 *stats);
+extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
+ struct rtnl_link_stats64 *stats);
/**
* MACROS
@@ -320,9 +325,11 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64
#define bnad_enable_rx_irq_unsafe(_ccb) \
{ \
- bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
- (_ccb)->rx_coalescing_timeo); \
- bna_ib_ack((_ccb)->i_dbell, 0); \
+ if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) {\
+ bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
+ (_ccb)->rx_coalescing_timeo); \
+ bna_ib_ack((_ccb)->i_dbell, 0); \
+ } \
}
#define bnad_dim_timer_running(_bnad) \
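bnad.h above retires the device-wide BNAD_RF_TX_STARTED run flag in favour of a per-queue BNAD_TXQ_TX_STARTED bit in tcb->flags, tested with the atomic bitops before ringing doorbells. A compact sketch of that pattern, with stand-in structure and bit names:

#include <linux/bitops.h>
#include <linux/types.h>

#define EX_TXQ_TX_STARTED	1	/* bit position, not a mask */

struct ex_tcb {
	unsigned long flags;
};

static void ex_txq_mark_started(struct ex_tcb *tcb)
{
	set_bit(EX_TXQ_TX_STARTED, &tcb->flags);
}

static bool ex_txq_may_ring_doorbell(const struct ex_tcb *tcb)
{
	return test_bit(EX_TXQ_TX_STARTED, &tcb->flags);
}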
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 11fa2ea842c1..142d6047da27 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -68,6 +68,7 @@ static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"netif_queue_stop",
"netif_queue_wakeup",
+ "netif_queue_stopped",
"tso4",
"tso6",
"tso_err",
@@ -274,7 +275,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
- memset(ioc_attr, 0, sizeof(*ioc_attr));
spin_lock_irqsave(&bnad->bna_lock, flags);
bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -330,10 +330,6 @@ do { \
BNAD_GET_REG(PCIE_MISC_REG);
- BNAD_GET_REG(HOST_SEM0_REG);
- BNAD_GET_REG(HOST_SEM1_REG);
- BNAD_GET_REG(HOST_SEM2_REG);
- BNAD_GET_REG(HOST_SEM3_REG);
BNAD_GET_REG(HOST_SEM0_INFO_REG);
BNAD_GET_REG(HOST_SEM1_INFO_REG);
BNAD_GET_REG(HOST_SEM2_INFO_REG);
@@ -1184,6 +1180,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
bi = sizeof(*net_stats64) / sizeof(u64);
+ /* Get netif_queue_stopped from stack */
+ bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
+
/* Fill driver stats into ethtool buffers */
stats64 = (u64 *)&bnad->stats.drv_stats;
for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
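One hunk in bnad_ethtool.c simply drops a memset() that followed kzalloc(); kzalloc() already returns zeroed memory, so the extra clear was redundant. A tiny illustration with a hypothetical structure:

#include <linux/slab.h>
#include <linux/types.h>

struct ex_attr {
	u32 a;
	u32 b;
};

static struct ex_attr *ex_alloc_attr(void)
{
	/* kzalloc() = kmalloc() + zeroing, so no follow-up memset() is needed */
	return kzalloc(sizeof(struct ex_attr), GFP_KERNEL);
}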
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 062600be073b..d1865cc97313 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1,6 +1,6 @@
/* bnx2.c: Broadcom NX2 network driver.
*
- * Copyright (c) 2004-2010 Broadcom Corporation
+ * Copyright (c) 2004-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,11 +56,11 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.0.18"
-#define DRV_MODULE_RELDATE "Oct 7, 2010"
-#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.0.15.fw"
+#define DRV_MODULE_VERSION "2.1.6"
+#define DRV_MODULE_RELDATE "Mar 7, 2011"
+#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
-#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.0.17.fw"
+#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw"
#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
@@ -435,7 +435,8 @@ bnx2_cnic_stop(struct bnx2 *bp)
struct cnic_ctl_info info;
mutex_lock(&bp->cnic_lock);
- c_ops = bp->cnic_ops;
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_lock));
if (c_ops) {
info.cmd = CNIC_CTL_STOP_CMD;
c_ops->cnic_ctl(bp->cnic_data, &info);
@@ -450,7 +451,8 @@ bnx2_cnic_start(struct bnx2 *bp)
struct cnic_ctl_info info;
mutex_lock(&bp->cnic_lock);
- c_ops = bp->cnic_ops;
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_lock));
if (c_ops) {
if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -766,13 +768,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
int j;
rxr->rx_buf_ring =
- vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
+ vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
if (rxr->rx_buf_ring == NULL)
return -ENOMEM;
- memset(rxr->rx_buf_ring, 0,
- SW_RXBD_RING_SIZE * bp->rx_max_ring);
-
for (j = 0; j < bp->rx_max_ring; j++) {
rxr->rx_desc_ring[j] =
dma_alloc_coherent(&bp->pdev->dev,
@@ -785,13 +784,11 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
}
if (bp->rx_pg_ring_size) {
- rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
+ rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
bp->rx_max_pg_ring);
if (rxr->rx_pg_ring == NULL)
return -ENOMEM;
- memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
- bp->rx_max_pg_ring);
}
for (j = 0; j < bp->rx_max_pg_ring; j++) {
@@ -4645,13 +4642,28 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
/* Wait for the current PCI transaction to complete before
* issuing a reset. */
- REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
- BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
- BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
- BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
- BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
- val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
- udelay(5);
+ if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
+ (CHIP_NUM(bp) == CHIP_NUM_5708)) {
+ REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
+ BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
+ BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
+ BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
+ BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
+ val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
+ udelay(5);
+ } else { /* 5709 */
+ val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+ val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
+ REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
+ val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+
+ for (i = 0; i < 100; i++) {
+ msleep(1);
+ val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
+ if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
+ break;
+ }
+ }
/* Wait for the firmware to tell us it is ok to issue a reset. */
bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
@@ -4673,7 +4685,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
- pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
+ REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
} else {
val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
@@ -6086,7 +6098,7 @@ bnx2_request_irq(struct bnx2 *bp)
}
static void
-bnx2_free_irq(struct bnx2 *bp)
+__bnx2_free_irq(struct bnx2 *bp)
{
struct bnx2_irq *irq;
int i;
@@ -6097,6 +6109,13 @@ bnx2_free_irq(struct bnx2 *bp)
free_irq(irq->vector, &bp->bnx2_napi[i]);
irq->requested = 0;
}
+}
+
+static void
+bnx2_free_irq(struct bnx2 *bp)
+{
+
+ __bnx2_free_irq(bp);
if (bp->flags & BNX2_FLAG_USING_MSI)
pci_disable_msi(bp->pdev);
else if (bp->flags & BNX2_FLAG_USING_MSIX)
@@ -6801,28 +6820,30 @@ bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
u32 *p = _p, i, offset;
u8 *orig_p = _p;
struct bnx2 *bp = netdev_priv(dev);
- u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
- 0x0800, 0x0880, 0x0c00, 0x0c10,
- 0x0c30, 0x0d08, 0x1000, 0x101c,
- 0x1040, 0x1048, 0x1080, 0x10a4,
- 0x1400, 0x1490, 0x1498, 0x14f0,
- 0x1500, 0x155c, 0x1580, 0x15dc,
- 0x1600, 0x1658, 0x1680, 0x16d8,
- 0x1800, 0x1820, 0x1840, 0x1854,
- 0x1880, 0x1894, 0x1900, 0x1984,
- 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
- 0x1c80, 0x1c94, 0x1d00, 0x1d84,
- 0x2000, 0x2030, 0x23c0, 0x2400,
- 0x2800, 0x2820, 0x2830, 0x2850,
- 0x2b40, 0x2c10, 0x2fc0, 0x3058,
- 0x3c00, 0x3c94, 0x4000, 0x4010,
- 0x4080, 0x4090, 0x43c0, 0x4458,
- 0x4c00, 0x4c18, 0x4c40, 0x4c54,
- 0x4fc0, 0x5010, 0x53c0, 0x5444,
- 0x5c00, 0x5c18, 0x5c80, 0x5c90,
- 0x5fc0, 0x6000, 0x6400, 0x6428,
- 0x6800, 0x6848, 0x684c, 0x6860,
- 0x6888, 0x6910, 0x8000 };
+ static const u32 reg_boundaries[] = {
+ 0x0000, 0x0098, 0x0400, 0x045c,
+ 0x0800, 0x0880, 0x0c00, 0x0c10,
+ 0x0c30, 0x0d08, 0x1000, 0x101c,
+ 0x1040, 0x1048, 0x1080, 0x10a4,
+ 0x1400, 0x1490, 0x1498, 0x14f0,
+ 0x1500, 0x155c, 0x1580, 0x15dc,
+ 0x1600, 0x1658, 0x1680, 0x16d8,
+ 0x1800, 0x1820, 0x1840, 0x1854,
+ 0x1880, 0x1894, 0x1900, 0x1984,
+ 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
+ 0x1c80, 0x1c94, 0x1d00, 0x1d84,
+ 0x2000, 0x2030, 0x23c0, 0x2400,
+ 0x2800, 0x2820, 0x2830, 0x2850,
+ 0x2b40, 0x2c10, 0x2fc0, 0x3058,
+ 0x3c00, 0x3c94, 0x4000, 0x4010,
+ 0x4080, 0x4090, 0x43c0, 0x4458,
+ 0x4c00, 0x4c18, 0x4c40, 0x4c54,
+ 0x4fc0, 0x5010, 0x53c0, 0x5444,
+ 0x5c00, 0x5c18, 0x5c80, 0x5c90,
+ 0x5fc0, 0x6000, 0x6400, 0x6428,
+ 0x6800, 0x6848, 0x684c, 0x6860,
+ 0x6888, 0x6910, 0x8000
+ };
regs->version = 0;
@@ -7080,6 +7101,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
bnx2_netif_stop(bp, true);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
+ __bnx2_free_irq(bp);
bnx2_free_skbs(bp);
bnx2_free_mem(bp);
}
@@ -7092,6 +7114,9 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
rc = bnx2_alloc_mem(bp);
if (!rc)
+ rc = bnx2_request_irq(bp);
+
+ if (!rc)
rc = bnx2_init_nic(bp, 0);
if (rc) {
@@ -7530,6 +7555,10 @@ bnx2_set_flags(struct net_device *dev, u32 data)
!(data & ETH_FLAG_RXVLAN))
return -EINVAL;
+ /* TSO with VLAN tag won't work with current firmware */
+ if (!(data & ETH_FLAG_TXVLAN))
+ return -EINVAL;
+
rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
ETH_FLAG_TXVLAN);
if (rc)
@@ -7914,15 +7943,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
goto err_out_release;
}
+ bnx2_set_power_state(bp, PCI_D0);
+
/* Configure byte swap and enable write to the reg_window registers.
* Rely on CPU to do target byte swapping on big endian systems
* The chip's target access swapping will not swap all accesses
*/
- pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
- BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
- BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
-
- bnx2_set_power_state(bp, PCI_D0);
+ REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
+ BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+ BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
@@ -7939,11 +7968,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
/* AER (Advanced Error Reporting) hooks */
err = pci_enable_pcie_error_reporting(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
- "failed 0x%x\n", err);
- /* non-fatal, continue */
- }
+ if (!err)
+ bp->flags |= BNX2_FLAG_AER_ENABLED;
} else {
bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
@@ -8206,8 +8232,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
return 0;
err_out_unmap:
- if (bp->flags & BNX2_FLAG_PCIE)
+ if (bp->flags & BNX2_FLAG_AER_ENABLED) {
pci_disable_pcie_error_reporting(pdev);
+ bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+ }
if (bp->regview) {
iounmap(bp->regview);
@@ -8289,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
#endif
};
-static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
+static void inline vlan_features_add(struct net_device *dev, u32 flags)
{
dev->vlan_features |= flags;
}
@@ -8383,8 +8411,6 @@ bnx2_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
- flush_scheduled_work();
-
unregister_netdev(dev);
if (bp->mips_firmware)
@@ -8397,8 +8423,10 @@ bnx2_remove_one(struct pci_dev *pdev)
kfree(bp->temp_stats_blk);
- if (bp->flags & BNX2_FLAG_PCIE)
+ if (bp->flags & BNX2_FLAG_AER_ENABLED) {
pci_disable_pcie_error_reporting(pdev);
+ bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+ }
free_netdev(dev);
@@ -8421,7 +8449,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
if (!netif_running(dev))
return 0;
- flush_scheduled_work();
+ cancel_work_sync(&bp->reset_task);
bnx2_netif_stop(bp, true);
netif_device_detach(dev);
del_timer_sync(&bp->timer);
@@ -8514,7 +8542,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
}
rtnl_unlock();
- if (!(bp->flags & BNX2_FLAG_PCIE))
+ if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
return result;
err = pci_cleanup_aer_uncorrect_error_status(pdev);
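The cnic_ops hunks in bnx2.c read a pointer while holding bp->cnic_lock, so the plain load becomes rcu_dereference_protected() with a lockdep expression (the matching __rcu annotation is in the bnx2.h hunk below). A reduced sketch of that update-side pattern under assumed names:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct ex_ops {
	void (*ctl)(void *data);
};

struct ex_dev {
	struct mutex lock;
	struct ex_ops __rcu *ops;
	void *data;
};

static void ex_call_ctl(struct ex_dev *dev)
{
	struct ex_ops *ops;

	mutex_lock(&dev->lock);
	/* Update-side access: the lock, not rcu_read_lock(), protects us */
	ops = rcu_dereference_protected(dev->ops,
					lockdep_is_held(&dev->lock));
	if (ops)
		ops->ctl(dev->data);
	mutex_unlock(&dev->lock);
}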
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index bf4c3421067d..68020451dc4f 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -1,6 +1,6 @@
/* bnx2.h: Broadcom NX2 network driver.
*
- * Copyright (c) 2004-2009 Broadcom Corporation
+ * Copyright (c) 2004-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -461,6 +461,8 @@ struct l2_fhdr {
#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090
#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094
+#define BNX2_PCICFG_DEVICE_CONTROL 0x000000b4
+#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND ((1L<<5)<<16)
/*
* pci_reg definition
@@ -6205,6 +6207,8 @@ struct l2_fhdr {
#define BNX2_CP_SCRATCH 0x001a0000
+#define BNX2_FW_MAX_ISCSI_CONN 0x001a0080
+
/*
* mcp_reg definition
@@ -6739,6 +6743,7 @@ struct bnx2 {
#define BNX2_FLAG_JUMBO_BROKEN 0x00000800
#define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000
#define BNX2_FLAG_BROKEN_STATS 0x00002000
+#define BNX2_FLAG_AER_ENABLED 0x00004000
struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC];
@@ -6756,7 +6761,7 @@ struct bnx2 {
u32 tx_wake_thresh;
#ifdef BCM_CNIC
- struct cnic_ops *cnic_ops;
+ struct cnic_ops __rcu *cnic_ops;
void *cnic_data;
#endif
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index 084afce89ae9..bb83a2961273 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_BNX2X) += bnx2x.o
-bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o
+bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 863e73a85fbe..b7ff87b35fbb 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -13,6 +13,8 @@
#ifndef BNX2X_H
#define BNX2X_H
+#include <linux/netdevice.h>
+#include <linux/types.h>
/* compilation time flags */
@@ -20,15 +22,17 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.60.00-4"
-#define DRV_MODULE_RELDATE "2010/11/01"
+#define DRV_MODULE_VERSION "1.62.11-0"
+#define DRV_MODULE_RELDATE "2011/01/31"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
#define BNX2X_NEW_NAPI
-
+#if defined(CONFIG_DCB)
+#define BCM_DCBNL
+#endif
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "../cnic_if.h"
@@ -48,6 +52,7 @@
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
+#include "bnx2x_dcb.h"
#include "bnx2x_stats.h"
/* error/debug prints */
@@ -124,6 +129,7 @@ void bnx2x_panic_dump(struct bnx2x *bp);
#endif
#define bnx2x_mc_addr(ha) ((ha)->addr)
+#define bnx2x_uc_addr(ha) ((ha)->addr)
#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
#define U64_HI(x) (u32)(((u64)(x)) >> 32)
@@ -199,10 +205,25 @@ void bnx2x_panic_dump(struct bnx2x *bp);
/* EQ completions */
#define HC_SP_INDEX_EQ_CONS 7
+/* FCoE L2 connection completions */
+#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6
+#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4
/* iSCSI L2 */
#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
+/* Special clients parameters */
+
+/* SB indices */
+/* FCoE L2 */
+#define BNX2X_FCOE_L2_RX_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
+
+#define BNX2X_FCOE_L2_TX_INDEX \
+ (&bp->def_status_blk->sp_sb.\
+ index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
+
/**
* CIDs and CLIDs:
* CLIDs below is a CLID for func 0, then the CLID for other
@@ -215,12 +236,19 @@ void bnx2x_panic_dump(struct bnx2x *bp);
#define BNX2X_ISCSI_ETH_CL_ID 17
#define BNX2X_ISCSI_ETH_CID 17
+/* FCoE L2 */
+#define BNX2X_FCOE_ETH_CL_ID 18
+#define BNX2X_FCOE_ETH_CID 18
+
/** Additional rings budgeting */
#ifdef BCM_CNIC
#define CNIC_CONTEXT_USE 1
+#define FCOE_CONTEXT_USE 1
#else
#define CNIC_CONTEXT_USE 0
+#define FCOE_CONTEXT_USE 0
#endif /* BCM_CNIC */
+#define NONE_ETH_CONTEXT_USE (FCOE_CONTEXT_USE)
#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@ -314,6 +342,8 @@ struct bnx2x_fastpath {
/* chip independent shortcut into rx_prods_offset memory */
u32 ustorm_rx_prods_offset;
+ u32 rx_buf_size;
+
dma_addr_t status_blk_mapping;
struct sw_tx_bd *tx_buf_ring;
@@ -402,6 +432,21 @@ struct bnx2x_fastpath {
#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
+
+#ifdef BCM_CNIC
+/* FCoE L2 `fastpath' is right after the eth entries */
+#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
+#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
+#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
+#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
+#else
+#define IS_FCOE_FP(fp) false
+#define IS_FCOE_IDX(idx) false
+#endif
+
/* MC hsi */
#define MAX_FETCH_BD 13 /* HW max BDs per packet */
@@ -598,6 +643,7 @@ struct bnx2x_common {
#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
+#define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
int flash_size;
#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
@@ -669,8 +715,14 @@ struct bnx2x_port {
enum {
CAM_ETH_LINE = 0,
CAM_ISCSI_ETH_LINE,
- CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE
+ CAM_FIP_ETH_LINE,
+ CAM_FIP_MCAST_LINE,
+ CAM_MAX_PF_LINE = CAM_FIP_MCAST_LINE
};
+/* number of MACs per function in NIG memory - used for SI mode */
+#define NIG_LLH_FUNC_MEM_SIZE 16
+/* number of entries in NIG_REG_LLHX_FUNC_MEM */
+#define NIG_LLH_FUNC_MEM_MAX_OFFSET 8
#define BNX2X_VF_ID_INVALID 0xFF
@@ -710,6 +762,14 @@ enum {
*/
#define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE)
+/*
+ * The number of FP-SB allocated by the driver == max number of regular L2
+ * queues + 1 for the CNIC which also consumes an FP-SB
+ */
+#define FP_SB_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE)
+#define NUM_IGU_SB_REQUIRED(cid_cnt) \
+ (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE)
+
union cdu_context {
struct eth_context eth;
char pad[1024];
@@ -722,7 +782,8 @@ union cdu_context {
#ifdef BCM_CNIC
#define CNIC_ISCSI_CID_MAX 256
-#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX)
+#define CNIC_FCOE_CID_MAX 2048
+#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
#endif
@@ -756,6 +817,7 @@ struct bnx2x_slowpath {
struct eth_stats_query fw_stats;
struct mac_configuration_cmd mac_config;
struct mac_configuration_cmd mcast_config;
+ struct mac_configuration_cmd uc_mac_config;
struct client_init_ramrod_data client_init_data;
/* used by dmae command executer */
@@ -770,6 +832,8 @@ struct bnx2x_slowpath {
u32 wb_comp;
u32 wb_data[4];
+ /* pfc configuration for DCBX ramrod */
+ struct flow_control_configuration pfc_config;
};
#define bnx2x_sp(bp, var) (&bp->slowpath->var)
@@ -855,7 +919,6 @@ struct bnx2x {
int tx_ring_size;
u32 rx_csum;
- u32 rx_buf_size;
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE 60
@@ -883,7 +946,7 @@ struct bnx2x {
struct eth_spe *spq_prod_bd;
struct eth_spe *spq_last_bd;
__le16 *dsb_sp_prod;
- atomic_t spq_left; /* serialize spq */
+ atomic_t cq_spq_left; /* ETH_XXX ramrods credit */
/* used to synchronize spq accesses */
spinlock_t spq_lock;
@@ -893,6 +956,7 @@ struct bnx2x {
u16 eq_prod;
u16 eq_cons;
__le16 *eq_cons_sb;
+ atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
/* Flags for marking that there is a STAT_QUERY or
SET_MAC ramrod pending */
@@ -918,6 +982,14 @@ struct bnx2x {
#define DISABLE_MSI_FLAG 0x200
#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
#define MF_FUNC_DIS 0x1000
+#define FCOE_MACS_SET 0x2000
+#define NO_FCOE_FLAG 0x4000
+#define NO_ISCSI_OOO_FLAG 0x8000
+#define NO_ISCSI_FLAG 0x10000
+
+#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
+#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
int pf_num; /* absolute PF number */
int pfid; /* per-path PF number */
@@ -967,6 +1039,8 @@ struct bnx2x {
u16 mf_ov;
u8 mf_mode;
#define IS_MF(bp) (bp->mf_mode != 0)
+#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
+#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
u8 wol;
@@ -1002,6 +1076,7 @@ struct bnx2x {
int num_queues;
int disable_tpa;
int int_mode;
+ u32 *rx_indir_table;
struct tstorm_eth_mac_filter_config mac_filters;
#define BNX2X_ACCEPT_NONE 0x0000
@@ -1010,6 +1085,7 @@ struct bnx2x {
#define BNX2X_ACCEPT_ALL_UNICAST 0x0004
#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008
#define BNX2X_ACCEPT_BROADCAST 0x0010
+#define BNX2X_ACCEPT_UNMATCHED_UCAST 0x0020
#define BNX2X_PROMISCUOUS_MODE 0x10000
u32 rx_mode;
@@ -1047,7 +1123,7 @@ struct bnx2x {
#define BNX2X_CNIC_FLAG_MAC_SET 1
void *t2;
dma_addr_t t2_mapping;
- struct cnic_ops *cnic_ops;
+ struct cnic_ops __rcu *cnic_ops;
void *cnic_data;
u32 cnic_tag;
struct cnic_eth_dev cnic_eth_dev;
@@ -1062,12 +1138,12 @@ struct bnx2x {
u16 cnic_kwq_pending;
u16 cnic_spq_pending;
struct mutex cnic_mutex;
- u8 iscsi_mac[6];
+ u8 fip_mac[ETH_ALEN];
#endif
int dmae_ready;
/* used to synchronize dmae accesses */
- struct mutex dmae_mutex;
+ spinlock_t dmae_lock;
/* used to protect the FW mail box */
struct mutex fw_mb_mutex;
@@ -1122,6 +1198,32 @@ struct bnx2x {
char fw_ver[32];
const struct firmware *firmware;
+ /* LLDP params */
+ struct bnx2x_config_lldp_params lldp_config_params;
+
+ /* DCB support on/off */
+ u16 dcb_state;
+#define BNX2X_DCB_STATE_OFF 0
+#define BNX2X_DCB_STATE_ON 1
+
+ /* DCBX engine mode */
+ int dcbx_enabled;
+#define BNX2X_DCBX_ENABLED_OFF 0
+#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1
+#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2
+#define BNX2X_DCBX_ENABLED_INVALID (-1)
+
+ bool dcbx_mode_uset;
+
+ struct bnx2x_config_dcbx_params dcbx_config_params;
+
+ struct bnx2x_dcbx_port_params dcbx_port_params;
+ int dcb_version;
+
+ /* DCBX Negotiation results */
+ struct dcbx_features dcbx_local_feat;
+ u32 dcbx_error;
+ u32 pending_max;
};
/**
@@ -1152,10 +1254,17 @@ struct bnx2x {
#define RSS_IPV6_TCP_CAP 0x0008
#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
+#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE)
+
+/* ethtool statistics are displayed for all regular ethernet queues and the
+ * fcoe L2 queue if not disabled
+ */
+#define BNX2X_NUM_STAT_QUEUES(bp) (NO_FCOE(bp) ? BNX2X_NUM_ETH_QUEUES(bp) : \
+ (BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE))
+
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
#define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE)
-#define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1)
#define RSS_IPV4_CAP_MASK \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
@@ -1248,6 +1357,7 @@ struct bnx2x_client_ramrod_params {
u16 cl_id;
u32 cid;
u8 poll;
+#define CLIENT_IS_FCOE 0x01
#define CLIENT_IS_LEADING_RSS 0x02
u8 flags;
};
@@ -1280,11 +1390,54 @@ struct bnx2x_func_init_params {
u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
};
+#define for_each_eth_queue(bp, var) \
+ for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
+
+#define for_each_nondefault_eth_queue(bp, var) \
+ for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
+
+#define for_each_napi_queue(bp, var) \
+ for (var = 0; \
+ var < BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE; var++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
#define for_each_queue(bp, var) \
- for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
+ for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_rx_queue(bp, var) \
+ for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
+ if (skip_rx_queue(bp, var)) \
+ continue; \
+ else
+
+#define for_each_tx_queue(bp, var) \
+ for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
+ if (skip_tx_queue(bp, var)) \
+ continue; \
+ else
+
#define for_each_nondefault_queue(bp, var) \
- for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
+ for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) \
+ if (skip_queue(bp, var)) \
+ continue; \
+ else
+
+/* skip rx queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
+ */
+#define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+/* skip tx queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
+ */
+#define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
+#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02
@@ -1307,6 +1460,12 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
u32 data_hi, u32 data_lo, int common);
+
+/* Clears multicast and unicast list configuration in the chip. */
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_uc_list(struct bnx2x *bp);
+
void bnx2x_update_coalesce(struct bnx2x *bp);
int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
@@ -1329,7 +1488,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_ILT_ZALLOC(x, y, size) \
do { \
- x = pci_alloc_consistent(bp->pdev, size, y); \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
if (x) \
memset(x, 0, size); \
} while (0)
@@ -1337,7 +1496,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_ILT_FREE(x, y, size) \
do { \
if (x) { \
- pci_free_consistent(bp->pdev, size, x, y); \
+ dma_free_coherent(&bp->pdev->dev, size, x, y); \
x = NULL; \
y = 0; \
} \
@@ -1473,19 +1632,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_BTR 4
#define MAX_SPQ_PENDING 8
-
-/* CMNG constants
- derived from lab experiments, and not from system spec calculations !!! */
-#define DEF_MIN_RATE 100
-/* resolution of the rate shaping timer - 100 usec */
-#define RS_PERIODIC_TIMEOUT_USEC 100
-/* resolution of fairness algorithm in usecs -
- coefficient for calculating the actual t fair */
-#define T_FAIR_COEF 10000000
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
/* number of bytes in single QM arbitration cycle -
- coefficient for calculating the fairness timer */
-#define QM_ARB_BYTES 40000
-#define FAIR_MEM 2
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES 160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+/* how many bytes above threshold for the minimal credit of Min algorithm*/
+#define MIN_ABOVE_THRESH 32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
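As a quick sanity check on the new coefficient, using only the values defined above: T_FAIR_COEF = (MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES = (32768 + 160000) * 8 * 100 = 154,214,400.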
#define ATTN_NIG_FOR_FUNC (1L << 8)
@@ -1608,10 +1771,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
(T_ETH_MAC_COMMAND_INVALIDATE))
-#define CAM_INVALIDATE(x) \
- (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
-
-
/* Number of u32 elements in MC hash array */
#define MC_HASH_SIZE 8
#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
@@ -1646,5 +1805,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_push_indir_table(struct bnx2x *bp);
#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 94d5f59d5a6f..e83ac6dd6fc0 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -232,7 +232,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
/* move empty skb from pool to prod and map it */
prod_rx_buf->skb = fp->tpa_pool[queue].skb;
mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
- bp->rx_buf_size, DMA_FROM_DEVICE);
+ fp->rx_buf_size, DMA_FROM_DEVICE);
dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
/* move partial skb from cons to pool (don't unmap yet) */
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
#endif
}
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ * nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN 12
+/**
+ * Calculate the approximate value of the MSS for this
+ * aggregation using its first packet.
+ *
+ * @param bp
+ * @param parsing_flags Parsing flags from the START CQE
+ * @param len_on_bd Total length of the first packet for the
+ * aggregation.
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+ u16 len_on_bd)
+{
+ /* A TPA aggregation won't have IP options or TCP options
+ * other than a timestamp.
+ */
+ u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+
+
+ /* Check if there was a TCP timestamp; if there is, it will
+ * always be 12 bytes long: nop nop kind length echo val.
+ *
+ * Otherwise FW would close the aggregation.
+ */
+ if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+ hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+ return len_on_bd - hdrs_len;
+}
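Rough arithmetic behind the estimate, as a sketch with illustrative numbers (the 1514-byte first-packet length is assumed, not taken from the driver): hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr) = 14 + 20 + 20 = 54 bytes, or 66 with the 12-byte timestamp option, so a 1514-byte len_on_bd yields an MSS estimate of 1460 (1448 with timestamps).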
+
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct sk_buff *skb,
struct eth_fast_path_rx_cqe *fp_cqe,
- u16 cqe_idx)
+ u16 cqe_idx, u16 parsing_flags)
{
struct sw_rx_page *rx_pg, old_rx_pg;
u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* This is needed in order to enable forwarding support */
if (frag_size)
- skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
- max(frag_size, (u32)len_on_bd));
+ skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
+ len_on_bd);
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -333,26 +367,28 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
struct sk_buff *skb = rx_buf->skb;
/* alloc new skb */
- struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, DMA_FROM_DEVICE);
+ fp->rx_buf_size, DMA_FROM_DEVICE);
if (likely(new_skb)) {
/* fix ip xsum and give it to the stack */
/* (no need to map the new skb) */
+ u16 parsing_flags =
+ le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
prefetch(skb);
prefetch(((char *)(skb)) + L1_CACHE_BYTES);
#ifdef BNX2X_STOP_ON_ERROR
- if (pad + len > bp->rx_buf_size) {
+ if (pad + len > fp->rx_buf_size) {
BNX2X_ERR("skb_put is about to fail... "
"pad %d len %d rx_buf_size %d\n",
- pad, len, bp->rx_buf_size);
+ pad, len, fp->rx_buf_size);
bnx2x_panic();
return;
}
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
if (!bnx2x_fill_frag_skb(bp, fp, skb,
- &cqe->fast_path_cqe, cqe_idx)) {
- if ((le16_to_cpu(cqe->fast_path_cqe.
- pars_flags.flags) & PARSING_FLAGS_VLAN))
+ &cqe->fast_path_cqe, cqe_idx,
+ parsing_flags)) {
+ if (parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb,
le16_to_cpu(cqe->fast_path_cqe.
vlan_tag));
@@ -582,7 +618,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size,
+ fp->rx_buf_size,
DMA_FROM_DEVICE);
skb_reserve(skb, pad);
skb_put(skb, len);
@@ -698,6 +734,30 @@ void bnx2x_release_phy_lock(struct bnx2x *bp)
mutex_unlock(&bp->port.phy_mutex);
}
+/* calculates MF speed according to current linespeed and MF configuration */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp)
+{
+ u16 line_speed = bp->link_vars.line_speed;
+ if (IS_MF(bp)) {
+ u16 maxCfg = bnx2x_extract_max_cfg(bp,
+ bp->mf_config[BP_VN(bp)]);
+
+ /* Calculate the current MAX line speed limit for the MF
+ * devices
+ */
+ if (IS_MF_SI(bp))
+ line_speed = (line_speed * maxCfg) / 100;
+ else { /* SD mode */
+ u16 vn_max_rate = maxCfg * 100;
+
+ if (vn_max_rate < line_speed)
+ line_speed = vn_max_rate;
+ }
+ }
+
+ return line_speed;
+}
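A worked example of the two branches, with illustrative numbers only (not taken from the driver): on a 1000 Mbps link with maxCfg = 25, SI mode scales the reported speed to 1000 * 25 / 100 = 250 Mbps, while SD mode computes vn_max_rate = 25 * 100 = 2500 Mbps and, since that exceeds the link speed, leaves the reported speed at 1000 Mbps.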
+
void bnx2x_link_report(struct bnx2x *bp)
{
if (bp->flags & MF_FUNC_DIS) {
@@ -713,17 +773,8 @@ void bnx2x_link_report(struct bnx2x *bp)
netif_carrier_on(bp->dev);
netdev_info(bp->dev, "NIC Link is Up, ");
- line_speed = bp->link_vars.line_speed;
- if (IS_MF(bp)) {
- u16 vn_max_rate;
+ line_speed = bnx2x_get_mf_speed(bp);
- vn_max_rate =
- ((bp->mf_config[BP_VN(bp)] &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
- if (vn_max_rate < line_speed)
- line_speed = vn_max_rate;
- }
pr_cont("%d Mbps ", line_speed);
if (bp->link_vars.duplex == DUPLEX_FULL)
@@ -807,19 +858,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
u16 ring_prod;
int i, j;
- bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
- IP_HEADER_ALIGNMENT_PADDING;
-
- DP(NETIF_MSG_IFUP,
- "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
-
- for_each_queue(bp, j) {
+ for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
+ DP(NETIF_MSG_IFUP,
+ "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
if (!fp->disable_tpa) {
for (i = 0; i < max_agg_queues; i++) {
fp->tpa_pool[i].skb =
- netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ netdev_alloc_skb(bp->dev, fp->rx_buf_size);
if (!fp->tpa_pool[i].skb) {
BNX2X_ERR("Failed to allocate TPA "
"skb pool for queue[%d] - "
@@ -866,7 +914,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
}
}
- for_each_queue(bp, j) {
+ for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
fp->rx_bd_cons = 0;
@@ -897,7 +945,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i) {
+ for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
u16 bd_cons = fp->tx_bd_cons;
@@ -915,7 +963,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
int i, j;
- for_each_queue(bp, j) {
+ for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
for (i = 0; i < NUM_RX_BD; i++) {
@@ -927,7 +975,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, DMA_FROM_DEVICE);
+ fp->rx_buf_size, DMA_FROM_DEVICE);
rx_buf->skb = NULL;
dev_kfree_skb(skb);
@@ -945,6 +993,23 @@ void bnx2x_free_skbs(struct bnx2x *bp)
bnx2x_free_rx_skbs(bp);
}
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
+{
+ /* load old values */
+ u32 mf_cfg = bp->mf_config[BP_VN(bp)];
+
+ if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
+ /* leave all but MAX value */
+ mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
+
+ /* set new MAX value */
+ mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
+ & FUNC_MF_CFG_MAX_BW_MASK;
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
+ }
+}
+
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
int i, offset = 1;
@@ -956,7 +1021,7 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
#ifdef BCM_CNIC
offset++;
#endif
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
"state %x\n", i, bp->msix_table[i + offset].vector,
bnx2x_fp(bp, i, state));
@@ -990,14 +1055,14 @@ int bnx2x_enable_msix(struct bnx2x *bp)
bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
msix_vec++;
#endif
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
bp->msix_table[msix_vec].entry = msix_vec;
DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
"(fastpath #%u)\n", msix_vec, msix_vec, i);
msix_vec++;
}
- req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
+ req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
@@ -1053,7 +1118,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
#ifdef BCM_CNIC
offset++;
#endif
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
bp->dev->name, i);
@@ -1070,7 +1135,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
fp->state = BNX2X_FP_STATE_IRQ;
}
- i = BNX2X_NUM_QUEUES(bp);
+ i = BNX2X_NUM_ETH_QUEUES(bp);
offset = 1 + CNIC_CONTEXT_USE;
netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
" ... fp[%d] %d\n",
@@ -1117,7 +1182,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i)
+ for_each_napi_queue(bp, i)
napi_enable(&bnx2x_fp(bp, i, napi));
}
@@ -1125,7 +1190,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i)
+ for_each_napi_queue(bp, i)
napi_disable(&bnx2x_fp(bp, i, napi));
}
@@ -1153,6 +1218,35 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
netif_tx_disable(bp->dev);
}
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef BCM_CNIC
+ struct bnx2x *bp = netdev_priv(dev);
+ if (NO_FCOE(bp))
+ return skb_tx_hash(dev, skb);
+ else {
+ struct ethhdr *hdr = (struct ethhdr *)skb->data;
+ u16 ether_type = ntohs(hdr->h_proto);
+
+ /* Skip VLAN tag if present */
+ if (ether_type == ETH_P_8021Q) {
+ struct vlan_ethhdr *vhdr =
+ (struct vlan_ethhdr *)skb->data;
+
+ ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
+ }
+
+ /* If ethertype is FCoE or FIP - use FCoE ring */
+ if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
+ return bnx2x_fcoe(bp, index);
+ }
+#endif
+ /* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring
+ */
+ return __skb_tx_hash(dev, skb,
+ dev->real_num_tx_queues - FCOE_CONTEXT_USE);
+}
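As a rough illustration of the ethertype peek above, a self-contained userspace sketch follows (the frame bytes are hand-built; 0x8906 and 0x8914 are the FCoE and FIP ethertypes, 0x8100 the 802.1Q tag):

    #include <stdio.h>
    #include <stdint.h>

    #define ETHERTYPE_8021Q 0x8100
    #define ETHERTYPE_FCOE  0x8906
    #define ETHERTYPE_FIP   0x8914

    /* Read the outer ethertype at offset 12; if it is a VLAN tag, read the
     * encapsulated ethertype 4 bytes further in, as the driver does.
     */
    static unsigned int frame_ethertype(const uint8_t *frame)
    {
            unsigned int type = (frame[12] << 8) | frame[13];

            if (type == ETHERTYPE_8021Q)
                    type = (frame[16] << 8) | frame[17];
            return type;
    }

    int main(void)
    {
            uint8_t vlan_fcoe[18] = { 0 };

            vlan_fcoe[12] = 0x81; vlan_fcoe[13] = 0x00;  /* 802.1Q tag */
            vlan_fcoe[16] = 0x89; vlan_fcoe[17] = 0x06;  /* inner FCoE */

            printf("ethertype 0x%04x -> %s ring\n", frame_ethertype(vlan_fcoe),
                   frame_ethertype(vlan_fcoe) == ETHERTYPE_FCOE ? "FCoE" : "hashed");
            return 0;
    }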
+
void bnx2x_set_num_queues(struct bnx2x *bp)
{
switch (bp->multi_mode) {
@@ -1167,8 +1261,23 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
bp->num_queues = 1;
break;
}
+
+ /* Add special queues */
+ bp->num_queues += NONE_ETH_CONTEXT_USE;
}
+#ifdef BCM_CNIC
+static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
+{
+ if (!NO_FCOE(bp)) {
+ if (!IS_MF_SD(bp))
+ bnx2x_set_fip_eth_mac_addr(bp, 1);
+ bnx2x_set_all_enode_macs(bp, 1);
+ bp->flags |= FCOE_MACS_SET;
+ }
+}
+#endif
+
static void bnx2x_release_firmware(struct bnx2x *bp)
{
kfree(bp->init_ops_offsets);
@@ -1177,6 +1286,45 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
release_firmware(bp->firmware);
}
+static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
+{
+ int rc, num = bp->num_queues;
+
+#ifdef BCM_CNIC
+ if (NO_FCOE(bp))
+ num -= FCOE_CONTEXT_USE;
+
+#endif
+ netif_set_real_num_tx_queues(bp->dev, num);
+ rc = netif_set_real_num_rx_queues(bp->dev, num);
+ return rc;
+}
+
+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Always use a mini-jumbo MTU for the FCoE L2 ring */
+ if (IS_FCOE_IDX(i))
+ /*
+ * Although no IP frames are expected to arrive on
+ * this ring, we still want to add an
+ * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+ * overrun attack.
+ */
+ fp->rx_buf_size =
+ BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+ BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ else
+ fp->rx_buf_size =
+ bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
+ IP_HEADER_ALIGNMENT_PADDING;
+ }
+}
+
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
@@ -1200,13 +1348,15 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* must be called before memory allocation and HW init */
bnx2x_ilt_set_info(bp);
+ /* Set the receive queues buffer size */
+ bnx2x_set_rx_buf_size(bp);
+
if (bnx2x_alloc_mem(bp))
return -ENOMEM;
- netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
- rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
+ rc = bnx2x_set_real_num_queues(bp);
if (rc) {
- BNX2X_ERR("Unable to update real_num_rx_queues\n");
+ BNX2X_ERR("Unable to set real_num_queues\n");
goto load_error0;
}
@@ -1214,6 +1364,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_fp(bp, i, disable_tpa) =
((bp->flags & TPA_ENABLE_FLAG) == 0);
+#ifdef BCM_CNIC
+ /* We don't want TPA on FCoE L2 ring */
+ bnx2x_fcoe(bp, disable_tpa) = 1;
+#endif
bnx2x_napi_enable(bp);
/* Send LOAD_REQUEST command to MCP
@@ -1296,6 +1450,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}
+ bnx2x_dcbx_init(bp);
+
bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
rc = bnx2x_func_start(bp);
@@ -1344,30 +1500,46 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Now when Clients are configured we are ready to work */
bp->state = BNX2X_STATE_OPEN;
+#ifdef BCM_CNIC
+ bnx2x_set_fcoe_eth_macs(bp);
+#endif
+
bnx2x_set_eth_mac(bp, 1);
+ /* Clear MC configuration */
+ if (CHIP_IS_E1(bp))
+ bnx2x_invalidate_e1_mc_list(bp);
+ else
+ bnx2x_invalidate_e1h_mc_list(bp);
+
+ /* Clear UC lists configuration */
+ bnx2x_invalidate_uc_list(bp);
+
+ if (bp->pending_max) {
+ bnx2x_update_max_mf_config(bp, bp->pending_max);
+ bp->pending_max = 0;
+ }
+
if (bp->port.pmf)
bnx2x_initial_phy_init(bp, load_mode);
+ /* Initialize Rx filtering */
+ bnx2x_set_rx_mode(bp->dev);
+
/* Start fast path */
switch (load_mode) {
case LOAD_NORMAL:
/* Tx queue should be only reenabled */
netif_tx_wake_all_queues(bp->dev);
/* Initialize the receive filter. */
- bnx2x_set_rx_mode(bp->dev);
break;
case LOAD_OPEN:
netif_tx_start_all_queues(bp->dev);
smp_mb__after_clear_bit();
- /* Initialize the receive filter. */
- bnx2x_set_rx_mode(bp->dev);
break;
case LOAD_DIAG:
- /* Initialize the receive filter. */
- bnx2x_set_rx_mode(bp->dev);
bp->state = BNX2X_STATE_DIAG;
break;
@@ -1402,7 +1574,7 @@ load_error3:
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
- for_each_queue(bp, i)
+ for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
/* Release IRQs */
@@ -1473,7 +1645,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
- for_each_queue(bp, i)
+ for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
bnx2x_free_mem(bp);
@@ -1577,6 +1749,17 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
/* Fall out from the NAPI loop if needed */
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+#ifdef BCM_CNIC
+ /* No need to update SB for FCoE L2 ring as long as
+ * it's connected to the default SB and the SB
+ * has been updated when NAPI was scheduled.
+ */
+ if (IS_FCOE_FP(fp)) {
+ napi_complete(napi);
+ break;
+ }
+#endif
+
bnx2x_update_fpsb_idx(fp);
/* bnx2x_has_rx_work() reads the status block,
* thus we need to ensure that status block indices
@@ -1692,11 +1875,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
}
}
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
-
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+ if (skb_is_gso_v6(skb))
+ rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
+ else if (skb_is_gso(skb))
+ rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
return rc;
}
@@ -1782,15 +1964,15 @@ exit_lbl:
}
#endif
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
- struct eth_tx_parse_bd_e2 *pbd,
- u32 xmit_type)
+static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
{
- pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
- ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
+ *parsing_data |= (skb_shinfo(skb)->gso_size <<
+ ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+ ETH_TX_PARSE_BD_E2_LSO_MSS;
if ((xmit_type & XMIT_GSO_V6) &&
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
- pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+ *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
@@ -1835,15 +2017,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* @return header len
*/
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_parse_bd_e2 *pbd,
- u32 xmit_type)
+ u32 *parsing_data, u32 xmit_type)
{
- pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
+ *parsing_data |= ((tcp_hdrlen(skb)/4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
- skb->data) / 2) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
+ *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
}
@@ -1912,6 +2094,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
int nbd, fp_index;
dma_addr_t mapping;
@@ -2033,8 +2216,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
/* Set PBD in checksum offload case */
if (xmit_type & XMIT_CSUM)
- hlen = bnx2x_set_pbd_csum_e2(bp,
- skb, pbd_e2, xmit_type);
+ hlen = bnx2x_set_pbd_csum_e2(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
} else {
pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
@@ -2076,10 +2260,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
hlen, bd_prod, ++nbd);
if (CHIP_IS_E2(bp))
- bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
+ bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
+ xmit_type);
else
bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
}
+
+ /* Set the PBD's parsing_data field if not zero
+ * (for the chips newer than 57711).
+ */
+ if (pbd_e2_parsing_data)
+ pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
+
tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
/* Handle fragmented skb */
@@ -2232,7 +2424,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
bp->fp = fp;
/* msix table */
- tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
+ tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
GFP_KERNEL);
if (!tbl)
goto alloc_err;
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 6b28739c5302..ef37b98d6146 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -73,6 +73,16 @@ void bnx2x__link_status_update(struct bnx2x *bp);
void bnx2x_link_report(struct bnx2x *bp);
/**
+ * calculates MF speed according to current linespeed and MF
+ * configuration
+ *
+ * @param bp
+ *
+ * @return u16
+ */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp);
+
+/**
* MSI-X slowpath interrupt handler
*
* @param irq
@@ -232,6 +242,30 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
*/
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
+#ifdef BCM_CNIC
+/**
+ * Set/Clear FIP MAC(s) at the next entries in the CAM after the ETH
+ * MAC(s). This function will wait until the ramrod completion
+ * returns.
+ *
+ * @param bp driver handle
+ * @param set set or clear the CAM entry
+ *
+ * @return 0 on success, -ENODEV if the ramrod doesn't return.
+ */
+int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);
+
+/**
+ * Set/Clear ALL_ENODE mcast MAC.
+ *
+ * @param bp
+ * @param set
+ *
+ * @return int
+ */
+int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
+#endif
+
/**
* Set MAC filtering configurations.
*
@@ -290,6 +324,13 @@ int bnx2x_func_start(struct bnx2x *bp);
void bnx2x_ilt_set_info(struct bnx2x *bp);
/**
+ * Initialize the dcbx protocol
+ *
+ * @param bp
+ */
+void bnx2x_dcbx_init(struct bnx2x *bp);
+
+/**
* Set power state to the requested value. Currently only D0 and
* D3hot are supported.
*
@@ -300,6 +341,15 @@ void bnx2x_ilt_set_info(struct bnx2x *bp);
*/
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+/**
+ * Updates MAX part of MF configuration in HW
+ * (if required)
+ *
+ * @param bp
+ * @param value
+ */
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+
/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
@@ -309,6 +359,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
+/* select_queue callback */
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+
int bnx2x_change_mac_addr(struct net_device *dev, void *p);
/* NAPI poll Rx part */
@@ -685,7 +738,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
int i;
/* Add NAPI objects */
- for_each_queue(bp, i)
+ for_each_napi_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
@@ -694,7 +747,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i)
+ for_each_napi_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
@@ -778,11 +831,11 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
- skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
if (unlikely(skb == NULL))
return -ENOMEM;
- mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+ mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
dev_kfree_skb(skb);
@@ -848,7 +901,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
if (fp->tpa_state[i] == BNX2X_TPA_START)
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, DMA_FROM_DEVICE);
+ fp->rx_buf_size, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
rx_buf->skb = NULL;
@@ -860,7 +913,7 @@ static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
int i, j;
- for_each_queue(bp, j) {
+ for_each_tx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
for (i = 1; i <= NUM_TX_RINGS; i++) {
@@ -939,7 +992,30 @@ static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
}
}
+#ifdef BCM_CNIC
+static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
+{
+ bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID +
+ BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
+ bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
+ bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
+ bnx2x_fcoe(bp, bp) = bp;
+ bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
+ bnx2x_fcoe(bp, index) = FCOE_IDX;
+ bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
+ bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;
+ /* qZone id equals to FW (per path) client id */
+ bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) +
+ BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
+ ETH_MAX_RX_CLIENTS_E1H);
+ /* init shortcut */
+ bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ?
+ USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) :
+ USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id);
+}
+#endif
static inline void __storm_memset_struct(struct bnx2x *bp,
u32 addr, size_t size, u32 *data)
@@ -977,4 +1053,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);
+/**
+ * Extracts MAX BW part from MF configuration.
+ *
+ * @param bp
+ * @param mf_cfg
+ *
+ * @return u16
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+ u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (!max_cfg) {
+ BNX2X_ERR("Illegal configuration detected for Max BW - "
+ "using 100 instead\n");
+ max_cfg = 100;
+ }
+ return max_cfg;
+}
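A small userspace sketch of the same mask/shift-with-fallback pattern (the mask and shift values below are invented for illustration and are not the FUNC_MF_CFG_* values):

    #include <stdio.h>

    #define MAX_BW_MASK  0x00ff0000u
    #define MAX_BW_SHIFT 16

    /* Extract the MAX BW field; a value of 0 is treated as a misconfiguration
     * and replaced with 100, mirroring bnx2x_extract_max_cfg() above.
     */
    static unsigned int extract_max_cfg(unsigned int mf_cfg)
    {
            unsigned int max_cfg = (mf_cfg & MAX_BW_MASK) >> MAX_BW_SHIFT;

            if (!max_cfg) {
                    fprintf(stderr, "Illegal Max BW of 0 - using 100 instead\n");
                    max_cfg = 100;
            }
            return max_cfg;
    }

    int main(void)
    {
            printf("%u\n", extract_max_cfg(0x00190000));  /* 0x19 -> 25 */
            printf("%u\n", extract_max_cfg(0));           /* falls back to 100 */
            return 0;
    }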
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
new file mode 100644
index 000000000000..9a24d79c71d9
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -0,0 +1,2145 @@
+/* bnx2x_dcb.c: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2010 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Dmitry Kravkov
+ *
+ */
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#ifdef BCM_DCBNL
+#include <linux/dcbnl.h>
+#endif
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dcb.h"
+
+
+/* forward declarations of dcbx related functions */
+static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
+static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+ u32 *set_configuration_ets_pg,
+ u32 *pri_pg_tbl);
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+ u32 *pg_pri_orginal_spread,
+ struct pg_help_data *help_data);
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ u32 *pg_pri_orginal_spread);
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ struct dcbx_ets_feature *ets);
+static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp);
+
+
+static void bnx2x_pfc_set(struct bnx2x *bp)
+{
+ struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
+ u32 pri_bit, val = 0;
+ u8 pri;
+
+ /* Tx COS configuration */
+ if (bp->dcbx_port_params.ets.cos_params[0].pauseable)
+ pfc_params.rx_cos0_priority_mask =
+ bp->dcbx_port_params.ets.cos_params[0].pri_bitmask;
+ if (bp->dcbx_port_params.ets.cos_params[1].pauseable)
+ pfc_params.rx_cos1_priority_mask =
+ bp->dcbx_port_params.ets.cos_params[1].pri_bitmask;
+
+
+ /**
+ * Rx COS configuration
+ * Changing PFC RX configuration .
+ * In RX COS0 will always be configured to lossy and COS1 to lossless
+ */
+ for (pri = 0 ; pri < MAX_PFC_PRIORITIES ; pri++) {
+ pri_bit = 1 << pri;
+
+ if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
+ val |= 1 << (pri * 4);
+ }
+
+ pfc_params.pkt_priority_to_cos = val;
+
+ /* RX COS0 */
+ pfc_params.llfc_low_priority_classes = 0;
+ /* RX COS1 */
+ pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
+
+ /* BRB configuration */
+ pfc_params.cos0_pauseable = false;
+ pfc_params.cos1_pauseable = true;
+
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
+ bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params);
+ bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_pfc_clear(struct bnx2x *bp)
+{
+ struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
+ nig_params.pause_enable = 1;
+#ifdef BNX2X_SAFC
+ if (bp->flags & SAFC_TX_FLAG) {
+ u32 high = 0, low = 0;
+ int i;
+
+ for (i = 0; i < BNX2X_MAX_PRIORITY; i++) {
+ if (bp->pri_map[i] == 1)
+ high |= (1 << i);
+ if (bp->pri_map[i] == 0)
+ low |= (1 << i);
+ }
+
+ nig_params.llfc_low_priority_classes = high;
+ nig_params.llfc_low_priority_classes = low;
+
+ nig_params.pause_enable = 0;
+ nig_params.llfc_enable = 1;
+ nig_params.llfc_out_en = 1;
+ }
+#endif /* BNX2X_SAFC */
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
+ bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
+ bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
+ struct dcbx_features *features,
+ u32 error)
+{
+ u8 i = 0;
+ DP(NETIF_MSG_LINK, "local_mib.error %x\n", error);
+
+ /* PG */
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.enabled %x\n", features->ets.enabled);
+ for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++)
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i,
+ DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i));
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
+ DP(NETIF_MSG_LINK,
+ "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i,
+ DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
+
+ /* pfc */
+ DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n",
+ features->pfc.pri_en_bitmap);
+ DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n",
+ features->pfc.pfc_caps);
+ DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n",
+ features->pfc.enabled);
+
+ DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n",
+ features->app.default_pri);
+ DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n",
+ features->app.tc_supported);
+ DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n",
+ features->app.enabled);
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ DP(NETIF_MSG_LINK,
+ "dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
+ i, features->app.app_pri_tbl[i].app_id);
+ DP(NETIF_MSG_LINK,
+ "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
+ i, features->app.app_pri_tbl[i].pri_bitmap);
+ DP(NETIF_MSG_LINK,
+ "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
+ i, features->app.app_pri_tbl[i].appBitfield);
+ }
+}
+
+static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp,
+ u8 pri_bitmap,
+ u8 llfc_traf_type)
+{
+ u32 pri = MAX_PFC_PRIORITIES;
+ u32 index = MAX_PFC_PRIORITIES - 1;
+ u32 pri_mask;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ /* Choose the highest priority */
+ while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) {
+ pri_mask = 1 << index;
+ if (GET_FLAGS(pri_bitmap, pri_mask))
+ pri = index ;
+ index--;
+ }
+
+ if (pri < MAX_PFC_PRIORITIES)
+ ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri);
+}
+
+static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
+ struct dcbx_app_priority_feature *app,
+ u32 error) {
+ u8 index;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
+
+ if (app->enabled && !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) {
+
+ bp->dcbx_port_params.app.enabled = true;
+
+ for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+ ttp[index] = 0;
+
+ if (app->default_pri < MAX_PFC_PRIORITIES)
+ ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri;
+
+ for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
+ struct dcbx_app_priority_entry *entry =
+ app->app_pri_tbl;
+
+ if (GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_ETH_TYPE) &&
+ ETH_TYPE_FCOE == entry[index].app_id)
+ bnx2x_dcbx_get_ap_priority(bp,
+ entry[index].pri_bitmap,
+ LLFC_TRAFFIC_TYPE_FCOE);
+
+ if (GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_PORT) &&
+ TCP_PORT_ISCSI == entry[index].app_id)
+ bnx2x_dcbx_get_ap_priority(bp,
+ entry[index].pri_bitmap,
+ LLFC_TRAFFIC_TYPE_ISCSI);
+ }
+ } else {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n");
+ bp->dcbx_port_params.app.enabled = false;
+ for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+ ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
+ }
+}
+
+static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
+ struct dcbx_ets_feature *ets,
+ u32 error) {
+ int i = 0;
+ u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0};
+ struct pg_help_data pg_help_data;
+ struct bnx2x_dcbx_cos_params *cos_params =
+ bp->dcbx_port_params.ets.cos_params;
+
+ memset(&pg_help_data, 0, sizeof(struct pg_help_data));
+
+
+ if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n");
+
+
+ /* Clean up old settings of ets on COS */
+ for (i = 0; i < E2_NUM_OF_COS ; i++) {
+
+ cos_params[i].pauseable = false;
+ cos_params[i].strict = BNX2X_DCBX_COS_NOT_STRICT;
+ cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
+ cos_params[i].pri_bitmask = DCBX_PFC_PRI_GET_NON_PAUSE(bp, 0);
+ }
+
+ if (bp->dcbx_port_params.app.enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) &&
+ ets->enabled) {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n");
+ bp->dcbx_port_params.ets.enabled = true;
+
+ bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
+ pg_pri_orginal_spread,
+ ets->pri_pg_tbl);
+
+ bnx2x_dcbx_get_num_pg_traf_type(bp,
+ pg_pri_orginal_spread,
+ &pg_help_data);
+
+ bnx2x_dcbx_fill_cos_params(bp, &pg_help_data,
+ ets, pg_pri_orginal_spread);
+
+ } else {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n");
+ bp->dcbx_port_params.ets.enabled = false;
+ ets->pri_pg_tbl[0] = 0;
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++)
+ DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1);
+ }
+}
+
+static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
+ struct dcbx_pfc_feature *pfc, u32 error)
+{
+
+ if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
+
+ if (bp->dcbx_port_params.app.enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR) &&
+ pfc->enabled) {
+ bp->dcbx_port_params.pfc.enabled = true;
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
+ ~(pfc->pri_en_bitmap);
+ } else {
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n");
+ bp->dcbx_port_params.pfc.enabled = false;
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
+ }
+}
+
+static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
+ struct dcbx_features *features,
+ u32 error)
+{
+ bnx2x_dcbx_get_ap_feature(bp, &features->app, error);
+
+ bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
+
+ bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
+}
+
+#define DCBX_LOCAL_MIB_MAX_TRY_READ (100)
+static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
+ u32 *base_mib_addr,
+ u32 offset,
+ int read_mib_type)
+{
+ int max_try_read = 0, i;
+ u32 *buff, mib_size, prefix_seq_num, suffix_seq_num;
+ struct lldp_remote_mib *remote_mib ;
+ struct lldp_local_mib *local_mib;
+
+
+ switch (read_mib_type) {
+ case DCBX_READ_LOCAL_MIB:
+ mib_size = sizeof(struct lldp_local_mib);
+ break;
+ case DCBX_READ_REMOTE_MIB:
+ mib_size = sizeof(struct lldp_remote_mib);
+ break;
+ default:
+ return 1; /*error*/
+ }
+
+ offset += BP_PORT(bp) * mib_size;
+
+ do {
+ buff = base_mib_addr;
+ for (i = 0; i < mib_size; i += 4, buff++)
+ *buff = REG_RD(bp, offset + i);
+
+ max_try_read++;
+
+ switch (read_mib_type) {
+ case DCBX_READ_LOCAL_MIB:
+ local_mib = (struct lldp_local_mib *) base_mib_addr;
+ prefix_seq_num = local_mib->prefix_seq_num;
+ suffix_seq_num = local_mib->suffix_seq_num;
+ break;
+ case DCBX_READ_REMOTE_MIB:
+ remote_mib = (struct lldp_remote_mib *) base_mib_addr;
+ prefix_seq_num = remote_mib->prefix_seq_num;
+ suffix_seq_num = remote_mib->suffix_seq_num;
+ break;
+ default:
+ return 1; /*error*/
+ }
+ } while ((prefix_seq_num != suffix_seq_num) &&
+ (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ));
+
+ if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) {
+ BNX2X_ERR("MIB could not be read\n");
+ return 1;
+ }
+
+ return 0;
+}
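The prefix/suffix sequence numbers implement a simple consistency check: the firmware bumps both around each MIB update, so a reader that sees them differ caught a partial update and retries. A hedged userspace sketch of the same retry loop follows (struct layout, field names and the data payload are invented):

    #include <stdio.h>
    #include <string.h>

    #define MAX_TRY_READ 100

    struct mib {
            unsigned int prefix_seq_num;
            unsigned int data[4];
            unsigned int suffix_seq_num;
    };

    /* Re-read the shared block until the two sequence numbers match or the
     * retry budget runs out; returns 0 on a consistent snapshot.
     */
    static int read_mib_consistent(const struct mib *shared, struct mib *out)
    {
            int tries = 0;

            do {
                    memcpy(out, shared, sizeof(*out));
                    tries++;
            } while (out->prefix_seq_num != out->suffix_seq_num &&
                     tries < MAX_TRY_READ);

            return (out->prefix_seq_num == out->suffix_seq_num) ? 0 : -1;
    }

    int main(void)
    {
            struct mib shared = { 7, { 1, 2, 3, 4 }, 7 }, local;

            if (!read_mib_consistent(&shared, &local))
                    printf("consistent read, seq %u\n", local.prefix_seq_num);
            return 0;
    }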
+
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp)) {
+ if (BP_PORT(bp)) {
+ BNX2X_ERR("4 port mode is not supported");
+ return;
+ }
+
+ if (bp->dcbx_port_params.pfc.enabled)
+
+ /* 1. Fills up common PFC structures if required.*/
+ /* 2. Configure NIG, MAC and BRB via the elink:
+ * elink must first check if BMAC is not in reset
+ * and only then configures the BMAC
+ * Or, configure EMAC.
+ */
+ bnx2x_pfc_set(bp);
+
+ else
+ bnx2x_pfc_clear(bp);
+ }
+}
+
+static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+{
+ DP(NETIF_MSG_LINK, "sending STOP TRAFFIC\n");
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
+ 0 /* connectionless */,
+ 0 /* dataHi is zero */,
+ 0 /* dataLo is zero */,
+ 1 /* common */);
+}
+
+static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+{
+ bnx2x_pfc_fw_struct_e2(bp);
+ DP(NETIF_MSG_LINK, "sending START TRAFFIC\n");
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC,
+ 0, /* connectionless */
+ U64_HI(bnx2x_sp_mapping(bp, pfc_config)),
+ U64_LO(bnx2x_sp_mapping(bp, pfc_config)),
+ 1 /* common */);
+}
+
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+{
+ struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+ u8 status = 0;
+
+ bnx2x_ets_disabled(&bp->link_params);
+
+ if (!ets->enabled)
+ return;
+
+ if ((ets->num_of_cos == 0) || (ets->num_of_cos > E2_NUM_OF_COS)) {
+ BNX2X_ERR("illegal num of cos= %x", ets->num_of_cos);
+ return;
+ }
+
+ /* valid COS entries */
+ if (ets->num_of_cos == 1) /* no ETS */
+ return;
+
+ /* sanity */
+ if (((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[0].strict) &&
+ (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
+ ((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[1].strict) &&
+ (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
+ BNX2X_ERR("all COS should have at least bw_limit or strict"
+ "ets->cos_params[0].strict= %x"
+ "ets->cos_params[0].bw_tbl= %x"
+ "ets->cos_params[1].strict= %x"
+ "ets->cos_params[1].bw_tbl= %x",
+ ets->cos_params[0].strict,
+ ets->cos_params[0].bw_tbl,
+ ets->cos_params[1].strict,
+ ets->cos_params[1].bw_tbl);
+ return;
+ }
+ /* If we join a group and there is bw_tbl and strict then bw rules */
+ if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) &&
+ (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) {
+ u32 bw_tbl_0 = ets->cos_params[0].bw_tbl;
+ u32 bw_tbl_1 = ets->cos_params[1].bw_tbl;
+ /* Do not allow a 0-100 configuration,
+ * since PBF does not support it;
+ * force 1-99 instead.
+ */
+ if (bw_tbl_0 == 0) {
+ bw_tbl_0 = 1;
+ bw_tbl_1 = 99;
+ } else if (bw_tbl_1 == 0) {
+ bw_tbl_1 = 1;
+ bw_tbl_0 = 99;
+ }
+
+ bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
+ } else {
+ if (ets->cos_params[0].strict == BNX2X_DCBX_COS_HIGH_STRICT)
+ status = bnx2x_ets_strict(&bp->link_params, 0);
+ else if (ets->cos_params[1].strict
+ == BNX2X_DCBX_COS_HIGH_STRICT)
+ status = bnx2x_ets_strict(&bp->link_params, 1);
+
+ if (status)
+ BNX2X_ERR("update_ets_params failed\n");
+ }
+}
+
+static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
+{
+ struct lldp_local_mib local_mib = {0};
+ u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
+ int rc;
+
+ DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
+
+ if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
+ BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
+ return -EINVAL;
+ }
+ rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
+ DCBX_READ_LOCAL_MIB);
+
+ if (rc) {
+ BNX2X_ERR("Faild to read local mib from FW\n");
+ return rc;
+ }
+
+ /* save features and error */
+ bp->dcbx_local_feat = local_mib.features;
+ bp->dcbx_error = local_mib.error;
+ return 0;
+}
+
+
+#ifdef BCM_DCBNL
+static inline
+u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
+{
+ u8 pri;
+
+ /* Choose the highest priority */
+ for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
+ if (ent->pri_bitmap & (1 << pri))
+ break;
+ return pri;
+}
+
+static inline
+u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
+{
+ return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
+ DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
+ DCB_APP_IDTYPE_ETHTYPE;
+}
+
+static inline
+void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
+{
+ int i;
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
+ bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
+ ~DCBX_APP_ENTRY_VALID;
+}
+
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
+{
+ int i, err = 0;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
+ struct dcbx_app_priority_entry *ent =
+ &bp->dcbx_local_feat.app.app_pri_tbl[i];
+
+ if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+ u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
+
+ /* avoid invalid user-priority */
+ if (up) {
+ struct dcb_app app;
+ app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+ app.protocol = ent->app_id;
+ app.priority = delall ? 0 : up;
+ err = dcb_setapp(bp->dev, &app);
+ }
+ }
+ }
+ return err;
+}
+#endif
+
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
+{
+ switch (state) {
+ case BNX2X_DCBX_STATE_NEG_RECEIVED:
+ {
+ DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
+#ifdef BCM_DCBNL
+ /**
+ * Delete app tlvs from dcbnl before reading new
+ * negotiation results
+ */
+ bnx2x_dcbnl_update_applist(bp, true);
+#endif
+ /* Read neg results if dcbx is in the FW */
+ if (bnx2x_dcbx_read_shmem_neg_results(bp))
+ return;
+
+ bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+
+ bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+
+ if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
+#ifdef BCM_DCBNL
+ /**
+ * Add new app tlvs to dcbnl
+ */
+ bnx2x_dcbnl_update_applist(bp, false);
+#endif
+ bnx2x_dcbx_stop_hw_tx(bp);
+ return;
+ }
+ /* fall through */
+#ifdef BCM_DCBNL
+ /**
+ * Invalidate the local app tlvs if they are not added
+ * to the dcbnl app list to avoid deleting them from
+ * the list later on
+ */
+ bnx2x_dcbx_invalidate_local_apps(bp);
+#endif
+ }
+ case BNX2X_DCBX_STATE_TX_PAUSED:
+ DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
+ bnx2x_pfc_set_pfc(bp);
+
+ bnx2x_dcbx_update_ets_params(bp);
+ if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
+ bnx2x_dcbx_resume_hw_tx(bp);
+ return;
+ }
+ /* fall through */
+ case BNX2X_DCBX_STATE_TX_RELEASED:
+ DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
+ if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD)
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
+
+ return;
+ default:
+ BNX2X_ERR("Unknown DCBX_STATE\n");
+ }
+}
+
+
+#define LLDP_STATS_OFFSET(bp) (BP_PORT(bp)*\
+ sizeof(struct lldp_dcbx_stat))
+
+/* calculate struct offset in array according to chip information */
+#define LLDP_PARAMS_OFFSET(bp) (BP_PORT(bp)*sizeof(struct lldp_params))
+
+#define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \
+ BP_PORT(bp)*sizeof(struct lldp_admin_mib))
+
+static void bnx2x_dcbx_lldp_updated_params(struct bnx2x *bp,
+ u32 dcbx_lldp_params_offset)
+{
+ struct lldp_params lldp_params = {0};
+ u32 i = 0, *buff = NULL;
+ u32 offset = dcbx_lldp_params_offset + LLDP_PARAMS_OFFSET(bp);
+
+ DP(NETIF_MSG_LINK, "lldp_offset 0x%x\n", offset);
+
+ if ((bp->lldp_config_params.overwrite_settings ==
+ BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE)) {
+ /* Read the data first */
+ buff = (u32 *)&lldp_params;
+ for (i = 0; i < sizeof(struct lldp_params); i += 4, buff++)
+ *buff = REG_RD(bp, (offset + i));
+
+ lldp_params.msg_tx_hold =
+ (u8)bp->lldp_config_params.msg_tx_hold;
+ lldp_params.msg_fast_tx_interval =
+ (u8)bp->lldp_config_params.msg_fast_tx;
+ lldp_params.tx_crd_max =
+ (u8)bp->lldp_config_params.tx_credit_max;
+ lldp_params.msg_tx_interval =
+ (u8)bp->lldp_config_params.msg_tx_interval;
+ lldp_params.tx_fast =
+ (u8)bp->lldp_config_params.tx_fast;
+
+ /* Write the data.*/
+ buff = (u32 *)&lldp_params;
+ for (i = 0; i < sizeof(struct lldp_params); i += 4, buff++)
+ REG_WR(bp, (offset + i) , *buff);
+
+
+ } else if (BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
+ bp->lldp_config_params.overwrite_settings)
+ bp->lldp_config_params.overwrite_settings =
+ BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID;
+}
+
+static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
+ u32 dcbx_lldp_params_offset)
+{
+ struct lldp_admin_mib admin_mib;
+ u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
+ u32 *buff;
+ u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp);
+
+ /*shortcuts*/
+ struct dcbx_features *af = &admin_mib.features;
+ struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;
+
+ memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));
+ buff = (u32 *)&admin_mib;
+ /* Read the data first */
+ for (i = 0; i < sizeof(struct lldp_admin_mib); i += 4, buff++)
+ *buff = REG_RD(bp, (offset + i));
+
+ if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+
+ if ((BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
+ dp->overwrite_settings)) {
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
+ admin_mib.ver_cfg_flags |=
+ (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
+ DCBX_CEE_VERSION_MASK;
+
+ af->ets.enabled = (u8)dp->admin_ets_enable;
+
+ af->pfc.enabled = (u8)dp->admin_pfc_enable;
+
+ /* FOR IEEE dp->admin_tc_supported_tx_enable */
+ if (dp->admin_ets_configuration_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_ETS_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_ETS_CONFIG_TX_ENABLED);
+ /* For IEEE admin_ets_recommendation_tx_enable */
+ if (dp->admin_pfc_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_PFC_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_PFC_CONFIG_TX_ENABLED);
+
+ if (dp->admin_application_priority_tx_enable)
+ SET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_APP_CONFIG_TX_ENABLED);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags,
+ DCBX_APP_CONFIG_TX_ENABLED);
+
+ if (dp->admin_ets_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+ /* For IEEE admin_ets_reco_valid */
+ if (dp->admin_pfc_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+
+ if (dp->admin_app_priority_willing)
+ SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+ else
+ RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+
+ for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) {
+ DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
+ (u8)dp->admin_configuration_bw_precentage[i]);
+
+ DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n",
+ i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
+ }
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+ DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
+ (u8)dp->admin_configuration_ets_pg[i]);
+
+ DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n",
+ i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
+ }
+
+ /*For IEEE admin_recommendation_bw_precentage
+ *For IEEE admin_recommendation_ets_pg */
+ af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
+ for (i = 0; i < 4; i++) {
+ if (dp->admin_priority_app_table[i].valid) {
+ struct bnx2x_admin_priority_app_table *table =
+ dp->admin_priority_app_table;
+ if ((ETH_TYPE_FCOE == table[i].app_id) &&
+ (TRAFFIC_TYPE_ETH == table[i].traffic_type))
+ traf_type = FCOE_APP_IDX;
+ else if ((TCP_PORT_ISCSI == table[i].app_id) &&
+ (TRAFFIC_TYPE_PORT == table[i].traffic_type))
+ traf_type = ISCSI_APP_IDX;
+ else
+ traf_type = other_traf_type++;
+
+ af->app.app_pri_tbl[traf_type].app_id =
+ table[i].app_id;
+
+ af->app.app_pri_tbl[traf_type].pri_bitmap =
+ (u8)(1 << table[i].priority);
+
+ af->app.app_pri_tbl[traf_type].appBitfield =
+ (DCBX_APP_ENTRY_VALID);
+
+ af->app.app_pri_tbl[traf_type].appBitfield |=
+ (TRAFFIC_TYPE_ETH == table[i].traffic_type) ?
+ DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT;
+ }
+ }
+
+ af->app.default_pri = (u8)dp->admin_default_priority;
+
+ } else if (BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
+ dp->overwrite_settings)
+ dp->overwrite_settings = BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID;
+
+ /* Write the data. */
+ buff = (u32 *)&admin_mib;
+ for (i = 0; i < sizeof(struct lldp_admin_mib); i += 4, buff++)
+ REG_WR(bp, (offset + i), *buff);
+}
+
+void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
+{
+ if (CHIP_IS_E2(bp) && !CHIP_MODE_IS_4_PORT(bp)) {
+ bp->dcb_state = dcb_on;
+ bp->dcbx_enabled = dcbx_enabled;
+ } else {
+ bp->dcb_state = false;
+ bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
+ }
+ DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n",
+ dcb_on ? "ON" : "OFF",
+ dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
+ dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
+ dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ?
+ "on-chip with negotiation" : "invalid");
+}
+
+void bnx2x_dcbx_init_params(struct bnx2x *bp)
+{
+ bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */
+ bp->dcbx_config_params.admin_ets_willing = 1;
+ bp->dcbx_config_params.admin_pfc_willing = 1;
+ bp->dcbx_config_params.overwrite_settings = 1;
+ bp->dcbx_config_params.admin_ets_enable = 1;
+ bp->dcbx_config_params.admin_pfc_enable = 1;
+ bp->dcbx_config_params.admin_tc_supported_tx_enable = 1;
+ bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+ bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+ bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
+ bp->dcbx_config_params.admin_ets_reco_valid = 1;
+ bp->dcbx_config_params.admin_app_priority_willing = 1;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 00;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1;
+ bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2;
+ bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
+ bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
+ bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
+ bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */
+ bp->dcbx_config_params.admin_priority_app_table[0].valid = 1;
+ bp->dcbx_config_params.admin_priority_app_table[1].valid = 1;
+ bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
+ bp->dcbx_config_params.admin_priority_app_table[0].priority = 3;
+ bp->dcbx_config_params.admin_priority_app_table[1].priority = 0;
+ bp->dcbx_config_params.admin_priority_app_table[2].priority = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].priority = 0;
+ bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0;
+ bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1;
+ bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0;
+ bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906;
+ bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260;
+ bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0;
+ bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0;
+ bp->dcbx_config_params.admin_default_priority =
+ bp->dcbx_config_params.admin_priority_app_table[1].priority;
+}
+
+void bnx2x_dcbx_init(struct bnx2x *bp)
+{
+ u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
+
+ if (bp->dcbx_enabled <= 0)
+ return;
+
+ /* validate:
+ * chip is good for dcbx version,
+ * dcb is wanted
+ * the function is pmf
+ * shmem2 contains DCBX support fields
+ */
+ DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
+ bp->dcb_state, bp->port.pmf);
+
+ if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
+ SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
+ dcbx_lldp_params_offset =
+ SHMEM2_RD(bp, dcbx_lldp_params_offset);
+
+ DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
+ dcbx_lldp_params_offset);
+
+ if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
+ bnx2x_dcbx_lldp_updated_params(bp,
+ dcbx_lldp_params_offset);
+
+ bnx2x_dcbx_admin_mib_updated_params(bp,
+ dcbx_lldp_params_offset);
+
+ /* set default configuration BC has */
+ bnx2x_dcbx_set_params(bp,
+ BNX2X_DCBX_STATE_NEG_RECEIVED);
+
+ bnx2x_fw_command(bp,
+ DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
+ }
+ }
+}
+
+void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp)
+{
+ struct priority_cos pricos[MAX_PFC_TRAFFIC_TYPES];
+ u32 i = 0, addr;
+ memset(pricos, 0, sizeof(pricos));
+ /* Default initialization */
+ for (i = 0; i < MAX_PFC_TRAFFIC_TYPES; i++)
+ pricos[i].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
+
+ /* Store per port struct to internal memory */
+ addr = BAR_XSTRORM_INTMEM +
+ XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
+ offsetof(struct cmng_struct_per_port,
+ traffic_type_to_priority_cos);
+ __storm_memset_struct(bp, addr, sizeof(pricos), (u32 *)pricos);
+
+
+ /* LLFC disabled.*/
+ REG_WR8(bp , BAR_XSTRORM_INTMEM +
+ XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
+ offsetof(struct cmng_struct_per_port, llfc_mode),
+ LLFC_MODE_NONE);
+
+ /* DCBX disabled.*/
+ REG_WR8(bp , BAR_XSTRORM_INTMEM +
+ XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
+ offsetof(struct cmng_struct_per_port, dcb_enabled),
+ DCB_DISABLED);
+}
+
+static void
+bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
+ struct flow_control_configuration *pfc_fw_cfg)
+{
+ u8 pri = 0;
+ u8 cos = 0;
+
+ DP(NETIF_MSG_LINK,
+ "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
+ DP(NETIF_MSG_LINK,
+ "pdev->params.dcbx_port_params.pfc."
+ "priority_non_pauseable_mask %x\n",
+ bp->dcbx_port_params.pfc.priority_non_pauseable_mask);
+
+ for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].pri_bitmask %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
+
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].bw_tbl %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
+
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].strict %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].strict);
+
+ DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+ "cos_params[%d].pauseable %x\n", cos,
+ bp->dcbx_port_params.ets.cos_params[cos].pauseable);
+ }
+
+ for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+ DP(NETIF_MSG_LINK,
+ "pfc_fw_cfg->traffic_type_to_priority_cos[%d]."
+ "priority %x\n", pri,
+ pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
+
+ DP(NETIF_MSG_LINK,
+ "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n",
+ pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos);
+ }
+}
+
+/* fills help_data according to pg_info */
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+ u32 *pg_pri_orginal_spread,
+ struct pg_help_data *help_data)
+{
+ bool pg_found = false;
+ u32 i, traf_type, add_traf_type, add_pg;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ struct pg_entry_help_data *data = help_data->data; /* shortcut */
+
+ /* Set to invalid */
+ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+ data[i].pg = DCBX_ILLEGAL_PG;
+
+ for (add_traf_type = 0;
+ add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) {
+ pg_found = false;
+ if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) {
+ add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]];
+ for (traf_type = 0;
+ traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+ traf_type++) {
+ if (data[traf_type].pg == add_pg) {
+ if (!(data[traf_type].pg_priority &
+ (1 << ttp[add_traf_type])))
+ data[traf_type].
+ num_of_dif_pri++;
+ data[traf_type].pg_priority |=
+ (1 << ttp[add_traf_type]);
+ pg_found = true;
+ break;
+ }
+ }
+ if (false == pg_found) {
+ data[help_data->num_of_pg].pg = add_pg;
+ data[help_data->num_of_pg].pg_priority =
+ (1 << ttp[add_traf_type]);
+ data[help_data->num_of_pg].num_of_dif_pri = 1;
+ help_data->num_of_pg++;
+ }
+ }
+ DP(NETIF_MSG_LINK,
+ "add_traf_type %d pg_found %s num_of_pg %d\n",
+ add_traf_type, (false == pg_found) ? "NO" : "YES",
+ help_data->num_of_pg);
+ }
+}
+
+
+/*******************************************************************************
+ * Description: single priority group
+ *
+ * Return:
+ ******************************************************************************/
+static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask)
+{
+ /* Only one priority, hence only one COS */
+ cos_data->data[0].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+ cos_data->data[0].pri_join_mask = pri_join_mask;
+ cos_data->data[0].cos_bw = 100;
+ cos_data->num_of_cos = 1;
+}
+
+/*******************************************************************************
+ * Description: updating the cos bw
+ *
+ * Return:
+ ******************************************************************************/
+static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
+ struct cos_entry_help_data *data,
+ u8 pg_bw)
+{
+ if (data->cos_bw == DCBX_INVALID_COS_BW)
+ data->cos_bw = pg_bw;
+ else
+ data->cos_bw += pg_bw;
+}
+
+/*******************************************************************************
+ * Description: single priority group
+ *
+ * Return:
+ ******************************************************************************/
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ struct dcbx_ets_feature *ets)
+{
+ u32 pri_tested = 0;
+ u8 i = 0;
+ u8 entry = 0;
+ u8 pg_entry = 0;
+ u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+ cos_data->data[0].pausable = true;
+ cos_data->data[1].pausable = false;
+ cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+
+ for (i = 0 ; i < num_of_pri ; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+
+ if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) {
+ cos_data->data[1].pri_join_mask |= pri_tested;
+ entry = 1;
+ } else {
+ cos_data->data[0].pri_join_mask |= pri_tested;
+ entry = 0;
+ }
+ pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params.
+ app.traffic_type_priority[i]];
+ /* There can be only one strict pg */
+ if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES)
+ bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry],
+ DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
+ else
+ /* If we join a group and one is strict
+ * then strict priority rules over the bw */
+ cos_data->data[entry].strict =
+ BNX2X_DCBX_COS_HIGH_STRICT;
+ }
+ if ((0 == cos_data->data[0].pri_join_mask) &&
+ (0 == cos_data->data[1].pri_join_mask))
+ BNX2X_ERR("dcbx error: Both groups must have priorities\n");
+}
+
+
+#ifndef POWER_OF_2
+#define POWER_OF_2(x) ((0 != (x)) && (0 == ((x) & ((x) - 1))))
+#endif
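+/* For example (illustrative values), POWER_OF_2(0x08) evaluates true,
+ * while POWER_OF_2(0x0c) evaluates false since 0x0c & 0x0b == 0x08 != 0.
+ */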
+
+static void bxn2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u32 pri_tested = 0;
+ u32 pri_mask_without_pri = 0;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ /*debug*/
+ if (num_of_dif_pri == 1) {
+ bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask);
+ return;
+ }
+ /* single priority group */
+ if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and
+ * the non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pauseable. */
+ cos_data->data[1].pausable = false;
+
+ if (2 == num_of_dif_pri) {
+ cos_data->data[0].cos_bw = 50;
+ cos_data->data[1].cos_bw = 50;
+ }
+
+ if (3 == num_of_dif_pri) {
+ if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp,
+ pri_join_mask))) {
+ cos_data->data[0].cos_bw = 33;
+ cos_data->data[1].cos_bw = 67;
+ } else {
+ cos_data->data[0].cos_bw = 67;
+ cos_data->data[1].cos_bw = 33;
+ }
+ }
+
+ } else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) {
+ /* If there are only pauseable priorities,
+ * then one/two priorities go to the first queue
+ * and one priority goes to the second queue.
+ */
+ if (2 == num_of_dif_pri) {
+ cos_data->data[0].cos_bw = 50;
+ cos_data->data[1].cos_bw = 50;
+ } else {
+ cos_data->data[0].cos_bw = 67;
+ cos_data->data[1].cos_bw = 33;
+ }
+ cos_data->data[1].pausable = true;
+ cos_data->data[0].pausable = true;
+ /* All priorities except FCOE */
+ cos_data->data[0].pri_join_mask = (pri_join_mask &
+ ((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE])));
+ /* Only FCOE priority.*/
+ cos_data->data[1].pri_join_mask =
+ (1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]);
+ } else
+ /* If there are only non-pauseable priorities,
+ * they will all go to the same queue.
+ */
+ bnx2x_dcbx_ets_disabled_entry_data(bp,
+ cos_data, pri_join_mask);
+ } else {
+ /* priority group which is not BW limited (PG#15):*/
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ /* If there are both pauseable and non-pauseable
+ * priorities, the pauseable priorities go to the first
+ * queue and the non-pauseable priorities
+ * go to the second queue.
+ */
+ if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
+ DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
+ cos_data->data[0].strict =
+ BNX2X_DCBX_COS_HIGH_STRICT;
+ cos_data->data[1].strict =
+ BNX2X_DCBX_COS_LOW_STRICT;
+ } else {
+ cos_data->data[0].strict =
+ BNX2X_DCBX_COS_LOW_STRICT;
+ cos_data->data[1].strict =
+ BNX2X_DCBX_COS_HIGH_STRICT;
+ }
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pauseable. */
+ cos_data->data[1].pausable = false;
+ } else {
+ /* If there are only pauseable priorities or
+ * only non-pauseable ones, the lower priorities go
+ * to the first queue and the higher priorities go
+ * to the second queue.
+ */
+ cos_data->data[0].pausable =
+ cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+ for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+ /* Remove priority tested */
+ pri_mask_without_pri =
+ (pri_join_mask & ((u8)(~pri_tested)));
+ if (pri_mask_without_pri < pri_tested)
+ break;
+ }
+
+ if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX)
+ BNX2X_ERR("Invalid value for pri_join_mask -"
+ " could not find a priority\n");
+
+ cos_data->data[0].pri_join_mask = pri_mask_without_pri;
+ cos_data->data[1].pri_join_mask = pri_tested;
+ /* Both queues are strict priority,
+ * and that with the highest priority
+ * gets the highest strict priority in the arbiter.
+ */
+ cos_data->data[0].strict = BNX2X_DCBX_COS_LOW_STRICT;
+ cos_data->data[1].strict = BNX2X_DCBX_COS_HIGH_STRICT;
+ }
+ }
+}
+
+static void bnx2x_dcbx_two_pg_to_cos_params(
+ struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u8 pg[E2_NUM_OF_COS] = {0};
+
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and
+ * the non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+ pg_help_data->data[0].pg_priority) ||
+ IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+ pg_help_data->data[1].pg_priority)) {
+ /* If one PG contains both pauseable and
+ * non-pauseable priorities then ETS is disabled.
+ */
+ bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data,
+ pg_pri_orginal_spread, ets);
+ bp->dcbx_port_params.ets.enabled = false;
+ return;
+ }
+
+ /* Pauseable */
+ cos_data->data[0].pausable = true;
+ /* Non pauseable. */
+ cos_data->data[1].pausable = false;
+ if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp,
+ pg_help_data->data[0].pg_priority)) {
+ /* 0 is pauseable */
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[0] = pg_help_data->data[0].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[1] = pg_help_data->data[1].pg;
+ } else {/* 1 is pauseable */
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[0] = pg_help_data->data[1].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[1] = pg_help_data->data[0].pg;
+ }
+ } else {
+ /* If there are only pauseable priorities or
+ * only non-pauseable, each PG goes to a queue.
+ */
+ cos_data->data[0].pausable = cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+ cos_data->data[0].pri_join_mask =
+ pg_help_data->data[0].pg_priority;
+ pg[0] = pg_help_data->data[0].pg;
+ cos_data->data[1].pri_join_mask =
+ pg_help_data->data[1].pg_priority;
+ pg[1] = pg_help_data->data[1].pg;
+ }
+
+ /* There can be only one strict pg */
+ for (i = 0 ; i < E2_NUM_OF_COS; i++) {
+ if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
+ cos_data->data[i].cos_bw =
+ DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
+ else
+ cos_data->data[i].strict = BNX2X_DCBX_COS_HIGH_STRICT;
+ }
+}
+
+/*******************************************************************************
+ * Description: three priority groups
+ *
+ * Return:
+ ******************************************************************************/
+static void bnx2x_dcbx_three_pg_to_cos_params(
+ struct bnx2x *bp,
+ struct pg_help_data *pg_help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
+{
+ u8 i = 0;
+ u32 pri_tested = 0;
+ u8 entry = 0;
+ u8 pg_entry = 0;
+ bool b_found_strict = false;
+ u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+ cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+ /* If there are both pauseable and non-pauseable priorities,
+ * the pauseable priorities go to the first queue and the
+ * non-pauseable priorities go to the second queue.
+ */
+ if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask))
+ bnx2x_dcbx_separate_pauseable_from_non(bp,
+ cos_data, pg_pri_orginal_spread, ets);
+ else {
+ /* If two BW-limited PG-s were combined to one queue,
+ * the BW is their sum.
+ *
+ * If there are only pauseable priorities or only non-pauseable,
+ * and there are both BW-limited and non-BW-limited PG-s,
+ * the BW-limited PG/s go to one queue and the non-BW-limited
+ * PG/s go to the second queue.
+ *
+ * If there are only pauseable priorities or only non-pauseable
+ * and all are BW limited, then two priorities go to the first
+ * queue and one priority goes to the second queue.
+ *
+ * We will join these two cases:
+ * if one is BW limited it will go to the second queue,
+ * otherwise the last priority will get it.
+ */
+
+ cos_data->data[0].pausable = cos_data->data[1].pausable =
+ IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+ for (i = 0 ; i < num_of_pri; i++) {
+ pri_tested = 1 << bp->dcbx_port_params.
+ app.traffic_type_priority[i];
+ pg_entry = (u8)pg_pri_orginal_spread[bp->
+ dcbx_port_params.app.traffic_type_priority[i]];
+
+ if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ entry = 0;
+
+ if (i == (num_of_pri-1) &&
+ false == b_found_strict)
+ /* last entry will be handled separately;
+ * if no priority is strict then the last
+ * entry goes to the last queue. */
+ entry = 1;
+ cos_data->data[entry].pri_join_mask |=
+ pri_tested;
+ bnx2x_dcbx_add_to_cos_bw(bp,
+ &cos_data->data[entry],
+ DCBX_PG_BW_GET(ets->pg_bw_tbl,
+ pg_entry));
+ } else {
+ b_found_strict = true;
+ cos_data->data[1].pri_join_mask |= pri_tested;
+ /* If we join a group and one is strict
+ * then strict priority rules over the bw */
+ cos_data->data[1].strict =
+ BNX2X_DCBX_COS_HIGH_STRICT;
+ }
+ }
+ }
+}
+
+
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ u32 *pg_pri_orginal_spread)
+{
+ struct cos_help_data cos_data;
+ u8 i = 0;
+ u32 pri_join_mask = 0;
+ u8 num_of_dif_pri = 0;
+
+ memset(&cos_data, 0, sizeof(cos_data));
+ /* Validate the pg value */
+ for (i = 0; i < help_data->num_of_pg ; i++) {
+ if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
+ DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
+ BNX2X_ERR("Invalid pg[%d] data %x\n", i,
+ help_data->data[i].pg);
+ pri_join_mask |= help_data->data[i].pg_priority;
+ num_of_dif_pri += help_data->data[i].num_of_dif_pri;
+ }
+
+ /* default settings */
+ cos_data.num_of_cos = 2;
+ for (i = 0; i < E2_NUM_OF_COS ; i++) {
+ cos_data.data[i].pri_join_mask = pri_join_mask;
+ cos_data.data[i].pausable = false;
+ cos_data.data[i].strict = BNX2X_DCBX_COS_NOT_STRICT;
+ cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
+ }
+
+ switch (help_data->num_of_pg) {
+ case 1:
+
+ bxn2x_dcbx_single_pg_to_cos_params(
+ bp,
+ help_data,
+ &cos_data,
+ pri_join_mask,
+ num_of_dif_pri);
+ break;
+ case 2:
+ bnx2x_dcbx_two_pg_to_cos_params(
+ bp,
+ help_data,
+ ets,
+ &cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+ break;
+
+ case 3:
+ bnx2x_dcbx_three_pg_to_cos_params(
+ bp,
+ help_data,
+ ets,
+ &cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+
+ break;
+ default:
+ BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
+ bnx2x_dcbx_ets_disabled_entry_data(bp,
+ &cos_data, pri_join_mask);
+ }
+
+ for (i = 0; i < cos_data.num_of_cos ; i++) {
+ struct bnx2x_dcbx_cos_params *params =
+ &bp->dcbx_port_params.ets.cos_params[i];
+
+ params->pauseable = cos_data.data[i].pausable;
+ params->strict = cos_data.data[i].strict;
+ params->bw_tbl = cos_data.data[i].cos_bw;
+ if (params->pauseable) {
+ params->pri_bitmask =
+ DCBX_PFC_PRI_GET_PAUSE(bp,
+ cos_data.data[i].pri_join_mask);
+ DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
+ i, cos_data.data[i].pri_join_mask);
+ } else {
+ params->pri_bitmask =
+ DCBX_PFC_PRI_GET_NON_PAUSE(bp,
+ cos_data.data[i].pri_join_mask);
+ DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
+ "0x%x\n",
+ i, cos_data.data[i].pri_join_mask);
+ }
+ }
+
+ bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos;
+}
+
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+ u32 *set_configuration_ets_pg,
+ u32 *pri_pg_tbl)
+{
+ int i;
+
+ for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+ set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
+
+ DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n",
+ i, set_configuration_ets_pg[i]);
+ }
+}
+
+/*******************************************************************************
+ * Description: Fill pfc_config struct that will be sent in DCBX start ramrod
+ *
+ * Return:
+ ******************************************************************************/
+static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
+{
+ struct flow_control_configuration *pfc_fw_cfg = NULL;
+ u16 pri_bit = 0;
+ u8 cos = 0, pri = 0;
+ struct priority_cos *tt2cos;
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+ pfc_fw_cfg = (struct flow_control_configuration *)
+ bnx2x_sp(bp, pfc_config);
+ memset(pfc_fw_cfg, 0, sizeof(struct flow_control_configuration));
+
+ /* shortcut */
+ tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;
+
+ /* Fw version should be incremented each update */
+ pfc_fw_cfg->dcb_version = ++bp->dcb_version;
+ pfc_fw_cfg->dcb_enabled = DCB_ENABLED;
+
+ /* Default initialization */
+ for (pri = 0; pri < MAX_PFC_TRAFFIC_TYPES ; pri++) {
+ tt2cos[pri].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
+ tt2cos[pri].cos = 0;
+ }
+
+ /* Fill priority parameters */
+ for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+ tt2cos[pri].priority = ttp[pri];
+ pri_bit = 1 << tt2cos[pri].priority;
+
+ /* Fill COS parameters based on the calculated COS,
+ * to make this more general for future use */
+ for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
+ if (bp->dcbx_port_params.ets.cos_params[cos].
+ pri_bitmask & pri_bit)
+ tt2cos[pri].cos = cos;
+ }
+ bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
+}
+/* DCB netlink */
+#ifdef BCM_DCBNL
+
+#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \
+ DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
+
+static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp)
+{
+ /* validate dcbnl call that may change HW state:
+ * DCB is on and DCBX mode was SUCCESSFULLY set by the user.
+ */
+ return bp->dcb_state && bp->dcbx_mode_uset;
+}
+
+static u8 bnx2x_dcbnl_get_state(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state);
+ return bp->dcb_state;
+}
+
+static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
+
+ bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
+ return 0;
+}
+
+static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
+ u8 *perm_addr)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n");
+
+ /* first the HW mac address */
+ memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+
+#ifdef BCM_CNIC
+ /* second SAN address */
+ memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len);
+#endif
+}
+
+static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
+ u8 prio_type, u8 pgid, u8 bw_pct,
+ u8 up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid);
+ if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
+ return;
+
+ /**
+ * bw_pct ignored - bandwidth percentage division between user
+ * priorities within the same group is not
+ * standard and hence not supported
+ *
+ * prio_type ignored - priority levels within the same group are not
+ * standard and hence are not supported. According
+ * to the standard, pgid 15 is dedicated to strict
+ * priority traffic (on the port level).
+ *
+ * up_map ignored
+ */
+
+ bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid;
+ bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev,
+ int pgid, u8 bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct);
+
+ if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
+ return;
+
+ bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct;
+ bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio,
+ u8 prio_type, u8 pgid, u8 bw_pct,
+ u8 up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
+}
+
+static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev,
+ int pgid, u8 bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
+}
+
+static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "prio = %d\n", prio);
+
+ /**
+ * bw_pct ignored - bandwidth percentage division between user
+ * priorities within the same group is not
+ * standard and hence not supported
+ *
+ * prio_type ignored - priority levels within the same group are not
+ * standard and hence are not supported. According
+ * to the standard, pgid 15 is dedicated to strict
+ * priority traffic (on the port level).
+ *
+ * up_map ignored
+ */
+ *up_map = *bw_pct = *prio_type = *pgid = 0;
+
+ if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
+ return;
+
+ *pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio);
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "pgid = %d\n", pgid);
+
+ *bw_pct = 0;
+
+ if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
+ return;
+
+ *bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid);
+}
+
+static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+
+ *bw_pct = 0;
+}
+
+static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting);
+
+ if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
+ return;
+
+ bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio);
+
+ if (setting)
+ bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "prio = %d\n", prio);
+
+ *setting = 0;
+
+ if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES)
+ return;
+
+ *setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1;
+}
+
+static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ int rc = 0;
+
+ DP(NETIF_MSG_LINK, "SET-ALL\n");
+
+ if (!bnx2x_dcbnl_set_valid(bp))
+ return 1;
+
+ if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+ netdev_err(bp->dev, "Handling parity error recovery. "
+ "Try again later\n");
+ return 1;
+ }
+ if (netif_running(bp->dev)) {
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ }
+ DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc);
+ if (rc)
+ return 1;
+
+ return 0;
+}
+
+static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ if (bp->dcb_state) {
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ *cap = 0x80; /* 8 priorities for PGs */
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80; /* 8 priorities for PFC */
+ break;
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_BCN:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = BNX2X_DCBX_CAPS;
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ } else
+ rval = -EINVAL;
+
+ DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap);
+ return rval;
+}
+
+static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ DP(NETIF_MSG_LINK, "tcid %d\n", tcid);
+
+ if (bp->dcb_state) {
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = E2_NUM_OF_COS;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = E2_NUM_OF_COS;
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ } else
+ rval = -EINVAL;
+
+ return rval;
+}
+
+static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num);
+ return -EINVAL;
+}
+
+static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
+
+ if (!bp->dcb_state)
+ return 0;
+
+ return bp->dcbx_local_feat.pfc.enabled;
+}
+
+static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
+
+ if (!bnx2x_dcbnl_set_valid(bp))
+ return;
+
+ bp->dcbx_config_params.admin_pfc_tx_enable =
+ bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
+}
+
+static void bnx2x_admin_app_set_ent(
+ struct bnx2x_admin_priority_app_table *app_ent,
+ u8 idtype, u16 idval, u8 up)
+{
+ app_ent->valid = 1;
+
+ switch (idtype) {
+ case DCB_APP_IDTYPE_ETHTYPE:
+ app_ent->traffic_type = TRAFFIC_TYPE_ETH;
+ break;
+ case DCB_APP_IDTYPE_PORTNUM:
+ app_ent->traffic_type = TRAFFIC_TYPE_PORT;
+ break;
+ default:
+ break; /* never gets here */
+ }
+ app_ent->app_id = idval;
+ app_ent->priority = up;
+}
+
+static bool bnx2x_admin_app_is_equal(
+ struct bnx2x_admin_priority_app_table *app_ent,
+ u8 idtype, u16 idval)
+{
+ if (!app_ent->valid)
+ return false;
+
+ switch (idtype) {
+ case DCB_APP_IDTYPE_ETHTYPE:
+ if (app_ent->traffic_type != TRAFFIC_TYPE_ETH)
+ return false;
+ break;
+ case DCB_APP_IDTYPE_PORTNUM:
+ if (app_ent->traffic_type != TRAFFIC_TYPE_PORT)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ if (app_ent->app_id != idval)
+ return false;
+
+ return true;
+}
+
+static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
+{
+ int i, ff;
+
+ /* iterate over the app entries looking for idtype and idval */
+ for (i = 0, ff = -1; i < 4; i++) {
+ struct bnx2x_admin_priority_app_table *app_ent =
+ &bp->dcbx_config_params.admin_priority_app_table[i];
+ if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
+ break;
+
+ if (ff < 0 && !app_ent->valid)
+ ff = i;
+ }
+ if (i < 4)
+ /* if found overwrite up */
+ bp->dcbx_config_params.
+ admin_priority_app_table[i].priority = up;
+ else if (ff >= 0)
+ /* not found use first-free */
+ bnx2x_admin_app_set_ent(
+ &bp->dcbx_config_params.admin_priority_app_table[ff],
+ idtype, idval, up);
+ else
+ /* app table is full */
+ return -EBUSY;
+
+ /* up configured, if not 0 make sure feature is enabled */
+ if (up)
+ bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
+
+ return 0;
+}
+
+static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
+ u16 idval, u8 up)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+
+ DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n",
+ idtype, idval, up);
+
+ if (!bnx2x_dcbnl_set_valid(bp))
+ return -EINVAL;
+
+ /* verify idtype */
+ switch (idtype) {
+ case DCB_APP_IDTYPE_ETHTYPE:
+ case DCB_APP_IDTYPE_PORTNUM:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return bnx2x_set_admin_app_up(bp, idtype, idval, up);
+}
+
+static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 state;
+
+ state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE;
+
+ if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF)
+ state |= DCB_CAP_DCBX_STATIC;
+
+ return state;
+}
+
+static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ DP(NETIF_MSG_LINK, "state = %02x\n", state);
+
+ /* set dcbx mode */
+
+ if ((state & BNX2X_DCBX_CAPS) != state) {
+ BNX2X_ERR("Requested DCBX mode %x is beyond advertised "
+ "capabilities\n", state);
+ return 1;
+ }
+
+ if (bp->dcb_state != BNX2X_DCB_STATE_ON) {
+ BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n");
+ return 1;
+ }
+
+ if (state & DCB_CAP_DCBX_STATIC)
+ bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF;
+ else
+ bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON;
+
+ bp->dcbx_mode_uset = true;
+ return 0;
+}
+
+
+static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
+ u8 *flags)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ DP(NETIF_MSG_LINK, "featid %d\n", featid);
+
+ if (bp->dcb_state) {
+ *flags = 0;
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (bp->dcbx_local_feat.ets.enabled)
+ *flags |= DCB_FEATCFG_ENABLE;
+ if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
+ *flags |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (bp->dcbx_local_feat.pfc.enabled)
+ *flags |= DCB_FEATCFG_ENABLE;
+ if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
+ DCBX_LOCAL_PFC_MISMATCH))
+ *flags |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ if (bp->dcbx_local_feat.app.enabled)
+ *flags |= DCB_FEATCFG_ENABLE;
+ if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
+ DCBX_LOCAL_APP_MISMATCH))
+ *flags |= DCB_FEATCFG_ERROR;
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ } else
+ rval = -EINVAL;
+
+ return rval;
+}
+
+static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
+ u8 flags)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u8 rval = 0;
+
+ DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags);
+
+ /* ignore the 'advertise' flag */
+ if (bnx2x_dcbnl_set_valid(bp)) {
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ bp->dcbx_config_params.admin_ets_enable =
+ flags & DCB_FEATCFG_ENABLE ? 1 : 0;
+ bp->dcbx_config_params.admin_ets_willing =
+ flags & DCB_FEATCFG_WILLING ? 1 : 0;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ bp->dcbx_config_params.admin_pfc_enable =
+ flags & DCB_FEATCFG_ENABLE ? 1 : 0;
+ bp->dcbx_config_params.admin_pfc_willing =
+ flags & DCB_FEATCFG_WILLING ? 1 : 0;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ /* ignore enable, always enabled */
+ bp->dcbx_config_params.admin_app_priority_willing =
+ flags & DCB_FEATCFG_WILLING ? 1 : 0;
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ } else
+ rval = -EINVAL;
+
+ return rval;
+}
+
+const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
+ .getstate = bnx2x_dcbnl_get_state,
+ .setstate = bnx2x_dcbnl_set_state,
+ .getpermhwaddr = bnx2x_dcbnl_get_perm_hw_addr,
+ .setpgtccfgtx = bnx2x_dcbnl_set_pg_tccfg_tx,
+ .setpgbwgcfgtx = bnx2x_dcbnl_set_pg_bwgcfg_tx,
+ .setpgtccfgrx = bnx2x_dcbnl_set_pg_tccfg_rx,
+ .setpgbwgcfgrx = bnx2x_dcbnl_set_pg_bwgcfg_rx,
+ .getpgtccfgtx = bnx2x_dcbnl_get_pg_tccfg_tx,
+ .getpgbwgcfgtx = bnx2x_dcbnl_get_pg_bwgcfg_tx,
+ .getpgtccfgrx = bnx2x_dcbnl_get_pg_tccfg_rx,
+ .getpgbwgcfgrx = bnx2x_dcbnl_get_pg_bwgcfg_rx,
+ .setpfccfg = bnx2x_dcbnl_set_pfc_cfg,
+ .getpfccfg = bnx2x_dcbnl_get_pfc_cfg,
+ .setall = bnx2x_dcbnl_set_all,
+ .getcap = bnx2x_dcbnl_get_cap,
+ .getnumtcs = bnx2x_dcbnl_get_numtcs,
+ .setnumtcs = bnx2x_dcbnl_set_numtcs,
+ .getpfcstate = bnx2x_dcbnl_get_pfc_state,
+ .setpfcstate = bnx2x_dcbnl_set_pfc_state,
+ .setapp = bnx2x_dcbnl_set_app_up,
+ .getdcbx = bnx2x_dcbnl_get_dcbx,
+ .setdcbx = bnx2x_dcbnl_set_dcbx,
+ .getfeatcfg = bnx2x_dcbnl_get_featcfg,
+ .setfeatcfg = bnx2x_dcbnl_set_featcfg,
+};
+
+#endif /* BCM_DCBNL */
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
new file mode 100644
index 000000000000..71b8eda43bd0
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -0,0 +1,197 @@
+/* bnx2x_dcb.h: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2010 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Dmitry Kravkov
+ *
+ */
+#ifndef BNX2X_DCB_H
+#define BNX2X_DCB_H
+
+#include "bnx2x_hsi.h"
+
+#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */
+struct bnx2x_dcbx_app_params {
+ u32 enabled;
+ u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+};
+
+#define E2_NUM_OF_COS 2
+#define BNX2X_DCBX_COS_NOT_STRICT 0
+#define BNX2X_DCBX_COS_LOW_STRICT 1
+#define BNX2X_DCBX_COS_HIGH_STRICT 2
+
+struct bnx2x_dcbx_cos_params {
+ u32 bw_tbl;
+ u32 pri_bitmask;
+ u8 strict;
+ u8 pauseable;
+};
+
+struct bnx2x_dcbx_pg_params {
+ u32 enabled;
+ u8 num_of_cos; /* valid COS entries */
+ struct bnx2x_dcbx_cos_params cos_params[E2_NUM_OF_COS];
+};
+
+struct bnx2x_dcbx_pfc_params {
+ u32 enabled;
+ u32 priority_non_pauseable_mask;
+};
+
+struct bnx2x_dcbx_port_params {
+ struct bnx2x_dcbx_pfc_params pfc;
+ struct bnx2x_dcbx_pg_params ets;
+ struct bnx2x_dcbx_app_params app;
+};
+
+#define BNX2X_DCBX_CONFIG_INV_VALUE (0xFFFFFFFF)
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE)
+
+/*******************************************************************************
+ * LLDP protocol configuration parameters.
+ ******************************************************************************/
+struct bnx2x_config_lldp_params {
+ u32 overwrite_settings;
+ u32 msg_tx_hold;
+ u32 msg_fast_tx;
+ u32 tx_credit_max;
+ u32 msg_tx_interval;
+ u32 tx_fast;
+};
+
+struct bnx2x_admin_priority_app_table {
+ u32 valid;
+ u32 priority;
+#define INVALID_TRAFFIC_TYPE_PRIORITY (0xFFFFFFFF)
+ u32 traffic_type;
+#define TRAFFIC_TYPE_ETH 0
+#define TRAFFIC_TYPE_PORT 1
+ u32 app_id;
+};
+
+/*******************************************************************************
+ * DCBX protocol configuration parameters.
+ ******************************************************************************/
+struct bnx2x_config_dcbx_params {
+ u32 overwrite_settings;
+ u32 admin_dcbx_version;
+ u32 admin_ets_enable;
+ u32 admin_pfc_enable;
+ u32 admin_tc_supported_tx_enable;
+ u32 admin_ets_configuration_tx_enable;
+ u32 admin_ets_recommendation_tx_enable;
+ u32 admin_pfc_tx_enable;
+ u32 admin_application_priority_tx_enable;
+ u32 admin_ets_willing;
+ u32 admin_ets_reco_valid;
+ u32 admin_pfc_willing;
+ u32 admin_app_priority_willing;
+ u32 admin_configuration_bw_precentage[8];
+ u32 admin_configuration_ets_pg[8];
+ u32 admin_recommendation_bw_precentage[8];
+ u32 admin_recommendation_ets_pg[8];
+ u32 admin_pfc_bitmap;
+ struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
+ u32 admin_default_priority;
+};
+
+#define GET_FLAGS(flags, bits) ((flags) & (bits))
+#define SET_FLAGS(flags, bits) ((flags) |= (bits))
+#define RESET_FLAGS(flags, bits) ((flags) &= ~(bits))
+
+enum {
+ DCBX_READ_LOCAL_MIB,
+ DCBX_READ_REMOTE_MIB
+};
+
+#define ETH_TYPE_FCOE (0x8906)
+#define TCP_PORT_ISCSI (0xCBC)
+
+#define PFC_VALUE_FRAME_SIZE (512)
+#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed) \
+ ((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed))
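+/* e.g. at an illustrative speed of 10000 Mbps this yields
+ * (1000 * 512) / 10000 = 51 ns per quantum (integer division)
+ */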
+
+#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130
+#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170
+
+
+
+struct cos_entry_help_data {
+ u32 pri_join_mask;
+ u32 cos_bw;
+ u8 strict;
+ bool pausable;
+};
+
+struct cos_help_data {
+ struct cos_entry_help_data data[E2_NUM_OF_COS];
+ u8 num_of_cos;
+};
+
+#define DCBX_ILLEGAL_PG (0xFF)
+#define DCBX_PFC_PRI_MASK (0xFF)
+#define DCBX_STRICT_PRIORITY (15)
+#define DCBX_INVALID_COS_BW (0xFFFFFFFF)
+#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp) \
+ ((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
+#define DCBX_PFC_PRI_PAUSE_MASK(bp) \
+ ((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
+#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri) \
+ ((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
+#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \
+ (DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
+#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \
+ (pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
+ ((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri) \
+ (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
+ IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
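+/*
+ * Example with illustrative values: if priority_non_pauseable_mask is 0x0c,
+ * then for pg_pri 0x0d the pauseable part is 0x01 and the non-pauseable part
+ * is 0x0c, so IS_DCBX_PFC_PRI_MIX_PAUSE() evaluates true for that pg_pri.
+ */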
+
+
+struct pg_entry_help_data {
+ u8 num_of_dif_pri;
+ u8 pg;
+ u32 pg_priority;
+};
+
+struct pg_help_data {
+ struct pg_entry_help_data data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+ u8 num_of_pg;
+};
+
+/* forward DCB/PFC related declarations */
+struct bnx2x;
+void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp);
+void bnx2x_dcbx_update(struct work_struct *work);
+void bnx2x_dcbx_init_params(struct bnx2x *bp);
+void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
+
+enum {
+ BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
+ BNX2X_DCBX_STATE_TX_PAUSED = 0x2,
+ BNX2X_DCBX_STATE_TX_RELEASED = 0x4
+};
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
+
+/* DCB netlink */
+#ifdef BCM_DCBNL
+extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
+#endif /* BCM_DCBNL */
+
+#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index dc18c25ca9e5..fb3ff7c4d7ca 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -1,10 +1,16 @@
/* bnx2x_dump.h: Broadcom Everest network driver.
*
- * Copyright (c) 2009 Broadcom Corporation
+ * Copyright (c) 2011 Broadcom Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
*/
@@ -17,53 +23,53 @@
#define BNX2X_DUMP_H
-struct dump_sign {
- u32 time_stamp;
- u32 diag_ver;
- u32 grc_dump_ver;
-};
-#define TSTORM_WAITP_ADDR 0x1b8a80
-#define CSTORM_WAITP_ADDR 0x238a80
-#define XSTORM_WAITP_ADDR 0x2b8a80
-#define USTORM_WAITP_ADDR 0x338a80
-#define TSTORM_CAM_MODE 0x1b1440
+/*definitions */
+#define XSTORM_WAITP_ADDR 0x2b8a80
+#define TSTORM_WAITP_ADDR 0x1b8a80
+#define USTORM_WAITP_ADDR 0x338a80
+#define CSTORM_WAITP_ADDR 0x238a80
+#define TSTORM_CAM_MODE 0x1B1440
-#define RI_E1 0x1
-#define RI_E1H 0x2
+#define MAX_TIMER_PENDING 200
+#define TIMER_SCAN_DONT_CARE 0xFF
+#define RI_E1 0x1
+#define RI_E1H 0x2
#define RI_E2 0x4
-#define RI_ONLINE 0x100
+#define RI_ONLINE 0x100
#define RI_PATH0_DUMP 0x200
#define RI_PATH1_DUMP 0x400
-#define RI_E1_OFFLINE (RI_E1)
-#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
-#define RI_E1H_OFFLINE (RI_E1H)
-#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
-#define RI_E2_OFFLINE (RI_E2)
-#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
-#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
-#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
-#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
-#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
-#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
-#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
-#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
-#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
-
-#define MAX_TIMER_PENDING 200
-#define TIMER_SCAN_DONT_CARE 0xFF
+#define RI_E1_OFFLINE (RI_E1)
+#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
+#define RI_E1H_OFFLINE (RI_E1H)
+#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
+#define RI_E2_OFFLINE (RI_E2)
+#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
+#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
+#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
+#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
+#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
+#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
+#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
+#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
+#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
+struct dump_sign {
+ u32 time_stamp;
+ u32 diag_ver;
+ u32 grc_dump_ver;
+};
struct dump_hdr {
- u32 hdr_size; /* in dwords, excluding this field */
- struct dump_sign dump_sign;
- u32 xstorm_waitp;
- u32 tstorm_waitp;
- u32 ustorm_waitp;
- u32 cstorm_waitp;
- u16 info;
- u8 idle_chk;
- u8 reserved;
+ u32 hdr_size; /* in dwords, excluding this field */
+ struct dump_sign dump_sign;
+ u32 xstorm_waitp;
+ u32 tstorm_waitp;
+ u32 ustorm_waitp;
+ u32 cstorm_waitp;
+ u16 info;
+ u8 idle_chk;
+ u8 reserved;
};
struct reg_addr {
@@ -80,202 +86,185 @@ struct wreg_addr {
u16 info;
};
-
-#define REGS_COUNT 558
+#define REGS_COUNT 834
static const struct reg_addr reg_addrs[REGS_COUNT] = {
{ 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
{ 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
- { 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE },
- { 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE },
- { 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE },
- { 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE },
- { 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE },
- { 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE },
- { 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE },
- { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE },
- { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE },
- { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE },
- { 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE },
- { 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE },
- { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE },
- { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE },
- { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE },
- { 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE },
- { 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
- { 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE },
- { 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE },
- { 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE },
- { 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE },
- { 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE },
- { 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE },
- { 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE },
- { 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE },
- { 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE },
- { 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE },
- { 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE },
- { 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE },
- { 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE },
- { 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE },
- { 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE },
- { 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE },
- { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE },
- { 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE },
- { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE },
- { 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE },
- { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE },
- { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE },
- { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE },
- { 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE },
- { 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE },
- { 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE },
- { 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE },
- { 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
+ { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE },
+ { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE },
+ { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE },
+ { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE },
+ { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE },
+ { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE },
+ { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE },
+ { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE },
+ { 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE },
+ { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE },
+ { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE },
+ { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE },
+ { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE },
+ { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE },
+ { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE },
+ { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE },
+ { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE },
+ { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE },
+ { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE },
+ { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE },
+ { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE },
+ { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE },
+ { 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE },
+ { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE },
+ { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE },
+ { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE },
+ { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
+ { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE },
+ { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE },
+ { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE },
+ { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE },
+ { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE },
+ { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE },
+ { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE },
+ { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE },
+ { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE },
+ { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE },
+ { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE },
+ { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE },
+ { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE },
+ { 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE },
+ { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE },
+ { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE },
+ { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE },
+ { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE },
+ { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE },
+ { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE },
+ { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE },
+ { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE },
+ { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE },
+ { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE },
+ { 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE },
+ { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE },
+ { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE },
+ { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE },
+ { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE },
+ { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE },
+ { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE },
+ { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE },
+ { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
{ 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
{ 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
- { 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE },
- { 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE },
- { 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE },
- { 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE },
- { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE },
- { 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE },
- { 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE },
- { 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE },
- { 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE },
- { 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE },
+ { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE },
+ { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE },
+ { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE },
+ { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE },
+ { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE },
+ { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE },
+ { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE },
+ { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE },
+ { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE },
+ { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE },
+ { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE },
+ { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE },
+ { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE },
{ 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
{ 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
- { 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE },
- { 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE },
- { 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
+ { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE },
+ { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE },
+ { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE },
+ { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE },
+ { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE },
+ { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE },
+ { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
+ { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE },
{ 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
- { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE },
- { 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE },
- { 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE },
- { 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE },
- { 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE },
- { 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE },
- { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE },
- { 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE },
- { 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE },
- { 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE },
+ { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE },
+ { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE },
+ { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE },
+ { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE },
+ { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE },
+ { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE },
{ 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
{ 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
- { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE },
- { 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE },
- { 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE },
- { 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE },
+ { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE },
+ { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE },
+ { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE },
+ { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE },
{ 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
{ 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
- { 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE },
- { 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE },
- { 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE },
- { 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE },
- { 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE },
- { 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE },
- { 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE },
- { 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE },
- { 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
- { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE },
+ { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE },
+ { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE },
+ { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE },
+ { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE },
+ { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE },
+ { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE },
+ { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE },
+ { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE },
+ { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
+ { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE },
{ 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
{ 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
{ 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
{ 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
- { 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE },
- { 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE },
- { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE },
- { 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE },
+ { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE },
+ { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE },
+ { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE },
{ 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
- { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE },
- { 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE },
- { 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE },
+ { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE },
+ { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE },
+ { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE },
+ { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE },
{ 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
- { 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE },
- { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE },
- { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE },
- { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE },
- { 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE },
- { 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE },
- { 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE },
- { 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE },
- { 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE },
- { 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE },
- { 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE },
- { 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE },
- { 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE },
- { 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
- { 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE },
- { 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
- { 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE },
- { 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
- { 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE },
- { 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE },
- { 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE },
- { 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE },
- { 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE },
- { 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE },
- { 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE },
- { 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE },
- { 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE },
- { 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE },
- { 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE },
- { 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
- { 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE },
- { 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
- { 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE },
- { 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
- { 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE },
- { 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE },
- { 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE },
- { 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE },
- { 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE },
- { 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE },
- { 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE },
- { 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE },
- { 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE },
- { 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE },
- { 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE },
- { 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
- { 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE },
- { 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
- { 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE },
- { 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
- { 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE },
- { 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE },
- { 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE },
- { 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE },
- { 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE },
- { 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE },
- { 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE },
- { 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE },
- { 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE },
- { 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE },
- { 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE },
- { 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
- { 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE },
- { 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
- { 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE },
- { 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
- { 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE },
- { 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE },
- { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE },
- { 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE },
- { 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE },
- { 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE },
- { 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE },
- { 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE },
- { 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE },
- { 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
- { 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE },
- { 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE },
- { 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
- { 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE },
- { 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE },
- { 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE },
- { 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
- { 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE },
- { 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE },
- { 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE },
- { 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
+ { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE },
+ { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE },
+ { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE },
+ { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE },
+ { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE },
+ { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE },
+ { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE },
+ { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE },
+ { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE },
+ { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE },
+ { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE },
+ { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE },
+ { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE },
+ { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE },
+ { 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE },
+ { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE },
+ { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE },
+ { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE },
+ { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE },
+ { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE },
+ { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE },
+ { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE },
+ { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE },
+ { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE },
+ { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE },
+ { 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE },
+ { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE },
+ { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE },
+ { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
+ { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE },
+ { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE },
+ { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE },
+ { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE },
+ { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE },
+ { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE },
+ { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE },
+ { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE },
+ { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE },
+ { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE },
+ { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE },
+ { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE },
+ { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE },
+ { 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE },
+ { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE },
+ { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE },
+ { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE },
+ { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE },
+ { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE },
+ { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
+ { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE },
+ { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE },
+ { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
{ 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
{ 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
{ 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
@@ -284,169 +273,298 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = {
{ 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
{ 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
{ 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
- { 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE },
- { 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE },
+ { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE },
{ 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
- { 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE },
- { 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE },
- { 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE },
- { 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE },
- { 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE },
- { 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE },
- { 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
- { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE },
- { 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE },
- { 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE },
- { 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE },
- { 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE },
- { 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE },
- { 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE },
- { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE },
- { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE },
+ { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE },
+ { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE },
+ { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE },
+ { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE },
+ { 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE },
+ { 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE },
+ { 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE },
+ { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE },
+ { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE },
+ { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE },
+ { 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
+ { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE },
+ { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE },
+ { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE },
+ { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE },
+ { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE },
+ { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE },
+ { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE },
+ { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE },
+ { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE },
+ { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE },
+ { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE },
+ { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE },
+ { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE },
+ { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE },
+ { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE },
+ { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE },
{ 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
- { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE },
- { 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE },
- { 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE },
- { 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE },
- { 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE },
- { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE },
- { 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
+ { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE },
+ { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE },
+ { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE },
+ { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE },
+ { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE },
+ { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE },
+ { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE },
+ { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE },
+ { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE },
+ { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE },
+ { 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE },
+ { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE },
+ { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
+ { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
+ { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE },
+ { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE },
+ { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
{ 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
- { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
- { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE },
- { 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE },
- { 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE },
- { 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE },
- { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE },
- { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE },
- { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE },
- { 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE },
- { 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE },
- { 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE },
- { 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE },
- { 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
- { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE },
- { 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE },
- { 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE },
- { 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE },
- { 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE },
- { 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE },
- { 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE },
- { 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE },
- { 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE },
- { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE },
- { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE },
- { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE },
- { 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE },
- { 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE },
- { 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE },
- { 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
- { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE },
- { 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE },
- { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE },
- { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE },
- { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
- { 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE },
- { 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE },
- { 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE },
+ { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE },
+ { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE },
+ { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE },
+ { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE },
+ { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE },
+ { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE },
+ { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE },
+ { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE },
+ { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE },
+ { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE },
+ { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
+ { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE },
+ { 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE },
+ { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE },
+ { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE },
+ { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE },
+ { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE },
+ { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE },
+ { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE },
+ { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE },
+ { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE },
+ { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE },
+ { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE },
+ { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE },
+ { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE },
+ { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE },
+ { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE },
+ { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE },
+ { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE },
+ { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE },
+ { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE },
+ { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE },
+ { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE },
+ { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE },
+ { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE },
+ { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE},
+ { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE },
+ { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE },
+ { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE },
+ { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE },
+ { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE },
+ { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE },
+ { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE },
+ { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE },
+ { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
+ { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE },
+ { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE },
+ { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
+ { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
+ { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE },
+ { 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE },
+ { 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE },
+ { 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE },
+ { 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE },
+ { 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE },
+ { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE },
+ { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE },
+ { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE },
+ { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE },
+ { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE },
+ { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE },
+ { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE },
+ { 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE },
+ { 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE },
+ { 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE },
+ { 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE },
+ { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE },
+ { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE },
+ { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE },
+ { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE },
+ { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE },
+ { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE },
+ { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE },
+ { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE },
+ { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE },
+ { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE },
+ { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE },
+ { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE },
+ { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE },
+ { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE },
+ { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE },
+ { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE },
+ { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE },
+ { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE },
+ { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE },
+ { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE },
+ { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE},
+ { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE },
+ { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE },
+ { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE },
+ { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE },
+ { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE },
+ { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE },
+ { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE },
+ { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE },
+ { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
+ { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE },
+ { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE },
+ { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
+ { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE },
+ { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE },
+ { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE },
+ { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE },
+ { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE },
+ { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE },
+ { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE },
+ { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE },
+ { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE },
+ { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE },
+ { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE },
+ { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE },
+ { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE },
+ { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE },
+ { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE },
+ { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE },
+ { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE },
+ { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE },
+ { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE },
+ { 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE },
+ { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE },
+ { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE },
+ { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE },
+ { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE },
+ { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE },
+ { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE },
+ { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE },
+ { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE },
+ { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE },
+ { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE },
+ { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE },
+ { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
+ { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE },
{ 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
- { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE },
+ { 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE },
+ { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE },
+ { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE },
+ { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE },
{ 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
- { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE },
- { 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE },
- { 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE },
- { 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE },
- { 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE },
- { 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE },
- { 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE },
+ { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE },
+ { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE },
+ { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE },
+ { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE },
+ { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE },
+ { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE },
+ { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE },
+ { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE },
+ { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE },
+ { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE },
+ { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE },
+ { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE },
+ { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE },
+ { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
+ { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE },
+ { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE },
+ { 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE },
{ 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE },
- { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
- { 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }
+ { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE },
+ { 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE },
+ { 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE },
+ { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE },
+ { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE },
+ { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE },
+ { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE },
+ { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE },
+ { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE },
+ { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE },
+ { 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
+ { 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE },
+ { 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE },
+ { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE },
+ { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE },
+ { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE },
+ { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE },
+ { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE },
+ { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE },
+ { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE },
+ { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE },
+ { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE },
+ { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE },
+ { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE },
+ { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE },
+ { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE },
};
-
-#define IDLE_REGS_COUNT 277
+#define IDLE_REGS_COUNT 237
static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
- { 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE },
- { 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
- { 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE },
+ { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE },
+ { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
+ { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE },
+ { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE },
+ { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE },
+ { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE },
+ { 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE },
+ { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE },
{ 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
- { 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE },
- { 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE },
- { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE },
- { 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE },
- { 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE },
- { 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE },
- { 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE },
- { 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE },
- { 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE },
- { 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE },
- { 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE },
- { 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE },
- { 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE },
- { 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE },
- { 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE },
- { 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE },
- { 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE },
- { 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE },
- { 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE },
- { 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE },
- { 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE },
- { 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE },
- { 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE },
- { 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE },
- { 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE },
- { 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE },
- { 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE },
- { 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE },
- { 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE },
- { 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE },
- { 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE },
- { 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE },
- { 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE },
+ { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE },
+ { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE },
+ { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE },
+ { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE },
+ { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE },
+ { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE },
+ { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE },
+ { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE },
+ { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE },
+ { 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE },
+ { 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE },
+ { 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE },
+ { 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE },
+ { 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE },
+ { 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE },
+ { 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE },
+ { 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE },
+ { 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE },
+ { 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE },
+ { 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE },
+ { 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE },
+ { 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE },
+ { 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE },
+ { 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE },
+ { 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE },
+ { 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE },
+ { 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE },
+ { 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE },
+ { 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE },
+ { 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE },
+ { 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE },
+ { 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE },
+ { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE },
+ { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE },
+ { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE },
+ { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE },
+ { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE },
{ 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
{ 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
{ 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
{ 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE },
- { 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE },
- { 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE },
- { 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE },
- { 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE },
- { 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE },
- { 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
- { 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
- { 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
- { 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE },
- { 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE },
- { 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE },
- { 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE },
- { 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE },
- { 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
- { 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
- { 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
- { 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE },
- { 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE },
- { 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE },
- { 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE },
- { 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE },
- { 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
- { 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
- { 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
- { 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE },
- { 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE },
- { 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE },
- { 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE },
- { 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE },
- { 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
- { 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
- { 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
- { 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE },
- { 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE },
- { 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE },
- { 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE },
- { 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE },
- { 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE },
- { 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE },
+ { 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE },
+ { 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE },
+ { 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE },
+ { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE },
+ { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE },
+ { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE },
+ { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE },
+ { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE },
{ 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
{ 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
{ 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
@@ -462,48 +580,50 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
{ 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
{ 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
{ 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
- { 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE },
- { 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE },
- { 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
- { 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE },
- { 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE },
- { 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE },
- { 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE },
- { 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE },
- { 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE },
- { 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE },
- { 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE },
- { 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE },
- { 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE },
- { 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
+ { 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE },
+ { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE },
+ { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE },
+ { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE },
+ { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE },
+ { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE },
+ { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE },
+ { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE },
+ { 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE },
+ { 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE },
+ { 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE },
+ { 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE },
+ { 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE },
+ { 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE },
+ { 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE },
+ { 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE },
+ { 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE },
+ { 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE },
+ { 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
{ 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
{ 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
- { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE },
- { 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE },
- { 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE },
- { 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE },
- { 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE },
- { 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE },
- { 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE },
- { 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
- { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
- { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE },
- { 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE },
- { 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE },
- { 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE },
- { 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE },
- { 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE },
- { 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE },
- { 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE },
- { 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE },
- { 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE },
- { 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE },
- { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE },
- { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
+ { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE },
+ { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE },
+ { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE },
+ { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE },
+ { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE },
+ { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE },
+ { 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE },
+ { 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE },
+ { 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE },
+ { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE },
+ { 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE },
+ { 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE },
+ { 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE },
+ { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
+ { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
+ { 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE },
+ { 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE },
+ { 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE },
+ { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
+ { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
{ 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE },
{ 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE },
- { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE },
- { 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
+ { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
{ 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
{ 0x3380c0, 1, RI_ALL_ONLINE }
};
@@ -515,7 +635,6 @@ static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = {
{ 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
};
-
#define WREGS_COUNT_E1H 1
static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
@@ -530,22 +649,53 @@ static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = {
{ 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
};
-static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 };
-
+static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a };
#define TIMER_REGS_COUNT_E1 2
-static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] =
- { 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] =
- { 0x1640d0, 0x1640d4 };
+static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = {
+ 0x164014, 0x164018 };
+static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = {
+ 0x1640d0, 0x1640d4 };
#define TIMER_REGS_COUNT_E1H 2
-static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] =
- { 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
- { 0x1640d0, 0x1640d4 };
+static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = {
+ 0x164014, 0x164018 };
+static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = {
+ 0x1640d0, 0x1640d4 };
+
+#define TIMER_REGS_COUNT_E2 2
+
+static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = {
+ 0x164014, 0x164018 };
+static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = {
+ 0x1640d0, 0x1640d4 };
+
+#define PAGE_MODE_VALUES_E1 0
+
+#define PAGE_READ_REGS_E1 0
+
+#define PAGE_WRITE_REGS_E1 0
+
+static const u32 page_vals_e1[] = { 0 };
+
+static const u32 page_write_regs_e1[] = { 0 };
+
+static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } };
+
+#define PAGE_MODE_VALUES_E1H 0
+
+#define PAGE_READ_REGS_E1H 0
+
+#define PAGE_WRITE_REGS_E1H 0
+
+static const u32 page_vals_e1h[] = { 0 };
+
+static const u32 page_write_regs_e1h[] = { 0 };
+
+static const struct reg_addr page_read_regs_e1h[] = {
+ { 0x0, 0, RI_E1H_ONLINE } };
#define PAGE_MODE_VALUES_E2 2
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index d02ffbdc9f0e..f5050155c6b5 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -24,6 +24,144 @@
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dump.h"
+#include "bnx2x_init.h"
+
+/* Note: in the format strings below %s is replaced by the queue-name which is
+ * either its index or 'fcoe' for the fcoe queue. Make sure the format string
+ * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
+ */
+#define MAX_QUEUE_NAME_LEN 4
+static const struct {
+ long offset;
+ int size;
+ char string[ETH_GSTRING_LEN];
+} bnx2x_q_stats_arr[] = {
+/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
+ { Q_STATS_OFFSET32(error_bytes_received_hi),
+ 8, "[%s]: rx_error_bytes" },
+ { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
+ 8, "[%s]: rx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
+ 8, "[%s]: rx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
+ 8, "[%s]: rx_bcast_packets" },
+ { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
+ { Q_STATS_OFFSET32(rx_err_discard_pkt),
+ 4, "[%s]: rx_phy_ip_err_discards"},
+ { Q_STATS_OFFSET32(rx_skb_alloc_failed),
+ 4, "[%s]: rx_skb_alloc_discard" },
+ { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
+
+/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
+ { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+ 8, "[%s]: tx_ucast_packets" },
+ { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, "[%s]: tx_mcast_packets" },
+ { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, "[%s]: tx_bcast_packets" }
+};
+
+#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
+
+static const struct {
+ long offset;
+ int size;
+ u32 flags;
+#define STATS_FLAGS_PORT 1
+#define STATS_FLAGS_FUNC 2
+#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
+ char string[ETH_GSTRING_LEN];
+} bnx2x_stats_arr[] = {
+/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_bytes" },
+ { STATS_OFFSET32(error_bytes_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
+ { STATS_OFFSET32(total_unicast_packets_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_received_hi),
+ 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
+ { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
+ 8, STATS_FLAGS_PORT, "rx_crc_errors" },
+ { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
+ 8, STATS_FLAGS_PORT, "rx_align_errors" },
+ { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
+ 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
+ { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
+ 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
+/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
+ 8, STATS_FLAGS_PORT, "rx_fragments" },
+ { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+ 8, STATS_FLAGS_PORT, "rx_jabbers" },
+ { STATS_OFFSET32(no_buff_discard_hi),
+ 8, STATS_FLAGS_BOTH, "rx_discards" },
+ { STATS_OFFSET32(mac_filter_discard),
+ 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+ { STATS_OFFSET32(xxoverflow_discard),
+ 4, STATS_FLAGS_PORT, "rx_fw_discards" },
+ { STATS_OFFSET32(brb_drop_hi),
+ 8, STATS_FLAGS_PORT, "rx_brb_discard" },
+ { STATS_OFFSET32(brb_truncate_hi),
+ 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
+ { STATS_OFFSET32(pause_frames_received_hi),
+ 8, STATS_FLAGS_PORT, "rx_pause_frames" },
+ { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
+ 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+ { STATS_OFFSET32(nig_timer_max),
+ 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
+/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
+ 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
+ { STATS_OFFSET32(rx_skb_alloc_failed),
+ 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
+ { STATS_OFFSET32(hw_csum_err),
+ 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
+
+ { STATS_OFFSET32(total_bytes_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_bytes" },
+ { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+ 8, STATS_FLAGS_PORT, "tx_error_bytes" },
+ { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
+ { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
+ { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+ 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
+ { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
+ 8, STATS_FLAGS_PORT, "tx_mac_errors" },
+ { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
+ 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
+ 8, STATS_FLAGS_PORT, "tx_single_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
+ 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+ 8, STATS_FLAGS_PORT, "tx_deferred" },
+ { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
+ 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
+ { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
+ 8, STATS_FLAGS_PORT, "tx_late_collisions" },
+ { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
+ 8, STATS_FLAGS_PORT, "tx_total_collisions" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
+ { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
+/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
+ { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
+ { STATS_OFFSET32(etherstatspktsover1522octets_hi),
+ 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
+ { STATS_OFFSET32(pause_frames_sent_hi),
+ 8, STATS_FLAGS_PORT, "tx_pause_frames" }
+};
+
+#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -45,14 +183,9 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->speed = bp->link_params.req_line_speed[cfg_idx];
cmd->duplex = bp->link_params.req_duplex[cfg_idx];
}
- if (IS_MF(bp)) {
- u16 vn_max_rate = ((bp->mf_config[BP_VN(bp)] &
- FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT) *
- 100;
- if (vn_max_rate < cmd->speed)
- cmd->speed = vn_max_rate;
- }
+ if (IS_MF(bp))
+ cmd->speed = bnx2x_get_mf_speed(bp);
if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
cmd->port = PORT_TP;
@@ -87,18 +220,56 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
+ u32 speed;
- if (IS_MF(bp))
+ if (IS_MF_SD(bp))
return 0;
DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
- DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
- DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
- DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
+ " supported 0x%x advertising 0x%x speed %d speed_hi %d\n"
+ " duplex %d port %d phy_address %d transceiver %d\n"
+ " autoneg %d maxtxpkt %d maxrxpkt %d\n",
cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
+ cmd->speed_hi,
cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+ speed = cmd->speed;
+ speed |= (cmd->speed_hi << 16);
+
+ if (IS_MF_SI(bp)) {
+ u32 part;
+ u32 line_speed = bp->link_vars.line_speed;
+
+ /* use 10G if no link detected */
+ if (!line_speed)
+ line_speed = 10000;
+
+ if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
+ BNX2X_DEV_INFO("To set speed BC %X or higher "
+ "is required, please upgrade BC\n",
+ REQ_BC_VER_4_SET_MF_BW);
+ return -EINVAL;
+ }
+
+ part = (speed * 100) / line_speed;
+
+ if (line_speed < speed || !part) {
+ BNX2X_DEV_INFO("Speed setting should be in a range "
+ "from 1%% to 100%% "
+ "of actual line speed\n");
+ return -EINVAL;
+ }
+
+ if (bp->state != BNX2X_STATE_OPEN)
+ /* store value for following "load" */
+ bp->pending_max = part;
+ else
+ bnx2x_update_max_mf_config(bp, part);
+
+ return 0;
+ }
+
cfg_idx = bnx2x_get_link_cfg_idx(bp);
old_multi_phy_config = bp->link_params.multi_phy_config;
switch (cmd->port) {
@@ -168,8 +339,6 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */
- u32 speed = cmd->speed;
- speed |= (cmd->speed_hi << 16);
switch (speed) {
case SPEED_10:
if (cmd->duplex == DUPLEX_FULL) {
@@ -303,7 +472,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
int regdump_len = 0;
- int i;
+ int i, j, k;
if (CHIP_IS_E1(bp)) {
for (i = 0; i < REGS_COUNT; i++)
@@ -333,6 +502,15 @@ static int bnx2x_get_regs_len(struct net_device *dev)
if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
regdump_len += wreg_addrs_e2[i].size *
(1 + wreg_addrs_e2[i].read_regs_count);
+
+ for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
+ for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
+ for (k = 0; k < PAGE_READ_REGS_E2; k++)
+ if (IS_E2_ONLINE(page_read_regs_e2[k].
+ info))
+ regdump_len +=
+ page_read_regs_e2[k].size;
+ }
}
regdump_len *= 4;
regdump_len += sizeof(struct dump_hdr);
@@ -370,6 +548,12 @@ static void bnx2x_get_regs(struct net_device *dev,
if (!netif_running(bp->dev))
return;
+ /* Disable parity attentions as long as following dump may
+ * cause false alarms by reading never written registers. We
+ * will re-enable parity attentions right after the dump.
+ */
+ bnx2x_disable_blocks_parity(bp);
+
dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
dump_hdr.dump_sign = dump_sign_all;
dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
@@ -411,6 +595,10 @@ static void bnx2x_get_regs(struct net_device *dev,
bnx2x_read_pages_regs_e2(bp, p);
}
+ /* Re-enable parity attentions */
+ bnx2x_clear_blocks_parity(bp);
+ if (CHIP_PARITY_ENABLED(bp))
+ bnx2x_enable_blocks_parity(bp);
}
#define PHY_FW_VER_LEN 20
@@ -1286,7 +1474,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
save_val = REG_RD(bp, offset);
- REG_WR(bp, offset, (wr_val & mask));
+ REG_WR(bp, offset, wr_val & mask);
val = REG_RD(bp, offset);
@@ -1429,7 +1617,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
/* prepare the loopback packet */
pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
- skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
if (!skb) {
rc = -ENOMEM;
goto test_loopback_exit;
@@ -1499,8 +1687,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
* updates that have been performed while interrupts were
* disabled.
*/
- if (bp->common.int_block == INT_BLOCK_IGU)
+ if (bp->common.int_block == INT_BLOCK_IGU) {
+ /* Disable local BHes to prevent a dead-lock situation between
+ * sch_direct_xmit() and bnx2x_run_loopback() (calling
+ * bnx2x_tx_int()), as both are taking netif_tx_lock().
+ */
+ local_bh_disable();
bnx2x_tx_int(fp_tx);
+ local_bh_enable();
+ }
rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
if (rx_idx != rx_start_idx + num_pkts)
@@ -1585,9 +1780,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
{ 0x100, 0x350 }, /* manuf_info */
{ 0x450, 0xf0 }, /* feature_info */
{ 0x640, 0x64 }, /* upgrade_key_info */
- { 0x6a4, 0x64 },
{ 0x708, 0x70 }, /* manuf_key_info */
- { 0x778, 0x70 },
{ 0, 0 }
};
__be32 buf[0x350 / 4];
@@ -1650,7 +1843,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
config->hdr.client_id = bp->fp->cl_id;
config->hdr.reserved1 = 0;
- bp->set_mac_pending++;
+ bp->set_mac_pending = 1;
smp_wmb();
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mac_config)),
@@ -1737,145 +1930,17 @@ static void bnx2x_self_test(struct net_device *dev,
buf[4] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
- if (bp->port.pmf)
- if (bnx2x_link_test(bp, is_serdes) != 0) {
- buf[5] = 1;
- etest->flags |= ETH_TEST_FL_FAILED;
- }
+
+ if (bnx2x_link_test(bp, is_serdes) != 0) {
+ buf[5] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
#ifdef BNX2X_EXTRA_DEBUG
bnx2x_panic_dump(bp);
#endif
}
-static const struct {
- long offset;
- int size;
- u8 string[ETH_GSTRING_LEN];
-} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
-/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
- { Q_STATS_OFFSET32(error_bytes_received_hi),
- 8, "[%d]: rx_error_bytes" },
- { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
- 8, "[%d]: rx_ucast_packets" },
- { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
- 8, "[%d]: rx_mcast_packets" },
- { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
- 8, "[%d]: rx_bcast_packets" },
- { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
- { Q_STATS_OFFSET32(rx_err_discard_pkt),
- 4, "[%d]: rx_phy_ip_err_discards"},
- { Q_STATS_OFFSET32(rx_skb_alloc_failed),
- 4, "[%d]: rx_skb_alloc_discard" },
- { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
-
-/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
- { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, "[%d]: tx_ucast_packets" },
- { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
- 8, "[%d]: tx_mcast_packets" },
- { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
- 8, "[%d]: tx_bcast_packets" }
-};
-
-static const struct {
- long offset;
- int size;
- u32 flags;
-#define STATS_FLAGS_PORT 1
-#define STATS_FLAGS_FUNC 2
-#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
- u8 string[ETH_GSTRING_LEN];
-} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
-/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
- 8, STATS_FLAGS_BOTH, "rx_bytes" },
- { STATS_OFFSET32(error_bytes_received_hi),
- 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
- { STATS_OFFSET32(total_unicast_packets_received_hi),
- 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
- { STATS_OFFSET32(total_multicast_packets_received_hi),
- 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
- { STATS_OFFSET32(total_broadcast_packets_received_hi),
- 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
- { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
- 8, STATS_FLAGS_PORT, "rx_crc_errors" },
- { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
- 8, STATS_FLAGS_PORT, "rx_align_errors" },
- { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
- 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
- { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
- 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
-/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
- 8, STATS_FLAGS_PORT, "rx_fragments" },
- { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
- 8, STATS_FLAGS_PORT, "rx_jabbers" },
- { STATS_OFFSET32(no_buff_discard_hi),
- 8, STATS_FLAGS_BOTH, "rx_discards" },
- { STATS_OFFSET32(mac_filter_discard),
- 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
- { STATS_OFFSET32(xxoverflow_discard),
- 4, STATS_FLAGS_PORT, "rx_fw_discards" },
- { STATS_OFFSET32(brb_drop_hi),
- 8, STATS_FLAGS_PORT, "rx_brb_discard" },
- { STATS_OFFSET32(brb_truncate_hi),
- 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
- { STATS_OFFSET32(pause_frames_received_hi),
- 8, STATS_FLAGS_PORT, "rx_pause_frames" },
- { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
- 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
- { STATS_OFFSET32(nig_timer_max),
- 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
-/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
- 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
- { STATS_OFFSET32(rx_skb_alloc_failed),
- 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
- { STATS_OFFSET32(hw_csum_err),
- 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
-
- { STATS_OFFSET32(total_bytes_transmitted_hi),
- 8, STATS_FLAGS_BOTH, "tx_bytes" },
- { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
- 8, STATS_FLAGS_PORT, "tx_error_bytes" },
- { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
- { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
- 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
- { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
- 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
- { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
- 8, STATS_FLAGS_PORT, "tx_mac_errors" },
- { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
- 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
-/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
- 8, STATS_FLAGS_PORT, "tx_single_collisions" },
- { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
- 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
- { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
- 8, STATS_FLAGS_PORT, "tx_deferred" },
- { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
- 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
- { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
- 8, STATS_FLAGS_PORT, "tx_late_collisions" },
- { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
- 8, STATS_FLAGS_PORT, "tx_total_collisions" },
- { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
- 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
- { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
- 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
- { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
- 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
- { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
- 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
-/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
- 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
- { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
- 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
- { STATS_OFFSET32(etherstatspktsover1522octets_hi),
- 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
- { STATS_OFFSET32(pause_frames_sent_hi),
- 8, STATS_FLAGS_PORT, "tx_pause_frames" }
-};
-
#define IS_PORT_STAT(i) \
((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
@@ -1890,7 +1955,8 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
switch (stringset) {
case ETH_SS_STATS:
if (is_multi(bp)) {
- num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
+ num_stats = BNX2X_NUM_STAT_QUEUES(bp) *
+ BNX2X_NUM_Q_STATS;
if (!IS_MF_MODE_STAT(bp))
num_stats += BNX2X_NUM_STATS;
} else {
@@ -1916,15 +1982,25 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
int i, j, k;
+ char queue_name[MAX_QUEUE_NAME_LEN+1];
switch (stringset) {
case ETH_SS_STATS:
if (is_multi(bp)) {
k = 0;
- for_each_queue(bp, i) {
+ for_each_napi_queue(bp, i) {
+ memset(queue_name, 0, sizeof(queue_name));
+
+ if (IS_FCOE_IDX(i))
+ sprintf(queue_name, "fcoe");
+ else
+ sprintf(queue_name, "%d", i);
+
for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
- sprintf(buf + (k + j)*ETH_GSTRING_LEN,
- bnx2x_q_stats_arr[j].string, i);
+ snprintf(buf + (k + j)*ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ bnx2x_q_stats_arr[j].string,
+ queue_name);
k += BNX2X_NUM_Q_STATS;
}
if (IS_MF_MODE_STAT(bp))
@@ -1958,7 +2034,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
if (is_multi(bp)) {
k = 0;
- for_each_queue(bp, i) {
+ for_each_napi_queue(bp, i) {
hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
if (bnx2x_q_stats_arr[j].size == 0) {
@@ -2055,6 +2131,59 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
return 0;
}
+static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ void *rules __always_unused)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = BNX2X_NUM_ETH_QUEUES(bp);
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev,
+ struct ethtool_rxfh_indir *indir)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ size_t copy_size =
+ min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
+
+ if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+ return -EOPNOTSUPP;
+
+ indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
+ memcpy(indir->ring_index, bp->rx_indir_table,
+ copy_size * sizeof(bp->rx_indir_table[0]));
+ return 0;
+}
+
+static int bnx2x_set_rxfh_indir(struct net_device *dev,
+ const struct ethtool_rxfh_indir *indir)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ size_t i;
+
+ if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+ return -EOPNOTSUPP;
+
+ /* Validate size and indices */
+ if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
+ return -EINVAL;
+ for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+ if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
+ return -EINVAL;
+
+ memcpy(bp->rx_indir_table, indir->ring_index,
+ indir->size * sizeof(bp->rx_indir_table[0]));
+ bnx2x_push_indir_table(bp);
+ return 0;
+}
+
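As a usage note, the table programmed here is what the RSS hash indexes into when steering packets to Rx rings; a minimal sketch of that mapping (the hash value is hypothetical and the exact hardware hashing is not shown):

	u8 ind_table[TSTORM_INDIRECTION_TABLE_SIZE];	/* as in bp->rx_indir_table */
	u32 rss_hash = 0x1234abcd;			/* hypothetical hash */
	u32 queue = ind_table[rss_hash % TSTORM_INDIRECTION_TABLE_SIZE];
	/* queue is then one of the BNX2X_NUM_ETH_QUEUES(bp) Rx rings */

From userspace these hooks are typically exercised with recent ethtool via its RX flow indirection options (ethtool -x/-X).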
static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_settings = bnx2x_get_settings,
.set_settings = bnx2x_set_settings,
@@ -2091,6 +2220,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_strings = bnx2x_get_strings,
.phys_id = bnx2x_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
+ .get_rxnfc = bnx2x_get_rxnfc,
+ .get_rxfh_indir = bnx2x_get_rxfh_indir,
+ .set_rxfh_indir = bnx2x_set_rxfh_indir,
};
void bnx2x_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 4cfd4e9b5586..be503cc0a50b 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -11,20 +11,27 @@
#include "bnx2x_fw_defs.h"
+#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
+
struct license_key {
u32 reserved[6];
-#if defined(__BIG_ENDIAN)
- u16 max_iscsi_init_conn;
- u16 max_iscsi_trgt_conn;
-#elif defined(__LITTLE_ENDIAN)
- u16 max_iscsi_trgt_conn;
- u16 max_iscsi_init_conn;
-#endif
+ u32 max_iscsi_conn;
+#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16
- u32 reserved_a[6];
-};
+ u32 reserved_a;
+
+ u32 max_fcoe_conn;
+#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16
+ u32 reserved_b[4];
+};
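A minimal sketch of unpacking the new packed connection limits (the value below is hypothetical):

	u32 max_iscsi_conn = 0x00400080;	/* example shmem value */
	u16 max_trgt = (max_iscsi_conn & BNX2X_MAX_ISCSI_TRGT_CONN_MASK) >>
		       BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT;	/* 128 target connections */
	u16 max_init = (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
		       BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;	/* 64 initiator connections */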
#define PORT_0 0
#define PORT_1 1
@@ -237,8 +244,26 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
- u32 Reserved0[16]; /* 0x158 */
-
+ u32 Reserved0[3]; /* 0x158 */
+ /* Controls the TX laser of the SFP+ module */
+ u32 sfp_ctrl; /* 0x164 */
+#define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
+#define PORT_HW_CFG_TX_LASER_SHIFT 0
+#define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
+#define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
+#define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
+#define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
+#define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
+
+ /* Controls the fault module LED of the SFP+ */
+#define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
+#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
+#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
+ u32 Reserved01[12]; /* 0x158 */
/* for external PHY, or forced mode or during AN */
u16 xgxs_config_rx[4]; /* 0x198 */
@@ -246,12 +271,78 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u32 Reserved1[56]; /* 0x1A8 */
u32 default_cfg; /* 0x288 */
+#define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
+#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
+#define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
+#define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
+#define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
+
+#define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
+#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
+#define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
+#define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
+#define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
+
+#define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
+#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
+#define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
+#define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
+#define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
+
+#define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
+#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
+#define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
+#define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
+#define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
+
+ /*
+ * When the KR link must be forced to a mode that is not
+ * KR-compliant, this parameter determines the trigger for it.
+ * When GPIO is selected, a low input forces the speed. Currently
+ * the default speed is 1G. In the future this may be widened to
+ * select the forced speed with another parameter. Note that when
+ * force-1G is enabled, it overrides option 56: Link Speed.
+ */
+#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
+#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
+#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
+#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
+ /* Selects which GPIO is used to reset the external PHY */
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
/* Enable BAM on KR */
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+ /* Enable Common Mode Sense */
+#define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
+#define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
+#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
+#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
+
u32 speed_capability_mask2; /* 0x28C */
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
@@ -352,6 +443,10 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
/* forced only */
#define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
+ /* Indicate whether to swap the external phy polarity */
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000
u32 external_phy_config;
#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000
@@ -377,6 +472,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
+#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
@@ -434,7 +530,12 @@ struct shared_feat_cfg { /* NVRAM Offset */
#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000
#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002
-#define SHARED_FEATURE_MF_MODE_DISABLED 0x00000100
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
};
@@ -679,7 +780,7 @@ struct shm_dev_info { /* size */
#define E1VN_MAX 1
#define E1HVN_MAX 4
-
+#define E2_VF_MAX 64
/* This value (in milliseconds) determines the frequency of the driver
* issuing the PULSE message code. The firmware monitors this periodic
* pulse to determine when to switch to an OS-absent mode. */
@@ -815,6 +916,11 @@ struct drv_func_mb {
#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
+#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
+#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
+#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
+#define REQ_BC_VER_4_SET_MF_BW 0x00060202
+#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
@@ -888,6 +994,7 @@ struct drv_func_mb {
u32 drv_status;
#define DRV_STATUS_PMF 0x00000001
+#define DRV_STATUS_SET_MF_BW 0x00000004
#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
@@ -896,6 +1003,8 @@ struct drv_func_mb {
#define DRV_STATUS_DCC_RESERVED1 0x00000800
#define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
#define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
+#define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
+#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
u32 virt_mac_upper;
#define VIRT_MAC_SIGN_MASK 0xffff0000
@@ -988,12 +1097,43 @@ struct func_mf_cfg {
};
+/* This structure is not applicable and should not be accessed on 57711 */
+struct func_ext_cfg {
+ u32 func_cfg;
+#define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF
+#define MACP_FUNC_CFG_FLAGS_SHIFT 0
+#define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
+#define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
+#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
+#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
+
+ u32 iscsi_mac_addr_upper;
+ u32 iscsi_mac_addr_lower;
+
+ u32 fcoe_mac_addr_upper;
+ u32 fcoe_mac_addr_lower;
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 preserve_data;
+#define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
+#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
+#define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
+#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
+#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
+};
+
struct mf_cfg {
struct shared_mf_cfg shared_mf_config;
struct port_mf_cfg port_mf_config[PORT_MAX];
struct func_mf_cfg func_mf_config[E1H_FUNC_MAX];
+ struct func_ext_cfg func_ext_config[E1H_FUNC_MAX];
};
@@ -1049,6 +1189,251 @@ struct fw_flr_mb {
struct fw_flr_ack ack;
};
+/**** SUPPORT FOR SHMEM ARRAYS ***
+ * The SHMEM HSI is aligned on 32 bit boundaries, which makes it difficult to
+ * define arrays with storage types smaller than unsigned dwords.
+ * The macros below add generic support for SHMEM arrays with numeric elements
+ * that can span 2, 4, 8 or 16 bits. The array's underlying type is a 32 bit
+ * dword array, with individual bit-field elements accessed using shifts and
+ * masks.
+ */
+
+/* eb is the bitwidth of a single element */
+#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
+#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
+
+/* the bit-position macro allows the user to flip the order of the array's
+ * elements on a per byte or word boundary.
+ *
+ * example: an array with 8 entries, each 4 bits wide. This array fits into
+ * a single dword. The diagrams below show the array order of the nibbles.
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
+ *
+ * | | | |
+ * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | | | |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
+ *
+ * | | | |
+ * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
+ * | | | |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
+ *
+ * | | | |
+ * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
+ * | | | |
+ */
+#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
+ ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
+ (((i)%((fb)/(eb))) * (eb)))
+
+#define SHMEM_ARRAY_GET(a, i, eb, fb) \
+ ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
+ SHMEM_ARRAY_MASK(eb))
+
+#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
+do { \
+ a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+ a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+} while (0)
+
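A worked example of the accessors above, for 4-bit elements flipped per byte (the second diagram):

	/* eb = 4, fb = 8: for i = 0, ENTRY = 0 and
	 * BITPOS = ((32/8 - 1 - 0) * 8) + (0 * 4) = 24, so the element
	 * lives in bits 27:24 of a[0]; for i = 1, BITPOS = 28 (bits 31:28),
	 * matching the "1 | 0 | 3 | 2 | ..." nibble order shown above.
	 */
	u32 a[1] = { 0x12345678 };
	u32 e0 = SHMEM_ARRAY_GET(a, 0, 4, 8);	/* (a[0] >> 24) & 0xf = 0x2 */
	u32 e1 = SHMEM_ARRAY_GET(a, 1, 4, 8);	/* (a[0] >> 28) & 0xf = 0x1 */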
+
+/****START OF DCBX STRUCTURES DECLARATIONS****/
+#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8
+#define DCBX_PRI_PG_BITWIDTH 4
+#define DCBX_PRI_PG_FBITS 8
+#define DCBX_PRI_PG_GET(a, i) \
+ SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS)
+#define DCBX_PRI_PG_SET(a, i, val) \
+ SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val)
+#define DCBX_MAX_NUM_PG_BW_ENTRIES 8
+#define DCBX_BW_PG_BITWIDTH 8
+#define DCBX_PG_BW_GET(a, i) \
+ SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH)
+#define DCBX_PG_BW_SET(a, i, val) \
+ SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val)
+#define DCBX_STRICT_PRI_PG 15
+#define DCBX_MAX_APP_PROTOCOL 16
+#define FCOE_APP_IDX 0
+#define ISCSI_APP_IDX 1
+#define PREDEFINED_APP_IDX_MAX 2
+
+struct dcbx_ets_feature {
+ u32 enabled;
+ u32 pg_bw_tbl[2];
+ u32 pri_pg_tbl[1];
+};
+
+struct dcbx_pfc_feature {
+#ifdef __BIG_ENDIAN
+ u8 pri_en_bitmap;
+#define DCBX_PFC_PRI_0 0x01
+#define DCBX_PFC_PRI_1 0x02
+#define DCBX_PFC_PRI_2 0x04
+#define DCBX_PFC_PRI_3 0x08
+#define DCBX_PFC_PRI_4 0x10
+#define DCBX_PFC_PRI_5 0x20
+#define DCBX_PFC_PRI_6 0x40
+#define DCBX_PFC_PRI_7 0x80
+ u8 pfc_caps;
+ u8 reserved;
+ u8 enabled;
+#elif defined(__LITTLE_ENDIAN)
+ u8 enabled;
+ u8 reserved;
+ u8 pfc_caps;
+ u8 pri_en_bitmap;
+#define DCBX_PFC_PRI_0 0x01
+#define DCBX_PFC_PRI_1 0x02
+#define DCBX_PFC_PRI_2 0x04
+#define DCBX_PFC_PRI_3 0x08
+#define DCBX_PFC_PRI_4 0x10
+#define DCBX_PFC_PRI_5 0x20
+#define DCBX_PFC_PRI_6 0x40
+#define DCBX_PFC_PRI_7 0x80
+#endif
+};
+
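For illustration, a configuration that enables PFC only for priorities 3 and 4 could be filled in as follows (the capability value is a hypothetical example):

	struct dcbx_pfc_feature pfc = {
		.enabled	= 1,
		.pfc_caps	= 8,	/* illustrative capability value */
		.pri_en_bitmap	= DCBX_PFC_PRI_3 | DCBX_PFC_PRI_4,	/* 0x18 */
	};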
+struct dcbx_app_priority_entry {
+#ifdef __BIG_ENDIAN
+ u16 app_id;
+ u8 pri_bitmap;
+ u8 appBitfield;
+#define DCBX_APP_ENTRY_VALID 0x01
+#define DCBX_APP_ENTRY_SF_MASK 0x30
+#define DCBX_APP_ENTRY_SF_SHIFT 4
+#define DCBX_APP_SF_ETH_TYPE 0x10
+#define DCBX_APP_SF_PORT 0x20
+#elif defined(__LITTLE_ENDIAN)
+ u8 appBitfield;
+#define DCBX_APP_ENTRY_VALID 0x01
+#define DCBX_APP_ENTRY_SF_MASK 0x30
+#define DCBX_APP_ENTRY_SF_SHIFT 4
+#define DCBX_APP_SF_ETH_TYPE 0x10
+#define DCBX_APP_SF_PORT 0x20
+ u8 pri_bitmap;
+ u16 app_id;
+#endif
+};
+
+struct dcbx_app_priority_feature {
+#ifdef __BIG_ENDIAN
+ u8 reserved;
+ u8 default_pri;
+ u8 tc_supported;
+ u8 enabled;
+#elif defined(__LITTLE_ENDIAN)
+ u8 enabled;
+ u8 tc_supported;
+ u8 default_pri;
+ u8 reserved;
+#endif
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+struct dcbx_features {
+ struct dcbx_ets_feature ets;
+ struct dcbx_pfc_feature pfc;
+ struct dcbx_app_priority_feature app;
+};
+
+struct lldp_params {
+#ifdef __BIG_ENDIAN
+ u8 msg_fast_tx_interval;
+ u8 msg_tx_hold;
+ u8 msg_tx_interval;
+ u8 admin_status;
+#define LLDP_TX_ONLY 0x01
+#define LLDP_RX_ONLY 0x02
+#define LLDP_TX_RX 0x03
+#define LLDP_DISABLED 0x04
+ u8 reserved1;
+ u8 tx_fast;
+ u8 tx_crd_max;
+ u8 tx_crd;
+#elif defined(__LITTLE_ENDIAN)
+ u8 admin_status;
+#define LLDP_TX_ONLY 0x01
+#define LLDP_RX_ONLY 0x02
+#define LLDP_TX_RX 0x03
+#define LLDP_DISABLED 0x04
+ u8 msg_tx_interval;
+ u8 msg_tx_hold;
+ u8 msg_fast_tx_interval;
+ u8 tx_crd;
+ u8 tx_crd_max;
+ u8 tx_fast;
+ u8 reserved1;
+#endif
+#define REM_CHASSIS_ID_STAT_LEN 4
+#define REM_PORT_ID_STAT_LEN 4
+ u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[REM_PORT_ID_STAT_LEN];
+};
+
+struct lldp_dcbx_stat {
+#define LOCAL_CHASSIS_ID_STAT_LEN 2
+#define LOCAL_PORT_ID_STAT_LEN 2
+ u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LOCAL_PORT_ID_STAT_LEN];
+ u32 num_tx_dcbx_pkts;
+ u32 num_rx_dcbx_pkts;
+};
+
+struct lldp_admin_mib {
+ u32 ver_cfg_flags;
+#define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001
+#define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002
+#define DCBX_APP_CONFIG_TX_ENABLED 0x00000004
+#define DCBX_ETS_RECO_TX_ENABLED 0x00000008
+#define DCBX_ETS_RECO_VALID 0x00000010
+#define DCBX_ETS_WILLING 0x00000020
+#define DCBX_PFC_WILLING 0x00000040
+#define DCBX_APP_WILLING 0x00000080
+#define DCBX_VERSION_CEE 0x00000100
+#define DCBX_VERSION_IEEE 0x00000200
+#define DCBX_DCBX_ENABLED 0x00000400
+#define DCBX_CEE_VERSION_MASK 0x0000f000
+#define DCBX_CEE_VERSION_SHIFT 12
+#define DCBX_CEE_MAX_VERSION_MASK 0x000f0000
+#define DCBX_CEE_MAX_VERSION_SHIFT 16
+ struct dcbx_features features;
+};
+
+struct lldp_remote_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+#define DCBX_ETS_TLV_RX 0x00000001
+#define DCBX_PFC_TLV_RX 0x00000002
+#define DCBX_APP_TLV_RX 0x00000004
+#define DCBX_ETS_RX_ERROR 0x00000010
+#define DCBX_PFC_RX_ERROR 0x00000020
+#define DCBX_APP_RX_ERROR 0x00000040
+#define DCBX_ETS_REM_WILLING 0x00000100
+#define DCBX_PFC_REM_WILLING 0x00000200
+#define DCBX_APP_REM_WILLING 0x00000400
+#define DCBX_REMOTE_ETS_RECO_VALID 0x00001000
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+struct lldp_local_mib {
+ u32 prefix_seq_num;
+ u32 error;
+#define DCBX_LOCAL_ETS_ERROR 0x00000001
+#define DCBX_LOCAL_PFC_ERROR 0x00000002
+#define DCBX_LOCAL_APP_ERROR 0x00000004
+#define DCBX_LOCAL_PFC_MISMATCH 0x00000010
+#define DCBX_LOCAL_APP_MISMATCH 0x00000020
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+/***END OF DCBX STRUCTURES DECLARATIONS***/
struct shmem2_region {
@@ -1072,7 +1457,12 @@ struct shmem2_region {
#define SHMEM_MF_CFG_ADDR_NONE 0x00000000
struct fw_flr_mb flr_mb;
- u32 reserved[3];
+ u32 dcbx_lldp_params_offset;
+#define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
+ u32 dcbx_neg_res_offset;
+#define SHMEM_DCBX_NEG_RES_NONE 0x00000000
+ u32 dcbx_remote_mib_offset;
+#define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
/*
* The other shmemX_base_addr holds the other path's shmem address
* required for example in case of common phy init, or for path1 to know
@@ -1081,6 +1471,10 @@ struct shmem2_region {
*/
u32 other_shmem_base_addr;
u32 other_shmem2_base_addr;
+ u32 reserved1[E2_VF_MAX / 32];
+ u32 reserved2[E2_FUNC_MAX][E2_VF_MAX / 32];
+ u32 dcbx_lldp_dcbx_stat_offset;
+#define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
};
@@ -1534,8 +1928,8 @@ struct host_func_stats {
#define BCM_5710_FW_MAJOR_VERSION 6
-#define BCM_5710_FW_MINOR_VERSION 0
-#define BCM_5710_FW_REVISION_VERSION 34
+#define BCM_5710_FW_MINOR_VERSION 2
+#define BCM_5710_FW_REVISION_VERSION 5
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -2983,6 +3377,25 @@ struct fairness_vars_per_vn {
/*
+ * The data for flow control configuration
+ */
+struct flow_control_configuration {
+ struct priority_cos
+ traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES];
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 dcb_version;
+ u8 dcb_enabled;
+#elif defined(__LITTLE_ENDIAN)
+ u8 dcb_enabled;
+ u8 dcb_version;
+ u16 reserved1;
+#endif
+ u32 reserved2;
+};
+
+
+/*
* FW version stored in the Xstorm RAM
*/
struct fw_version {
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index a9d54874a559..fa6dbe3f2058 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -192,5 +192,225 @@ struct src_ent {
u64 next;
};
+/****************************************************************************
+* Parity configuration
+****************************************************************************/
+#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \
+{ \
+ block##_REG_##block##_PRTY_MASK, \
+ block##_REG_##block##_PRTY_STS_CLR, \
+ en_mask, {m1, m1h, m2}, #block \
+}
+
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \
+{ \
+ block##_REG_##block##_PRTY_MASK_0, \
+ block##_REG_##block##_PRTY_STS_CLR_0, \
+ en_mask, {m1, m1h, m2}, #block"_0" \
+}
+
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \
+{ \
+ block##_REG_##block##_PRTY_MASK_1, \
+ block##_REG_##block##_PRTY_STS_CLR_1, \
+ en_mask, {m1, m1h, m2}, #block"_1" \
+}
+
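For reference, a worked expansion of one of the table entries below (the HC block) under these macros:

	/*
	 * BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0) expands to
	 *	{ HC_REG_HC_PRTY_MASK, HC_REG_HC_PRTY_STS_CLR,
	 *	  0x7, {0x7, 0x7, 0}, "HC" }
	 * i.e. the parity mask register, the status-clear register, the
	 * enable mask, the valid-bit masks for E1/E1H/E2 and the name.
	 */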
+static const struct {
+ u32 mask_addr;
+ u32 sts_clr_addr;
+ u32 en_mask; /* Mask to enable parity attentions */
+ struct {
+ u32 e1; /* 57710 */
+ u32 e1h; /* 57711 */
+ u32 e2; /* 57712 */
+ } reg_mask; /* Register mask (all valid bits) */
+ char name[7]; /* Block's longest name is 6 characters long
+ * (name + suffix)
+ */
+} bnx2x_blocks_parity_data[] = {
+ /* bit 19 masked */
+ /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+ /* bit 5,18,20-31 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+ /* bit 5 */
+ /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
+ /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
+ /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+
+ /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
+ * want to handle "system kill" flow at the moment.
+ */
+ BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
+ BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
+ BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff),
+ BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff),
+ BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
+ {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0,
+ {0xf, 0xf, 0xf}, "UPB"},
+ {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
+ GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
+ {0xf, 0xf, 0xf}, "XPB"},
+ BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
+ BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
+ BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f),
+ BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f),
+};
+
+
+/* [28] MCP Latched rom_parity
+ * [29] MCP Latched ump_rx_parity
+ * [30] MCP Latched ump_tx_parity
+ * [31] MCP Latched scpad_parity
+ */
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
+ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+/* Below registers control the MCP parity attention output. When
+ * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
+ * enabled, when cleared - disabled.
+ */
+static const u32 mcp_attn_ctl_regs[] = {
+ MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+ MISC_REG_AEU_ENABLE4_NIG_0,
+ MISC_REG_AEU_ENABLE4_PXP_0,
+ MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+ MISC_REG_AEU_ENABLE4_NIG_1,
+ MISC_REG_AEU_ENABLE4_PXP_1
+};
+
+static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
+{
+ int i;
+ u32 reg_val;
+
+ for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
+ reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+
+ if (enable)
+ reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+ else
+ reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+
+ REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+ }
+}
+
+static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
+{
+ if (CHIP_IS_E1(bp))
+ return bnx2x_blocks_parity_data[idx].reg_mask.e1;
+ else if (CHIP_IS_E1H(bp))
+ return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
+ else
+ return bnx2x_blocks_parity_data[idx].reg_mask.e2;
+}
+
+static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (dis_mask) {
+ REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+ dis_mask);
+ DP(NETIF_MSG_HW, "Setting parity mask "
+ "for %s to\t\t0x%x\n",
+ bnx2x_blocks_parity_data[i].name, dis_mask);
+ }
+ }
+
+ /* Disable MCP parity attentions */
+ bnx2x_set_mcp_parity(bp, false);
+}
+
+/**
+ * Clear the parity error status registers.
+ */
+static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+ u32 reg_val, mcp_aeu_bits =
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
+ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
+
+ /* Clear SEM_FAST parities */
+ REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+ REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (reg_mask) {
+ reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
+ sts_clr_addr);
+ if (reg_val & reg_mask)
+ DP(NETIF_MSG_HW,
+ "Parity errors in %s: 0x%x\n",
+ bnx2x_blocks_parity_data[i].name,
+ reg_val & reg_mask);
+ }
+ }
+
+ /* Check if there were parity attentions in MCP */
+ reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
+ if (reg_val & mcp_aeu_bits)
+ DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
+ reg_val & mcp_aeu_bits);
+
+ /* Clear parity attentions in MCP:
+ * [7] clears Latched rom_parity
+ * [8] clears Latched ump_rx_parity
+ * [9] clears Latched ump_tx_parity
+ * [10] clears Latched scpad_parity (both ports)
+ */
+ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
+}
+
+static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+ u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+ if (reg_mask)
+ REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+ bnx2x_blocks_parity_data[i].en_mask & reg_mask);
+ }
+
+ /* Enable MCP parity attentions */
+ bnx2x_set_mcp_parity(bp, true);
+}
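The intended call order around a register dump, as used by bnx2x_get_regs() earlier in this patch, is roughly:

	bnx2x_disable_blocks_parity(bp);
	/* ... dump registers, including ones that were never written ... */
	bnx2x_clear_blocks_parity(bp);
	if (CHIP_PARITY_ENABLED(bp))
		bnx2x_enable_blocks_parity(bp);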
+
+
#endif /* BNX2X_INIT_H */
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index a306b0e46b61..66df29fcf751 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -838,7 +838,7 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
/****************************************************************************
* SRC initializations
****************************************************************************/
-
+#ifdef BCM_CNIC
/* called during init func stage */
static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
dma_addr_t t2_mapping, int src_cid_count)
@@ -862,5 +862,5 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
U64_HI((u64)t2_mapping +
(src_cid_count-1) * sizeof(struct src_ent)));
}
-
+#endif
#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 580919619252..f2f367d4e74d 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
-/* Copyright 2008-2009 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -28,12 +28,13 @@
/********************************************************/
#define ETH_HLEN 14
-#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
#define MDIO_ACCESS_TIMEOUT 1000
-#define BMAC_CONTROL_RX_ENABLE 2
+#define BMAC_CONTROL_RX_ENABLE 2
/***********************************************************/
/* Shortcut definitions */
@@ -79,7 +80,7 @@
#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
-#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
#define AUTONEG_PARALLEL \
SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
#define AUTONEG_SGMII_FIBER_AUTODET \
@@ -112,10 +113,10 @@
#define GP_STATUS_10G_KX4 \
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
-#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
-#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
-#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
@@ -123,18 +124,18 @@
#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
-#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
-#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
-#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
-#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
+#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
+#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
-#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
-#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
-#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
-#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
-#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
-#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
+#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
+#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
+#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
+#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
+#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
+#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
#define PHY_XGXS_FLAG 0x1
#define PHY_SGMII_FLAG 0x2
@@ -142,7 +143,7 @@
/* */
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
- #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
+ #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
@@ -153,40 +154,35 @@
#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
- #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
+ #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
-#define SFP_EEPROM_OPTIONS_ADDR 0x40
+#define SFP_EEPROM_OPTIONS_ADDR 0x40
#define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
-#define SFP_EEPROM_OPTIONS_SIZE 2
-
-#define EDC_MODE_LINEAR 0x0022
-#define EDC_MODE_LIMITING 0x0044
-#define EDC_MODE_PASSIVE_DAC 0x0055
+#define SFP_EEPROM_OPTIONS_SIZE 2
+#define EDC_MODE_LINEAR 0x0022
+#define EDC_MODE_LIMITING 0x0044
+#define EDC_MODE_PASSIVE_DAC 0x0055
+#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
+#define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000)
/**********************************************************/
/* INTERFACE */
/**********************************************************/
-#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
bnx2x_cl45_write(_bp, _phy, \
(_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
-#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
bnx2x_cl45_read(_bp, _phy, \
(_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
-static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 *ret_val);
-
-static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 val);
-
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -205,8 +201,271 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
return val;
}
+/******************************************************************/
+/* ETS section */
+/******************************************************************/
+void bnx2x_ets_disabled(struct link_params *params)
+{
+ /* ETS disabled configuration*/
+ struct bnx2x *bp = params->bp;
+
+ DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
+
+ /*
+ * mapping between entry priority to client number (0, 1, 2 - debug and
+ * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST)
+ * 3bits client num.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000
+ */
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
+ /*
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 -
+ * COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+ /* defines which entries (clients) are subjected to WFQ arbitration */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+ /*
+ * For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /*
+ * mapping between the CREDIT_WEIGHT registers and actual client
+ * numbers
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
+ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
+ /* ETS mode disable */
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+ /*
+ * If ETS mode is enabled (there is no strict priority) defines a WFQ
+ * weight for COS0/COS1.
+ */
+ REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
+ REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
+ /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
+ REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
+ REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+}
+
+static void bnx2x_ets_bw_limit_common(const struct link_params *params)
+{
+ /* ETS enabled BW limit configuration */
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+ /*
+ * defines which entries (clients) are subjected to WFQ arbitration
+ * COS0 0x8
+ * COS1 0x10
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
+ /*
+ * mapping between the ARB_CREDIT_WEIGHT registers and actual
+ * client numbers (WEIGHT_0 does not actually have to represent
+ * client 0)
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+
+ /* ETS mode enabled*/
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
+
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+ /*
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
+ * entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+
+ /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
+ REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+ REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
+ ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+}
+
+void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
+ const u32 cos1_bw)
+{
+ /* ETS enabled BW limit configuration */
+ struct bnx2x *bp = params->bp;
+ const u32 total_bw = cos0_bw + cos1_bw;
+ u32 cos0_credit_weight = 0;
+ u32 cos1_credit_weight = 0;
+
+ DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+
+ if ((0 == total_bw) ||
+ (0 == cos0_bw) ||
+ (0 == cos1_bw)) {
+ DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
+ return;
+ }
+
+ cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+ total_bw;
+ cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+ total_bw;
+
+ bnx2x_ets_bw_limit_common(params);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
+
+ REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
+ REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
+}
+
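A worked example of the credit-weight arithmetic above, with hypothetical bandwidth shares of 30 and 70:

	/* cos0_bw = 30, cos1_bw = 70, total_bw = 100:
	 * cos0_credit_weight = (30 * 0x5000) / 100 = 0x1800 (6144)
	 * cos1_credit_weight = (70 * 0x5000) / 100 = 0x3800 (14336)
	 * both weights are then written to the NIG and PBF weight registers.
	 */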
+u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
+{
+ /* ETS enabled strict configuration */
+ struct bnx2x *bp = params->bp;
+ u32 val = 0;
+
+ DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
+ /*
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries,
+ * 3 - COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
+ /*
+ * For strict priority entries defines the number of consecutive slots
+ * for the highest priority.
+ */
+ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /* ETS mode disable */
+ REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
+
+ /* Defines the number of consecutive slots for the strict priority */
+ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
+
+ /*
+ * mapping between entry priority to client number (0, 1, 2 - debug and
+ * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST)
+ * 3bits client num.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
+ * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
+ */
+ val = (0 == strict_cos) ? 0x2318 : 0x22E0;
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
+
+ return 0;
+}
+/******************************************************************/
+/* ETS section */
+/******************************************************************/
+
+static void bnx2x_bmac2_get_pfc_stat(struct link_params *params,
+ u32 pfc_frames_sent[2],
+ u32 pfc_frames_received[2])
+{
+ /* Read pfc statistic */
+ struct bnx2x *bp = params->bp;
+ u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+
+ DP(NETIF_MSG_LINK, "pfc statistic read from BMAC\n");
+
+ REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_STAT_GTPP,
+ pfc_frames_sent, 2);
+
+ REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_STAT_GRPP,
+ pfc_frames_received, 2);
+
+}
+static void bnx2x_emac_get_pfc_stat(struct link_params *params,
+ u32 pfc_frames_sent[2],
+ u32 pfc_frames_received[2])
+{
+ /* Read pfc statistic */
+ struct bnx2x *bp = params->bp;
+ u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ u32 val_xon = 0;
+ u32 val_xoff = 0;
+
+ DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
+
+ /* PFC received frames */
+ val_xoff = REG_RD(bp, emac_base +
+ EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
+ val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
+ val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
+ val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
+
+ pfc_frames_received[0] = val_xon + val_xoff;
+
+ /* PFC sent frames */
+ val_xoff = REG_RD(bp, emac_base +
+ EMAC_REG_RX_PFC_STATS_XOFF_SENT);
+ val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
+ val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
+ val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
+
+ pfc_frames_sent[0] = val_xon + val_xoff;
+}
+
+void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
+ u32 pfc_frames_sent[2],
+ u32 pfc_frames_received[2])
+{
+ /* Read pfc statistic */
+ struct bnx2x *bp = params->bp;
+ u32 val = 0;
+ DP(NETIF_MSG_LINK, "pfc statistic\n");
+
+ if (!vars->link_up)
+ return;
+
+ val = REG_RD(bp, MISC_REG_RESET_REG_2);
+ if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
+ == 0) {
+ DP(NETIF_MSG_LINK, "About to read stats from EMAC\n");
+ bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
+ pfc_frames_received);
+ } else {
+ DP(NETIF_MSG_LINK, "About to read stats from BMAC\n");
+ bnx2x_bmac2_get_pfc_stat(params, pfc_frames_sent,
+ pfc_frames_received);
+ }
+}
+/******************************************************************/
+/* MAC/PBF section */
+/******************************************************************/
static void bnx2x_emac_init(struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
/* reset and unreset the emac core */
struct bnx2x *bp = params->bp;
@@ -216,10 +475,10 @@ static void bnx2x_emac_init(struct link_params *params,
u16 timeout;
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
udelay(5);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
/* init emac - use read-modify-write */
/* self clear reset */
@@ -250,7 +509,7 @@ static void bnx2x_emac_init(struct link_params *params,
}
static u8 bnx2x_emac_enable(struct link_params *params,
- struct link_vars *vars, u8 lb)
+ struct link_vars *vars, u8 lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -262,77 +521,86 @@ static u8 bnx2x_emac_enable(struct link_params *params,
/* enable emac and not bmac */
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
- /* for paladium */
- if (CHIP_REV_IS_EMUL(bp)) {
- /* Use lane 1 (of lanes 0-3) */
- REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
- port*4, 1);
- }
- /* for fpga */
- else
-
- if (CHIP_REV_IS_FPGA(bp)) {
- /* Use lane 1 (of lanes 0-3) */
- DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
-
- REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
- 0);
- } else
/* ASIC */
if (vars->phy_flags & PHY_XGXS_FLAG) {
u32 ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
DP(NETIF_MSG_LINK, "XGXS\n");
/* select the master lanes (out of 0-3) */
- REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 +
- port*4, ser_lane);
+ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
/* select XGXS */
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
- port*4, 1);
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
} else { /* SerDes */
DP(NETIF_MSG_LINK, "SerDes\n");
/* select SerDes */
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
- port*4, 0);
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
}
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_RESET);
+ EMAC_RX_MODE_RESET);
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- EMAC_TX_MODE_RESET);
+ EMAC_TX_MODE_RESET);
if (CHIP_REV_IS_SLOW(bp)) {
/* config GMII mode */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
- EMAC_WR(bp, EMAC_REG_EMAC_MODE,
- (val | EMAC_MODE_PORT_GMII));
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
} else { /* ASIC */
/* pause enable/disable */
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
EMAC_RX_MODE_FLOW_EN);
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
- bnx2x_bits_en(bp, emac_base +
- EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_FLOW_EN);
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
- bnx2x_bits_en(bp, emac_base +
- EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) {
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ bnx2x_bits_en(bp, emac_base +
+ EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ } else
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_FLOW_EN);
}
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
+
+ /*
+ * Setting this bit causes MAC control frames (except for pause
+ * frames) to be passed on for processing. This setting has no
+ * effect on the operation of the pause frames. This bit affects
+ * all packets regardless of RX Parser packet sorting logic.
+ * Turn the PFC off to make sure we are in Xon state before
+ * enabling it.
+ */
+ EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+ DP(NETIF_MSG_LINK, "PFC is enabled\n");
+ /* Enable PFC again */
+ EMAC_WR(bp, EMAC_REG_RX_PFC_MODE,
+ EMAC_REG_RX_PFC_MODE_RX_EN |
+ EMAC_REG_RX_PFC_MODE_TX_EN |
+ EMAC_REG_RX_PFC_MODE_PRIORITIES);
+
+ EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM,
+ ((0x0101 <<
+ EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
+ (0x00ff <<
+ EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
+ val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
+ }
EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
/* Set Loopback */
@@ -362,30 +630,52 @@ static u8 bnx2x_emac_enable(struct link_params *params,
/* enable the NIG in/out to the emac */
REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
val = 0;
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val = 1;
REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
- if (CHIP_REV_IS_EMUL(bp)) {
- /* take the BigMac out of reset */
- REG_WR(bp,
- GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-
- /* enable access for bmac registers */
- REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
- } else
- REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
vars->mac_type = MAC_TYPE_EMAC;
return 0;
}
-static void bnx2x_update_bmac2(struct link_params *params,
- struct link_vars *vars,
- u8 is_lb)
+static void bnx2x_update_pfc_bmac1(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 wb_data[2];
+ struct bnx2x *bp = params->bp;
+ u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+
+ u32 val = 0x14;
+ if ((!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ /* Enable BigMAC to react on received Pause packets */
+ val |= (1<<5);
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
+
+ /* tx control */
+ val = 0xc0;
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= 0x800000;
+ wb_data[0] = val;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
+}
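The wb_data[0]/wb_data[1] pairs used for these BigMAC accesses are simply a 64-bit wide-bus register value split into low and high 32-bit words; in these hunks the value always fits in the low word, so wb_data[1] stays zero. A trivial sketch of the packing:

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit wide-register value the way wb_data[] is filled above */
static void pack_wb(uint64_t val, uint32_t wb_data[2])
{
	wb_data[0] = (uint32_t)val;         /* low 32 bits */
	wb_data[1] = (uint32_t)(val >> 32); /* high 32 bits */
}

int main(void)
{
	uint32_t wb_data[2];

	/* RX control base value 0x14 plus the react-on-pause bit (1<<5) */
	pack_wb(0x14 | (1u << 5), wb_data);
	printf("wb_data[0]=0x%x wb_data[1]=0x%x\n",
	       (unsigned)wb_data[0], (unsigned)wb_data[1]);
	return 0;
}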
+
+static void bnx2x_update_pfc_bmac2(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
{
/*
* Set rx control: Strip CRC and enable BigMAC to relay
@@ -397,29 +687,63 @@ static void bnx2x_update_bmac2(struct link_params *params,
NIG_REG_INGRESS_BMAC0_MEM;
u32 val = 0x14;
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ if ((!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
/* Enable BigMAC to react on received Pause packets */
val |= (1<<5);
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
udelay(30);
/* Tx control */
val = 0xc0;
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) &&
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val |= 0x800000;
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
+
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+ DP(NETIF_MSG_LINK, "PFC is enabled\n");
+ /* Enable PFC RX & TX & STATS and set 8 COS */
+ wb_data[0] = 0x0;
+ wb_data[0] |= (1<<0); /* RX */
+ wb_data[0] |= (1<<1); /* TX */
+ wb_data[0] |= (1<<2); /* Force initial Xon */
+ wb_data[0] |= (1<<3); /* 8 cos */
+ wb_data[0] |= (1<<5); /* STATS */
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
+ wb_data, 2);
+ /* Clear the force Xon */
+ wb_data[0] &= ~(1<<2);
+ } else {
+ DP(NETIF_MSG_LINK, "PFC is disabled\n");
+ /* disable PFC RX & TX & STATS and set 8 COS */
+ wb_data[0] = 0x8;
+ wb_data[1] = 0;
+ }
+
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
+ /*
+ * Set the time (base unit is 512 bit times) between automatic
+ * re-sending of PP packets and enable automatic re-send of
+ * Per-Priority Packets as long as pp_gen is asserted and
+ * pp_disable is low.
+ */
val = 0x8000;
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ val |= (1<<16); /* enable automatic re-send */
+
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
- wb_data, 2);
+ wb_data, 2);
/* mac control */
val = 0x3; /* Enable RX and TX */
@@ -427,17 +751,260 @@ static void bnx2x_update_bmac2(struct link_params *params,
val |= 0x4; /* Local loopback */
DP(NETIF_MSG_LINK, "enable bmac loopback\n");
}
+ /* When PFC enabled, Pass pause frames towards the NIG. */
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ val |= ((1<<6)|(1<<5));
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
}
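The PFC_CONTROL programming above is a two-step write: once with the force-Xon bit set, then again with it cleared. A standalone sketch of the word layout, using only the bit positions documented in the comments of this hunk:

#include <stdint.h>
#include <stdio.h>

#define PFC_CTRL_RX_EN      (1u << 0)  /* PFC RX enable */
#define PFC_CTRL_TX_EN      (1u << 1)  /* PFC TX enable */
#define PFC_CTRL_FORCE_XON  (1u << 2)  /* force initial Xon */
#define PFC_CTRL_8_COS      (1u << 3)  /* eight classes of service */
#define PFC_CTRL_STATS_EN   (1u << 5)  /* PFC statistics */

int main(void)
{
	uint32_t first = PFC_CTRL_RX_EN | PFC_CTRL_TX_EN |
			 PFC_CTRL_FORCE_XON | PFC_CTRL_8_COS |
			 PFC_CTRL_STATS_EN;
	uint32_t second = first & ~PFC_CTRL_FORCE_XON;

	/* first write forces Xon, the second drops only the force bit */
	printf("PFC_CONTROL: first=0x%02x second=0x%02x\n",
	       (unsigned)first, (unsigned)second);
	return 0;
}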
+static void bnx2x_update_pfc_brb(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params)
+{
+ struct bnx2x *bp = params->bp;
+ int set_pfc = params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED;
+
+ /* default - pause configuration */
+ u32 pause_xoff_th = PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
+ u32 pause_xon_th = PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
+ u32 full_xoff_th = PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
+ u32 full_xon_th = PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
+
+ if (set_pfc && pfc_params)
+ /* First COS */
+ if (!pfc_params->cos0_pauseable) {
+ pause_xoff_th =
+ PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
+ pause_xon_th =
+ PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
+ full_xoff_th =
+ PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
+ full_xon_th =
+ PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
+ }
+ /*
+ * The number of free blocks below which the pause signal to class 0
+ * of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
+ /*
+ * The number of free blocks above which the pause signal to class 0
+ * of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
+ /*
+ * The number of free blocks below which the full signal to class 0
+ * of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
+ /*
+ * The number of free blocks above which the full signal to class 0
+ * of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
+
+ if (set_pfc && pfc_params) {
+ /* Second COS */
+ if (pfc_params->cos1_pauseable) {
+ pause_xoff_th =
+ PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
+ pause_xon_th =
+ PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
+ full_xoff_th =
+ PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
+ full_xon_th =
+ PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
+ } else {
+ pause_xoff_th =
+ PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
+ pause_xon_th =
+ PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
+ full_xoff_th =
+ PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
+ full_xon_th =
+ PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
+ }
+ /*
+ * The number of free blocks below which the pause signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
+ /*
+ * The number of free blocks above which the pause signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
+ /*
+ * The number of free blocks below which the full signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
+ /*
+ * The number of free blocks above which the full signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
+ }
+}
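The BRB programming above reduces to picking one of two threshold sets per class of service: pauseable classes use the PAUSEABLE constants, non-pauseable classes the NON_PAUSEABLE ones. A hedged standalone model of that selection; the numeric values are placeholders, not the driver's actual thresholds:

#include <stdio.h>

/* Placeholder threshold values, for illustration only */
struct brb_thresholds {
	unsigned pause_xoff, pause_xon, full_xoff, full_xon;
};

static const struct brb_thresholds pauseable     = { 170, 250, 10, 50 };
static const struct brb_thresholds non_pauseable = {   0,   0, 90, 250 };

/* Same per-COS selection the function above performs */
static struct brb_thresholds pick_thresholds(int cos_is_pauseable)
{
	return cos_is_pauseable ? pauseable : non_pauseable;
}

int main(void)
{
	struct brb_thresholds cos0 = pick_thresholds(1);
	struct brb_thresholds cos1 = pick_thresholds(0);

	printf("cos0 pause xoff/xon = %u/%u\n", cos0.pause_xoff, cos0.pause_xon);
	printf("cos1 full  xoff/xon = %u/%u\n", cos1.full_xoff, cos1.full_xon);
	return 0;
}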
+
+static void bnx2x_update_pfc_nig(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *nig_params)
+{
+ u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
+ u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
+ u32 pkt_priority_to_cos = 0;
+ u32 val;
+ struct bnx2x *bp = params->bp;
+ int port = params->port;
+ int set_pfc = params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED;
+ DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
+
+ /*
+ * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
+ * MAC control frames (that are not pause packets)
+ * will be forwarded to the XCM.
+ */
+ xcm_mask = REG_RD(bp,
+ port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK);
+ /*
+ * nig params will override non-PFC params, since it's possible to
+ * transition from PFC to SAFC
+ */
+ if (set_pfc) {
+ pause_enable = 0;
+ llfc_out_en = 0;
+ llfc_enable = 0;
+ ppp_enable = 1;
+ xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ xcm0_out_en = 0;
+ p0_hwpfc_enable = 1;
+ } else {
+ if (nig_params) {
+ llfc_out_en = nig_params->llfc_out_en;
+ llfc_enable = nig_params->llfc_enable;
+ pause_enable = nig_params->pause_enable;
+ } else /* default non-PFC mode - PAUSE */
+ pause_enable = 1;
+
+ xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+ NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+ xcm0_out_en = 1;
+ }
+
+ REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
+ NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
+ REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
+ NIG_REG_LLFC_ENABLE_0, llfc_enable);
+ REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
+ NIG_REG_PAUSE_ENABLE_0, pause_enable);
+
+ REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
+ NIG_REG_PPP_ENABLE_0, ppp_enable);
+
+ REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK, xcm_mask);
+
+ REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+
+ /* output enable for RX_XCM # IF */
+ REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
+
+ /* HW PFC TX enable */
+ REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
+
+ /* 0x2 = BMAC, 0x1= EMAC */
+ switch (vars->mac_type) {
+ case MAC_TYPE_EMAC:
+ val = 1;
+ break;
+ case MAC_TYPE_BMAC:
+ val = 0;
+ break;
+ default:
+ val = 0;
+ break;
+ }
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, val);
+
+ if (nig_params) {
+ pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
+
+ REG_WR(bp, port ? NIG_REG_P1_RX_COS0_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS0_PRIORITY_MASK,
+ nig_params->rx_cos0_priority_mask);
+
+ REG_WR(bp, port ? NIG_REG_P1_RX_COS1_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS1_PRIORITY_MASK,
+ nig_params->rx_cos1_priority_mask);
+
+ REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
+ NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
+ nig_params->llfc_high_priority_classes);
+
+ REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
+ NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
+ nig_params->llfc_low_priority_classes);
+ }
+ REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
+ NIG_REG_P0_PKT_PRIORITY_TO_COS,
+ pkt_priority_to_cos);
+}
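The NIG side boils down to choosing one of three flow-control modes: PFC (per-priority pause), the SAFC/LLFC settings supplied in nig_params, or plain 802.3x pause as the default. A standalone model of that decision, mirroring the priority order visible in this hunk:

#include <stdio.h>

struct nig_flow_cfg {
	int ppp_enable;     /* per-priority pause (PFC) */
	int pause_enable;   /* plain 802.3x pause */
	int llfc_enable;    /* link-level flow control (SAFC) */
	int llfc_out_en;
};

/* PFC wins; otherwise nig_params (SAFC/LLFC) wins; otherwise plain pause */
static struct nig_flow_cfg resolve_nig_flow(int set_pfc,
					    const struct nig_flow_cfg *nig_params)
{
	struct nig_flow_cfg cfg = { 0, 0, 0, 0 };

	if (set_pfc)
		cfg.ppp_enable = 1;
	else if (nig_params)
		cfg = *nig_params;
	else
		cfg.pause_enable = 1;
	return cfg;
}

int main(void)
{
	struct nig_flow_cfg safc = { 0, 0, 1, 1 };
	struct nig_flow_cfg a = resolve_nig_flow(1, &safc); /* PFC overrides */
	struct nig_flow_cfg b = resolve_nig_flow(0, NULL);  /* default pause */

	printf("PFC case:     ppp=%d pause=%d llfc=%d\n",
	       a.ppp_enable, a.pause_enable, a.llfc_enable);
	printf("default case: ppp=%d pause=%d llfc=%d\n",
	       b.ppp_enable, b.pause_enable, b.llfc_enable);
	return 0;
}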
+
+
+void bnx2x_update_pfc(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params)
+{
+ /*
+ * PFC and pause are orthogonal to one another, meaning when PFC is
+ * enabled, pause is disabled, and when PFC is disabled, pause is
+ * set according to the pause result.
+ */
+ u32 val;
+ struct bnx2x *bp = params->bp;
+
+ /* update NIG params */
+ bnx2x_update_pfc_nig(params, vars, pfc_params);
+
+ /* update BRB params */
+ bnx2x_update_pfc_brb(params, vars, pfc_params);
+
+ if (!vars->link_up)
+ return;
+
+ val = REG_RD(bp, MISC_REG_RESET_REG_2);
+ if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
+ == 0) {
+ DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
+ bnx2x_emac_enable(params, vars, 0);
+ return;
+ }
+
+ DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
+ if (CHIP_IS_E2(bp))
+ bnx2x_update_pfc_bmac2(params, vars, 0);
+ else
+ bnx2x_update_pfc_bmac1(params, vars);
+
+ val = 0;
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val = 1;
+ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
+}
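For context, a sketch of how a caller (the DCB code, presumably) might fill the per-port parameters consumed by bnx2x_update_pfc(). The field names are the ones referenced in these hunks; the struct layout and the example priority split are assumptions, not the definition from bnx2x_link.h:

#include <stdint.h>
#include <stdio.h>

/* Field names as used above; the layout here is assumed for the sketch */
struct bnx2x_nig_brb_pfc_port_params {
	uint32_t pause_enable;
	uint32_t llfc_out_en;
	uint32_t llfc_enable;
	uint32_t pkt_priority_to_cos;
	uint32_t rx_cos0_priority_mask;
	uint32_t rx_cos1_priority_mask;
	uint32_t llfc_high_priority_classes;
	uint32_t llfc_low_priority_classes;
	uint32_t cos0_pauseable;
	uint32_t cos1_pauseable;
};

int main(void)
{
	/* Example split: priorities 0-2 on COS0 (pauseable), 3-7 on COS1 */
	struct bnx2x_nig_brb_pfc_port_params p = {
		.rx_cos0_priority_mask = 0x07,
		.rx_cos1_priority_mask = 0xf8,
		.cos0_pauseable = 1,
		.cos1_pauseable = 0,
	};

	printf("cos0 mask=0x%02x cos1 mask=0x%02x\n",
	       (unsigned)p.rx_cos0_priority_mask,
	       (unsigned)p.rx_cos1_priority_mask);
	return 0;
}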
static u8 bnx2x_bmac1_enable(struct link_params *params,
struct link_vars *vars,
- u8 is_lb)
+ u8 is_lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -451,9 +1018,8 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
/* XGXS control */
wb_data[0] = 0x3c;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr +
- BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
/* tx MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
@@ -462,17 +1028,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
params->mac_addr[5]);
wb_data[1] = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
- wb_data, 2);
-
- /* tx control */
- val = 0xc0;
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
- val |= 0x800000;
- wb_data[0] = val;
- wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
/* mac control */
val = 0x3;
@@ -482,50 +1038,30 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
}
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
/* set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
- /* rx control set to don't strip crc */
- val = 0x14;
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
- val |= 0x20;
- wb_data[0] = val;
- wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL,
- wb_data, 2);
+ bnx2x_update_pfc_bmac1(params, vars);
/* set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
/* set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
/* configure safc */
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
wb_data, 2);
- /* fix for emulation */
- if (CHIP_REV_IS_EMUL(bp)) {
- wb_data[0] = 0xf000;
- wb_data[1] = 0;
- REG_WR_DMAE(bp,
- bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
- wb_data, 2);
- }
-
return 0;
}
@@ -544,16 +1080,14 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
wb_data[0] = 0;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
udelay(30);
/* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
wb_data[0] = 0x3c;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
udelay(30);
@@ -565,7 +1099,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
wb_data[1] = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
- wb_data, 2);
+ wb_data, 2);
udelay(30);
@@ -573,29 +1107,26 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
- wb_data, 2);
+ wb_data, 2);
udelay(30);
/* set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
udelay(30);
/* set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
udelay(30);
/* set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
udelay(30);
- bnx2x_update_bmac2(params, vars, is_lb);
+ bnx2x_update_pfc_bmac2(params, vars, is_lb);
return 0;
}
@@ -609,11 +1140,11 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
u32 val;
/* reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
msleep(1);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
/* enable access for bmac registers */
REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
@@ -627,7 +1158,9 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
val = 0;
- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val = 1;
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
@@ -646,15 +1179,14 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
struct bnx2x *bp = params->bp;
REG_WR(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[params->port].link_status),
- link_status);
+ offsetof(struct shmem_region,
+ port_mb[params->port].link_status), link_status);
}
static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
{
u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
- NIG_REG_INGRESS_BMAC0_MEM;
+ NIG_REG_INGRESS_BMAC0_MEM;
u32 wb_data[2];
u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
@@ -666,12 +1198,12 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
if (CHIP_IS_E2(bp)) {
/* Clear Rx Enable bit in BMAC_CONTROL register */
REG_RD_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
REG_WR_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
} else {
/* Clear Rx Enable bit in BMAC_CONTROL register */
REG_RD_DMAE(bp, bmac_addr +
@@ -687,7 +1219,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
}
static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
- u32 line_speed)
+ u32 line_speed)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -724,7 +1256,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
/* update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
/* update init credit */
- init_crd = 778; /* (800-18-4) */
+ init_crd = 778; /* (800-18-4) */
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
@@ -769,6 +1301,23 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
return 0;
}
+/*
+ * get_emac_base
+ *
+ * @param bp
+ * @param mdc_mdio_access
+ * @param port
+ *
+ * @return u32
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depending on the mdc_mdio_access, port and port-swap
+ * configuration. Each phy has a default access mode, which could
+ * also be overridden by nvram configuration. This parameter, whether
+ * this is the default phy configuration or the nvram override
+ * configuration, is passed here as mdc_mdio_access and selects the
+ * emac_base for the CL45 read/write operations.
+ */
static u32 bnx2x_get_emac_base(struct bnx2x *bp,
u32 mdc_mdio_access, u8 port)
{
@@ -801,13 +1350,16 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp,
}
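A very rough sketch of the selection the comment above describes; this is a guess at the shape of the mapping, not the driver's actual table (the real cases, constants and base addresses are in the elided function body and headers):

#include <stdint.h>
#include <stdio.h>

/* Illustrative base addresses; the real GRCBASE_EMAC* values differ */
#define EMAC0_BASE 0x3000u
#define EMAC1_BASE 0x3400u

enum mdio_access { ACCESS_EMAC0, ACCESS_EMAC1, ACCESS_BOTH, ACCESS_SWAPPED };

/* "BOTH": each port uses its own EMAC; "SWAPPED": each uses the other's */
static uint32_t pick_emac_base(enum mdio_access access, unsigned port)
{
	switch (access) {
	case ACCESS_EMAC0:   return EMAC0_BASE;
	case ACCESS_EMAC1:   return EMAC1_BASE;
	case ACCESS_SWAPPED: return port ? EMAC0_BASE : EMAC1_BASE;
	case ACCESS_BOTH:
	default:             return port ? EMAC1_BASE : EMAC0_BASE;
	}
}

int main(void)
{
	printf("port 1, swapped -> 0x%x\n",
	       (unsigned)pick_emac_base(ACCESS_SWAPPED, 1));
	return 0;
}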
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 val)
+/******************************************************************/
+/* CL45 access functions */
+/******************************************************************/
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val)
{
u32 tmp, saved_mode;
u8 i, rc = 0;
-
- /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ /*
+ * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
* (a value of 49==0x31) and make sure that the AUTO poll is off
*/
@@ -830,8 +1382,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
for (i = 0; i < 50; i++) {
udelay(10);
- tmp = REG_RD(bp, phy->mdio_ctrl +
- EMAC_REG_EMAC_MDIO_COMM);
+ tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
@@ -839,6 +1390,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
} else {
/* data */
@@ -851,7 +1403,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
udelay(10);
tmp = REG_RD(bp, phy->mdio_ctrl +
- EMAC_REG_EMAC_MDIO_COMM);
+ EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
@@ -859,6 +1411,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
}
}
@@ -869,20 +1422,20 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
return rc;
}
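Both the address and data phases above follow the same busy-poll pattern: start the transaction, then poll the MDIO_COMM register up to 50 times with a 10us delay until START_BUSY clears, and fail the access on timeout. A standalone model of just that loop; the register read is stubbed out and the START_BUSY bit position is an assumption:

#include <stdint.h>
#include <stdio.h>

#define MDIO_COMM_START_BUSY (1u << 29) /* assumed bit position */

/* Stub standing in for REG_RD() on EMAC_REG_EMAC_MDIO_COMM */
static uint32_t read_mdio_comm(int *busy_reads_left)
{
	return (*busy_reads_left)-- > 0 ? MDIO_COMM_START_BUSY : 0;
}

static int wait_mdio_done(int *busy_reads_left)
{
	uint32_t tmp;
	int i;

	for (i = 0; i < 50; i++) {
		/* the driver does udelay(10) here */
		tmp = read_mdio_comm(busy_reads_left);
		if (!(tmp & MDIO_COMM_START_BUSY))
			return 0;
	}
	/* the driver returns -EFAULT and logs "MDC/MDIO access timeout" */
	return -1;
}

int main(void)
{
	int fast = 3, stuck = 1000;

	printf("fast transaction:  %d\n", wait_mdio_done(&fast));
	printf("stuck transaction: %d\n", wait_mdio_done(&stuck));
	return 0;
}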
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 *ret_val)
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val)
{
u32 val, saved_mode;
u16 i;
u8 rc = 0;
-
- /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ /*
+ * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
* (a value of 49==0x31) and make sure that the AUTO poll is off
*/
saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
- EMAC_MDIO_MODE_CLOCK_CNT));
+ EMAC_MDIO_MODE_CLOCK_CNT));
val |= (EMAC_MDIO_MODE_CLAUSE_45 |
(49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
@@ -906,7 +1459,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
*ret_val = 0;
rc = -EFAULT;
@@ -921,7 +1474,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
udelay(10);
val = REG_RD(bp, phy->mdio_ctrl +
- EMAC_REG_EMAC_MDIO_COMM);
+ EMAC_REG_EMAC_MDIO_COMM);
if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
break;
@@ -929,7 +1482,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
*ret_val = 0;
rc = -EFAULT;
}
@@ -945,7 +1498,7 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 *ret_val)
{
u8 phy_index;
- /**
+ /*
* Probe for the phy according to the given phy_addr, and execute
* the read request on it
*/
@@ -963,7 +1516,7 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 val)
{
u8 phy_index;
- /**
+ /*
* Probe for the phy according to the given phy_addr, and execute
* the write request on it
*/
@@ -989,19 +1542,18 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
offset = phy->addr + ser_lane;
if (CHIP_IS_E2(bp))
- aer_val = 0x2800 + offset - 1;
+ aer_val = 0x3800 + offset - 1;
else
aer_val = 0x3800 + offset;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, aer_val);
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, aer_val);
}
static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, 0x3800);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0x3800);
}
/******************************************************************/
@@ -1037,9 +1589,8 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
bnx2x_set_serdes_access(bp, port);
- REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
- port*0x10,
- DEFAULT_PHY_DEV_ADDR);
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
+ DEFAULT_PHY_DEV_ADDR);
}
static void bnx2x_xgxs_deassert(struct link_params *params)
@@ -1057,23 +1608,22 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
- port*0x18, 0);
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
- params->phy[INT_PHY].def_md_devad);
+ params->phy[INT_PHY].def_md_devad);
}
void bnx2x_link_status_update(struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g;
u8 port = params->port;
vars->link_status = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[port].link_status));
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
@@ -1083,7 +1633,7 @@ void bnx2x_link_status_update(struct link_params *params,
vars->phy_link_up = 1;
vars->duplex = DUPLEX_FULL;
switch (vars->link_status &
- LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+ LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
case LINK_10THD:
vars->duplex = DUPLEX_HALF;
/* fall thru */
@@ -1195,20 +1745,20 @@ static void bnx2x_set_master_ln(struct link_params *params,
{
struct bnx2x *bp = params->bp;
u16 new_master_ln, ser_lane;
- ser_lane = ((params->lane_config &
+ ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
/* set the master_ln for AN */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
- &new_master_ln);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ &new_master_ln);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2 ,
- MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
- (new_master_ln | ser_lane));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2 ,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ (new_master_ln | ser_lane));
}
static u8 bnx2x_reset_unicore(struct link_params *params,
@@ -1218,17 +1768,16 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
struct bnx2x *bp = params->bp;
u16 mii_control;
u16 i;
-
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
/* reset the unicore */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- (mii_control |
- MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESET));
if (set_serdes)
bnx2x_set_serdes_access(bp, params->port);
@@ -1237,10 +1786,10 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
udelay(5);
/* the reset erased the previous bank value */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
udelay(5);
@@ -1248,6 +1797,9 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
}
}
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ params->port);
DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
return -EINVAL;
@@ -1257,43 +1809,45 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
struct bnx2x_phy *phy)
{
struct bnx2x *bp = params->bp;
- /* Each two bits represents a lane number:
- No swap is 0123 => 0x1b no need to enable the swap */
+ /*
+ * Each two bits represents a lane number:
+ * No swap is 0123 => 0x1b no need to enable the swap
+ */
u16 ser_lane, rx_lane_swap, tx_lane_swap;
ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
rx_lane_swap = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
tx_lane_swap = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
if (rx_lane_swap != 0x1b) {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_RX_LN_SWAP,
- (rx_lane_swap |
- MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
- MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+ (rx_lane_swap |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
} else {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
}
if (tx_lane_swap != 0x1b) {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_TX_LN_SWAP,
- (tx_lane_swap |
- MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+ (tx_lane_swap |
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
} else {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
}
}
@@ -1302,66 +1856,66 @@ static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 control2;
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
- &control2);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ &control2);
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
else
control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
phy->speed_cap_mask, control2);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
- control2);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ control2);
if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
(phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
DP(NETIF_MSG_LINK, "XGXS\n");
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
- &control2);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ &control2);
control2 |=
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
- control2);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ control2);
/* Disable parallel detection of HiG */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
}
}
static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars,
- u8 enable_cl73)
+ struct link_vars *vars,
+ u8 enable_cl73)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
/* CL37 Autoneg */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
/* CL37 Autoneg Enabled */
if (vars->line_speed == SPEED_AUTO_NEG)
@@ -1370,15 +1924,15 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
/* Enable/Disable Autodetection */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
@@ -1387,14 +1941,14 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
else
reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
/* Enable TetonII and BAM autoneg */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_BAM_NEXT_PAGE,
- MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
&reg_val);
if (vars->line_speed == SPEED_AUTO_NEG) {
/* Enable BAM aneg Mode and TetonII aneg Mode */
@@ -1405,20 +1959,20 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
}
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_BAM_NEXT_PAGE,
- MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
- reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ reg_val);
if (enable_cl73) {
/* Enable Cl73 FSM status bits */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_USERB0,
- MDIO_CL73_USERB0_CL73_UCTRL,
- 0xe);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_UCTRL,
+ 0xe);
/* Enable BAM Station Manager*/
- CL45_WR_OVER_CL22(bp, phy,
+ CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_USERB0,
MDIO_CL73_USERB0_CL73_BAM_CTRL1,
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -1426,10 +1980,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
/* Advertise CL73 link speeds */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV2,
- &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ &reg_val);
if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
@@ -1437,10 +1991,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV2,
- reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ reg_val);
/* CL73 Autoneg Enabled */
reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
@@ -1448,37 +2002,39 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
} else /* CL73 Autoneg Disabled */
reg_val = 0;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
/* program SerDes, forced speed */
static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
/* program duplex, disable autoneg and sgmii*/
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
if (phy->req_duplex == DUPLEX_FULL)
reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
-
- /* program speed
- - needed only if the speed is greater than 1G (2.5G or 10G) */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+ /*
+ * program speed
+ * - needed only if the speed is greater than 1G (2.5G or 10G)
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, &reg_val);
/* clearing the speed value before setting the right speed */
DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
@@ -1499,9 +2055,9 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
}
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_MISC1, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, reg_val);
}
@@ -1518,13 +2074,13 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
val |= MDIO_OVER_1G_UP1_2_5G;
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= MDIO_OVER_1G_UP1_10G;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_OVER_1G,
- MDIO_OVER_1G_UP1, val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP1, val);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_OVER_1G,
- MDIO_OVER_1G_UP3, 0x400);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP3, 0x400);
}
static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -1532,22 +2088,21 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
- /* resolve pause mode and advertisement
- * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
+ /*
+ * Resolve pause mode and advertisement.
+ * Please refer to Table 28B-3 of the 802.3ab-1999 spec
+ */
switch (phy->req_flow_ctrl) {
case BNX2X_FLOW_CTRL_AUTO:
- if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
- *ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
- } else {
+ if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ else
*ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
- }
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
case BNX2X_FLOW_CTRL_TX:
- *ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
case BNX2X_FLOW_CTRL_RX:
@@ -1565,23 +2120,23 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
struct link_params *params,
- u16 ieee_fc)
+ u16 ieee_fc)
{
struct bnx2x *bp = params->bp;
u16 val;
/* for AN, we are always publishing full duplex */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1, &val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, &val);
val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1, val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, val);
}
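The ieee_fc << 3 above is what moves the CL37 pause advertisement into the CL73 ADV1 pause field. With the standard 802.3 bit positions (PAUSE at bit 7 and ASM_DIR at bit 8 for CL37, bits 10 and 11 for CL73 — assumed here, the driver's masks encode the real ones), the shift lines up exactly; a quick standalone check:

#include <stdint.h>
#include <stdio.h>

/* Assumed standard 802.3 bit positions for the pause advertisement */
#define CL37_ADV_PAUSE  (1u << 7)
#define CL37_ADV_ASYM   (1u << 8)
#define CL73_ADV1_PAUSE (1u << 10)
#define CL73_ADV1_ASYM  (1u << 11)
#define CL73_PAUSE_MASK (CL73_ADV1_PAUSE | CL73_ADV1_ASYM)

int main(void)
{
	uint16_t ieee_fc = CL37_ADV_PAUSE | CL37_ADV_ASYM;
	uint16_t cl73 = (uint16_t)((ieee_fc << 3) & CL73_PAUSE_MASK);

	/* both pause bits land on the CL73 positions after the shift */
	printf("cl73 adv = 0x%04x (expect 0x%04x)\n",
	       (unsigned)cl73, (unsigned)CL73_PAUSE_MASK);
	return 0;
}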
static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
@@ -1595,67 +2150,67 @@ static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
/* Enable and restart BAM/CL37 aneg */
if (enable_cl73) {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- &mii_control);
-
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- (mii_control |
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ &mii_control);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ (mii_control |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
} else {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
DP(NETIF_MSG_LINK,
"bnx2x_restart_autoneg mii_control before = 0x%x\n",
mii_control);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- (mii_control |
- MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
- MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
}
}
static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 control1;
/* in SGMII mode, the unicore is always slave */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
- &control1);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ &control1);
control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
/* set sgmii mode (and not fiber) */
control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
- control1);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ control1);
/* if forced speed */
if (!(vars->line_speed == SPEED_AUTO_NEG)) {
/* set speed, disable autoneg */
u16 mii_control;
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
@@ -1683,10 +2238,10 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
if (phy->req_duplex == DUPLEX_FULL)
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- mii_control);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ mii_control);
} else { /* AN mode */
/* enable and restart AN */
@@ -1701,19 +2256,19 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
{ /* LD LP */
- switch (pause_result) { /* ASYM P ASYM P */
- case 0xb: /* 1 0 1 1 */
+ switch (pause_result) { /* ASYM P ASYM P */
+ case 0xb: /* 1 0 1 1 */
vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
break;
- case 0xe: /* 1 1 1 0 */
+ case 0xe: /* 1 1 1 0 */
vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
break;
- case 0x5: /* 0 1 0 1 */
- case 0x7: /* 0 1 1 1 */
- case 0xd: /* 1 1 0 1 */
- case 0xf: /* 1 1 1 1 */
+ case 0x5: /* 0 1 0 1 */
+ case 0x7: /* 0 1 1 1 */
+ case 0xd: /* 1 1 0 1 */
+ case 0xf: /* 1 1 1 1 */
vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
break;
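The nibble being switched on above packs the advertisement bits as LD_ASYM, LD_PAUSE, LP_ASYM, LP_PAUSE from MSB to LSB. A standalone restatement of the visible cases; the fall-through-to-none default is an assumption, since that part of the function is outside this hunk:

#include <stdio.h>

enum flow_ctrl { FC_NONE, FC_TX, FC_RX, FC_BOTH };

/* pause_result bits: 3 = LD asym, 2 = LD pause, 1 = LP asym, 0 = LP pause */
static enum flow_ctrl resolve(unsigned pause_result)
{
	switch (pause_result) {
	case 0xb:			/* LD asym only, LP both      */
		return FC_TX;
	case 0xe:			/* LD both,      LP asym only */
		return FC_RX;
	case 0x5: case 0x7:		/* both sides advertise pause */
	case 0xd: case 0xf:
		return FC_BOTH;
	default:			/* assumed: everything else   */
		return FC_NONE;
	}
}

int main(void)
{
	printf("0xb -> %d (FC_TX)\n", resolve(0xb));
	printf("0xf -> %d (FC_BOTH)\n", resolve(0xf));
	return 0;
}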
@@ -1733,24 +2288,24 @@ static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
u16 pd_10g, status2_1000x;
if (phy->req_line_speed != SPEED_AUTO_NEG)
return 0;
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
- &status2_1000x);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
- &status2_1000x);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
params->port);
return 1;
}
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
- &pd_10g);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+ &pd_10g);
if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
@@ -1789,14 +2344,14 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1,
- &ld_pause);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_LP_ADV1,
- &lp_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV1,
+ &lp_pause);
pause_result = (ld_pause &
MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
>> 8;
@@ -1806,18 +2361,18 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
pause_result);
} else {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
- &ld_pause);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
- &lp_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+ &lp_pause);
pause_result = (ld_pause &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
pause_result |= (lp_pause &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
pause_result);
}
@@ -1833,25 +2388,25 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
u16 rx_status, ustat_val, cl37_fsm_recieved;
DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
/* Step 1: Make sure signal is detected */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_RX0,
- MDIO_RX0_RX_STATUS,
- &rx_status);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_RX0,
+ MDIO_RX0_RX_STATUS,
+ &rx_status);
if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
(MDIO_RX0_RX_STATUS_SIGDET)) {
DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
"rx_status(0x80b0) = 0x%x\n", rx_status);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
return;
}
/* Step 2: Check CL73 state machine */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_USERB0,
- MDIO_CL73_USERB0_CL73_USTAT1,
- &ustat_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_USTAT1,
+ &ustat_val);
if ((ustat_val &
(MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
@@ -1861,12 +2416,14 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
"ustat_val(0x8371) = 0x%x\n", ustat_val);
return;
}
- /* Step 3: Check CL37 Message Pages received to indicate LP
- supports only CL37 */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_REMOTE_PHY,
- MDIO_REMOTE_PHY_MISC_RX_STATUS,
- &cl37_fsm_recieved);
+ /*
+ * Step 3: Check CL37 Message Pages received to indicate LP
+ * supports only CL37
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_REMOTE_PHY,
+ MDIO_REMOTE_PHY_MISC_RX_STATUS,
+ &cl37_fsm_recieved);
if ((cl37_fsm_recieved &
(MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
@@ -1877,14 +2434,18 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
cl37_fsm_recieved);
return;
}
- /* The combined cl37/cl73 fsm state information indicating that we are
- connected to a device which does not support cl73, but does support
- cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */
+ /*
+ * The combined cl37/cl73 fsm state information indicates that
+ * we are connected to a device which does not support cl73, but
+ * does support cl37 BAM. In this case we disable cl73 and
+ * restart cl37 auto-neg
+ */
+
/* Disable CL73 */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- 0);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ 0);
/* Restart CL37 autoneg */
bnx2x_restart_autoneg(phy, params, 0);
DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
@@ -1909,14 +2470,14 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 new_line_speed , gp_status;
+ u16 new_line_speed, gp_status;
u8 rc = 0;
/* Read gp_status */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
if (phy->req_line_speed == SPEED_AUTO_NEG)
vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
@@ -2053,9 +2614,9 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
u16 bank;
/* read precomp */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_OVER_1G,
- MDIO_OVER_1G_LP_UP2, &lp_up2);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_LP_UP2, &lp_up2);
/* bits [10:7] at lp_up2, positioned at [15:12] */
lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
@@ -2067,18 +2628,18 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
- CL45_RD_OVER_CL22(bp, phy,
- bank,
- MDIO_TX0_TX_DRIVER, &tx_driver);
+ CL22_RD_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, &tx_driver);
/* replace tx_driver bits [15:12] */
if (lp_up2 !=
(tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
tx_driver |= lp_up2;
- CL45_WR_OVER_CL22(bp, phy,
- bank,
- MDIO_TX0_TX_DRIVER, tx_driver);
+ CL22_WR_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, tx_driver);
}
}
}
@@ -2092,10 +2653,10 @@ static u8 bnx2x_emac_program(struct link_params *params,
DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
- EMAC_REG_EMAC_MODE,
- (EMAC_MODE_25G_MODE |
- EMAC_MODE_PORT_MII_10M |
- EMAC_MODE_HALF_DUPLEX));
+ EMAC_REG_EMAC_MODE,
+ (EMAC_MODE_25G_MODE |
+ EMAC_MODE_PORT_MII_10M |
+ EMAC_MODE_HALF_DUPLEX));
switch (vars->line_speed) {
case SPEED_10:
mode |= EMAC_MODE_PORT_MII_10M;
@@ -2123,8 +2684,8 @@ static u8 bnx2x_emac_program(struct link_params *params,
if (vars->duplex == DUPLEX_HALF)
mode |= EMAC_MODE_HALF_DUPLEX;
bnx2x_bits_en(bp,
- GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
- mode);
+ GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+ mode);
bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
return 0;
@@ -2139,7 +2700,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
- CL45_WR_OVER_CL22(bp, phy,
+ CL22_WR_OVER_CL45(bp, phy,
bank,
MDIO_RX0_RX_EQ_BOOST,
phy->rx_preemphasis[i]);
@@ -2147,7 +2708,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
- CL45_WR_OVER_CL22(bp, phy,
+ CL22_WR_OVER_CL45(bp, phy,
bank,
MDIO_TX0_TX_DRIVER,
phy->tx_preemphasis[i]);
@@ -2170,7 +2731,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
/* forced speed requested? */
if (vars->line_speed != SPEED_AUTO_NEG ||
(SINGLE_MEDIA_DIRECT(params) &&
- params->loopback_mode == LOOPBACK_EXT)) {
+ params->loopback_mode == LOOPBACK_EXT)) {
DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
/* disable autoneg */
@@ -2187,7 +2748,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
/* program duplex & pause advertisement (for aneg) */
bnx2x_set_ieee_aneg_advertisment(phy, params,
- vars->ieee_fc);
+ vars->ieee_fc);
/* enable autoneg */
bnx2x_set_autoneg(phy, params, vars, enable_cl73);
@@ -2258,7 +2819,8 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
}
static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
- struct bnx2x_phy *phy)
+ struct bnx2x_phy *phy,
+ struct link_params *params)
{
u16 cnt, ctrl;
/* Wait for soft reset to get cleared upto 1 sec */
@@ -2269,6 +2831,11 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
break;
msleep(1);
}
+
+ if (cnt == 1000)
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ params->port);
DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
return cnt;
}
@@ -2279,9 +2846,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
u32 mask;
struct bnx2x *bp = params->bp;
- /* setting the status to report on link up
- for either XGXS or SerDes */
-
+ /* Setting the status to report on link up for either XGXS or SerDes */
if (params->switch_cfg == SWITCH_CFG_10G) {
mask = (NIG_MASK_XGXS0_LINK10G |
NIG_MASK_XGXS0_LINK_STATUS);
@@ -2324,7 +2889,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
{
u32 latch_status = 0;
- /**
+ /*
* Disable the MI INT ( external phy int ) by writing 1 to the
* status register. Link down indication is high-active-signal,
* so in this case we need to write the status to clear the XOR
@@ -2349,27 +2914,30 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
/* For all latched-signal=up : Re-Arm Latch signals */
REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
- (latch_status & 0xfffe) | (latch_status & 1));
+ (latch_status & 0xfffe) | (latch_status & 1));
}
/* For all latched-signal=up,Write original_signal to status */
}
static void bnx2x_link_int_ack(struct link_params *params,
- struct link_vars *vars, u8 is_10g)
+ struct link_vars *vars, u8 is_10g)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
- /* first reset all status
- * we assume only one line will be change at a time */
+ /*
+ * First reset all status bits; we assume only one line will
+ * change at a time
+ */
bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- (NIG_STATUS_XGXS0_LINK10G |
- NIG_STATUS_XGXS0_LINK_STATUS |
- NIG_STATUS_SERDES0_LINK_STATUS));
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS));
if (vars->phy_link_up) {
if (is_10g) {
- /* Disable the 10G link interrupt
- * by writing 1 to the status register
+ /*
+ * Disable the 10G link interrupt by writing 1 to the
+ * status register
*/
DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
bnx2x_bits_en(bp,
@@ -2377,9 +2945,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
NIG_STATUS_XGXS0_LINK10G);
} else if (params->switch_cfg == SWITCH_CFG_10G) {
- /* Disable the link interrupt
- * by writing 1 to the relevant lane
- * in the status register
+ /*
+ * Disable the link interrupt by writing 1 to the
+ * relevant lane in the status register
*/
u32 ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
@@ -2394,8 +2962,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
} else { /* SerDes */
DP(NETIF_MSG_LINK, "SerDes phy link up\n");
- /* Disable the link interrupt
- * by writing 1 to the status register
+ /*
+ * Disable the link interrupt by writing 1 to the status
+ * register
*/
bnx2x_bits_en(bp,
NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -2475,8 +3044,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
}
if ((params->num_phys == MAX_PHYS) &&
(params->phy[EXT_PHY2].ver_addr != 0)) {
- spirom_ver = REG_RD(bp,
- params->phy[EXT_PHY2].ver_addr);
+ spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
if (params->phy[EXT_PHY2].format_fw_ver) {
*ver_p = '/';
ver_p++;
@@ -2505,29 +3073,27 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
/* change the uni_phy_addr in the nig */
md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
- port*0x18));
+ port*0x18));
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
bnx2x_cl45_write(bp, phy,
- 5,
- (MDIO_REG_BANK_AER_BLOCK +
- (MDIO_AER_BLOCK_AER_REG & 0xf)),
- 0x2800);
+ 5,
+ (MDIO_REG_BANK_AER_BLOCK +
+ (MDIO_AER_BLOCK_AER_REG & 0xf)),
+ 0x2800);
bnx2x_cl45_write(bp, phy,
- 5,
- (MDIO_REG_BANK_CL73_IEEEB0 +
- (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
- 0x6041);
+ 5,
+ (MDIO_REG_BANK_CL73_IEEEB0 +
+ (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+ 0x6041);
msleep(200);
/* set aer mmd back */
bnx2x_set_aer_mmd_xgxs(params, phy);
/* and md_devad */
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
- md_devad);
-
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
} else {
u16 mii_ctrl;
DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
@@ -2568,56 +3134,71 @@ u8 bnx2x_set_led(struct link_params *params,
case LED_MODE_OFF:
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- SHARED_HW_CFG_LED_MAC1);
+ SHARED_HW_CFG_LED_MAC1);
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
break;
case LED_MODE_OPER:
- /**
+ /*
* For all other phys, OPER mode is same as ON, so in case
* link is down, do nothing
- **/
+ */
if (!vars->link_up)
break;
case LED_MODE_ON:
- if (SINGLE_MEDIA_DIRECT(params)) {
- /**
- * This is a work-around for HW issue found when link
- * is up in CL73
- */
+ if (params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
+ CHIP_IS_E2(bp) && params->num_phys == 2) {
+ /*
+ * This is a work-around for E2+8727 Configurations
+ */
+ if (mode == LED_MODE_ON ||
+ speed == SPEED_10000){
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+ tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+ EMAC_WR(bp, EMAC_REG_EMAC_LED,
+ (tmp | EMAC_LED_OVERRIDE));
+ return rc;
+ }
+ } else if (SINGLE_MEDIA_DIRECT(params)) {
+ /*
+ * This is a work-around for HW issue found when link
+ * is up in CL73
+ */
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
} else {
- REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- hw_led_mode);
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
}
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
- port*4, 0);
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
/* Set blinking rate to ~15.9Hz */
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
- LED_BLINK_RATE_VAL);
+ LED_BLINK_RATE_VAL);
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
- port*4, 1);
+ port*4, 1);
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
- EMAC_WR(bp, EMAC_REG_EMAC_LED,
- (tmp & (~EMAC_LED_OVERRIDE)));
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
if (CHIP_IS_E1(bp) &&
((speed == SPEED_2500) ||
(speed == SPEED_1000) ||
(speed == SPEED_100) ||
(speed == SPEED_10))) {
- /* On Everest 1 Ax chip versions for speeds less than
- 10G LED scheme is different */
+ /*
+ * On Everest 1 Ax chip versions for speeds less than
+ * 10G LED scheme is different
+ */
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
- + port*4, 1);
+ + port*4, 1);
REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
- port*4, 0);
+ port*4, 0);
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
- port*4, 1);
+ port*4, 1);
}
break;
@@ -2631,7 +3212,7 @@ u8 bnx2x_set_led(struct link_params *params,
}
-/**
+/*
* This function comes to reflect the actual link state read DIRECTLY from the
* HW
*/
@@ -2643,10 +3224,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
u8 ext_phy_link_up = 0, serdes_phy_type;
struct link_vars temp_vars;
- CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY],
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
+ CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
/* link is up only if both local phy and external phy are up */
if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
return -ESRCH;
@@ -2690,15 +3271,15 @@ static u8 bnx2x_link_initialize(struct link_params *params,
u8 rc = 0;
u8 phy_index, non_ext_phy;
struct bnx2x *bp = params->bp;
- /**
- * In case of external phy existence, the line speed would be the
- * line speed linked up by the external phy. In case it is direct
- * only, then the line_speed during initialization will be
- * equal to the req_line_speed
- */
+ /*
+ * In case of external phy existence, the line speed would be the
+ * line speed linked up by the external phy. In case it is direct
+ * only, then the line_speed during initialization will be
+ * equal to the req_line_speed
+ */
vars->line_speed = params->phy[INT_PHY].req_line_speed;
- /**
+ /*
* Initialize the internal phy in case this is a direct board
* (no external phys), or this board has an external phy which requires
* the internal phy to be initialized first.
@@ -2726,17 +3307,16 @@ static u8 bnx2x_link_initialize(struct link_params *params,
if (!non_ext_phy)
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
- /**
+ /*
* No need to initialize second phy in case of first
* phy only selection. In case of second phy, we do
* need to initialize the first phy, since they are
* connected.
- **/
+ */
if (phy_index == EXT_PHY2 &&
(bnx2x_phy_selection(params) ==
PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
- DP(NETIF_MSG_LINK, "Not initializing"
- "second phy\n");
+ DP(NETIF_MSG_LINK, "Ignoring second phy\n");
continue;
}
params->phy[phy_index].config_init(
@@ -2758,9 +3338,8 @@ static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
/* reset the SerDes/XGXS */
- REG_WR(params->bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_3_CLEAR,
- (0x1ff << (params->port*16)));
+ REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+ (0x1ff << (params->port*16)));
}
static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
@@ -2774,11 +3353,11 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
else
gpio_port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
DP(NETIF_MSG_LINK, "reset external PHY\n");
}
@@ -2809,9 +3388,8 @@ static u8 bnx2x_update_link_down(struct link_params *params,
/* reset BigMac */
bnx2x_bmac_rx_disable(bp, params->port);
- REG_WR(bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
return 0;
}
@@ -2862,7 +3440,7 @@ static u8 bnx2x_update_link_up(struct link_params *params,
msleep(20);
return rc;
}
-/**
+/*
* The bnx2x_link_update function should be called upon link
* interrupt.
* Link is considered up as follows:
@@ -2901,12 +3479,11 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
- port*0x18) > 0);
+ port*0x18) > 0);
DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
is_mi_int,
- REG_RD(bp,
- NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
@@ -2915,14 +3492,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
/* disable emac */
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- /**
- * Step 1:
- * Check external link change only for external phys, and apply
- * priority selection between them in case the link on both phys
- * is up. Note that the instead of the common vars, a temporary
- * vars argument is used since each phy may have different link/
- * speed/duplex result
- */
+ /*
+ * Step 1:
+ * Check external link change only for external phys, and apply
+ * priority selection between them in case the link on both phys
+ * is up. Note that instead of the common vars, a temporary
+ * vars argument is used since each phy may have different link/
+ * speed/duplex result
+ */
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
struct bnx2x_phy *phy = &params->phy[phy_index];
@@ -2947,22 +3524,22 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
switch (bnx2x_phy_selection(params)) {
case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
- /**
+ /*
* In this option, the first PHY makes sure to pass the
* traffic through itself only.
* It's not clear how to reset the link on the second phy
- **/
+ */
active_external_phy = EXT_PHY1;
break;
case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
- /**
+ /*
* In this option, the first PHY makes sure to pass the
* traffic through the second PHY.
- **/
+ */
active_external_phy = EXT_PHY2;
break;
default:
- /**
+ /*
* Link indication on both PHYs with the following cases
* is invalid:
* - FIRST_PHY means that second phy wasn't initialized,
@@ -2970,7 +3547,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
* - SECOND_PHY means that first phy should not be able
* to link up by itself (using configuration)
* - DEFAULT should be overridden during initialization
- **/
+ */
DP(NETIF_MSG_LINK, "Invalid link indication"
"mpc=0x%x. DISABLING LINK !!!\n",
params->multi_phy_config);
@@ -2980,18 +3557,18 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
}
}
prev_line_speed = vars->line_speed;
- /**
- * Step 2:
- * Read the status of the internal phy. In case of
- * DIRECT_SINGLE_MEDIA board, this link is the external link,
- * otherwise this is the link between the 577xx and the first
- * external phy
- */
+ /*
+ * Step 2:
+ * Read the status of the internal phy. In case of
+ * DIRECT_SINGLE_MEDIA board, this link is the external link,
+ * otherwise this is the link between the 577xx and the first
+ * external phy
+ */
if (params->phy[INT_PHY].read_status)
params->phy[INT_PHY].read_status(
&params->phy[INT_PHY],
params, vars);
- /**
+ /*
* The INT_PHY flow control reside in the vars. This include the
* case where the speed or flow control are not set to AUTO.
* Otherwise, the active external phy flow control result is set
@@ -3001,13 +3578,13 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
*/
if (active_external_phy > INT_PHY) {
vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
- /**
+ /*
* Link speed is taken from the XGXS. AN and FC result from
* the external phy.
*/
vars->link_status |= phy_vars[active_external_phy].link_status;
- /**
+ /*
* if active_external_phy is first PHY and link is up - disable
* TX on second external PHY
*/
@@ -3043,7 +3620,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
" ext_phy_line_speed = %d\n", vars->flow_ctrl,
vars->link_status, ext_phy_line_speed);
- /**
+ /*
* Upon link speed change set the NIG into drain mode. Comes to
* deal with possible FIFO glitch due to clk change when speed
* is decreased without link down indicator
@@ -3058,8 +3635,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
ext_phy_line_speed);
vars->phy_link_up = 0;
} else if (prev_line_speed != vars->line_speed) {
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
- + params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
+ 0);
msleep(1);
}
}
@@ -3074,14 +3651,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
bnx2x_link_int_ack(params, vars, link_10g);
- /**
- * In case external phy link is up, and internal link is down
- * (not initialized yet probably after link initialization, it
- * needs to be initialized.
- * Note that after link down-up as result of cable plug, the xgxs
- * link would probably become up again without the need
- * initialize it
- */
+ /*
+ * In case external phy link is up, and internal link is down
+ * (probably not yet initialized right after link initialization), it
+ * needs to be initialized.
+ * Note that after link down-up as a result of cable plug, the xgxs
+ * link would probably become up again without the need to
+ * initialize it
+ */
if (!(SINGLE_MEDIA_DIRECT(params))) {
DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
" init_preceding = %d\n", ext_phy_link_up,
@@ -3101,9 +3678,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
vars);
}
}
- /**
- * Link is up only if both local phy and external phy (in case of
- * non-direct board) are up
+ /*
+ * Link is up only if both local phy and external phy (in case of
+ * non-direct board) are up
*/
vars->link_up = (vars->phy_link_up &&
(ext_phy_link_up ||
@@ -3124,10 +3701,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
{
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
msleep(1);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
@@ -3147,9 +3724,9 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
u16 fw_ver1, fw_ver2;
bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+ MDIO_PMA_REG_ROM_VER2, &fw_ver2);
bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
phy->ver_addr);
}
@@ -3170,7 +3747,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
if ((vars->ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
- val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+ val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
}
if ((vars->ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
@@ -3201,11 +3778,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
ret = 1;
bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
pause_result = (ld_pause &
MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
pause_result |= (lp_pause &
@@ -3270,90 +3847,82 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
pause_result);
}
}
-
-static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
struct bnx2x_phy *phy,
u8 port)
{
+ u32 count = 0;
+ u16 fw_ver1, fw_msgout;
+ u8 rc = 0;
+
/* Boot port from external ROM */
/* EDC grst */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x0001);
/* ucode reboot and rst */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x008c);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x008c);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
/* Reset internal microprocessor */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
/* Release srst bit */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* wait for 120ms for code download via SPI port */
- msleep(120);
+ /* Delay 100ms per the PHY specifications */
+ msleep(100);
- /* Clear ser_boot_ctl bit */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
- bnx2x_save_bcm_spirom_ver(bp, phy, port);
-}
+ /* The 8073 sometimes takes longer to download */
+ do {
+ count++;
+ if (count > 300) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_8727_external_rom_boot port %x:"
+ "Download failed. fw version = 0x%x\n",
+ port, fw_ver1);
+ rc = -EINVAL;
+ break;
+ }
-static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
- struct bnx2x_phy *phy)
-{
- u16 val;
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
- if (val == 0) {
- /* Mustn't set low power mode in 8073 A0 */
- return;
- }
+ msleep(1);
+ } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
+ ((fw_msgout & 0xff) != 0x03 && (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
- /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
- bnx2x_cl45_read(bp, phy,
- MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
- val &= ~(1<<13);
+ /* Clear ser_boot_ctl bit */
bnx2x_cl45_write(bp, phy,
- MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
-
- /* PLL controls */
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
-
- /* Tx Controls */
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ bnx2x_save_bcm_spirom_ver(bp, phy, port);
- /* Rx Controls */
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
+ DP(NETIF_MSG_LINK,
+ "bnx2x_8073_8727_external_rom_boot port %x:"
+ "Download complete. fw version = 0x%x\n",
+ port, fw_ver1);
- /* Enable PLL sequencer (use read-modify-write to set bit 13) */
- bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
- val |= (1<<13);
- bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
+ return rc;
}
/******************************************************************/
@@ -3366,8 +3935,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
/* Read 8073 HW revision*/
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_CHIP_REV, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
if (val != 1) {
/* No need to workaround in 8073 A1 */
@@ -3375,8 +3944,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
}
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &val);
/* SNR should be applied only for version 0x102 */
if (val != 0x102)
@@ -3390,8 +3959,8 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
u16 val, cnt, cnt1 ;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_CHIP_REV, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
if (val > 0) {
/* No need to workaround in 8073 A1 */
@@ -3399,26 +3968,32 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
}
/* XAUI workaround in 8073 A0: */
- /* After loading the boot ROM and restarting Autoneg,
- poll Dev1, Reg $C820: */
+ /*
+ * After loading the boot ROM and restarting Autoneg, poll
+ * Dev1, Reg $C820:
+ */
for (cnt = 0; cnt < 1000; cnt++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
- &val);
- /* If bit [14] = 0 or bit [13] = 0, continue on with
- system initialization (XAUI work-around not required,
- as these bits indicate 2.5G or 1G link up). */
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &val);
+ /*
+ * If bit [14] = 0 or bit [13] = 0, continue on with
+ * system initialization (XAUI work-around not required, as
+ * these bits indicate 2.5G or 1G link up).
+ */
if (!(val & (1<<14)) || !(val & (1<<13))) {
DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
return 0;
} else if (!(val & (1<<15))) {
- DP(NETIF_MSG_LINK, "clc bit 15 went off\n");
- /* If bit 15 is 0, then poll Dev1, Reg $C841 until
- it's MSB (bit 15) goes to 1 (indicating that the
- XAUI workaround has completed),
- then continue on with system initialization.*/
+ DP(NETIF_MSG_LINK, "bit 15 went off\n");
+ /*
+ * If bit 15 is 0, then poll Dev1, Reg $C841 until it's
+ * MSB (bit15) goes to 1 (indicating that the XAUI
+ * workaround has completed), then continue on with
+ * system initialization.
+ */
for (cnt1 = 0; cnt1 < 1000; cnt1++) {
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
@@ -3501,10 +4076,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
gpio_port = params->port;
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
/* enable LASI */
bnx2x_cl45_write(bp, phy,
@@ -3514,8 +4089,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_8073_set_pause_cl37(params, phy, vars);
- bnx2x_8073_set_xaui_low_power_mode(bp, phy);
-
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
@@ -3524,6 +4097,21 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
+ /* Swap polarity if required - Must be done only in non-1G mode */
+ if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap _P and _N of the KR lines */
+ DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
+ /* 10G Rx/Tx and 1G Tx signal polarity swap */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
+ (val | (3<<9)));
+ }
+
+
/* Enable CL37 BAM */
if (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
@@ -3551,8 +4139,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
val = (1<<7);
} else if (phy->req_line_speed == SPEED_2500) {
val = (1<<5);
- /* Note that 2.5G works only
- when used with 1G advertisment */
+ /*
+ * Note that 2.5G works only when used with 1G
+ * advertisement
+ */
} else
val = (1<<5);
} else {
@@ -3561,8 +4151,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= (1<<7);
- /* Note that 2.5G works only when
- used with 1G advertisment */
+ /* Note that 2.5G works only when used with 1G advertisement */
if (phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -3602,9 +4191,11 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
/* Add support for CL37 (passive mode) III */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
- /* The SNR will improve about 2db by changing
- BW and FEE main tap. Rest commands are executed
- after link is up*/
+ /*
+ * The SNR will improve about 2dB by changing the BW and FFE main
+ * tap. The rest of the commands are executed after link is up.
+ * Change FFE main cursor to 5 in EDC register
+ */
if (bnx2x_8073_is_snr_needed(bp, phy))
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
@@ -3688,12 +4279,11 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
- /* The SNR will improve about 2dbby
- changing the BW and FEE main tap.*/
- /* The 1st write to change FFE main
- tap is set before restart AN */
- /* Change PLL Bandwidth in EDC
- register */
+ /*
+ * The SNR will improve about 2dB by changing the BW and FFE main
+ * tap. The 1st write to change FFE main tap is set before
+ * restart AN. Change PLL Bandwidth in EDC register
+ */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
0x26BC);
@@ -3730,8 +4320,32 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
}
if (link_up) {
+ /* Swap polarity if required */
+ if (params->lane_config &
+ PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+ /* Configure the 8073 to swap P and N of the KR lines */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
+ /*
+ * Set bit 3 to invert Rx in 1G mode and clear this bit
+ * when it is in 10G mode.
+ */
+ if (vars->line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
+ "the 8073\n");
+ val1 |= (1<<3);
+ } else
+ val1 &= ~(1<<3);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_XS_DEVAD,
+ MDIO_XS_REG_8073_RX_CTRL_PCIE,
+ val1);
+ }
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
bnx2x_8073_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
}
return link_up;
}
@@ -3748,8 +4362,8 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
gpio_port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
}
/******************************************************************/
@@ -3763,11 +4377,11 @@ static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "init 8705\n");
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
@@ -3818,35 +4432,79 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
/******************************************************************/
/* SFP+ module Section */
/******************************************************************/
-static void bnx2x_sfp_set_transmitter(struct bnx2x *bp,
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+ u8 gpio_port;
+ u32 swap_val, swap_override;
+ struct bnx2x *bp = params->bp;
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ return gpio_port ^ (swap_val && swap_override);
+}
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
struct bnx2x_phy *phy,
- u8 port,
u8 tx_en)
{
u16 val;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 tx_en_mode;
- DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
- tx_en, port);
/* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &val);
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].sfp_ctrl)) &
+ PORT_HW_CFG_TX_LASER_MASK;
+ DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+ "mode = %x\n", tx_en, port, tx_en_mode);
+ switch (tx_en_mode) {
+ case PORT_HW_CFG_TX_LASER_MDIO:
- if (tx_en)
- val &= ~(1<<15);
- else
- val |= (1<<15);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- val);
+ if (tx_en)
+ val &= ~(1<<15);
+ else
+ val |= (1<<15);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ val);
+ break;
+ case PORT_HW_CFG_TX_LASER_GPIO0:
+ case PORT_HW_CFG_TX_LASER_GPIO1:
+ case PORT_HW_CFG_TX_LASER_GPIO2:
+ case PORT_HW_CFG_TX_LASER_GPIO3:
+ {
+ u16 gpio_pin;
+ u8 gpio_port, gpio_mode;
+ if (tx_en)
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+ else
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+ gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+ gpio_port = bnx2x_get_gpio_port(params);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ break;
+ }
+ default:
+ DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+ break;
+ }
}
static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
@@ -3859,23 +4517,23 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Set the read command byte count */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
- (byte_cnt | 0xa000));
+ (byte_cnt | 0xa000));
/* Set the read command address */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
- addr);
+ addr);
/* Activate read command */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
- 0x2c0f);
+ 0x2c0f);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
break;
@@ -3893,18 +4551,18 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Read the buffer */
for (i = 0; i < byte_cnt; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
- return 0;;
+ return 0;
msleep(1);
}
return -EINVAL;
@@ -3912,7 +4570,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val, i;
@@ -3925,41 +4583,43 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Need to read from 1.8000 to clear it */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
- &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ &val);
/* Set the read command byte count */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
- ((byte_cnt < 2) ? 2 : byte_cnt));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ ((byte_cnt < 2) ? 2 : byte_cnt));
/* Set the read command address */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
- addr);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ addr);
/* Set the destination address */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- 0x8004,
- MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+ MDIO_PMA_DEVAD,
+ 0x8004,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
/* Activate read command */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
- 0x8002);
- /* Wait appropriate time for two-wire command to finish before
- polling the status register */
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ 0x8002);
+ /*
+ * Wait appropriate time for two-wire command to finish before
+ * polling the status register
+ */
msleep(1);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
break;
@@ -3971,46 +4631,46 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK,
"Got bad status 0x%x when reading from SFP+ EEPROM\n",
(val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
- return -EINVAL;
+ return -EFAULT;
}
/* Read the buffer */
for (i = 0; i < byte_cnt; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
- return 0;;
+ return 0;
msleep(1);
}
return -EINVAL;
}
-static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf)
{
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
+ byte_cnt, o_buf);
else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
+ byte_cnt, o_buf);
return -EINVAL;
}
static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
struct link_params *params,
- u16 *edc_mode)
+ u16 *edc_mode)
{
struct bnx2x *bp = params->bp;
u8 val, check_limiting_mode = 0;
@@ -4031,8 +4691,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
{
u8 copper_module_type;
- /* Check if its active cable( includes SFP+ module)
- of passive cable*/
+ /*
+ * Check if it's an active cable (includes SFP+ module)
+ * or a passive cable
+ */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_FC_TX_TECH_ADDR,
@@ -4091,8 +4753,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
return 0;
}
-/* This function read the relevant field from the module ( SFP+ ),
- and verify it is compliant with this board */
+/*
+ * This function reads the relevant field from the module (SFP+), and verifies
+ * it is compliant with this board
+ */
static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -4141,24 +4805,24 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
/* format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
- SFP_EEPROM_VENDOR_NAME_ADDR,
- SFP_EEPROM_VENDOR_NAME_SIZE,
- (u8 *)vendor_name))
+ SFP_EEPROM_VENDOR_NAME_ADDR,
+ SFP_EEPROM_VENDOR_NAME_SIZE,
+ (u8 *)vendor_name))
vendor_name[0] = '\0';
else
vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
if (bnx2x_read_sfp_module_eeprom(phy,
params,
- SFP_EEPROM_PART_NO_ADDR,
- SFP_EEPROM_PART_NO_SIZE,
- (u8 *)vendor_pn))
+ SFP_EEPROM_PART_NO_ADDR,
+ SFP_EEPROM_PART_NO_SIZE,
+ (u8 *)vendor_pn))
vendor_pn[0] = '\0';
else
vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
- netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected,"
- " Port %d from %s part number %s\n",
- params->port, vendor_name, vendor_pn);
+ netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
+ " Port %d from %s part number %s\n",
+ params->port, vendor_name, vendor_pn);
phy->flags |= FLAGS_SFP_NOT_APPROVED;
return -EINVAL;
}
@@ -4170,8 +4834,11 @@ static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
u8 val;
struct bnx2x *bp = params->bp;
u16 timeout;
- /* Initialization time after hot-plug may take up to 300ms for some
- phys type ( e.g. JDSU ) */
+ /*
+ * Initialization time after hot-plug may take up to 300ms for
+ * some PHY types (e.g. JDSU)
+ */
+
for (timeout = 0; timeout < 60; timeout++) {
if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
== 0) {
@@ -4190,16 +4857,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
/* Make sure GPIOs are not using for LED mode */
u16 val;
/*
- * In the GPIO register, bit 4 is use to detemine if the GPIOs are
+ * In the GPIO register, bit 4 is used to determine if the GPIOs are
* operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
* output
* Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
* Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
* where the 1st bit is the over-current(only input), and 2nd bit is
* for power( only output )
- */
-
- /*
+ *
* In case of NOC feature is disabled and power is up, set GPIO control
* as input to enable listening of over-current indication
*/
@@ -4228,15 +4893,14 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
u16 cur_limiting_mode;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- &cur_limiting_mode);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &cur_limiting_mode);
DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
cur_limiting_mode);
if (edc_mode == EDC_MODE_LIMITING) {
- DP(NETIF_MSG_LINK,
- "Setting LIMITING MODE\n");
+ DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
@@ -4245,62 +4909,63 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
- /* Changing to LRM mode takes quite few seconds.
- So do it only if current mode is limiting
- ( default is LRM )*/
+ /*
+ * Changing to LRM mode takes quite a few seconds. So do it only
+ * if current mode is limiting (default is LRM)
+ */
if (cur_limiting_mode != EDC_MODE_LIMITING)
return 0;
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LRM_MODE,
- 0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- 0x128);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ 0x128);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL0,
- 0x4008);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL0,
+ 0x4008);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LRM_MODE,
- 0xaaaa);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0xaaaa);
}
return 0;
}
static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
struct bnx2x_phy *phy,
- u16 edc_mode)
+ u16 edc_mode)
{
u16 phy_identifier;
u16 rom_ver2_val;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &phy_identifier);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &phy_identifier);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- (phy_identifier & ~(1<<9)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier & ~(1<<9)));
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- &rom_ver2_val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &rom_ver2_val);
/* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- (phy_identifier | (1<<9)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier | (1<<9)));
return 0;
}
@@ -4313,11 +4978,11 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
switch (action) {
case DISABLE_TX:
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
break;
case ENABLE_TX:
if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+ bnx2x_sfp_set_transmitter(params, phy, 1);
break;
default:
DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -4326,6 +4991,38 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
}
}
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ struct bnx2x *bp = params->bp;
+
+ u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl)) &
+ PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+ switch (fault_led_gpio) {
+ case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+ return;
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+ {
+ u8 gpio_port = bnx2x_get_gpio_port(params);
+ u16 gpio_pin = fault_led_gpio -
+ PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+ DP(NETIF_MSG_LINK, "Set fault module-detected led "
+ "pin %x port %x mode %x\n",
+ gpio_pin, gpio_port, gpio_mode);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ }
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
+ fault_led_gpio);
+ }
+}
+
static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -4343,15 +5040,14 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
return -EINVAL;
- } else if (bnx2x_verify_sfp_module(phy, params) !=
- 0) {
+ } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
/* check SFP+ module compatibility */
DP(NETIF_MSG_LINK, "Module verification failed!!\n");
rc = -EINVAL;
/* Turn on fault module-detected led */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- params->port);
+ bnx2x_set_sfp_module_fault_led(params,
+ MISC_REGISTERS_GPIO_HIGH);
+
if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
@@ -4362,18 +5058,17 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
}
} else {
/* Turn off fault module-detected led */
- DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n");
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_LOW,
- params->port);
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
}
/* power up the SFP module */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
bnx2x_8727_power_module(bp, phy, 1);
- /* Check and set limiting mode / LRM mode on 8726.
- On 8727 it is done automatically */
+ /*
+ * Check and set limiting mode / LRM mode on 8726. On 8727 it
+ * is done automatically
+ */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
else
@@ -4385,9 +5080,9 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
if (rc == 0 ||
(val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+ bnx2x_sfp_set_transmitter(params, phy, 1);
else
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
return rc;
}
@@ -4400,11 +5095,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
u8 port = params->port;
/* Set valid module led off */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- params->port);
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
- /* Get current gpio val refelecting module plugged in / out*/
+ /* Get current gpio val reflecting module plugged in / out*/
gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
/* Call the handling function in case module is detected */
@@ -4420,18 +5113,20 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
} else {
u32 val = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
- port_feature_config[params->port].
- config));
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ config));
bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
port);
- /* Module was plugged out. */
- /* Disable transmit for this module */
+ /*
+ * Module was plugged out.
+ * Disable transmit for this module
+ */
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
}
}
@@ -4467,9 +5162,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
" link_status 0x%x\n", rx_sd, pcs_status, val2);
- /* link is up if both bit 0 of pmd_rx_sd and
- * bit 0 of pcs_status are set, or if the autoneg bit
- * 1 is set
+ /*
+ * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+ * are set, or if the autoneg bit 1 is set
*/
link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
if (link_up) {
@@ -4478,6 +5173,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
else
vars->line_speed = SPEED_10000;
bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
}
return link_up;
}
@@ -4489,14 +5185,15 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
- u16 cnt, val;
+ u32 tx_en_mode;
+ u16 cnt, val, tmp1;
struct bnx2x *bp = params->bp;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
/* Wait until fw is loaded */
for (cnt = 0; cnt < 100; cnt++) {
@@ -4563,6 +5260,26 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
0x0004);
}
bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+ /*
+ * If TX Laser is controlled by GPIO_0, do not let PHY go into low
+ * power mode if TX Laser is disabled
+ */
+
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+ tmp1 |= 0x1;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+ }
+
return 0;
}
@@ -4597,26 +5314,26 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
/* Set soft reset */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
/* wait for 150ms for microcode load */
msleep(150);
/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
msleep(200);
bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
@@ -4651,23 +5368,18 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
u32 val;
u32 swap_val, swap_override, aeu_gpio_mask, offset;
DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
-
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_8726_external_rom_boot(phy, params);
- /* Need to call module detected on initialization since
- the module detection triggered by actual module
- insertion might occur before driver is loaded, and when
- driver is loaded, it reset all registers, including the
- transmitter */
+ /*
+ * Need to call module detection on initialization since the module
+ * detection triggered by actual module insertion might occur before
+ * the driver is loaded, and when the driver is loaded, it resets all
+ * registers, including the transmitter
+ */
bnx2x_sfp_module_detection(phy, params);
if (phy->req_line_speed == SPEED_1000) {
@@ -4700,8 +5412,10 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
- /* Enable RX-ALARM control to receive
- interrupt for 1G speed change */
+ /*
+ * Enable RX-ALARM control to receive interrupt for 1G speed
+ * change
+ */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
bnx2x_cl45_write(bp, phy,
@@ -4733,7 +5447,7 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
/* Set GPIO3 to trigger SFP+ module insertion/removal */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
+ MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
/* The GPIO should be swapped if the swap register is set and active */
swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
@@ -4824,7 +5538,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
struct link_params *params) {
u32 swap_val, swap_override;
u8 port;
- /**
+ /*
* The PHY reset is controlled by GPIO 1. Fake the port number
* to cancel the swap done in set_gpio()
*/
@@ -4833,20 +5547,21 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
port = (swap_val && swap_override) ^ 1;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
- u16 tmp1, val, mod_abs;
+ u32 tx_en_mode;
+ u16 tmp1, val, mod_abs, tmp2;
u16 rx_alarm_ctrl_val;
u16 lasi_ctrl_val;
struct bnx2x *bp = params->bp;
/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
lasi_ctrl_val = 0x0004;
@@ -4859,14 +5574,17 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
- /* Initially configure MOD_ABS to interrupt when
- module is presence( bit 8) */
+ /*
+ * Initially configure MOD_ABS to interrupt when module is
+ * present (bit 8)
+ */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
- /* Set EDC off by setting OPTXLOS signal input to low
- (bit 9).
- When the EDC is off it locks onto a reference clock and
- avoids becoming 'lost'.*/
+ /*
+ * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+ * When the EDC is off it locks onto a reference clock and avoids
+ * becoming 'lost'
+ */
mod_abs &= ~(1<<8);
if (!(phy->flags & FLAGS_NOC))
mod_abs &= ~(1<<9);
@@ -4881,7 +5599,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
if (phy->flags & FLAGS_NOC)
val |= (3<<5);
- /**
+ /*
* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
* status which reflect SFP+ module over-current
*/
@@ -4908,7 +5626,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
- /**
+ /*
* Power down the XAUI until link is up in case of dual-media
* and 1G
*/
@@ -4934,7 +5652,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
} else {
- /**
+ /*
* Since the 8727 has only single reset pin, need to set the 10G
* registers although it is default
*/
@@ -4950,7 +5668,8 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
0x0008);
}
- /* Set 2-wire transfer rate of SFP+ module EEPROM
+ /*
+ * Set 2-wire transfer rate of SFP+ module EEPROM
* to 100Khz since some DACs(direct attached cables) do
* not work at 400Khz.
*/
@@ -4973,6 +5692,26 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
phy->tx_preemphasis[1]);
}
+ /*
+ * If TX Laser is controlled by GPIO_0, do not let PHY go into low
+ * power mode if TX Laser is disabled
+ */
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
+ tmp2 |= 0x1000;
+ tmp2 &= 0xFFEF;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+ }
+
return 0;
}
@@ -4986,46 +5725,49 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
port_feature_config[params->port].
config));
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
if (mod_abs & (1<<8)) {
/* Module is absent */
DP(NETIF_MSG_LINK, "MOD_ABS indication "
"show module is absent\n");
- /* 1. Set mod_abs to detect next module
- presence event
- 2. Set EDC off by setting OPTXLOS signal input to low
- (bit 9).
- When the EDC is off it locks onto a reference clock and
- avoids becoming 'lost'.*/
+ /*
+ * 1. Set mod_abs to detect next module
+ * presence event
+ * 2. Set EDC off by setting OPTXLOS signal input to low
+ * (bit 9).
+ * When the EDC is off it locks onto a reference clock and
+ * avoids becoming 'lost'.
+ */
mod_abs &= ~(1<<8);
if (!(phy->flags & FLAGS_NOC))
mod_abs &= ~(1<<9);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
- /* Clear RX alarm since it stays up as long as
- the mod_abs wasn't changed */
+ /*
+ * Clear RX alarm since it stays up as long as
+ * the mod_abs wasn't changed
+ */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
} else {
/* Module is present */
DP(NETIF_MSG_LINK, "MOD_ABS indication "
"show module is present\n");
- /* First thing, disable transmitter,
- and if the module is ok, the
- module_detection will enable it*/
-
- /* 1. Set mod_abs to detect next module
- absent event ( bit 8)
- 2. Restore the default polarity of the OPRXLOS signal and
- this signal will then correctly indicate the presence or
- absence of the Rx signal. (bit 9) */
+ /*
+ * First disable transmitter, and if the module is ok, the
+ * module_detection will enable it
+ * 1. Set mod_abs to detect next module absent event (bit 8)
+ * 2. Restore the default polarity of the OPRXLOS signal and
+ * this signal will then correctly indicate the presence or
+ * absence of the Rx signal. (bit 9)
+ */
mod_abs |= (1<<8);
if (!(phy->flags & FLAGS_NOC))
mod_abs |= (1<<9);
@@ -5033,10 +5775,12 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
- /* Clear RX alarm since it stays up as long as
- the mod_abs wasn't changed. This is need to be done
- before calling the module detection, otherwise it will clear
- the link update alarm */
+ /*
+ * Clear RX alarm since it stays up as long as the mod_abs
+ * wasn't changed. This needs to be done before calling the
+ * module detection, otherwise it will clear the link update
+ * alarm
+ */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
@@ -5044,7 +5788,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
bnx2x_sfp_module_detection(phy, params);
@@ -5053,9 +5797,8 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
}
DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
- rx_alarm_status);
- /* No need to check link status in case of
- module plugged in/out */
+ rx_alarm_status);
+ /* No need to check link status in case of module plugged in/out */
}
static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
@@ -5091,7 +5834,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
- /**
+ /*
* If a module is present and there is need to check
* for over current
*/
@@ -5111,12 +5854,8 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
" Please remove the SFP+ module and"
" restart the system to clear this"
" error.\n",
- params->port);
-
- /*
- * Disable all RX_ALARMs except for
- * mod_abs
- */
+ params->port);
+ /* Disable all RX_ALARMs except for mod_abs */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
@@ -5159,11 +5898,15 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
- /* Bits 0..2 --> speed detected,
- bits 13..15--> link is down */
+ /*
+ * Bits 0..2 --> speed detected,
+ * Bits 13..15 --> link is down
+ */
if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
link_up = 1;
vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+ params->port);
} else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
link_up = 1;
vars->line_speed = SPEED_1000;
@@ -5174,15 +5917,18 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "port %x: External link is down\n",
params->port);
}
- if (link_up)
+ if (link_up) {
bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ vars->duplex = DUPLEX_FULL;
+ DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
+ }
if ((DUAL_MEDIA(params)) &&
(phy->req_line_speed == SPEED_1000)) {
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8727_PCS_GP, &val1);
- /**
+ /*
* In case of dual-media board and 1G, power up the XAUI side,
* otherwise power it down. For 10G it is done automatically
*/
@@ -5202,7 +5948,7 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
/* Disable Transmitter */
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
/* Clear LASI */
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
@@ -5214,19 +5960,23 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
struct link_params *params)
{
- u16 val, fw_ver1, fw_ver2, cnt;
+ u16 val, fw_ver1, fw_ver2, cnt, adj;
struct bnx2x *bp = params->bp;
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = -1;
+
/* For the 32-bit registers in 848xx, access via the MDIO2ARM interface. */
/* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
if (val & 1)
break;
udelay(5);
@@ -5240,11 +5990,11 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
if (val & 1)
break;
udelay(5);
@@ -5257,9 +6007,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
}
/* lower 16 bits of the register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
/* upper 16 bits of register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
phy->ver_addr);
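Editorial aside, not part of the patch: the 848xx routines above fold a signed register-offset adjustment ("adj") into each MDIO address, on the assumption that the BCM84833 places the PMA registers used here one address below their BCM84823 locations (and, further down, MDIO_CTL_REG_84823_MEDIA three addresses above). A hypothetical helper capturing that pattern could look like the sketch below; the name and signature are illustrative, not from the driver.

static inline u16 bnx2x_848xx_adj_reg(struct bnx2x_phy *phy, u16 reg, s16 adj_84833)
{
	/* Only the BCM84833 needs the offset; other 848xx PHYs use the
	 * register address unchanged.
	 */
	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
		return reg + adj_84833;
	return reg;
}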
@@ -5268,33 +6018,53 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
static void bnx2x_848xx_set_led(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- u16 val;
+ u16 val, adj;
+
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = -1;
/* PHYC_CTL_LED_CTL */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
val &= 0xFE00;
val |= 0x0092;
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
+ MDIO_PMA_REG_8481_LED1_MASK + adj,
0x80);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
+ MDIO_PMA_REG_8481_LED2_MASK + adj,
0x18);
+ /* Select activity source by Tx and Rx, as suggested by PHY AE */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x0040);
+ MDIO_PMA_REG_8481_LED3_MASK + adj,
+ 0x0006);
+
+ /* Select the closest activity blink rate to that in 10/100/1000 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_BLINK + adj,
+ 0);
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
+ val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
/* 'Interrupt Mask' */
bnx2x_cl45_write(bp, phy,
@@ -5308,7 +6078,11 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val;
-
+ /*
+ * This phy uses the NIG latch mechanism since link indication
+ * arrives through its LED4 and not via its LASI signal, so we
+ * get a steady signal instead of a clear-on-read one
+ */
bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
1 << NIG_LATCH_BC_ENABLE_MI_INT);
@@ -5433,11 +6207,11 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5449,12 +6223,15 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u8 port, initialize = 1;
- u16 val;
+ u16 val, adj;
u16 temp;
- u32 actual_phy_selection;
+ u32 actual_phy_selection, cms_enable;
u8 rc = 0;
/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = 3;
msleep(1);
if (CHIP_IS_E2(bp))
@@ -5464,11 +6241,12 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
/* Wait for GPHY to come out of reset */
msleep(50);
- /* BCM84823 requires that XGXS links up first @ 10G for normal
- behavior */
+ /*
+ * BCM84823 requires the XGXS to link up first at 10G for normal behavior
+ */
temp = vars->line_speed;
vars->line_speed = SPEED_10000;
bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
@@ -5478,7 +6256,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* Set dual-media configuration according to configuration */
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_CTL_REG_84823_MEDIA, &val);
+ MDIO_CTL_REG_84823_MEDIA + adj, &val);
val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
@@ -5511,7 +6289,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_CTL_REG_84823_MEDIA, val);
+ MDIO_CTL_REG_84823_MEDIA + adj, val);
DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
params->multi_phy_config, val);
@@ -5519,29 +6297,50 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
else
bnx2x_save_848xx_spirom_version(phy, params);
+ cms_enable = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_CMS_MASK;
+
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+ if (cms_enable)
+ val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ else
+ val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+
+
return rc;
}
static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 val, val1, val2;
+ u16 val, val1, val2, adj;
u8 link_up = 0;
+ /* Reg offset adjustment for 84833 */
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = -1;
+
/* Check 10G-BaseT link status */
/* Check PMD signal ok */
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, 0xFFFA, &val1);
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
&val2);
DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
/* Check link 10G */
if (val2 & (1<<11)) {
vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
link_up = 1;
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
} else { /* Check Legacy speed link */
@@ -5619,9 +6418,9 @@ static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
}
static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
@@ -5643,8 +6442,8 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
else
port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
}
static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
@@ -5699,24 +6498,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
/* Set LED masks */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x20);
} else {
bnx2x_cl45_write(bp, phy,
@@ -5740,35 +6539,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
val |= 0x2492;
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL,
- val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
/* Set LED masks */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x20);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x20);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
} else {
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x20);
}
break;
@@ -5786,9 +6585,9 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
&val);
if (!((val &
- MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
- >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){
- DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n");
+ MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+ >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+ DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL,
@@ -5797,30 +6596,42 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
/* Set LED masks */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x10);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x10);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0x80);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x80);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x98);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x98);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK,
- 0x40);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x40);
} else {
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x80);
+
+ /* Tell LED3 to blink on source */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+ val &= ~(7<<6);
+ val |= (1<<6); /* A83B[8:6]= 1 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
}
break;
}
@@ -5847,10 +6658,10 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
@@ -5897,14 +6708,13 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
val2, val1);
link_up = ((val1 & 4) == 4);
- /* if link is up
- * print the AN outcome of the SFX7101 PHY
- */
+ /* if link is up print the AN outcome of the SFX7101 PHY */
if (link_up) {
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
&val2);
vars->line_speed = SPEED_10000;
+ vars->duplex = DUPLEX_FULL;
DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
val2, (val2 & (1<<14)));
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
@@ -5932,20 +6742,20 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
u16 val, cnt;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
for (cnt = 0; cnt < 10; cnt++) {
msleep(50);
/* Writes a self-clearing reset */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET,
- (val | (1<<15)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET,
+ (val | (1<<15)));
/* Wait for clear */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
if ((val & (1<<15)) == 0)
break;
@@ -5956,10 +6766,10 @@ static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
struct link_params *params) {
/* Low power mode is controlled by GPIO 2 */
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
/* The PHY reset is controlled by GPIO 1 */
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
}
static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
@@ -6001,9 +6811,9 @@ static struct bnx2x_phy phy_null = {
.supported = 0,
.media_type = ETH_PHY_NOT_PRESENT,
.ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)NULL,
@@ -6038,8 +6848,8 @@ static struct bnx2x_phy phy_serdes = {
.media_type = ETH_PHY_UNSPECIFIED,
.ver_addr = 0,
.req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_init_serdes,
@@ -6075,8 +6885,8 @@ static struct bnx2x_phy phy_xgxs = {
.media_type = ETH_PHY_UNSPECIFIED,
.ver_addr = 0,
.req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_init_xgxs,
@@ -6106,8 +6916,8 @@ static struct bnx2x_phy phy_7101 = {
.media_type = ETH_PHY_BASE_T,
.ver_addr = 0,
.req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_7101_config_init,
@@ -6137,9 +6947,9 @@ static struct bnx2x_phy phy_8073 = {
SUPPORTED_Asym_Pause),
.media_type = ETH_PHY_UNSPECIFIED,
.ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_8073_config_init,
@@ -6348,6 +7158,43 @@ static struct bnx2x_phy phy_84823 = {
.phy_specific_func = (phy_specific_func_t)NULL
};
+static struct bnx2x_phy phy_84833 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
+ .addr = 0xff,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
/*****************************************************************/
/* */
/* Populate the phy according. Main function: bnx2x_populate_phy */
@@ -6361,7 +7208,7 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
/* Get the 4 lanes xgxs config rx and tx */
u32 rx = 0, tx = 0, i;
for (i = 0; i < 2; i++) {
- /**
+ /*
* INT_PHY and EXT_PHY1 share the same value location in the
* shmem. When num_phys is greater than 1, this value
* applies only to EXT_PHY1
@@ -6369,19 +7216,19 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
rx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
tx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
} else {
rx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
tx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
}
phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
@@ -6501,6 +7348,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
*phy = phy_84823;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+ *phy = phy_84833;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
*phy = phy_7101;
break;
@@ -6515,21 +7365,21 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
- /**
- * The shmem address of the phy version is located on different
- * structures. In case this structure is too old, do not set
- * the address
- */
+ /*
+ * The shmem address of the phy version is located on different
+ * structures. In case this structure is too old, do not set
+ * the address
+ */
config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
dev_info.shared_hw_config.config2));
if (phy_index == EXT_PHY1) {
phy->ver_addr = shmem_base + offsetof(struct shmem_region,
port_mb[port].ext_phy_fw_version);
- /* Check specific mdc mdio settings */
- if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
- mdc_mdio_access = config2 &
- SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+ mdc_mdio_access = config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
} else {
u32 size = REG_RD(bp, shmem2_base);
@@ -6548,7 +7398,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
}
phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
- /**
+ /*
* In case mdc/mdio_access of the external phy is different than the
* mdc/mdio access of the XGXS, a HW lock must be taken in each access
* to prevent one port from interfering with another port's CL45 operations.
@@ -6583,18 +7433,20 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
/* Populate the default phy configuration for MF mode */
if (phy_index == EXT_PHY2) {
link_config = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
+ offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].link_config2));
phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
+ offsetof(struct shmem_region,
+ dev_info.
port_hw_config[params->port].speed_capability_mask2));
} else {
link_config = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
+ offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].link_config));
phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
- port_hw_config[params->port].speed_capability_mask));
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->port].speed_capability_mask));
}
DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
" 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
@@ -6741,7 +7593,7 @@ static void set_phy_vars(struct link_params *params)
else if (phy_index == EXT_PHY2)
actual_phy_idx = EXT_PHY1;
}
- params->phy[actual_phy_idx].req_flow_ctrl =
+ params->phy[actual_phy_idx].req_flow_ctrl =
params->req_flow_ctrl[link_cfg_idx];
params->phy[actual_phy_idx].req_line_speed =
@@ -6794,57 +7646,6 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
set_phy_vars(params);
DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
- if (CHIP_REV_IS_FPGA(bp)) {
-
- vars->link_up = 1;
- vars->line_speed = SPEED_10000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
- /* enable on E1.5 FPGA */
- if (CHIP_IS_E1H(bp)) {
- vars->flow_ctrl |=
- (BNX2X_FLOW_CTRL_TX |
- BNX2X_FLOW_CTRL_RX);
- vars->link_status |=
- (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
- LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
- }
-
- bnx2x_emac_enable(params, vars, 0);
- if (!(CHIP_IS_E2(bp)))
- bnx2x_pbf_update(params, vars->flow_ctrl,
- vars->line_speed);
- /* disable drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
-
- /* update shared memory */
- bnx2x_update_mng(params, vars->link_status);
-
- return 0;
-
- } else
- if (CHIP_REV_IS_EMUL(bp)) {
-
- vars->link_up = 1;
- vars->line_speed = SPEED_10000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
-
- bnx2x_bmac_enable(params, vars, 0);
-
- bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
- /* Disable drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
- + params->port*4, 0);
-
- /* update shared memory */
- bnx2x_update_mng(params, vars->link_status);
-
- return 0;
-
- } else
if (params->loopback_mode == LOOPBACK_BMAC) {
vars->link_up = 1;
@@ -6860,8 +7661,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
/* set bmac loopback */
bnx2x_bmac_enable(params, vars, 1);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
- params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
} else if (params->loopback_mode == LOOPBACK_EMAC) {
@@ -6877,8 +7677,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
/* set bmac loopback */
bnx2x_emac_enable(params, vars, 1);
bnx2x_emac_program(params, vars);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
- params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
} else if ((params->loopback_mode == LOOPBACK_XGXS) ||
(params->loopback_mode == LOOPBACK_EXT_PHY)) {
@@ -6901,8 +7700,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
bnx2x_emac_program(params, vars);
bnx2x_emac_enable(params, vars, 0);
} else
- bnx2x_bmac_enable(params, vars, 0);
-
+ bnx2x_bmac_enable(params, vars, 0);
if (params->loopback_mode == LOOPBACK_XGXS) {
/* set 10G XGXS loopback */
params->phy[INT_PHY].config_loopback(
@@ -6920,9 +7718,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
params);
}
}
-
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
- params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
bnx2x_set_led(params, vars,
LED_MODE_OPER, vars->line_speed);
@@ -6941,7 +7737,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
return 0;
}
u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
- u8 reset_ext_phy)
+ u8 reset_ext_phy)
{
struct bnx2x *bp = params->bp;
u8 phy_index, port = params->port, clear_latch_ind = 0;
@@ -6950,10 +7746,10 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
vars->link_status = 0;
bnx2x_update_mng(params, vars->link_status);
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
- (NIG_MASK_XGXS0_LINK_STATUS |
- NIG_MASK_XGXS0_LINK10G |
- NIG_MASK_SERDES0_LINK_STATUS |
- NIG_MASK_MI_INT));
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
/* activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
@@ -7021,10 +7817,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
struct bnx2x_phy phy[PORT_MAX];
struct bnx2x_phy *phy_blk[PORT_MAX];
u16 val;
- s8 port;
+ s8 port = 0;
s8 port_of_path = 0;
-
- bnx2x_ext_phy_hw_reset(bp, 0);
+ u32 swap_val, swap_override;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ port ^= (swap_val && swap_override);
+ bnx2x_ext_phy_hw_reset(bp, port);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u32 shmem_base, shmem2_base;
@@ -7049,21 +7848,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
port_of_path*4,
- (NIG_MASK_XGXS0_LINK_STATUS |
- NIG_MASK_XGXS0_LINK10G |
- NIG_MASK_SERDES0_LINK_STATUS |
- NIG_MASK_MI_INT));
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
/* Need to take the phy out of low power mode in order
to write to its registers */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
/* Reset the phy */
bnx2x_cl45_write(bp, &phy[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL,
+ 1<<15);
}
/* Add delay of 150ms after reset */
@@ -7079,7 +7879,6 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
- u16 fw_ver1;
if (CHIP_IS_E2(bp))
port_of_path = 0;
else
@@ -7087,34 +7886,26 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
phy_blk[port]->addr);
- bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
- port_of_path);
-
- bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &fw_ver1);
- if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
- DP(NETIF_MSG_LINK,
- "bnx2x_8073_common_init_phy port %x:"
- "Download failed. fw version = 0x%x\n",
- port, fw_ver1);
+ if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path))
return -EINVAL;
- }
/* Only set bit 10 = 1 (Tx power down) */
bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
/* Phase1 of TX_POWER_DOWN reset */
bnx2x_cl45_write(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN,
- (val | 1<<10));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN,
+ (val | 1<<10));
}
- /* Toggle Transmitter: Power down and then up with 600ms
- delay between */
+ /*
+ * Toggle Transmitter: Power down and then up with a 600ms delay
+ * in between
+ */
msleep(600);
/* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
@@ -7122,25 +7913,25 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* Phase2 of POWER_DOWN_RESET */
/* Release bit 10 (Release Tx power down) */
bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
bnx2x_cl45_write(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
msleep(15);
/* Read modify write the SPI-ROM version select register */
bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, &val);
bnx2x_cl45_write(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
/* set GPIO2 back to LOW */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
return 0;
}
@@ -7187,32 +7978,90 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
/* Set fault module detected LED on */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- port);
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
}
return 0;
}
+static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
+ u8 *io_gpio, u8 *io_port)
+{
+
+ u32 phy_gpio_reset = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[PORT_0].default_cfg));
+ switch (phy_gpio_reset) {
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+ *io_gpio = 0;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+ *io_gpio = 1;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+ *io_gpio = 2;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+ *io_gpio = 3;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+ *io_gpio = 0;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+ *io_gpio = 1;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+ *io_gpio = 2;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+ *io_gpio = 3;
+ *io_port = 1;
+ break;
+ default:
+ /* Don't override the io_gpio and io_port */
+ break;
+ }
+}
static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 chip_id)
{
- s8 port;
+ s8 port, reset_gpio;
u32 swap_val, swap_override;
struct bnx2x_phy phy[PORT_MAX];
struct bnx2x_phy *phy_blk[PORT_MAX];
s8 port_of_path;
- swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
- swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ reset_gpio = MISC_REGISTERS_GPIO_1;
port = 1;
- bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override));
+ /*
+ * Retrieve the reset gpio/port which controls the reset.
+ * Default is GPIO1, PORT1
+ */
+ bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
+ (u8 *)&reset_gpio, (u8 *)&port);
/* Calculate the port based on port swap */
port ^= (swap_val && swap_override);
+ /* Initiate PHY reset*/
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+ msleep(1);
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+
msleep(5);
/* PART1 - Reset both phys */
@@ -7248,9 +8097,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
/* Reset the phy */
bnx2x_cl45_write(bp, &phy[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
}
/* Add delay of 150ms after reset */
@@ -7264,27 +8111,17 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
}
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
- u16 fw_ver1;
- if (CHIP_IS_E2(bp))
+ if (CHIP_IS_E2(bp))
port_of_path = 0;
else
port_of_path = port;
DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
phy_blk[port]->addr);
- bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
- port_of_path);
- bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &fw_ver1);
- if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
- DP(NETIF_MSG_LINK,
- "bnx2x_8727_common_init_phy port %x:"
- "Download failed. fw version = 0x%x\n",
- port, fw_ver1);
+ if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+ port_of_path))
return -EINVAL;
- }
- }
+ }
return 0;
}
@@ -7309,8 +8146,10 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- /* GPIO1 affects both ports, so there's need to pull
- it for single port alone */
+ /*
+ * GPIO1 affects both ports, so there's need to pull
+ * it for single port alone
+ */
rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
shmem2_base_path,
phy_index, chip_id);
@@ -7320,11 +8159,15 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
default:
DP(NETIF_MSG_LINK,
- "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
- ext_phy_type);
+ "ext_phy 0x%x common init not required\n",
+ ext_phy_type);
break;
}
+ if (rc != 0)
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ 0);
return rc;
}
@@ -7332,12 +8175,20 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u32 chip_id)
{
u8 rc = 0;
+ u32 phy_ver;
u8 phy_index;
u32 ext_phy_type, ext_phy_config;
DP(NETIF_MSG_LINK, "Begin common phy init\n");
- if (CHIP_REV_IS_EMUL(bp))
+ /* Check if common init was already done */
+ phy_ver = REG_RD(bp, shmem_base_path[0] +
+ offsetof(struct shmem_region,
+ port_mb[PORT_0].ext_phy_fw_version));
+ if (phy_ver) {
+ DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
+ phy_ver);
return 0;
+ }
/* Read the ext_phy_type for arbitrary port(0) */
for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 171abf8097ee..92f36b6950dc 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2010 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -33,7 +33,7 @@
#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
-#define SPEED_AUTO_NEG 0
+#define SPEED_AUTO_NEG 0
#define SPEED_12000 12000
#define SPEED_12500 12500
#define SPEED_13000 13000
@@ -44,8 +44,8 @@
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
#define SFP_EEPROM_VENDOR_OUI_SIZE 3
-#define SFP_EEPROM_PART_NO_ADDR 0x28
-#define SFP_EEPROM_PART_NO_SIZE 16
+#define SFP_EEPROM_PART_NO_ADDR 0x28
+#define SFP_EEPROM_PART_NO_SIZE 16
#define PWR_FLT_ERR_MSG_LEN 250
#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -62,9 +62,25 @@
#define SINGLE_MEDIA(params) (params->num_phys == 2)
/* Dual Media board contains two external phy with different media */
#define DUAL_MEDIA(params) (params->num_phys == 3)
-#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_MDIO_CTRL_OFFSET 16
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
+
+#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE 170
+#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE 0
+
+#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE 250
+#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE 0
+
+#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE 10
+#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE 90
+
+#define PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE 50
+#define PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE 250
+
+#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
+#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
+
/***********************************************************/
/* Structs */
/***********************************************************/
@@ -185,12 +201,14 @@ struct link_params {
/* Default / User Configuration */
u8 loopback_mode;
-#define LOOPBACK_NONE 0
-#define LOOPBACK_EMAC 1
-#define LOOPBACK_BMAC 2
+#define LOOPBACK_NONE 0
+#define LOOPBACK_EMAC 1
+#define LOOPBACK_BMAC 2
#define LOOPBACK_XGXS 3
#define LOOPBACK_EXT_PHY 4
-#define LOOPBACK_EXT 5
+#define LOOPBACK_EXT 5
+#define LOOPBACK_UMAC 6
+#define LOOPBACK_XMAC 7
/* Device parameters */
u8 mac_addr[6];
@@ -214,9 +232,11 @@ struct link_params {
/* Phy register parameter */
u32 chip_id;
+ /* features */
u32 feature_config_flags;
-#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
-#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
+#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
+#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
/* Will be populated during common init */
struct bnx2x_phy phy[MAX_PHYS];
@@ -317,6 +337,11 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
/* Reset the external of SFX7101 */
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
+/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf);
+
void bnx2x_hw_reset_phy(struct link_params *params);
/* Checks if HW lock is required for this phy/board type */
@@ -332,4 +357,43 @@ u8 bnx2x_phy_probe(struct link_params *params);
u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base, u8 port);
+/* PFC port configuration params */
+struct bnx2x_nig_brb_pfc_port_params {
+ /* NIG */
+ u32 pause_enable;
+ u32 llfc_out_en;
+ u32 llfc_enable;
+ u32 pkt_priority_to_cos;
+ u32 rx_cos0_priority_mask;
+ u32 rx_cos1_priority_mask;
+ u32 llfc_high_priority_classes;
+ u32 llfc_low_priority_classes;
+ /* BRB */
+ u32 cos0_pauseable;
+ u32 cos1_pauseable;
+};
+
+/**
+ * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
+ * when link is already up
+ */
+void bnx2x_update_pfc(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params);
+
+
+/* Used to configure the ETS to disable */
+void bnx2x_ets_disabled(struct link_params *params);
+
+/* Used to configure the ETS to BW limited */
+void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
+ const u32 cos1_bw);
+
+/* Used to configure the ETS to strict */
+u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
+
+/* Read PFC statistics */
+void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
+ u32 pfc_frames_sent[2],
+ u32 pfc_frames_received[2]);
#endif /* BNX2X_LINK_H */
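The header additions above only declare the PFC knobs; as a hedged usage sketch (not part of the patch, and with purely illustrative field values), a PFC-aware caller could fill struct bnx2x_nig_brb_pfc_port_params and hand it to bnx2x_update_pfc() once the link is up, for example steering priority 3 onto the pauseable CoS1:

static void example_enable_pfc(struct link_params *params, struct link_vars *vars)
{
	/* Illustrative values only: priority 3 goes to CoS1 and CoS1 is
	 * marked pauseable; everything else stays on the lossy CoS0.
	 */
	struct bnx2x_nig_brb_pfc_port_params pfc = {
		.pause_enable			= 0,
		.llfc_out_en			= 0,
		.llfc_enable			= 0,
		.pkt_priority_to_cos		= 0,
		.rx_cos0_priority_mask		= 0xff & ~(1 << 3),
		.rx_cos1_priority_mask		= (1 << 3),
		.llfc_high_priority_classes	= (1 << 3),
		.llfc_low_priority_classes	= 0xff & ~(1 << 3),
		.cos0_pauseable			= 0,
		.cos1_pauseable			= 1,
	};

	bnx2x_update_pfc(params, vars, &pfc);
}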
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 9709b8569666..32e64cc85d2c 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -55,6 +55,7 @@
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
+#include "bnx2x_dcb.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
@@ -121,6 +122,10 @@ MODULE_PARM_DESC(debug, " Default debug msglevel");
static struct workqueue_struct *bnx2x_wq;
+#ifdef BCM_CNIC
+static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
+#endif
+
enum bnx2x_board_type {
BCM57710 = 0,
BCM57711 = 1,
@@ -140,13 +145,6 @@ static struct {
{ "Broadcom NetXtreme II BCM57712E XGb" }
};
-#ifndef PCI_DEVICE_ID_NX2_57712
-#define PCI_DEVICE_ID_NX2_57712 0x1662
-#endif
-#ifndef PCI_DEVICE_ID_NX2_57712E
-#define PCI_DEVICE_ID_NX2_57712E 0x1663
-#endif
-
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
@@ -581,7 +579,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
/* lock the dmae channel */
- mutex_lock(&bp->dmae_mutex);
+ spin_lock_bh(&bp->dmae_lock);
/* reset completion */
*wb_comp = 0;
@@ -612,7 +610,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
unlock:
- mutex_unlock(&bp->dmae_mutex);
+ spin_unlock_bh(&bp->dmae_lock);
return rc;
}
@@ -921,7 +919,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
sp_sb_data.p_func.vf_valid);
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
int loop;
struct hc_status_block_data_e2 sb_data_e2;
@@ -961,6 +959,10 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* host sb data */
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp))
+ continue;
+#endif
BNX2X_ERR(" run indexes (");
for (j = 0; j < HC_SB_MAX_SM; j++)
pr_cont("0x%x%s",
@@ -1029,7 +1031,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
#ifdef BNX2X_STOP_ON_ERROR
/* Rings */
/* Rx */
- for_each_queue(bp, i) {
+ for_each_rx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -1063,7 +1065,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
}
/* Tx */
- for_each_queue(bp, i) {
+ for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
@@ -1298,7 +1300,7 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
#ifdef BCM_CNIC
offset++;
#endif
- for_each_queue(bp, i)
+ for_each_eth_queue(bp, i)
synchronize_irq(bp->msix_table[i + offset].vector);
} else
synchronize_irq(bp->pdev->irq);
@@ -1388,7 +1390,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
}
smp_mb__before_atomic_inc();
- atomic_inc(&bp->spq_left);
+ atomic_inc(&bp->cq_spq_left);
/* push the change in fp->state and towards the memory */
smp_wmb();
@@ -1420,7 +1422,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
return IRQ_HANDLED;
#endif
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
@@ -1965,13 +1967,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
vn_max_rate = 0;
} else {
+ u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
- /* If min rate is zero - set it to 1 */
+ /* If fairness is enabled (not all min rates are zeroes) and
+ if current min rate is zero - set it to 1.
+ This is a requirement of the algorithm. */
if (bp->vn_weight_sum && (vn_min_rate == 0))
vn_min_rate = DEF_MIN_RATE;
- vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+
+ if (IS_MF_SI(bp))
+ /* maxCfg in percents of linkspeed */
+ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+ else
+ /* maxCfg is absolute in 100Mb units */
+ vn_max_rate = maxCfg * 100;
}
DP(NETIF_MSG_IFUP,
@@ -1997,7 +2008,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
m_fair_vn.vn_credit_delta =
max_t(u32, (vn_min_rate * (T_FAIR_COEF /
(8 * bp->vn_weight_sum))),
- (bp->cmng.fair_vars.fair_threshold * 2));
+ (bp->cmng.fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH));
DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
m_fair_vn.vn_credit_delta);
}
@@ -2026,13 +2038,28 @@ static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
static void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
- int vn;
+ int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
if (BP_NOMCP(bp))
return; /* what should be the default value in this case */
+ /* For 2 port configuration the absolute function number formula
+ * is:
+ * abs_func = 2 * vn + BP_PORT + BP_PATH
+ *
+ * and there are 4 functions per port
+ *
+ * For 4 port configuration it is
+ * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
+ *
+ * and there are 2 functions per port
+ */
for (vn = VN_0; vn < E1HVN_MAX; vn++) {
- int /*abs*/func = 2*vn + BP_PORT(bp);
+ int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
+
+ if (func >= E1H_FUNC_MAX)
+ break;
+
bp->mf_config[vn] =
MF_CFG_RD(bp, func_mf_config[func].config);
}
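As a quick editorial sanity check of the formula in the comment above (assuming E1H_FUNC_MAX is 8): on a 4-port device n is 2, so with BP_PORT = 1 and BP_PATH = 0 the loop computes

	vn = 0:  func = 2 * (2*0 + 1) + 0 =  2
	vn = 1:  func = 2 * (2*1 + 1) + 0 =  6
	vn = 2:  func = 2 * (2*2 + 1) + 0 = 10  ->  break (func >= E1H_FUNC_MAX)

so only two mf_config entries are read, matching the two functions per port available in 4-port mode.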
@@ -2058,8 +2085,9 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
bnx2x_calc_vn_weight_sum(bp);
/* calculate and set min-max rate for each vn */
- for (vn = VN_0; vn < E1HVN_MAX; vn++)
- bnx2x_init_vn_minmax(bp, vn);
+ if (bp->port.pmf)
+ for (vn = VN_0; vn < E1HVN_MAX; vn++)
+ bnx2x_init_vn_minmax(bp, vn);
/* always enable rate shaping and fairness */
bp->cmng.flags.cmng_enables |=
@@ -2128,13 +2156,6 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
- /* indicate link status only if link status actually changed */
- if (prev_link_status != bp->link_vars.link_status)
- bnx2x_link_report(bp);
-
- if (IS_MF(bp))
- bnx2x_link_sync_notify(bp);
-
if (bp->link_vars.link_up && bp->link_vars.line_speed) {
int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
@@ -2146,6 +2167,13 @@ static void bnx2x_link_attn(struct bnx2x *bp)
DP(NETIF_MSG_IFUP,
"single function mode without fairness\n");
}
+
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+
+ /* indicate link status only if link status actually changed */
+ if (prev_link_status != bp->link_vars.link_status)
+ bnx2x_link_report(bp);
}
void bnx2x__link_status_update(struct bnx2x *bp)
@@ -2238,6 +2266,15 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
return rc;
}
+static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
+{
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp) && IS_MF(bp))
+ return false;
+#endif
+ return true;
+}
+
/* must be called under rtnl_lock */
static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
@@ -2248,19 +2285,30 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
u8 unmatched_unicast = 0;
+ if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
+ unmatched_unicast = 1;
+
if (filters & BNX2X_PROMISCUOUS_MODE) {
/* promiscuous - accept all, drop none */
drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
+ if (IS_MF_SI(bp)) {
+ /*
+ * In SI mode, promiscuous mode is defined to accept
+ * only unmatched packets
+ */
+ unmatched_unicast = 1;
+ accp_all_ucast = 0;
+ }
}
if (filters & BNX2X_ACCEPT_UNICAST) {
/* accept matched ucast */
drop_all_ucast = 0;
}
- if (filters & BNX2X_ACCEPT_MULTICAST) {
+ if (filters & BNX2X_ACCEPT_MULTICAST)
/* accept matched mcast */
drop_all_mcast = 0;
- }
+
if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
/* accept all mcast */
drop_all_ucast = 0;
@@ -2372,7 +2420,7 @@ static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
/* calculate queue flags */
flags |= QUEUE_FLG_CACHE_ALIGN;
flags |= QUEUE_FLG_HC;
- flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
+ flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
flags |= QUEUE_FLG_VLAN;
DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -2380,7 +2428,8 @@ static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
if (!fp->disable_tpa)
flags |= QUEUE_FLG_TPA;
- flags |= QUEUE_FLG_STATS;
+ flags = stat_counter_valid(bp, fp) ?
+ (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
return flags;
}
@@ -2428,8 +2477,14 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
rxq_init->sge_map = fp->rx_sge_mapping;
rxq_init->rcq_map = fp->rx_comp_mapping;
rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
- rxq_init->mtu = bp->dev->mtu;
- rxq_init->buf_sz = bp->rx_buf_size;
+
+ /* Always use mini-jumbo MTU for FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+ else
+ rxq_init->mtu = bp->dev->mtu;
+
+ rxq_init->buf_sz = fp->rx_buf_size;
rxq_init->cl_qzone_id = fp->cl_qzone_id;
rxq_init->cl_id = fp->cl_id;
rxq_init->spcl_id = fp->cl_id;
@@ -2440,7 +2495,10 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
rxq_init->fw_sb_id = fp->fw_sb_id;
- rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
+ if (IS_FCOE_FP(fp))
+ rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
+ else
+ rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
rxq_init->cid = HW_CID(bp, fp->cid);
@@ -2460,6 +2518,12 @@ static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
txq_init->fw_sb_id = fp->fw_sb_id;
+
+ if (IS_FCOE_FP(fp)) {
+ txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
+ txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
+ }
+
txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
}
@@ -2573,6 +2637,26 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
*/
}
+/* called due to MCP event (on pmf):
+ * reread new bandwidth configuration
+ * configure FW
+ * notify other functions about the change
+ */
+static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
+{
+ if (bp->link_vars.link_up) {
+ bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
+ bnx2x_link_sync_notify(bp);
+ }
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+}
+
+static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
+{
+ bnx2x_config_mf_bw(bp);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
+}
+
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -2598,10 +2682,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
}
if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
-
- bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
- bnx2x_link_sync_notify(bp);
- storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ bnx2x_config_mf_bw(bp);
dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
}
@@ -2655,11 +2736,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
spin_lock_bh(&bp->spq_lock);
- if (!atomic_read(&bp->spq_left)) {
- BNX2X_ERR("BUG! SPQ ring full!\n");
- spin_unlock_bh(&bp->spq_lock);
- bnx2x_panic();
- return -EBUSY;
+ if (common) {
+ if (!atomic_read(&bp->eq_spq_left)) {
+ BNX2X_ERR("BUG! EQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
+ }
+ } else if (!atomic_read(&bp->cq_spq_left)) {
+ BNX2X_ERR("BUG! SPQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
}
spe = bnx2x_sp_get_next(bp);
@@ -2690,20 +2778,26 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
/* stats ramrod has its own slot on the spq */
- if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
+ if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
/* It's ok if the actual decrement is issued towards the memory
* somewhere between the spin_lock and spin_unlock. Thus no
* more explicit memory barrier is needed.
*/
- atomic_dec(&bp->spq_left);
+ if (common)
+ atomic_dec(&bp->eq_spq_left);
+ else
+ atomic_dec(&bp->cq_spq_left);
+ }
+
DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
"SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
- "type(0x%x) left %x\n",
+ "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
(u32)(U64_LO(bp->spq_mapping) +
(void *)bp->spq_prod_bd - (void *)bp->spq), command,
- HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
+ HW_CID(bp, cid), data_hi, data_lo, type,
+ atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
bnx2x_sp_prod_update(bp);
spin_unlock_bh(&bp->spq_lock);
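Put briefly (editorial restatement, not part of the patch): slow-path ramrods now draw from two independent credit pools, eq_spq_left for common requests completed on the event queue and cq_spq_left for per-client requests completed on the fastpath CQ, so one type can no longer exhaust the other's slots; the credits are returned in bnx2x_eq_int() and bnx2x_sp_event() respectively. A hypothetical admission helper mirroring the check above might read:

static bool bnx2x_sp_has_credit(struct bnx2x *bp, bool common)
{
	/* Hypothetical helper, not in the driver: same per-pool test that
	 * bnx2x_sp_post() performs under bp->spq_lock.
	 */
	if (common)
		return atomic_read(&bp->eq_spq_left) > 0;
	return atomic_read(&bp->cq_spq_left) > 0;
}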
@@ -3022,10 +3116,20 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_DCC_EVENT_MASK)
bnx2x_dcc_event(bp,
(val & DRV_STATUS_DCC_EVENT_MASK));
+
+ if (val & DRV_STATUS_SET_MF_BW)
+ bnx2x_set_mf_bw(bp);
+
bnx2x__link_status_update(bp);
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
+ if (bp->port.pmf &&
+ (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
+ bp->dcbx_enabled > 0)
+ /* start dcbx state machine */
+ bnx2x_dcbx_set_params(bp,
+ BNX2X_DCBX_STATE_NEG_RECEIVED);
} else if (attn & BNX2X_MC_ASSERT_BITS) {
BNX2X_ERR("MC assert!\n");
@@ -3066,7 +3170,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
-#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
/*
* should be run under rtnl lock
@@ -3441,7 +3544,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
try to handle this event */
bnx2x_acquire_alr(bp);
- if (bnx2x_chk_parity_attn(bp)) {
+ if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
bp->recovery_state = BNX2X_RECOVERY_INIT;
bnx2x_set_reset_in_progress(bp);
schedule_delayed_work(&bp->reset_task, 0);
@@ -3606,8 +3709,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
sw_cons = bp->eq_cons;
sw_prod = bp->eq_prod;
- DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
- hw_cons, sw_cons, atomic_read(&bp->spq_left));
+ DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
+ hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
for (; sw_cons != hw_cons;
sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
@@ -3637,11 +3740,23 @@ static void bnx2x_eq_int(struct bnx2x *bp)
#ifdef BCM_CNIC
if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
goto next_spqe;
+ if (cid == BNX2X_FCOE_ETH_CID)
+ bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
+ else
#endif
- bnx2x_fp(bp, cid, state) =
+ bnx2x_fp(bp, cid, state) =
BNX2X_FP_STATE_CLOSED;
goto next_spqe;
+
+ case EVENT_RING_OPCODE_STOP_TRAFFIC:
+ DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
+ goto next_spqe;
+ case EVENT_RING_OPCODE_START_TRAFFIC:
+ DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
+ goto next_spqe;
}
switch (opcode | bp->state) {
@@ -3660,13 +3775,15 @@ static void bnx2x_eq_int(struct bnx2x *bp)
case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
- bp->set_mac_pending = 0;
+ if (elem->message.data.set_mac_event.echo)
+ bp->set_mac_pending = 0;
break;
case (EVENT_RING_OPCODE_SET_MAC |
BNX2X_STATE_CLOSING_WAIT4_HALT):
DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
- bp->set_mac_pending = 0;
+ if (elem->message.data.set_mac_event.echo)
+ bp->set_mac_pending = 0;
break;
default:
/* unknown event log error and continue */
@@ -3678,7 +3795,7 @@ next_spqe:
} /* for */
smp_mb__before_atomic_inc();
- atomic_add(spqe_cnt, &bp->spq_left);
+ atomic_add(spqe_cnt, &bp->eq_spq_left);
bp->eq_cons = sw_cons;
bp->eq_prod = sw_prod;
@@ -3714,7 +3831,13 @@ static void bnx2x_sp_task(struct work_struct *work)
/* SP events: STAT_QUERY and others */
if (status & BNX2X_DEF_SB_IDX) {
+#ifdef BCM_CNIC
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ if ((!NO_FCOE(bp)) &&
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
+ napi_schedule(&bnx2x_fcoe(bp, napi));
+#endif
/* Handle EQ completions */
bnx2x_eq_int(bp);
@@ -4097,15 +4220,15 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i)
+ for_each_eth_queue(bp, i)
bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
- bp->rx_ticks, bp->tx_ticks);
+ bp->tx_ticks, bp->rx_ticks);
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
spin_lock_init(&bp->spq_lock);
- atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
+ atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
bp->spq_prod_idx = 0;
bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@@ -4130,9 +4253,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
bp->eq_cons = 0;
bp->eq_prod = NUM_EQ_DESC;
bp->eq_cons_sb = BNX2X_EQ_INDEX;
+ /* we want a warning message before it gets rough... */
+ atomic_set(&bp->eq_spq_left,
+ min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}
-static void bnx2x_init_ind_table(struct bnx2x *bp)
+void bnx2x_push_indir_table(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
int i;
@@ -4140,18 +4266,28 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
return;
- DP(NETIF_MSG_IFUP,
- "Initializing indirection table multi_mode %d\n", bp->multi_mode);
for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
REG_WR8(bp, BAR_TSTRORM_INTMEM +
TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
- bp->fp->cl_id + (i % bp->num_queues));
+ bp->fp->cl_id + bp->rx_indir_table[i]);
+}
+
+static void bnx2x_init_ind_table(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+ bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
+
+ bnx2x_push_indir_table(bp);
}
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
int mode = bp->rx_mode;
+ int port = BP_PORT(bp);
u16 cl_id;
+ u32 def_q_filters = 0;
/* All but management unicast packets should pass to the host as well */
u32 llh_mask =
@@ -4162,30 +4298,56 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
switch (mode) {
case BNX2X_RX_MODE_NONE: /* no Rx */
- cl_id = BP_L_ID(bp);
- bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
+ def_q_filters = BNX2X_ACCEPT_NONE;
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
+ cl_id = bnx2x_fcoe(bp, cl_id);
+ bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
+ }
+#endif
break;
case BNX2X_RX_MODE_NORMAL:
- cl_id = BP_L_ID(bp);
- bnx2x_rxq_set_mac_filters(bp, cl_id,
- BNX2X_ACCEPT_UNICAST |
- BNX2X_ACCEPT_BROADCAST |
- BNX2X_ACCEPT_MULTICAST);
+ def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
+ BNX2X_ACCEPT_MULTICAST;
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
+ cl_id = bnx2x_fcoe(bp, cl_id);
+ bnx2x_rxq_set_mac_filters(bp, cl_id,
+ BNX2X_ACCEPT_UNICAST |
+ BNX2X_ACCEPT_MULTICAST);
+ }
+#endif
break;
case BNX2X_RX_MODE_ALLMULTI:
- cl_id = BP_L_ID(bp);
- bnx2x_rxq_set_mac_filters(bp, cl_id,
- BNX2X_ACCEPT_UNICAST |
- BNX2X_ACCEPT_BROADCAST |
- BNX2X_ACCEPT_ALL_MULTICAST);
+ def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
+ BNX2X_ACCEPT_ALL_MULTICAST;
+#ifdef BCM_CNIC
+ /*
+ * Prevent duplication of multicast packets by configuring FCoE
+ * L2 Client to receive only matched unicast frames.
+ */
+ if (!NO_FCOE(bp)) {
+ cl_id = bnx2x_fcoe(bp, cl_id);
+ bnx2x_rxq_set_mac_filters(bp, cl_id,
+ BNX2X_ACCEPT_UNICAST);
+ }
+#endif
break;
case BNX2X_RX_MODE_PROMISC:
- cl_id = BP_L_ID(bp);
- bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
-
+ def_q_filters |= BNX2X_PROMISCUOUS_MODE;
+#ifdef BCM_CNIC
+ /*
+ * Prevent packet duplication by configuring DROP_ALL for FCoE
+ * L2 Client.
+ */
+ if (!NO_FCOE(bp)) {
+ cl_id = bnx2x_fcoe(bp, cl_id);
+ bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
+ }
+#endif
/* pass management unicast packets as well */
llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
break;
@@ -4195,20 +4357,24 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
break;
}
+ cl_id = BP_L_ID(bp);
+ bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
+
REG_WR(bp,
- BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
- NIG_REG_LLH0_BRB1_DRV_MASK,
- llh_mask);
+ (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
+ NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
DP(NETIF_MSG_IFUP, "rx mode %d\n"
"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
- "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
+ "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
+ "unmatched_ucast 0x%x\n", mode,
bp->mac_filters.ucast_drop_all,
bp->mac_filters.mcast_drop_all,
bp->mac_filters.bcast_drop_all,
bp->mac_filters.ucast_accept_all,
bp->mac_filters.mcast_accept_all,
- bp->mac_filters.bcast_accept_all
+ bp->mac_filters.bcast_accept_all,
+ bp->mac_filters.unmatched_unicast
);
storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
@@ -4232,6 +4398,15 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
bp->mf_mode);
}
+ if (IS_MF_SI(bp))
+ /*
+ * In switch independent mode, the TSTORM needs to accept
+ * packets that failed classification, since approximate match
+ * mac addresses aren't written to NIG LLH
+ */
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
+
/* Zero this manually as its initialization is
currently missing in the initTool */
for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -4247,6 +4422,7 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
/* port */
+ bnx2x_dcb_init_intmem_pfc(bp);
}
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
@@ -4308,9 +4484,11 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
int i;
- for_each_queue(bp, i)
+ for_each_eth_queue(bp, i)
bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC
+ if (!NO_FCOE(bp))
+ bnx2x_init_fcoe_fp(bp);
bnx2x_init_sb(bp, bp->cnic_sb_mapping,
BNX2X_VF_ID_INVALID, false,
@@ -4619,7 +4797,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
return 0; /* OK */
}
-static void enable_blocks_attention(struct bnx2x *bp)
+static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
if (CHIP_IS_E2(bp))
@@ -4673,53 +4851,9 @@ static void enable_blocks_attention(struct bnx2x *bp)
REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
- REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
-}
-
-static const struct {
- u32 addr;
- u32 mask;
-} bnx2x_parity_mask[] = {
- {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
- {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
- {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
- {HC_REG_HC_PRTY_MASK, 0x7},
- {MISC_REG_MISC_PRTY_MASK, 0x1},
- {QM_REG_QM_PRTY_MASK, 0x0},
- {DORQ_REG_DORQ_PRTY_MASK, 0x0},
- {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
- {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
- {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
- {CDU_REG_CDU_PRTY_MASK, 0x0},
- {CFC_REG_CFC_PRTY_MASK, 0x0},
- {DBG_REG_DBG_PRTY_MASK, 0x0},
- {DMAE_REG_DMAE_PRTY_MASK, 0x0},
- {BRB1_REG_BRB1_PRTY_MASK, 0x0},
- {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
- {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
- {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
- {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
- {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
- {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
- {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
- {USEM_REG_USEM_PRTY_MASK_0, 0x0},
- {USEM_REG_USEM_PRTY_MASK_1, 0x0},
- {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
- {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
- {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
- {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
-};
-
-static void enable_blocks_parity(struct bnx2x *bp)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
- REG_WR(bp, bnx2x_parity_mask[i].addr,
- bnx2x_parity_mask[i].mask);
+ REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
}
-
static void bnx2x_reset_common(struct bnx2x *bp)
{
/* reset_common */
@@ -4947,7 +5081,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
memset(&ilt, 0, sizeof(struct bnx2x_ilt));
- /* initalize dummy TM client */
+ /* initialize dummy TM client */
ilt_cli.start = 0;
ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
ilt_cli.client_num = ILT_CLIENT_TM;
@@ -5048,12 +5182,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
if (!CHIP_IS_E1(bp))
- REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
+ REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
if (CHIP_IS_E2(bp)) {
/* Bit-map indicating which L2 hdrs may appear after the
basic Ethernet header */
- int has_ovlan = IS_MF(bp);
+ int has_ovlan = IS_MF_SD(bp);
REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
}
@@ -5087,7 +5221,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
if (CHIP_IS_E2(bp)) {
- int has_ovlan = IS_MF(bp);
+ int has_ovlan = IS_MF_SD(bp);
REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
}
@@ -5164,12 +5298,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
if (!CHIP_IS_E1(bp)) {
REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
- REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
+ REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
}
if (CHIP_IS_E2(bp)) {
/* Bit-map indicating which L2 hdrs may appear after the
basic Ethernet header */
- REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
+ REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
}
if (CHIP_REV_IS_SLOW(bp))
@@ -5206,18 +5340,14 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
}
}
- bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
- bp->common.shmem_base,
- bp->common.shmem2_base);
-
bnx2x_setup_fan_failure_detection(bp);
/* clear PXP2 attentions */
REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
- enable_blocks_attention(bp);
- if (CHIP_PARITY_SUPPORTED(bp))
- enable_blocks_parity(bp);
+ bnx2x_enable_blocks_attention(bp);
+ if (CHIP_PARITY_ENABLED(bp))
+ bnx2x_enable_blocks_parity(bp);
if (!BP_NOMCP(bp)) {
/* In E2 2-PORT mode, same ext phy is used for the two paths */
@@ -5370,8 +5500,10 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
* - SF mode: bits 3-7 are masked. only bits 0-2 are in use
* - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
* bits 4-7 are used for "per vn group attention" */
- REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
- (IS_MF(bp) ? 0xF7 : 0x7));
+ val = IS_MF(bp) ? 0xF7 : 0x7;
+ /* Enable DCBX attention for all but E1 */
+ val |= CHIP_IS_E1(bp) ? 0 : 0x10;
+ REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
@@ -5386,7 +5518,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
if (!CHIP_IS_E1(bp)) {
/* 0x2 disable mf_ov, 0x1 enable */
REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
- (IS_MF(bp) ? 0x1 : 0x2));
+ (IS_MF_SD(bp) ? 0x1 : 0x2));
if (CHIP_IS_E2(bp)) {
val = 0;
@@ -5411,9 +5543,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
bnx2x_init_block(bp, MCP_BLOCK, init_stage);
bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
- bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
- bp->common.shmem_base,
- bp->common.shmem2_base);
if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
bp->common.shmem2_base, port)) {
u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -5746,7 +5875,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
BP_ABS_FUNC(bp), load_code);
bp->dmae_ready = 0;
- mutex_init(&bp->dmae_mutex);
+ spin_lock_init(&bp->dmae_lock);
rc = bnx2x_gunzip_init(bp);
if (rc)
return rc;
@@ -5816,6 +5945,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
/* fastpath */
/* Common */
for_each_queue(bp, i) {
+#ifdef BCM_CNIC
+ /* FCoE client uses default status block */
+ if (IS_FCOE_IDX(i)) {
+ union host_hc_status_block *sb =
+ &bnx2x_fp(bp, i, status_blk);
+ memset(sb, 0, sizeof(union host_hc_status_block));
+ bnx2x_fp(bp, i, status_blk_mapping) = 0;
+ } else {
+#endif
/* status blocks */
if (CHIP_IS_E2(bp))
BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
@@ -5825,9 +5963,12 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
bnx2x_fp(bp, i, status_blk_mapping),
sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
}
/* Rx */
- for_each_queue(bp, i) {
+ for_each_rx_queue(bp, i) {
/* fastpath rx rings: rx_buf rx_desc rx_comp */
BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
@@ -5847,7 +5988,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
}
/* Tx */
- for_each_queue(bp, i) {
+ for_each_tx_queue(bp, i) {
/* fastpath tx rings: tx_buf tx_desc */
BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
@@ -5886,6 +6027,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ BNX2X_FREE(bp->rx_indir_table);
+
#undef BNX2X_PCI_FREE
#undef BNX2X_KFREE
}
@@ -5931,15 +6074,20 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
bnx2x_fp(bp, i, bp) = bp;
/* status blocks */
- if (CHIP_IS_E2(bp))
- BNX2X_PCI_ALLOC(sb->e2_sb,
- &bnx2x_fp(bp, i, status_blk_mapping),
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_ALLOC(sb->e1x_sb,
- &bnx2x_fp(bp, i, status_blk_mapping),
- sizeof(struct host_hc_status_block_e1x));
-
+#ifdef BCM_CNIC
+ if (!IS_FCOE_IDX(i)) {
+#endif
+ if (CHIP_IS_E2(bp))
+ BNX2X_PCI_ALLOC(sb->e2_sb,
+ &bnx2x_fp(bp, i, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_ALLOC(sb->e1x_sb,
+ &bnx2x_fp(bp, i, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
set_sb_shortcuts(bp, i);
}
/* Rx */
@@ -6011,6 +6159,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
/* EQ */
BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
+ BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
+ TSTORM_INDIRECTION_TABLE_SIZE);
return 0;
alloc_mem_err:
@@ -6055,7 +6206,7 @@ static int bnx2x_func_stop(struct bnx2x *bp)
* @param cam_offset offset in a CAM to use
* @param is_bcast is the set MAC a broadcast address (for E1 only)
*/
-static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
+static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
u32 cl_bit_vec, u8 cam_offset,
u8 is_bcast)
{
@@ -6064,12 +6215,14 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
int ramrod_flags = WAIT_RAMROD_COMMON;
bp->set_mac_pending = 1;
- smp_wmb();
config->hdr.length = 1;
config->hdr.offset = cam_offset;
config->hdr.client_id = 0xff;
- config->hdr.reserved1 = 0;
+ /* Mark this as a single MAC configuration ramrod (as opposed to
+ * a UC/MC list configuration).
+ */
+ config->hdr.echo = 1;
/* primary MAC */
config->config_table[0].msb_mac_addr =
@@ -6101,6 +6254,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
config->config_table[0].middle_mac_addr,
config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
+ mb();
+
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mac_config)),
U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
@@ -6165,9 +6320,68 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
if (CHIP_IS_E1H(bp))
return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
else if (CHIP_MODE_IS_4_PORT(bp))
- return BP_FUNC(bp) * 32 + rel_offset;
+ return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
else
- return BP_VN(bp) * 32 + rel_offset;
+ return E2_FUNC_MAX * rel_offset + BP_VN(bp);
+}
+
+/**
+ * LLH CAM line allocations: currently only the iSCSI and ETH MACs are
+ * relevant. In addition, the current implementation is tuned for a
+ * single ETH MAC.
+ */
+enum {
+ LLH_CAM_ISCSI_ETH_LINE = 0,
+ LLH_CAM_ETH_LINE,
+ LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
+};
+
+static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+ int set,
+ unsigned char *dev_addr,
+ int index)
+{
+ u32 wb_data[2];
+ u32 mem_offset, ena_offset, mem_index;
+ /**
+ * indexes mapping:
+ * 0..7 - goes to MEM
+ * 8..15 - goes to MEM2
+ */
+
+ if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
+ return;
+
+ /* calculate memory start offset according to the mapping
+ * and index in the memory */
+ if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
+ mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
+ NIG_REG_LLH0_FUNC_MEM;
+ ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
+ NIG_REG_LLH0_FUNC_MEM_ENABLE;
+ mem_index = index;
+ } else {
+ mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
+ NIG_REG_P0_LLH_FUNC_MEM2;
+ ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
+ NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
+ mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
+ }
+
+ if (set) {
+ /* LLH_FUNC_MEM is a u64 WB register */
+ mem_offset += 8*mem_index;
+
+ wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
+ (dev_addr[4] << 8) | dev_addr[5]);
+ wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
+
+ REG_WR_DMAE(bp, mem_offset, wb_data, 2);
+ }
+
+ /* enable/disable the entry */
+ REG_WR(bp, ena_offset + 4*mem_index, set);
+
}
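
A hedged, standalone illustration (editor's note, not part of the patch) of how the write-buffer pair above packs a MAC address: the low word carries bytes 2..5 and the high word carries bytes 0..1. The sample address is made up.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	/* example address only */
	uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
	uint32_t wb_data[2];

	/* mirrors the packing in bnx2x_set_mac_in_nig() above */
	wb_data[0] = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
		     ((uint32_t)mac[4] << 8) | mac[5];
	wb_data[1] = ((uint32_t)mac[0] << 8) | mac[1];

	/* prints wb_data[0]=0x18abcdef wb_data[1]=0x00000010 */
	printf("wb_data[0]=0x%08" PRIx32 " wb_data[1]=0x%08" PRIx32 "\n",
	       wb_data[0], wb_data[1]);
	return 0;
}
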
void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
@@ -6179,20 +6393,47 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
(1 << bp->fp->cl_id), cam_offset , 0);
+ bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
+
if (CHIP_IS_E1(bp)) {
/* broadcast MAC */
- u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ static const u8 bcast[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
}
}
-static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
+
+static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
+{
+ return CHIP_REV_IS_SLOW(bp) ?
+ (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
+ (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
+}
+
+/* Set the MC list without waiting: waiting implies sleeping, and
+ * set_rx_mode can be invoked from a non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of a very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
{
int i = 0, old;
struct net_device *dev = bp->dev;
+ u8 offset = bnx2x_e1_cam_mc_offset(bp);
struct netdev_hw_addr *ha;
struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
+ if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
+ return -EINVAL;
+
netdev_for_each_mc_addr(ha, dev) {
/* copy mac */
config_cmd->config_table[i].msb_mac_addr =
@@ -6233,32 +6474,47 @@ static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
}
}
+ wmb();
+
config_cmd->hdr.length = i;
config_cmd->hdr.offset = offset;
config_cmd->hdr.client_id = 0xff;
- config_cmd->hdr.reserved1 = 0;
+ /* Mark that this ramrod doesn't use bp->set_mac_pending for
+ * synchronization.
+ */
+ config_cmd->hdr.echo = 0;
- bp->set_mac_pending = 1;
- smp_wmb();
+ mb();
- bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
-static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
+
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
{
int i;
struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
int ramrod_flags = WAIT_RAMROD_COMMON;
+ u8 offset = bnx2x_e1_cam_mc_offset(bp);
- bp->set_mac_pending = 1;
- smp_wmb();
-
- for (i = 0; i < config_cmd->hdr.length; i++)
+ for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
SET_FLAG(config_cmd->config_table[i].flags,
MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
T_ETH_MAC_COMMAND_INVALIDATE);
+ wmb();
+
+ config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
+ config_cmd->hdr.offset = offset;
+ config_cmd->hdr.client_id = 0xff;
+ /* We'll wait for a completion this time... */
+ config_cmd->hdr.echo = 1;
+
+ bp->set_mac_pending = 1;
+
+ mb();
+
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
@@ -6268,6 +6524,44 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
}
+/* Accept one or more multicasts */
+static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[MC_HASH_SIZE];
+ u32 crc, bit, regidx;
+ int i;
+
+ memset(mc_filter, 0, 4 * MC_HASH_SIZE);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+ bnx2x_mc_addr(ha));
+
+ crc = crc32c_le(0, bnx2x_mc_addr(ha),
+ ETH_ALEN);
+ bit = (crc >> 24) & 0xff;
+ regidx = bit >> 5;
+ bit &= 0x1f;
+ mc_filter[regidx] |= (1 << bit);
+ }
+
+ for (i = 0; i < MC_HASH_SIZE; i++)
+ REG_WR(bp, MC_HASH_OFFSET(bp, i),
+ mc_filter[i]);
+
+ return 0;
+}
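
A standalone sketch (editor's illustration, not part of the patch) of the hash reduction used in bnx2x_set_e1h_mc_list() above: the top byte of the crc32c selects one of 256 filter bits spread over eight 32-bit registers. The CRC value is invented, since crc32c_le() is a kernel helper, and the register count of 8 mirrors the driver's MC_HASH_SIZE as an assumption.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define MC_HASH_SIZE 8	/* assumed: 8 x 32-bit registers = 256 hash bits */

int main(void)
{
	uint32_t mc_filter[MC_HASH_SIZE] = { 0 };
	uint32_t crc = 0xa3000000;	/* pretend crc32c_le() returned this */
	unsigned int bit, regidx;

	/* same reduction as bnx2x_set_e1h_mc_list() */
	bit = (crc >> 24) & 0xff;	/* 0xa3 = 163 */
	regidx = bit >> 5;		/* 163 / 32 = register 5 */
	bit &= 0x1f;			/* 163 % 32 = bit 3 */
	mc_filter[regidx] |= (1u << bit);

	printf("register %u, bit %u, mask 0x%08" PRIx32 "\n",
	       regidx, bit, mc_filter[regidx]);
	return 0;
}
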
+
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < MC_HASH_SIZE; i++)
+ REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+}
+
#ifdef BCM_CNIC
/**
* Set iSCSI MAC(s) at the next entries in the CAM after the ETH
@@ -6283,12 +6577,60 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
- u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
+ u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
+ BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
+ u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
/* Send a SET_MAC ramrod */
- bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
+ bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
cam_offset, 0);
+
+ bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
+
+ return 0;
+}
+
+/**
+ * Set FCoE L2 MAC(s) at the next entries in the CAM after the
+ * ETH MAC(s). This function will wait until the ramrod
+ * completion returns.
+ *
+ * @param bp driver handle
+ * @param set set or clear the CAM entry
+ *
+ * @return 0 if success, -ENODEV if ramrod doesn't return.
+ */
+int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
+{
+ u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
+ /**
+ * CAM allocation for E1H
+ * eth unicasts: by func number
+ * iscsi: by func number
+ * fip unicast: by func number
+ * fip multicast: by func number
+ */
+ bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
+ cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
+
+ return 0;
+}
+
+int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
+{
+ u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
+
+ /**
+ * CAM allocation for E1H
+ * eth unicasts: by func number
+ * iscsi: by func number
+ * fip unicast: by func number
+ * fip multicast: by func number
+ */
+ bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
+ bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
+
return 0;
}
#endif
@@ -6306,6 +6648,8 @@ static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
data->general.statistics_counter_id = params->rxq_params.stat_id;
data->general.statistics_en_flg =
(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
+ data->general.is_fcoe_flg =
+ (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
data->general.activate_flg = activate;
data->general.sp_client_id = params->rxq_params.spcl_id;
@@ -6374,7 +6718,9 @@ static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
data->fc.safc_group_num = params->txq_params.cos;
data->fc.safc_group_en_flg =
(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
- data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
+ data->fc.traffic_type =
+ (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
+ LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
}
static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
@@ -6473,7 +6819,7 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
bnx2x_enable_msi(bp);
/* falling through... */
case INT_MODE_INTx:
- bp->num_queues = 1;
+ bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
break;
default:
@@ -6496,8 +6842,8 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
"enable MSI-X (%d), "
"set number of queues to %d\n",
bp->num_queues,
- 1);
- bp->num_queues = 1;
+ 1 + NONE_ETH_CONTEXT_USE);
+ bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
if (!(bp->flags & DISABLE_MSI_FLAG))
bnx2x_enable_msi(bp);
@@ -6618,7 +6964,9 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct bnx2x_client_init_params params = { {0} };
int rc;
- bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
+ /* reset IGU state skip FCoE L2 queue */
+ if (!IS_FCOE_FP(fp))
+ bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
IGU_INT_ENABLE, 0);
params.ramrod_params.pstate = &fp->state;
@@ -6626,6 +6974,12 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
params.ramrod_params.index = fp->index;
params.ramrod_params.cid = fp->cid;
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp))
+ params.ramrod_params.flags |= CLIENT_IS_FCOE;
+
+#endif
+
if (is_leading)
params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
@@ -6710,7 +7064,7 @@ static void bnx2x_reset_func(struct bnx2x *bp)
REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
/* FP SBs */
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
REG_WR8(bp,
BAR_CSTRORM_INTMEM +
@@ -6830,6 +7184,20 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
}
}
+#ifdef BCM_CNIC
+static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
+{
+ if (bp->flags & FCOE_MACS_SET) {
+ if (!IS_MF_SD(bp))
+ bnx2x_set_fip_eth_mac_addr(bp, 0);
+
+ bnx2x_set_all_enode_macs(bp, 0);
+
+ bp->flags &= ~FCOE_MACS_SET;
+ }
+}
+#endif
+
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
int port = BP_PORT(bp);
@@ -6837,7 +7205,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
int i, cnt, rc;
/* Wait until tx fastpath tasks complete */
- for_each_queue(bp, i) {
+ for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
cnt = 1000;
@@ -6860,30 +7228,19 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
/* Give HW time to discard old tx messages */
msleep(1);
- if (CHIP_IS_E1(bp)) {
- /* invalidate mc list,
- * wait and poll (interrupts are off)
- */
- bnx2x_invlidate_e1_mc_list(bp);
- bnx2x_set_eth_mac(bp, 0);
-
- } else {
- REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+ bnx2x_set_eth_mac(bp, 0);
- bnx2x_set_eth_mac(bp, 0);
+ bnx2x_invalidate_uc_list(bp);
- for (i = 0; i < MC_HASH_SIZE; i++)
- REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+ if (CHIP_IS_E1(bp))
+ bnx2x_invalidate_e1_mc_list(bp);
+ else {
+ bnx2x_invalidate_e1h_mc_list(bp);
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}
#ifdef BCM_CNIC
- /* Clear iSCSI L2 MAC */
- mutex_lock(&bp->cnic_mutex);
- if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
- bnx2x_set_iscsi_eth_mac_addr(bp, 0);
- bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
- }
- mutex_unlock(&bp->cnic_mutex);
+ bnx2x_del_fcoe_eth_macs(bp);
#endif
if (unload_mode == UNLOAD_NORMAL)
@@ -7736,7 +8093,7 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
bp->igu_sb_cnt = 0;
if (CHIP_INT_MODE_IS_BC(bp)) {
bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
- bp->l2_cid_count);
+ NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
FP_SB_MAX_E1x;
@@ -7767,7 +8124,8 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
}
}
}
- bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
+ bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
+ NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
if (bp->igu_sb_cnt == 0)
BNX2X_ERR("CAM configuration error\n");
}
@@ -8076,9 +8434,8 @@ static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
int port = BP_PORT(bp);
- u32 val, val2;
u32 config;
- u32 ext_phy_type, ext_phy_config;;
+ u32 ext_phy_type, ext_phy_config;
bp->link_params.bp = bp;
bp->link_params.port = port;
@@ -8136,24 +8493,150 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->mdio.prtad =
XGXS_EXT_PHY_ADDR(ext_phy_config);
- val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
- val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+ /*
+ * Check if a HW lock is required to access the MDC/MDIO bus to the PHY(s).
+ * In MF mode, it is set to cover self-test cases.
+ */
+ if (IS_MF(bp))
+ bp->port.need_hw_lock = 1;
+ else
+ bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+ bp->common.shmem_base,
+ bp->common.shmem2_base);
+}
+
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+ u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
+ u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
+
+ /* Get the number of maximum allowed iSCSI and FCoE connections */
+ bp->cnic_eth_dev.max_iscsi_conn =
+ (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+ BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+ bp->cnic_eth_dev.max_fcoe_conn =
+ (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+ BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+ BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
+ bp->cnic_eth_dev.max_iscsi_conn,
+ bp->cnic_eth_dev.max_fcoe_conn);
+
+ /* If the maximum allowed number of connections is zero -
+ * disable the feature.
+ */
+ if (!bp->cnic_eth_dev.max_iscsi_conn)
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (!bp->cnic_eth_dev.max_fcoe_conn)
+ bp->flags |= NO_FCOE_FLAG;
+}
+#endif
+
+static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+{
+ u32 val, val2;
+ int func = BP_ABS_FUNC(bp);
+ int port = BP_PORT(bp);
+#ifdef BCM_CNIC
+ u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+ u8 *fip_mac = bp->fip_mac;
+#endif
+
+ if (BP_NOMCP(bp)) {
+ BNX2X_ERROR("warning: random MAC workaround active\n");
+ random_ether_addr(bp->dev->dev_addr);
+ } else if (IS_MF(bp)) {
+ val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+ val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
+ if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+ /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE
+ * MAC is missing, the corresponding feature is disabled.
+ */
+ if (IS_MF_SI(bp)) {
+ u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+ if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ iscsi_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ iscsi_mac_addr_lower);
+ BNX2X_DEV_INFO("Read iSCSI MAC: "
+ "0x%x:0x%04x\n", val2, val);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+ /* Disable iSCSI OOO if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(iscsi_mac)) {
+ bp->flags |= NO_ISCSI_OOO_FLAG |
+ NO_ISCSI_FLAG;
+ memset(iscsi_mac, 0, ETH_ALEN);
+ }
+ } else
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_lower);
+ BNX2X_DEV_INFO("Read FCoE MAC to "
+ "0x%x:0x%04x\n", val2, val);
+ bnx2x_set_mac_buf(fip_mac, val, val2);
+
+ /* Disable FCoE if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(fip_mac)) {
+ bp->flags |= NO_FCOE_FLAG;
+ memset(bp->fip_mac, 0, ETH_ALEN);
+ }
+ } else
+ bp->flags |= NO_FCOE_FLAG;
+ }
+#endif
+ } else {
+ /* in SF read MACs from port configuration */
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+ bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ iscsi_mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ iscsi_mac_lower);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
+#endif
+ }
+
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
#ifdef BCM_CNIC
- val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
- val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
- bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+ /* Set the FCoE MAC in modes other than MF_SI */
+ if (!CHIP_IS_E1x(bp)) {
+ if (IS_MF_SD(bp))
+ memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+ else if (!IS_MF(bp))
+ memcpy(fip_mac, iscsi_mac, ETH_ALEN);
+ }
#endif
}
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
- int func = BP_ABS_FUNC(bp);
- int vn;
- u32 val, val2;
+ int /*abs*/func = BP_ABS_FUNC(bp);
+ int vn, port;
+ u32 val = 0;
int rc = 0;
bnx2x_get_common_hwinfo(bp);
@@ -8163,7 +8646,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->igu_dsb_id = DEF_SB_IGU_ID;
bp->igu_base_sb = 0;
- bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
+ bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
+ NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
} else {
bp->common.int_block = INT_BLOCK_IGU;
val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
@@ -8186,44 +8670,99 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_ov = 0;
bp->mf_mode = 0;
vn = BP_E1HVN(bp);
+ port = BP_PORT(bp);
+
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
+ DP(NETIF_MSG_PROBE,
+ "shmem2base 0x%x, size %d, mfcfg offset %d\n",
+ bp->common.shmem2_base, SHMEM2_RD(bp, size),
+ (u32)offsetof(struct shmem2_region, mf_cfg_addr));
if (SHMEM2_HAS(bp, mf_cfg_addr))
bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
else
bp->common.mf_cfg_base = bp->common.shmem_base +
offsetof(struct shmem_region, func_mb) +
E1H_FUNC_MAX * sizeof(struct drv_func_mb);
- bp->mf_config[vn] =
- MF_CFG_RD(bp, func_mf_config[func].config);
+ /*
+ * get mf configuration:
+ * 1. existence of MF configuration
+ * 2. MAC address must be legal (check only upper bytes)
+ * for Switch-Independent mode;
+ * OVLAN must be legal for Switch-Dependent mode
+ * 3. SF_MODE configures specific MF mode
+ */
+ if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+ /* get mf configuration */
+ val = SHMEM_RD(bp,
+ dev_info.shared_feature_config.config);
+ val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
+
+ switch (val) {
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
+ val = MF_CFG_RD(bp, func_mf_config[func].
+ mac_upper);
+ /* check for a legal MAC (upper bytes) */
+ if (val != 0xffff) {
+ bp->mf_mode = MULTI_FUNCTION_SI;
+ bp->mf_config[vn] = MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ } else
+ DP(NETIF_MSG_PROBE, "illegal MAC "
+ "address for SI\n");
+ break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
+ /* get OV configuration */
+ val = MF_CFG_RD(bp,
+ func_mf_config[FUNC_0].e1hov_tag);
+ val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
+
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ bp->mf_mode = MULTI_FUNCTION_SD;
+ bp->mf_config[vn] = MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ } else
+ DP(NETIF_MSG_PROBE, "illegal OV for "
+ "SD\n");
+ break;
+ default:
+ /* Unknown configuration: reset mf_config */
+ bp->mf_config[vn] = 0;
+ DP(NETIF_MSG_PROBE, "Unkown MF mode 0x%x\n",
+ val);
+ }
+ }
- val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
- FUNC_MF_CFG_E1HOV_TAG_MASK);
- if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
- bp->mf_mode = 1;
BNX2X_DEV_INFO("%s function mode\n",
IS_MF(bp) ? "multi" : "single");
- if (IS_MF(bp)) {
- val = (MF_CFG_RD(bp, func_mf_config[func].
- e1hov_tag) &
- FUNC_MF_CFG_E1HOV_TAG_MASK);
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+ FUNC_MF_CFG_E1HOV_TAG_MASK;
if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
bp->mf_ov = val;
- BNX2X_DEV_INFO("MF OV for func %d is %d "
- "(0x%04x)\n",
- func, bp->mf_ov, bp->mf_ov);
+ BNX2X_DEV_INFO("MF OV for func %d is %d"
+ " (0x%04x)\n", func,
+ bp->mf_ov, bp->mf_ov);
} else {
- BNX2X_ERROR("No valid MF OV for func %d,"
- " aborting\n", func);
+ BNX2X_ERR("No valid MF OV for func %d,"
+ " aborting\n", func);
rc = -EPERM;
}
- } else {
- if (BP_VN(bp)) {
- BNX2X_ERROR("VN %d in single function mode,"
- " aborting\n", BP_E1HVN(bp));
+ break;
+ case MULTI_FUNCTION_SI:
+ BNX2X_DEV_INFO("func %d is in MF "
+ "switch-independent mode\n", func);
+ break;
+ default:
+ if (vn) {
+ BNX2X_ERR("VN %d in single function mode,"
+ " aborting\n", vn);
rc = -EPERM;
}
+ break;
}
+
}
/* adjust igu_sb_cnt to MF for E1x */
@@ -8248,32 +8787,12 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
}
- if (IS_MF(bp)) {
- val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
- val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
- if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
- (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
- bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
- bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
- bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
- bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
- bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
- bp->dev->dev_addr[5] = (u8)(val & 0xff);
- memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
- ETH_ALEN);
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
- ETH_ALEN);
- }
-
- return rc;
- }
+ /* Get MAC addresses */
+ bnx2x_get_mac_hwinfo(bp);
- if (BP_NOMCP(bp)) {
- /* only supposed to happen on emulation/FPGA */
- BNX2X_ERROR("warning: random MAC workaround active\n");
- random_ether_addr(bp->dev->dev_addr);
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
- }
+#ifdef BCM_CNIC
+ bnx2x_get_cnic_info(bp);
+#endif
return rc;
}
@@ -8382,13 +8901,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
dev_err(&bp->pdev->dev, "MCP disabled, "
"must load devices in order!\n");
- /* Set multi queue mode */
- if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
- ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
- dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
- "requested is not MSI-X\n");
- multi_mode = ETH_RSS_MODE_DISABLED;
- }
bp->multi_mode = multi_mode;
bp->int_mode = int_mode;
@@ -8427,6 +8939,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->timer.data = (unsigned long) bp;
bp->timer.function = bnx2x_timer;
+ bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
+ bnx2x_dcbx_init_params(bp);
+
return rc;
}
@@ -8493,12 +9008,197 @@ static int bnx2x_close(struct net_device *dev)
return 0;
}
+#define E1_MAX_UC_LIST 29
+#define E1H_MAX_UC_LIST 30
+#define E2_MAX_UC_LIST 14
+static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
+{
+ if (CHIP_IS_E1(bp))
+ return E1_MAX_UC_LIST;
+ else if (CHIP_IS_E1H(bp))
+ return E1H_MAX_UC_LIST;
+ else
+ return E2_MAX_UC_LIST;
+}
+
+
+static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
+{
+ if (CHIP_IS_E1(bp))
+ /* CAM Entries for Port0:
+ * 0 - prim ETH MAC
+ * 1 - BCAST MAC
+ * 2 - iSCSI L2 ring ETH MAC
+ * 3-31 - UC MACs
+ *
+ * Port1 entries are allocated the same way starting from
+ * entry 32.
+ */
+ return 3 + 32 * BP_PORT(bp);
+ else if (CHIP_IS_E1H(bp)) {
+ /* CAM Entries:
+ * 0-7 - prim ETH MAC for each function
+ * 8-15 - iSCSI L2 ring ETH MAC for each function
+ * 16 till 255 UC MAC lists for each function
+ *
+ * Remark: There is no FCoE support for E1H, thus FCoE related
+ * MACs are not considered.
+ */
+ return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
+ bnx2x_max_uc_list(bp) * BP_FUNC(bp);
+ } else {
+ /* CAM Entries (there is a separate CAM per engine):
+ * 0-4 - prim ETH MAC for each function
+ * 4-7 - iSCSI L2 ring ETH MAC for each function
+ * 8-11 - FIP ucast L2 MAC for each function
+ * 12-15 - ALL_ENODE_MACS mcast MAC for each function
+ * 16 till 71 UC MAC lists for each function
+ */
+ u8 func_idx =
+ (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
+
+ return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
+ bnx2x_max_uc_list(bp) * func_idx;
+ }
+}
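
A worked, standalone example (editor's note, not part of the patch) of the E1H branch above: with eight functions, two per-function CAM lines ahead of the UC window (primary ETH and iSCSI, i.e. CAM_ISCSI_ETH_LINE assumed to be 1) and E1H_MAX_UC_LIST of 30, function 3 gets its unicast window starting at CAM entry 106.

#include <stdio.h>

int main(void)
{
	int e1h_func_max = 8;		/* 8 PCI functions on E1H */
	int cam_iscsi_eth_line = 1;	/* assumed enum value */
	int max_uc_list = 30;		/* E1H_MAX_UC_LIST from the patch */
	int func = 3;			/* example function number */

	/* same formula as the E1H branch of bnx2x_uc_list_cam_offset() */
	int offset = e1h_func_max * (cam_iscsi_eth_line + 1) +
		     max_uc_list * func;

	/* 8*2 + 30*3 = 106 */
	printf("UC CAM window for func %d starts at entry %d\n", func, offset);
	return 0;
}
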
+
+/* Set the UC list without waiting: waiting implies sleeping, and
+ * set_rx_mode can be invoked from a non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of a very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+ int i = 0, old;
+ struct net_device *dev = bp->dev;
+ u8 offset = bnx2x_uc_list_cam_offset(bp);
+ struct netdev_hw_addr *ha;
+ struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+ dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+
+ if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
+ return -EINVAL;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ /* copy mac */
+ config_cmd->config_table[i].msb_mac_addr =
+ swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
+ config_cmd->config_table[i].middle_mac_addr =
+ swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
+ config_cmd->config_table[i].lsb_mac_addr =
+ swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
+
+ config_cmd->config_table[i].vlan_id = 0;
+ config_cmd->config_table[i].pf_id = BP_FUNC(bp);
+ config_cmd->config_table[i].clients_bit_vector =
+ cpu_to_le32(1 << BP_L_ID(bp));
+
+ SET_FLAG(config_cmd->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_SET);
+
+ DP(NETIF_MSG_IFUP,
+ "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
+ config_cmd->config_table[i].msb_mac_addr,
+ config_cmd->config_table[i].middle_mac_addr,
+ config_cmd->config_table[i].lsb_mac_addr);
+
+ i++;
+
+ /* Set uc MAC in NIG */
+ bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
+ LLH_CAM_ETH_LINE + i);
+ }
+ old = config_cmd->hdr.length;
+ if (old > i) {
+ for (; i < old; i++) {
+ if (CAM_IS_INVALID(config_cmd->
+ config_table[i])) {
+ /* already invalidated */
+ break;
+ }
+ /* invalidate */
+ SET_FLAG(config_cmd->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+ }
+ }
+
+ wmb();
+
+ config_cmd->hdr.length = i;
+ config_cmd->hdr.offset = offset;
+ config_cmd->hdr.client_id = 0xff;
+ /* Mark that this ramrod doesn't use bp->set_mac_pending for
+ * synchronization.
+ */
+ config_cmd->hdr.echo = 0;
+
+ mb();
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+ U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+}
+
+void bnx2x_invalidate_uc_list(struct bnx2x *bp)
+{
+ int i;
+ struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+ dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+ int ramrod_flags = WAIT_RAMROD_COMMON;
+ u8 offset = bnx2x_uc_list_cam_offset(bp);
+ u8 max_list_size = bnx2x_max_uc_list(bp);
+
+ for (i = 0; i < max_list_size; i++) {
+ SET_FLAG(config_cmd->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+ bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
+ }
+
+ wmb();
+
+ config_cmd->hdr.length = max_list_size;
+ config_cmd->hdr.offset = offset;
+ config_cmd->hdr.client_id = 0xff;
+ /* We'll wait for a completion this time... */
+ config_cmd->hdr.echo = 1;
+
+ bp->set_mac_pending = 1;
+
+ mb();
+
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+ U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+ /* Wait for a completion */
+ bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
+ ramrod_flags);
+
+}
+
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+ /* some multicasts */
+ if (CHIP_IS_E1(bp)) {
+ return bnx2x_set_e1_mc_list(bp);
+ } else { /* E1H and newer */
+ return bnx2x_set_e1h_mc_list(bp);
+ }
+}
+
/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
u32 rx_mode = BNX2X_RX_MODE_NORMAL;
- int port = BP_PORT(bp);
if (bp->state != BNX2X_STATE_OPEN) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
@@ -8509,47 +9209,16 @@ void bnx2x_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
rx_mode = BNX2X_RX_MODE_PROMISC;
- else if ((dev->flags & IFF_ALLMULTI) ||
- ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
- CHIP_IS_E1(bp)))
+ else if (dev->flags & IFF_ALLMULTI)
rx_mode = BNX2X_RX_MODE_ALLMULTI;
- else { /* some multicasts */
- if (CHIP_IS_E1(bp)) {
- /*
- * set mc list, do not wait as wait implies sleep
- * and set_rx_mode can be invoked from non-sleepable
- * context
- */
- u8 offset = (CHIP_REV_IS_SLOW(bp) ?
- BNX2X_MAX_EMUL_MULTI*(1 + port) :
- BNX2X_MAX_MULTICAST*(1 + port));
-
- bnx2x_set_e1_mc_list(bp, offset);
- } else { /* E1H */
- /* Accept one or more multicasts */
- struct netdev_hw_addr *ha;
- u32 mc_filter[MC_HASH_SIZE];
- u32 crc, bit, regidx;
- int i;
-
- memset(mc_filter, 0, 4 * MC_HASH_SIZE);
-
- netdev_for_each_mc_addr(ha, dev) {
- DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
- bnx2x_mc_addr(ha));
-
- crc = crc32c_le(0, bnx2x_mc_addr(ha),
- ETH_ALEN);
- bit = (crc >> 24) & 0xff;
- regidx = bit >> 5;
- bit &= 0x1f;
- mc_filter[regidx] |= (1 << bit);
- }
+ else {
+ /* some multicasts */
+ if (bnx2x_set_mc_list(bp))
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
- for (i = 0; i < MC_HASH_SIZE; i++)
- REG_WR(bp, MC_HASH_OFFSET(bp, i),
- mc_filter[i]);
- }
+ /* some unicasts */
+ if (bnx2x_set_uc_list(bp))
+ rx_mode = BNX2X_RX_MODE_PROMISC;
}
bp->rx_mode = rx_mode;
@@ -8629,7 +9298,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
.ndo_start_xmit = bnx2x_start_xmit,
- .ndo_set_multicast_list = bnx2x_set_rx_mode,
+ .ndo_select_queue = bnx2x_select_queue,
+ .ndo_set_rx_mode = bnx2x_set_rx_mode,
.ndo_set_mac_address = bnx2x_change_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = bnx2x_ioctl,
@@ -8761,7 +9431,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
dev->netdev_ops = &bnx2x_netdev_ops;
bnx2x_set_ethtool_ops(dev);
dev->features |= NETIF_F_SG;
- dev->features |= NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
if (bp->flags & USING_DAC_FLAG)
dev->features |= NETIF_F_HIGHDMA;
dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
@@ -8769,12 +9439,16 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
dev->vlan_features |= NETIF_F_SG;
- dev->vlan_features |= NETIF_F_HW_CSUM;
+ dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
if (bp->flags & USING_DAC_FLAG)
dev->vlan_features |= NETIF_F_HIGHDMA;
dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
dev->vlan_features |= NETIF_F_TSO6;
+#ifdef BCM_DCBNL
+ dev->dcbnl_ops = &bnx2x_dcbnl_ops;
+#endif
+
/* get_port_hwinfo() will set prtad and mmds properly */
bp->mdio.prtad = MDIO_PRTAD_NONE;
bp->mdio.mmds = 0;
@@ -9067,7 +9741,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
return -ENODEV;
}
- cid_count += CNIC_CONTEXT_USE;
+ cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
@@ -9096,11 +9770,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
- rc = register_netdev(dev);
- if (rc) {
- dev_err(&pdev->dev, "Cannot register net device\n");
- goto init_one_exit;
- }
+#ifdef BCM_CNIC
+ /* disable FCOE L2 queue for E1x*/
+ if (CHIP_IS_E1x(bp))
+ bp->flags |= NO_FCOE_FLAG;
+
+#endif
/* Configure interrupt mode: try to enable MSI-X/MSI if
* needed, set bp->num_queues appropriately.
@@ -9110,6 +9785,21 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* Add all NAPI objects */
bnx2x_add_all_napi(bp);
+ rc = register_netdev(dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register net device\n");
+ goto init_one_exit;
+ }
+
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
+ /* Add storage MAC address */
+ rtnl_lock();
+ dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+#endif
+
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
@@ -9153,14 +9843,34 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
}
bp = netdev_priv(dev);
+#ifdef BCM_CNIC
+ /* Delete storage MAC address */
+ if (!NO_FCOE(bp)) {
+ rtnl_lock();
+ dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+ rtnl_unlock();
+ }
+#endif
+
+#ifdef BCM_DCBNL
+ /* Delete app tlvs from dcbnl */
+ bnx2x_dcbnl_update_applist(bp, true);
+#endif
+
unregister_netdev(dev);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
+ /* Power on: we can't let PCI layer write to us while we are in D3 */
+ bnx2x_set_power_state(bp, PCI_D0);
+
/* Disable MSI/MSI-X */
bnx2x_disable_msi(bp);
+ /* Power off */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
/* Make sure RESET task is not scheduled before continuing */
cancel_delayed_work_sync(&bp->reset_task);
@@ -9202,7 +9912,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
- for_each_queue(bp, i)
+ for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
bnx2x_free_mem(bp);
@@ -9420,16 +10130,23 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
HW_CID(bp, BNX2X_ISCSI_ETH_CID));
}
- /* There may be not more than 8 L2 and COMMON SPEs and not more
- * than 8 L5 SPEs in the air.
+ /* There may be no more than 8 L2 SPEs and no more than 8 L5 SPEs
+ * in the air. We also check that the number of outstanding
+ * COMMON ramrods is not more than the EQ and SPQ can
+ * accommodate.
*/
- if ((type == NONE_CONNECTION_TYPE) ||
- (type == ETH_CONNECTION_TYPE)) {
- if (!atomic_read(&bp->spq_left))
+ if (type == ETH_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->cq_spq_left))
+ break;
+ else
+ atomic_dec(&bp->cq_spq_left);
+ } else if (type == NONE_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->eq_spq_left))
break;
else
- atomic_dec(&bp->spq_left);
- } else if (type == ISCSI_CONNECTION_TYPE) {
+ atomic_dec(&bp->eq_spq_left);
+ } else if ((type == ISCSI_CONNECTION_TYPE) ||
+ (type == FCOE_CONNECTION_TYPE)) {
if (bp->cnic_spq_pending >=
bp->cnic_eth_dev.max_kwqe_pending)
break;
@@ -9505,7 +10222,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
int rc = 0;
mutex_lock(&bp->cnic_mutex);
- c_ops = bp->cnic_ops;
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_mutex));
if (c_ops)
rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
mutex_unlock(&bp->cnic_mutex);
@@ -9576,6 +10294,9 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
case DRV_CTL_START_L2_CMD: {
u32 cli = ctl->data.ring.client_id;
+ /* Clear FCoE FIP and ALL ENODE MACs addresses first */
+ bnx2x_del_fcoe_eth_macs(bp);
+
/* Set iSCSI MAC address */
bnx2x_set_iscsi_eth_mac_addr(bp, 1);
@@ -9616,7 +10337,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
int count = ctl->data.credit.credit_count;
smp_mb__before_atomic_inc();
- atomic_add(count, &bp->spq_left);
+ atomic_add(count, &bp->cq_spq_left);
smp_mb__after_atomic_inc();
break;
}
@@ -9697,10 +10418,6 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
mutex_lock(&bp->cnic_mutex);
- if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
- bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
- bnx2x_set_iscsi_eth_mac_addr(bp, 0);
- }
cp->drv_state = 0;
rcu_assign_pointer(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_mutex);
@@ -9716,6 +10433,13 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ /* If both iSCSI and FCoE are disabled - return NULL in
+ * order to indicate CNIC that it should not try to work
+ * with this device.
+ */
+ if (NO_ISCSI(bp) && NO_FCOE(bp))
+ return NULL;
+
cp->drv_owner = THIS_MODULE;
cp->chip_id = CHIP_ID(bp);
cp->pdev = bp->pdev;
@@ -9731,9 +10455,20 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
cp->drv_ctl = bnx2x_drv_ctl;
cp->drv_register_cnic = bnx2x_register_cnic;
cp->drv_unregister_cnic = bnx2x_unregister_cnic;
- cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
+ cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
+ BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+ if (NO_ISCSI(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+ if (NO_FCOE(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
"starting cid %d\n",
cp->ctx_blk_size,
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 1cefe489a955..1c89f19a4425 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -18,6 +18,8 @@
* WR - Write Clear (write 1 to clear the bit)
*
*/
+#ifndef BNX2X_REG_H
+#define BNX2X_REG_H
#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
@@ -39,6 +41,8 @@
#define BRB1_REG_BRB1_PRTY_MASK 0x60138
/* [R 4] Parity register #0 read */
#define BRB1_REG_BRB1_PRTY_STS 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
* address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
* BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
@@ -132,8 +136,12 @@
#define CCM_REG_CCM_INT_MASK 0xd01e4
/* [R 11] Interrupt register #0 read */
#define CCM_REG_CCM_INT_STS 0xd01d8
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK 0xd01f4
/* [R 27] Parity register #0 read */
#define CCM_REG_CCM_PRTY_STS 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
Is used to determine the number of the AG context REG-pairs written back;
@@ -350,6 +358,8 @@
#define CDU_REG_CDU_PRTY_MASK 0x10104c
/* [R 5] Parity register #0 read */
#define CDU_REG_CDU_PRTY_STS 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
/* [RC 32] logging of error data in case of a CDU load error:
{expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error;
ype_error; ctual_active; ctual_compressed_context}; */
@@ -381,6 +391,8 @@
#define CFC_REG_CFC_PRTY_MASK 0x104118
/* [R 4] Parity register #0 read */
#define CFC_REG_CFC_PRTY_STS 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
/* [RW 21] CID cam access (21:1 - Data; alid - 0) */
#define CFC_REG_CID_CAM 0x104800
#define CFC_REG_CONTROL0 0x104028
@@ -466,6 +478,8 @@
#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
/* [R 11] Parity register #0 read */
#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
+/* [RC 11] Parity register #0 read clear */
+#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4
#define CSDM_REG_ENABLE_IN1 0xc2238
#define CSDM_REG_ENABLE_IN2 0xc223c
#define CSDM_REG_ENABLE_OUT1 0xc2240
@@ -556,6 +570,9 @@
/* [R 32] Parity register #0 read */
#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
+/* [RC 32] Parity register #0 read clear */
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138
#define CSEM_REG_ENABLE_IN 0x2000a4
#define CSEM_REG_ENABLE_OUT 0x2000a8
/* [RW 32] This address space contains all registers and memories that are
@@ -648,6 +665,8 @@
#define DBG_REG_DBG_PRTY_MASK 0xc0a8
/* [R 1] Parity register #0 read */
#define DBG_REG_DBG_PRTY_STS 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
* function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
* 4.Completion function=0; 5.Error handling=0 */
@@ -668,6 +687,8 @@
#define DMAE_REG_DMAE_PRTY_MASK 0x102064
/* [R 4] Parity register #0 read */
#define DMAE_REG_DMAE_PRTY_STS 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
/* [RW 1] Command 0 go. */
#define DMAE_REG_GO_C0 0x102080
/* [RW 1] Command 1 go. */
@@ -734,6 +755,8 @@
#define DORQ_REG_DORQ_PRTY_MASK 0x170190
/* [R 2] Parity register #0 read */
#define DORQ_REG_DORQ_PRTY_STS 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
/* [RW 8] The address to write the DPM CID to STORM. */
#define DORQ_REG_DPM_CID_ADDR 0x170044
/* [RW 5] The DPM mode CID extraction offset. */
@@ -842,8 +865,12 @@
/* [R 1] data availble for error memory. If this bit is clear do not red
* from error_handling_memory. */
#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK 0x1300a8
/* [R 11] Parity register #0 read */
#define IGU_REG_IGU_PRTY_STS 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
/* [R 4] Debug: int_handle_fsm */
#define IGU_REG_INT_HANDLE_FSM 0x130050
#define IGU_REG_LEADING_EDGE_LATCH 0x130134
@@ -1501,6 +1528,8 @@
#define MISC_REG_MISC_PRTY_MASK 0xa398
/* [R 1] Parity register #0 read */
#define MISC_REG_MISC_PRTY_STS 0xa38c
+/* [RC 1] Parity register #0 read clear */
+#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
#define MISC_REG_NIG_WOL_P0 0xa270
#define MISC_REG_NIG_WOL_P1 0xa274
/* [R 1] If set indicate that the pcie_rst_b was asserted without perst
@@ -1604,7 +1633,7 @@
(~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
- in this register. addres 0 - timer 1; address 1 - timer 2, ... address 7 -
+ in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
timer 8 */
#define MISC_REG_SW_TIMER_VAL 0xa5c0
/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
@@ -1615,6 +1644,8 @@
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
+#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0)
+#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0)
#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
@@ -1744,12 +1775,16 @@
~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same
port */
#define NIG_REG_LLFC_ENABLE_0 0x16208
+#define NIG_REG_LLFC_ENABLE_1 0x1620c
/* [RW 16] classes are high-priority for port0 */
#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c
/* [RW 16] classes are low-priority for port0 */
#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064
/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
#define NIG_REG_LLFC_OUT_EN_0 0x160c8
+#define NIG_REG_LLFC_OUT_EN_1 0x160cc
#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
@@ -1774,6 +1809,8 @@
/* [RW 8] event id for llh0 */
#define NIG_REG_LLH0_EVENT_ID 0x10084
#define NIG_REG_LLH0_FUNC_EN 0x160fc
+#define NIG_REG_LLH0_FUNC_MEM 0x16180
+#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140
#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
/* [RW 1] Determine the IP version to look for in
~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
@@ -1797,6 +1834,9 @@
#define NIG_REG_LLH1_ERROR_MASK 0x10090
/* [RW 8] event id for llh1 */
#define NIG_REG_LLH1_EVENT_ID 0x10088
+#define NIG_REG_LLH1_FUNC_MEM 0x161c0
+#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
+#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
/* [RW 8] init credit counter for port1 in LLH */
#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
#define NIG_REG_LLH1_XCM_MASK 0x10134
@@ -1907,11 +1947,17 @@
~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
port */
#define NIG_REG_PAUSE_ENABLE_0 0x160c0
+#define NIG_REG_PAUSE_ENABLE_1 0x160c4
/* [RW 1] Input enable for RX PBF LP IF */
#define NIG_REG_PBF_LB_IN_EN 0x100b4
/* [RW 1] Value of this register will be transmitted to port swap when
~nig_registers_strap_override.strap_override =1 */
#define NIG_REG_PORT_SWAP 0x10394
+/* [RW 1] PPP enable for port0. This register may get 1 only when
+ * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable =0 for the
+ * same port */
+#define NIG_REG_PPP_ENABLE_0 0x160b0
+#define NIG_REG_PPP_ENABLE_1 0x160b4
/* [RW 1] output enable for RX parser descriptor IF */
#define NIG_REG_PRS_EOP_OUT_EN 0x10104
/* [RW 1] Input enable for RX parser request IF */
@@ -1978,6 +2024,14 @@
#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15)
#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18)
#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_UPPER_BOUND 0x15c05c
+/* [RW 31] The weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT 0x15c054
+/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_UPPER_BOUND 0x15c060
+/* [RW 31] The weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT 0x15c058
/* [RW 1] Disable processing further tasks from port 0 (after ending the
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
@@ -1988,9 +2042,16 @@
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
#define PBF_REG_DISABLE_PF 0x1402e8
+/* [RW 1] Indicates that ETS is performed between the COSes in the command
+ * arbiter. If reset, strict priority w/ anti-starvation will be performed
+ * w/o WFQ. */
+#define PBF_REG_ETS_ENABLED 0x15c050
/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
* Ethernet header. */
#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
+/* [RW 1] Indicates which COS is connected to the highest priority in the
+ * command arbiter. */
+#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
#define PBF_REG_IF_ENABLE_REG 0x140044
/* [RW 1] Init bit. When set the initial credits are copied to the credit
registers (except the port credits). Should be set and then reset after
@@ -2016,6 +2077,10 @@
#define PBF_REG_MAC_LB_ENABLE 0x140040
/* [RW 6] Bit-map indicating which headers must appear in the packet */
#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
+/* [RW 16] The number of strict priority arbitration slots between 2 RR
+ * arbitration slots. A value of 0 means no strict priority cycles; i.e. the
+ * strict-priority w/ anti-starvation arbiter is a RR arbiter. */
+#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064
/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
not suppoterd. */
#define PBF_REG_P0_ARB_THRSH 0x1400e4
@@ -2046,6 +2111,10 @@
#define PBF_REG_PBF_INT_MASK 0x1401d4
/* [R 5] Interrupt register #0 read */
#define PBF_REG_PBF_INT_STS 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK 0x1401e4
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
#define PB_REG_CONTROL 0
/* [RW 2] Interrupt mask register #0 read/write */
#define PB_REG_PB_INT_MASK 0x28
@@ -2055,6 +2124,8 @@
#define PB_REG_PB_PRTY_MASK 0x38
/* [R 4] Parity register #0 read */
#define PB_REG_PB_PRTY_STS 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR 0x30
#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
@@ -2410,6 +2481,8 @@
#define PRS_REG_PRS_PRTY_MASK 0x401a4
/* [R 8] Parity register #0 read */
#define PRS_REG_PRS_PRTY_STS 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
request message */
#define PRS_REG_PURE_REGIONS 0x40024
@@ -2563,6 +2636,9 @@
/* [R 32] Parity register #0 read */
#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
+/* [RC 32] Parity register #0 read clear */
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590
/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
indication about backpressure) */
#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
@@ -2965,6 +3041,8 @@
#define PXP_REG_PXP_PRTY_MASK 0x103094
/* [R 26] Parity register #0 read */
#define PXP_REG_PXP_PRTY_STS 0x103088
+/* [RC 27] Parity register #0 read clear */
+#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c
/* [RW 4] The activity counter initial increment value sent in the load
request */
#define QM_REG_ACTCTRINITVAL_0 0x168040
@@ -3121,6 +3199,8 @@
#define QM_REG_QM_PRTY_MASK 0x168454
/* [R 12] Parity register #0 read */
#define QM_REG_QM_PRTY_STS 0x168448
+/* [RC 12] Parity register #0 read clear */
+#define QM_REG_QM_PRTY_STS_CLR 0x16844c
/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
#define QM_REG_QSTATUS_HIGH 0x16802c
/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
@@ -3406,6 +3486,8 @@
#define QM_REG_WRRWEIGHTS_9 0x168848
/* [R 6] Keep the fill level of the fifo from write client 1 */
#define QM_REG_XQM_WRC_FIFOLVL 0x168000
+/* [W 1] reset to parity interrupt */
+#define SEM_FAST_REG_PARITY_RST 0x18840
#define SRC_REG_COUNTFREE0 0x40500
/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two
ports. If set the searcher support 8 functions. */
@@ -3434,6 +3516,8 @@
#define SRC_REG_SRC_PRTY_MASK 0x404c8
/* [R 3] Parity register #0 read */
#define SRC_REG_SRC_PRTY_STS 0x404bc
+/* [RC 3] Parity register #0 read clear */
+#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0
/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
#define TCM_REG_CAM_OCCUP 0x5017c
/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3560,8 +3644,12 @@
#define TCM_REG_TCM_INT_MASK 0x501dc
/* [R 11] Interrupt register #0 read */
#define TCM_REG_TCM_INT_STS 0x501d0
+/* [RW 27] Parity mask register #0 read/write */
+#define TCM_REG_TCM_PRTY_MASK 0x501ec
/* [R 27] Parity register #0 read */
#define TCM_REG_TCM_PRTY_STS 0x501e0
+/* [RC 27] Parity register #0 read clear */
+#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4
/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
Is used to determine the number of the AG context REG-pairs written back;
@@ -3719,6 +3807,10 @@
#define TM_REG_TM_INT_MASK 0x1640fc
/* [R 1] Interrupt register #0 read */
#define TM_REG_TM_INT_STS 0x1640f0
+/* [RW 7] Parity mask register #0 read/write */
+#define TM_REG_TM_PRTY_MASK 0x16410c
+/* [RC 7] Parity register #0 read clear */
+#define TM_REG_TM_PRTY_STS_CLR 0x164104
/* [RW 8] The event id for aggregated interrupt 0 */
#define TSDM_REG_AGG_INT_EVENT_0 0x42038
#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
@@ -3799,6 +3891,8 @@
#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
/* [R 11] Parity register #0 read */
#define TSDM_REG_TSDM_PRTY_STS 0x422b0
+/* [RC 11] Parity register #0 read clear */
+#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4
/* [RW 5] The number of time_slots in the arbitration cycle */
#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3878,6 +3972,9 @@
#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
/* [RW 8] List of free threads . There is a bit per thread. */
#define TSEM_REG_THREADS_LIST 0x1802e4
+/* [RC 32] Parity register #0 read clear */
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128
/* [RW 3] The arbitration scheme of time_slot 0 */
#define TSEM_REG_TS_0_AS 0x180038
/* [RW 3] The arbitration scheme of time_slot 10 */
@@ -4080,6 +4177,8 @@
#define UCM_REG_UCM_INT_STS 0xe01c8
/* [R 27] Parity register #0 read */
#define UCM_REG_UCM_PRTY_STS 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
Is used to determine the number of the AG context REG-pairs written back;
@@ -4256,6 +4355,8 @@
#define USDM_REG_USDM_PRTY_MASK 0xc42c0
/* [R 11] Parity register #0 read */
#define USDM_REG_USDM_PRTY_STS 0xc42b4
+/* [RC 11] Parity register #0 read clear */
+#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8
/* [RW 5] The number of time_slots in the arbitration cycle */
#define USEM_REG_ARB_CYCLE_SIZE 0x300034
/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4385,6 +4486,9 @@
/* [R 32] Parity register #0 read */
#define USEM_REG_USEM_PRTY_STS_0 0x300124
#define USEM_REG_USEM_PRTY_STS_1 0x300134
+/* [RC 32] Parity register #0 read clear */
+#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128
+#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138
/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
* VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
#define USEM_REG_VFPF_ERR_NUM 0x300380
@@ -4761,6 +4865,8 @@
#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
/* [R 11] Parity register #0 read */
#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
+/* [RC 11] Parity register #0 read clear */
+#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4
/* [RW 5] The number of time_slots in the arbitration cycle */
#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4893,6 +4999,9 @@
/* [R 32] Parity register #0 read */
#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
+/* [RC 32] Parity register #0 read clear */
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
@@ -4970,7 +5079,23 @@
#define EMAC_REG_EMAC_TX_MODE 0xbc
#define EMAC_REG_EMAC_TX_STAT_AC 0x280
#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
+#define EMAC_REG_RX_PFC_MODE 0x320
+#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2)
+#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1)
+#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0)
+#define EMAC_REG_RX_PFC_PARAM 0x324
+#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0
+#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334
+#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0)
#define EMAC_RX_MODE_FLOW_EN (1L<<2)
+#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
#define EMAC_RX_MODE_RESET (1L<<0)
@@ -5958,6 +6083,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
#define MDIO_PMA_REG_8727_PCS_GP 0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
@@ -6069,7 +6195,11 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
#define IGU_FUNC_BASE 0x0400
@@ -6264,3 +6394,4 @@ static inline u8 calc_crc8(u32 data, u8 crc)
}
+#endif /* BNX2X_REG_H */
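Most of the additions to bnx2x_reg.h above are the read-clear ("RC") companions of existing parity status registers, plus the BNX2X_REG_H include guard that now wraps the header. Reading a *_PRTY_STS_CLR offset returns the latched parity bits and clears them in the same access, whereas the plain *_PRTY_STS offset is a non-destructive read. A minimal sketch of using such a pair through the driver's REG_RD() accessor; the helper name is made up:

static u32 bnx2x_get_and_clear_brb_parity(struct bnx2x *bp)
{
        /* One read through the RC offset both returns and clears the
         * latched bits; reading BRB1_REG_BRB1_PRTY_STS instead would
         * leave them set for a later pass.
         */
        return REG_RD(bp, BRB1_REG_BRB1_PRTY_STS_CLR);
}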
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 4733c835dad9..3445ded6674f 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -158,9 +158,14 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
spin_lock_bh(&bp->stats_lock);
+ if (bp->stats_pending) {
+ spin_unlock_bh(&bp->stats_lock);
+ return;
+ }
+
ramrod_data.drv_counter = bp->stats_counter++;
ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
- for_each_queue(bp, i)
+ for_each_eth_queue(bp, i)
ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
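The early return added above turns the storm-statistics post into a no-op while a previous statistics ramrod is still outstanding; stats_pending is examined under stats_lock here, so a timer tick cannot stack a second query on an unanswered one (assuming the flag is also set and cleared under the same lock, which is not shown in this excerpt). The same check-under-lock shape in isolation, with made-up names:

#include <linux/spinlock.h>
#include <linux/types.h>

struct stats_ctx {
        spinlock_t lock;
        bool pending;
};

/* Illustration only: post new work unless a previous request is still
 * in flight.  The flag is read and written only while the lock is
 * held, so concurrent callers always see a consistent value.
 */
static void post_query(struct stats_ctx *ctx)
{
        spin_lock_bh(&ctx->lock);
        if (ctx->pending) {
                spin_unlock_bh(&ctx->lock);
                return;
        }
        ctx->pending = true;
        /* ... build and fire the request here ... */
        spin_unlock_bh(&ctx->lock);
}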
@@ -766,7 +771,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
estats->no_buff_discard_hi = 0;
estats->no_buff_discard_lo = 0;
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
int cl_id = fp->cl_id;
struct tstorm_per_client_stats *tclient =
@@ -996,7 +1001,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
tmp = estats->mac_discard;
- for_each_queue(bp, i)
+ for_each_rx_queue(bp, i)
tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
nstats->rx_dropped = tmp;
@@ -1087,7 +1092,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bp->dev->name,
estats->brb_drop_lo, estats->brb_truncate_lo);
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
@@ -1101,7 +1106,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
fp->rx_calls, fp->rx_pkt);
}
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
struct netdev_queue *txq =
@@ -1234,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
if (unlikely(bp->panic))
return;
+ bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
/* Protect a state change flow */
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
spin_unlock_bh(&bp->stats_lock);
- bnx2x_stats_stm[state][event].action(bp);
-
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
state, event, bp->stats_state);
@@ -1381,7 +1386,8 @@ void bnx2x_stats_init(struct bnx2x *bp)
memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
}
- for_each_queue(bp, i) {
+ /* FW stats are currently collected for ETH clients only */
+ for_each_eth_queue(bp, i) {
/* Set initial stats counter in the stats ramrod data to -1 */
int cl_id = bp->fp[i].cl_id;
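Several statistics loops above move from for_each_queue() to for_each_eth_queue() (or, for the rx_dropped accumulation, for_each_rx_queue()), matching the new comment that firmware statistics are collected for ETH clients only. Roughly speaking the iterators differ only in their upper bound; the sketch below is purely illustrative and not the driver's real macros, which live in bnx2x.h:

/* Illustration only -- the "eth" iterator stops before any non-L2
 * (e.g. FCoE) queue appended at the end of the queue array.
 */
struct sketch_dev {
        int num_queues;         /* all queues */
        int num_eth_queues;     /* L2 queues with firmware statistics */
};

#define sketch_for_each_queue(d, i) \
        for ((i) = 0; (i) < (d)->num_queues; (i)++)

#define sketch_for_each_eth_queue(d, i) \
        for ((i) = 0; (i) < (d)->num_eth_queues; (i)++)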
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index afd15efa429a..596798c47452 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -53,7 +53,6 @@ struct bnx2x_eth_q_stats {
u32 hw_csum_err;
};
-#define BNX2X_NUM_Q_STATS 13
#define Q_STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
@@ -225,7 +224,6 @@ struct bnx2x_eth_stats {
u32 nig_timer_max;
};
-#define BNX2X_NUM_STATS 43
#define STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
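The hand-maintained BNX2X_NUM_Q_STATS and BNX2X_NUM_STATS constants are dropped here; deriving such counts from the stats tables themselves with ARRAY_SIZE() is the usual way to keep them from drifting apart, though the updated users are outside this excerpt. A generic illustration with a made-up table:

#include <linux/kernel.h>       /* ARRAY_SIZE() */

struct stat_desc {
        int offset;
        const char *name;
};

static const struct stat_desc example_stats[] = {
        { 0, "rx_bytes" },
        { 1, "rx_packets" },
        { 2, "tx_bytes" },
};

/* Always matches the table, unlike a separate "#define NUM_STATS 3". */
#define EXAMPLE_NUM_STATS       ARRAY_SIZE(example_stats)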
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 6f9c6faef24c..3c5c014e82b2 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,7 +4,10 @@
obj-$(CONFIG_BONDING) += bonding.o
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
+
+proc-$(CONFIG_PROC_FS) += bond_procfs.o
+bonding-objs += $(proc-y)
ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o
bonding-objs += $(ipv6-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 881914bc4e9c..494bf960442d 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -246,7 +246,7 @@ static inline void __enable_port(struct port *port)
*/
static inline int __port_is_enabled(struct port *port)
{
- return port->slave->state == BOND_STATE_ACTIVE;
+ return bond_is_active_slave(port->slave);
}
/**
@@ -281,23 +281,23 @@ static inline int __check_agg_selection_timer(struct port *port)
}
/**
- * __get_rx_machine_lock - lock the port's RX machine
+ * __get_state_machine_lock - lock the port's state machines
* @port: the port we're looking at
*
*/
-static inline void __get_rx_machine_lock(struct port *port)
+static inline void __get_state_machine_lock(struct port *port)
{
- spin_lock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+ spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
}
/**
- * __release_rx_machine_lock - unlock the port's RX machine
+ * __release_state_machine_lock - unlock the port's state machines
* @port: the port we're looking at
*
*/
-static inline void __release_rx_machine_lock(struct port *port)
+static inline void __release_state_machine_lock(struct port *port)
{
- spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+ spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
}
/**
@@ -388,14 +388,14 @@ static u8 __get_duplex(struct port *port)
}
/**
- * __initialize_port_locks - initialize a port's RX machine spinlock
+ * __initialize_port_locks - initialize a port's STATE machine spinlock
* @port: the port we're looking at
*
*/
static inline void __initialize_port_locks(struct port *port)
{
// make sure it isn't called twice
- spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+ spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
}
//conversions
@@ -840,7 +840,7 @@ static int ad_lacpdu_send(struct port *port)
lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
- /* Note: source addres is set to be the member's PERMANENT address,
+ /* Note: source address is set to be the member's PERMANENT address,
because we use it to identify loopback lacpdus in receive. */
memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
@@ -881,7 +881,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
marker_header = (struct bond_marker_header *)skb_put(skb, length);
memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
- /* Note: source addres is set to be the member's PERMANENT address,
+ /* Note: source address is set to be the member's PERMANENT address,
because we use it to identify loopback MARKERs in receive. */
memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
@@ -1025,9 +1025,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
{
rx_states_t last_state;
- // Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback)
- __get_rx_machine_lock(port);
-
// keep current State Machine state to compare later if it was changed
last_state = port->sm_rx_state;
@@ -1133,7 +1130,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
"Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
port->slave->dev->master->name, port->slave->dev->name);
- __release_rx_machine_lock(port);
return;
}
__update_selected(lacpdu, port);
@@ -1153,7 +1149,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
break;
}
}
- __release_rx_machine_lock(port);
}
/**
@@ -1916,7 +1911,7 @@ int bond_3ad_bind_slave(struct slave *slave)
return -1;
}
- //check that the slave has not been intialized yet.
+ //check that the slave has not been initialized yet.
if (SLAVE_AD_INFO(slave).port.slave != slave) {
// port initialization
@@ -2155,6 +2150,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
goto re_arm;
}
+ /* Lock around state machines to protect data accessed
+ * by all (e.g., port->sm_vars). ad_rx_machine may run
+ * concurrently due to incoming LACPDU.
+ */
+ __get_state_machine_lock(port);
+
ad_rx_machine(NULL, port);
ad_periodic_machine(port);
ad_port_selection_logic(port);
@@ -2164,6 +2165,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
// turn off the BEGIN bit, since we already handled it
if (port->sm_vars & AD_PORT_BEGIN)
port->sm_vars &= ~AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
re_arm:
@@ -2200,7 +2203,10 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
case AD_TYPE_LACPDU:
pr_debug("Received LACPDU on port %d\n",
port->actor_port_number);
+ /* Protect against concurrent state machines */
+ __get_state_machine_lock(port);
ad_rx_machine(lacpdu, port);
+ __release_state_machine_lock(port);
break;
case AD_TYPE_MARKER:
@@ -2470,12 +2476,15 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
if (!(dev->flags & IFF_MASTER))
goto out;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
goto out;
read_lock(&bond->lock);
- slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
- orig_dev);
+ slave = bond_get_slave_by_dev(netdev_priv(dev), orig_dev);
if (!slave)
goto out_unlock;
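The skb_share_check() added above (and in the matching ALB and ARP receive paths later in this patch) takes ownership of the buffer before pskb_may_pull() may modify it: a shared skb is replaced by a private clone, and on clone failure the original is freed and NULL is returned, so the handler simply bails out. A self-contained sketch of the pattern, with a made-up handler name and minimum length:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

#define EXAMPLE_MIN_PDU_LEN     28      /* hypothetical minimum PDU size */

static int example_recv(struct sk_buff *skb, struct net_device *dev,
                        struct packet_type *pt, struct net_device *orig_dev)
{
        int ret = NET_RX_DROP;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return NET_RX_DROP;     /* original already freed */

        if (!pskb_may_pull(skb, EXAMPLE_MIN_PDU_LEN))
                goto out;

        /* ... parse the PDU ... */
        ret = NET_RX_SUCCESS;
out:
        kfree_skb(skb);
        return ret;
}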
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 2c46a154f2c6..b28baff70864 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -264,7 +264,8 @@ struct ad_bond_info {
struct ad_slave_info {
struct aggregator aggregator; // 802.3ad aggregator structure
struct port port; // 802.3ad port structure
- spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt
+ spinlock_t state_machine_lock; /* mutex state machines vs.
+ incoming LACPDU */
u16 id;
};
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 26bb118c4533..9bc5de3e04a8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -44,42 +44,6 @@
#include "bond_alb.h"
-#define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */
-#define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing.
- * Used for division - never set
- * to zero !!!
- */
-#define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of
- * learning packets to the switch
- */
-
-#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
- * ALB_TIMER_TICKS_PER_SEC)
-
-#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \
- * ALB_TIMER_TICKS_PER_SEC)
-
-#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
- * Note that this value MUST NOT be smaller
- * because the key hash table is BYTE wide !
- */
-
-
-#define TLB_NULL_INDEX 0xffffffff
-#define MAX_LP_BURST 3
-
-/* rlb defs */
-#define RLB_HASH_TABLE_SIZE 256
-#define RLB_NULL_INDEX 0xffffffff
-#define RLB_UPDATE_DELAY 2*ALB_TIMER_TICKS_PER_SEC /* 2 seconds */
-#define RLB_ARP_BURST_SIZE 2
-#define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb
- * rebalance interval (5 min).
- */
-/* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is
- * promiscuous after failover
- */
-#define RLB_PROMISC_TIMEOUT 10*ALB_TIMER_TICKS_PER_SEC
#ifndef __long_aligned
#define __long_aligned __attribute__((aligned((sizeof(long)))))
@@ -362,6 +326,10 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
goto out;
}
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
goto out;
@@ -636,7 +604,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
_lock_rx_hashtbl(bond);
- hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_src));
+ hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (client_info->assigned) {
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 50968f8196cf..118c28aa471e 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -31,6 +31,44 @@ struct slave;
#define BOND_ALB_INFO(bond) ((bond)->alb_info)
#define SLAVE_TLB_INFO(slave) ((slave)->tlb_info)
+#define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */
+#define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing.
+ * Used for division - never set
+ * to zero !!!
+ */
+#define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of
+ * learning packets to the switch
+ */
+
+#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
+ * ALB_TIMER_TICKS_PER_SEC)
+
+#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \
+ * ALB_TIMER_TICKS_PER_SEC)
+
+#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
+ * Note that this value MUST NOT be smaller
+ * because the key hash table is BYTE wide !
+ */
+
+
+#define TLB_NULL_INDEX 0xffffffff
+#define MAX_LP_BURST 3
+
+/* rlb defs */
+#define RLB_HASH_TABLE_SIZE 256
+#define RLB_NULL_INDEX 0xffffffff
+#define RLB_UPDATE_DELAY (2*ALB_TIMER_TICKS_PER_SEC) /* 2 seconds */
+#define RLB_ARP_BURST_SIZE 2
+#define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb
+ * rebalance interval (5 min).
+ */
+/* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is
+ * promiscuous after failover
+ */
+#define RLB_PROMISC_TIMEOUT (10*ALB_TIMER_TICKS_PER_SEC)
+
+
struct tlb_client_info {
struct slave *tx_slave; /* A pointer to slave used for transmiting
* packets to a Client that the Hash function
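One detail of the move worth noting: RLB_UPDATE_DELAY and RLB_PROMISC_TIMEOUT gain parentheses around their bodies. Without them, a body such as 2*ALB_TIMER_TICKS_PER_SEC silently changes meaning whenever the macro sits next to an operator of equal or higher precedence; a tiny illustration with stand-in names:

#define TICKS_PER_SEC   10
#define DELAY_BAD       2 * TICKS_PER_SEC       /* unparenthesized */
#define DELAY_GOOD      (2 * TICKS_PER_SEC)

/* 100 / DELAY_BAD expands to 100 / 2 * TICKS_PER_SEC == 500,
 * whereas 100 / DELAY_GOOD == 5, which is what was meant.
 */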
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
new file mode 100644
index 000000000000..3680aa251dea
--- /dev/null
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -0,0 +1,146 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+
+#include "bonding.h"
+#include "bond_alb.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static struct dentry *bonding_debug_root;
+
+/*
+ * Show RLB hash table
+ */
+static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
+{
+ struct bonding *bond = m->private;
+ struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ struct rlb_client_info *client_info;
+ u32 hash_index;
+
+ if (bond->params.mode != BOND_MODE_ALB)
+ return 0;
+
+ seq_printf(m, "SourceIP DestinationIP "
+ "Destination MAC DEV\n");
+
+ spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+
+ hash_index = bond_info->rx_hashtbl_head;
+ for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
+ client_info = &(bond_info->rx_hashtbl[hash_index]);
+ seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n",
+ &client_info->ip_src,
+ &client_info->ip_dst,
+ &client_info->mac_dst,
+ client_info->slave->dev->name);
+ }
+
+ spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+
+ return 0;
+}
+
+static int bond_debug_rlb_hash_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, bond_debug_rlb_hash_show, inode->i_private);
+}
+
+static const struct file_operations bond_debug_rlb_hash_fops = {
+ .owner = THIS_MODULE,
+ .open = bond_debug_rlb_hash_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void bond_debug_register(struct bonding *bond)
+{
+ if (!bonding_debug_root)
+ return;
+
+ bond->debug_dir =
+ debugfs_create_dir(bond->dev->name, bonding_debug_root);
+
+ if (!bond->debug_dir) {
+ pr_warning("%s: Warning: failed to register to debugfs\n",
+ bond->dev->name);
+ return;
+ }
+
+ debugfs_create_file("rlb_hash_table", 0400, bond->debug_dir,
+ bond, &bond_debug_rlb_hash_fops);
+}
+
+void bond_debug_unregister(struct bonding *bond)
+{
+ if (!bonding_debug_root)
+ return;
+
+ debugfs_remove_recursive(bond->debug_dir);
+}
+
+void bond_debug_reregister(struct bonding *bond)
+{
+ struct dentry *d;
+
+ if (!bonding_debug_root)
+ return;
+
+ d = debugfs_rename(bonding_debug_root, bond->debug_dir,
+ bonding_debug_root, bond->dev->name);
+ if (d) {
+ bond->debug_dir = d;
+ } else {
+ pr_warning("%s: Warning: failed to reregister, "
+ "so just unregister old one\n",
+ bond->dev->name);
+ bond_debug_unregister(bond);
+ }
+}
+
+void bond_create_debugfs(void)
+{
+ bonding_debug_root = debugfs_create_dir("bonding", NULL);
+
+ if (!bonding_debug_root) {
+ pr_warning("Warning: Cannot create bonding directory"
+ " in debugfs\n");
+ }
+}
+
+void bond_destroy_debugfs(void)
+{
+ debugfs_remove_recursive(bonding_debug_root);
+ bonding_debug_root = NULL;
+}
+
+
+#else /* !CONFIG_DEBUG_FS */
+
+void bond_debug_register(struct bonding *bond)
+{
+}
+
+void bond_debug_unregister(struct bonding *bond)
+{
+}
+
+void bond_debug_reregister(struct bonding *bond)
+{
+}
+
+void bond_create_debugfs(void)
+{
+}
+
+void bond_destroy_debugfs(void)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
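The new bond_debugfs.c keeps all debugfs handling in one place: bond_create_debugfs()/bond_destroy_debugfs() manage the top-level "bonding" directory (presumably from module init/exit in bond_main.c, not shown in this excerpt), while the per-bond register/unregister/reregister calls maintain a per-device directory holding a single read-only rlb_hash_table file backed by seq_file. A further read-only view would reuse the same single_open() pattern; the file name and show routine below are hypothetical and not part of this patch:

/* Hypothetical extra debugfs file alongside rlb_hash_table. */
static int bond_debug_example_show(struct seq_file *m, void *v)
{
        struct bonding *bond = m->private;

        seq_printf(m, "slaves: %d\n", bond->slave_cnt);
        return 0;
}

static int bond_debug_example_open(struct inode *inode, struct file *file)
{
        return single_open(file, bond_debug_example_show, inode->i_private);
}

static const struct file_operations bond_debug_example_fops = {
        .owner          = THIS_MODULE,
        .open           = bond_debug_example_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* and, from bond_debug_register():
 *      debugfs_create_file("example", 0400, bond->debug_dir,
 *                          bond, &bond_debug_example_fops);
 */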
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 121b073a6c3f..84fbd4ebd778 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -88,7 +88,12 @@ static void bond_na_send(struct net_device *slave_dev,
}
if (vlan_id) {
- skb = vlan_put_tag(skb, vlan_id);
+ /* The Ethernet header is not present yet, so it is
+ * too early to insert a VLAN tag. Force use of an
+ * out-of-line tag here and let dev_hard_start_xmit()
+ * insert it if the slave hardware can't.
+ */
+ skb = __vlan_hwaccel_put_tag(skb, vlan_id);
if (!skb) {
pr_err("failed to insert VLAN tag\n");
return;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 71a169740d05..1a6e9eb7af43 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -59,15 +59,12 @@
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
-#include <linux/netpoll.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
@@ -171,12 +168,9 @@ MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link
/*----------------------------- Global variables ----------------------------*/
#ifdef CONFIG_NET_POLL_CONTROLLER
-cpumask_var_t netpoll_block_tx;
+atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif
-static const char * const version =
- DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
-
int bond_net_id __read_mostly;
static __be32 arp_target[BOND_MAX_ARP_TARGETS];
@@ -246,7 +240,7 @@ static void bond_uninit(struct net_device *bond_dev);
/*---------------------------- General routines -----------------------------*/
-static const char *bond_mode_name(int mode)
+const char *bond_mode_name(int mode)
{
static const char *names[] = {
[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
@@ -418,46 +412,15 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
* @bond: bond device that got this skb for tx.
* @skb: hw accel VLAN tagged skb to transmit
* @slave_dev: slave that is supposed to xmit this skbuff
- *
- * When the bond gets an skb to transmit that is
- * already hardware accelerated VLAN tagged, and it
- * needs to relay this skb to a slave that is not
- * hw accel capable, the skb needs to be "unaccelerated",
- * i.e. strip the hwaccel tag and re-insert it as part
- * of the payload.
*/
int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *slave_dev)
{
- unsigned short uninitialized_var(vlan_id);
-
- /* Test vlan_list not vlgrp to catch and handle 802.1p tags */
- if (!list_empty(&bond->vlan_list) &&
- !(slave_dev->features & NETIF_F_HW_VLAN_TX) &&
- vlan_get_tag(skb, &vlan_id) == 0) {
- skb->dev = slave_dev;
- skb = vlan_put_tag(skb, vlan_id);
- if (!skb) {
- /* vlan_put_tag() frees the skb in case of error,
- * so return success here so the calling functions
- * won't attempt to free is again.
- */
- return 0;
- }
- } else {
- skb->dev = slave_dev;
- }
-
+ skb->dev = slave_dev;
skb->priority = 1;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
- struct netpoll *np = bond->dev->npinfo->netpoll;
- slave_dev->npinfo = bond->dev->npinfo;
- slave_dev->priv_flags |= IFF_IN_NETPOLL;
- netpoll_send_skb_on_dev(np, skb, slave_dev);
- slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
- } else
-#endif
+ if (unlikely(netpoll_tx_running(slave_dev)))
+ bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
+ else
dev_queue_xmit(skb);
return 0;
@@ -873,17 +836,11 @@ static void bond_mc_del(struct bonding *bond, void *addr)
static void __bond_resend_igmp_join_requests(struct net_device *dev)
{
struct in_device *in_dev;
- struct ip_mc_list *im;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
- if (in_dev) {
- read_lock(&in_dev->mc_list_lock);
- for (im = in_dev->mc_list; im; im = im->next)
- ip_mc_rejoin_group(im);
- read_unlock(&in_dev->mc_list_lock);
- }
-
+ if (in_dev)
+ ip_mc_rejoin_groups(in_dev);
rcu_read_unlock();
}
@@ -1203,11 +1160,13 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
bond_do_fail_over_mac(bond, new_active,
old_active);
- bond->send_grat_arp = bond->params.num_grat_arp;
- bond_send_gratuitous_arp(bond);
+ if (netif_running(bond->dev)) {
+ bond->send_grat_arp = bond->params.num_grat_arp;
+ bond_send_gratuitous_arp(bond);
- bond->send_unsol_na = bond->params.num_unsol_na;
- bond_send_unsolicited_na(bond);
+ bond->send_unsol_na = bond->params.num_unsol_na;
+ bond_send_unsolicited_na(bond);
+ }
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
@@ -1221,8 +1180,9 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
/* resend IGMP joins since active slave has changed or
* all were sent on curr_active_slave */
- if ((USES_PRIMARY(bond->params.mode) && new_active) ||
- bond->params.mode == BOND_MODE_ROUNDROBIN) {
+ if (((USES_PRIMARY(bond->params.mode) && new_active) ||
+ bond->params.mode == BOND_MODE_ROUNDROBIN) &&
+ netif_running(bond->dev)) {
bond->igmp_retrans = bond->params.resend_igmp;
queue_delayed_work(bond->wq, &bond->mcast_work, 0);
}
@@ -1316,63 +1276,103 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * You must hold read lock on bond->lock before calling this.
- */
-static bool slaves_support_netpoll(struct net_device *bond_dev)
+static inline int slave_enable_netpoll(struct slave *slave)
{
- struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
- int i = 0;
- bool ret = true;
+ struct netpoll *np;
+ int err = 0;
- bond_for_each_slave(bond, slave, i) {
- if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
- !slave->dev->netdev_ops->ndo_poll_controller)
- ret = false;
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!np)
+ goto out;
+
+ np->dev = slave->dev;
+ err = __netpoll_setup(np);
+ if (err) {
+ kfree(np);
+ goto out;
}
- return i != 0 && ret;
+ slave->np = np;
+out:
+ return err;
+}
+static inline void slave_disable_netpoll(struct slave *slave)
+{
+ struct netpoll *np = slave->np;
+
+ if (!np)
+ return;
+
+ slave->np = NULL;
+ synchronize_rcu_bh();
+ __netpoll_cleanup(np);
+ kfree(np);
+}
+static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
+{
+ if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
+ return false;
+ if (!slave_dev->netdev_ops->ndo_poll_controller)
+ return false;
+ return true;
}
static void bond_poll_controller(struct net_device *bond_dev)
{
- struct bonding *bond = netdev_priv(bond_dev);
+}
+
+static void __bond_netpoll_cleanup(struct bonding *bond)
+{
struct slave *slave;
int i;
- bond_for_each_slave(bond, slave, i) {
- if (slave->dev && IS_UP(slave->dev))
- netpoll_poll_dev(slave->dev);
- }
+ bond_for_each_slave(bond, slave, i)
+ if (IS_UP(slave->dev))
+ slave_disable_netpoll(slave);
}
-
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+
+ read_lock(&bond->lock);
+ __bond_netpoll_cleanup(bond);
+ read_unlock(&bond->lock);
+}
+
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+{
+ struct bonding *bond = netdev_priv(dev);
struct slave *slave;
- const struct net_device_ops *ops;
- int i;
+ int i, err = 0;
read_lock(&bond->lock);
- bond_dev->npinfo = NULL;
bond_for_each_slave(bond, slave, i) {
- if (slave->dev) {
- ops = slave->dev->netdev_ops;
- if (ops->ndo_netpoll_cleanup)
- ops->ndo_netpoll_cleanup(slave->dev);
- else
- slave->dev->npinfo = NULL;
+ err = slave_enable_netpoll(slave);
+ if (err) {
+ __bond_netpoll_cleanup(bond);
+ break;
}
}
read_unlock(&bond->lock);
+ return err;
}
-#else
+static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
+{
+ return bond->dev->npinfo;
+}
+#else
+static inline int slave_enable_netpoll(struct slave *slave)
+{
+ return 0;
+}
+static inline void slave_disable_netpoll(struct slave *slave)
+{
+}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
-
#endif
/*---------------------------------- IOCTL ----------------------------------*/
@@ -1400,8 +1400,8 @@ static int bond_compute_features(struct bonding *bond)
{
struct slave *slave;
struct net_device *bond_dev = bond->dev;
- unsigned long features = bond_dev->features;
- unsigned long vlan_features = 0;
+ u32 features = bond_dev->features;
+ u32 vlan_features = 0;
unsigned short max_hard_header_len = max((u16)ETH_HLEN,
bond_dev->hard_header_len);
int i;
@@ -1428,8 +1428,8 @@ static int bond_compute_features(struct bonding *bond)
done:
features |= (bond_dev->features & BOND_VLAN_FEATURES);
- bond_dev->features = netdev_fix_features(features, NULL);
- bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL);
+ bond_dev->features = netdev_fix_features(bond_dev, features);
+ bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
bond_dev->hard_header_len = max_hard_header_len;
return 0;
@@ -1451,6 +1451,77 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
bond->setup_by_slave = 1;
}
+/* On bonding slaves other than the currently active slave, suppress
+ * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
+ * ARP on active-backup slaves with arp_validate enabled.
+ */
+static bool bond_should_deliver_exact_match(struct sk_buff *skb,
+ struct slave *slave,
+ struct bonding *bond)
+{
+ if (bond_is_slave_inactive(slave)) {
+ if (slave_do_arp_validate(bond, slave) &&
+ skb->protocol == __cpu_to_be16(ETH_P_ARP))
+ return false;
+
+ if (bond->params.mode == BOND_MODE_ALB &&
+ skb->pkt_type != PACKET_BROADCAST &&
+ skb->pkt_type != PACKET_MULTICAST)
+ return false;
+
+ if (bond->params.mode == BOND_MODE_8023AD &&
+ skb->protocol == __cpu_to_be16(ETH_P_SLOW))
+ return false;
+
+ return true;
+ }
+ return false;
+}
+
+static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct slave *slave;
+ struct net_device *bond_dev;
+ struct bonding *bond;
+
+ slave = bond_slave_get_rcu(skb->dev);
+ bond_dev = ACCESS_ONCE(slave->dev->master);
+ if (unlikely(!bond_dev))
+ return RX_HANDLER_PASS;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return RX_HANDLER_CONSUMED;
+
+ *pskb = skb;
+
+ bond = netdev_priv(bond_dev);
+
+ if (bond->params.arp_interval)
+ slave->dev->last_rx = jiffies;
+
+ if (bond_should_deliver_exact_match(skb, slave, bond)) {
+ return RX_HANDLER_EXACT;
+ }
+
+ skb->dev = bond_dev;
+
+ if (bond->params.mode == BOND_MODE_ALB &&
+ bond_dev->priv_flags & IFF_BRIDGE_PORT &&
+ skb->pkt_type == PACKET_HOST) {
+
+ if (unlikely(skb_cow_head(skb,
+ skb->data - skb_mac_header(skb)))) {
+ kfree_skb(skb);
+ return RX_HANDLER_CONSUMED;
+ }
+ memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN);
+ }
+
+ return RX_HANDLER_ANOTHER;
+}
+
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
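bond_handle_frame() above is a per-slave rx_handler; the next hunk attaches it to each slave with netdev_rx_handler_register(). Its return value steers the core receive path: RX_HANDLER_CONSUMED means the handler took the skb, RX_HANDLER_ANOTHER re-runs delivery now that skb->dev points at the bond, RX_HANDLER_EXACT limits delivery to exact-match protocol taps (used here to suppress duplicates on inactive slaves), and RX_HANDLER_PASS behaves as if no handler were installed. A minimal handler of the same shape, with made-up names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustration only: claim the buffer, redirect it to an upper device
 * stored as the handler's private data, and re-run delivery there.
 */
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct net_device *upper = rcu_dereference(skb->dev->rx_handler_data);

        if (unlikely(!upper))
                return RX_HANDLER_PASS;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return RX_HANDLER_CONSUMED;
        *pskb = skb;

        skb->dev = upper;
        return RX_HANDLER_ANOTHER;
}

/* attach/detach, typically under RTNL:
 *      netdev_rx_handler_register(slave_dev, example_handle_frame, upper);
 *      netdev_rx_handler_unregister(slave_dev);
 */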
@@ -1576,7 +1647,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (bond->slave_cnt == 0)
+ if (is_zero_ether_addr(bond->dev->dev_addr))
memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
slave_dev->addr_len);
@@ -1622,16 +1693,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
- res = netdev_set_master(slave_dev, bond_dev);
+ res = netdev_set_bond_master(slave_dev, bond_dev);
if (res) {
- pr_debug("Error %d calling netdev_set_master\n", res);
+ pr_debug("Error %d calling netdev_set_bond_master\n", res);
goto err_restore_mac;
}
+ res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
+ new_slave);
+ if (res) {
+ pr_debug("Error %d calling netdev_rx_handler_register\n", res);
+ goto err_unset_master;
+ }
+
/* open the slave since the application closed it */
res = dev_open(slave_dev);
if (res) {
pr_debug("Opening slave %s failed\n", slave_dev->name);
- goto err_unset_master;
+ goto err_unreg_rxhandler;
}
new_slave->dev = slave_dev;
@@ -1785,7 +1863,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
break;
case BOND_MODE_TLB:
case BOND_MODE_ALB:
- new_slave->state = BOND_STATE_ACTIVE;
+ bond_set_active_slave(new_slave);
bond_set_slave_inactive_flags(new_slave);
bond_select_active_slave(bond);
break;
@@ -1793,7 +1871,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
pr_debug("This slave is always active in trunk mode\n");
/* always active in trunk mode */
- new_slave->state = BOND_STATE_ACTIVE;
+ bond_set_active_slave(new_slave);
/* In trunking mode there is little meaning to curr_active_slave
* anyway (it holds no special properties of the bond device),
@@ -1810,17 +1888,19 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
#ifdef CONFIG_NET_POLL_CONTROLLER
- if (slaves_support_netpoll(bond_dev)) {
- bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
- if (bond_dev->npinfo)
- slave_dev->npinfo = bond_dev->npinfo;
- } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
- bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
- pr_info("New slave device %s does not support netpoll\n",
- slave_dev->name);
- pr_info("Disabling netpoll support for %s\n", bond_dev->name);
+ slave_dev->npinfo = bond_netpoll_info(bond);
+ if (slave_dev->npinfo) {
+ if (slave_enable_netpoll(new_slave)) {
+ read_unlock(&bond->lock);
+ pr_info("Error, %s: master_dev is using netpoll, "
+ "but new slave device does not support netpoll.\n",
+ bond_dev->name);
+ res = -EBUSY;
+ goto err_close;
+ }
}
#endif
+
read_unlock(&bond->lock);
res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1829,7 +1909,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
bond_dev->name, slave_dev->name,
- new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup",
+ bond_is_active_slave(new_slave) ? "n active" : " backup",
new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
/* enslave is successful */
@@ -1839,8 +1919,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
err_close:
dev_close(slave_dev);
+err_unreg_rxhandler:
+ netdev_rx_handler_unregister(slave_dev);
+ synchronize_net();
+
err_unset_master:
- netdev_set_master(slave_dev, NULL);
+ netdev_set_bond_master(slave_dev, NULL);
err_restore_mac:
if (!bond->params.fail_over_mac) {
@@ -1923,7 +2007,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
pr_info("%s: releasing %s interface %s\n",
bond_dev->name,
- (slave->state == BOND_STATE_ACTIVE) ? "active" : "backup",
+ bond_is_active_slave(slave) ? "active" : "backup",
slave_dev->name);
oldcurrent = bond->curr_active_slave;
@@ -2020,19 +2104,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_unlock_bh(bond_dev);
}
- netdev_set_master(slave_dev, NULL);
+ netdev_rx_handler_unregister(slave_dev);
+ synchronize_net();
+ netdev_set_bond_master(slave_dev, NULL);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- read_lock_bh(&bond->lock);
-
- if (slaves_support_netpoll(bond_dev))
- bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
- read_unlock_bh(&bond->lock);
- if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
- slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
- else
- slave_dev->npinfo = NULL;
-#endif
+ slave_disable_netpoll(slave);
/* close slave before restoring its mac address */
dev_close(slave_dev);
@@ -2046,9 +2122,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
dev_set_mtu(slave_dev, slave->original_mtu);
- slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
- IFF_SLAVE_INACTIVE | IFF_BONDING |
- IFF_SLAVE_NEEDARP);
+ slave_dev->priv_flags &= ~IFF_BONDING;
kfree(slave);
@@ -2067,6 +2141,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
ret = bond_release(bond_dev, slave_dev);
if ((ret == 0) && (bond->slave_cnt == 0)) {
+ bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
pr_info("%s: destroying bond %s.\n",
bond_dev->name, bond_dev->name);
unregister_netdevice(bond_dev);
@@ -2142,7 +2217,11 @@ static int bond_release_all(struct net_device *bond_dev)
netif_addr_unlock_bh(bond_dev);
}
- netdev_set_master(slave_dev, NULL);
+ netdev_rx_handler_unregister(slave_dev);
+ synchronize_net();
+ netdev_set_bond_master(slave_dev, NULL);
+
+ slave_disable_netpoll(slave);
/* close slave before restoring its mac address */
dev_close(slave_dev);
@@ -2154,9 +2233,6 @@ static int bond_release_all(struct net_device *bond_dev)
dev_set_mac_address(slave_dev, &addr);
}
- slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB |
- IFF_SLAVE_INACTIVE);
-
kfree(slave);
/* re-acquire the lock before getting the next slave */
@@ -2270,7 +2346,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
res = 0;
strcpy(info->slave_name, slave->dev->name);
info->link = slave->link;
- info->state = slave->state;
+ info->state = bond_slave_state(slave);
info->link_failure_count = slave->link_failure_count;
break;
}
@@ -2309,7 +2385,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond->dev->name,
(bond->params.mode ==
BOND_MODE_ACTIVEBACKUP) ?
- ((slave->state == BOND_STATE_ACTIVE) ?
+ (bond_is_active_slave(slave) ?
"active " : "backup ") : "",
slave->dev->name,
bond->params.downdelay * bond->params.miimon);
@@ -2400,13 +2476,13 @@ static void bond_miimon_commit(struct bonding *bond)
if (bond->params.mode == BOND_MODE_8023AD) {
/* prevent it from being the active one */
- slave->state = BOND_STATE_BACKUP;
+ bond_set_backup_slave(slave);
} else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* make it immediately active */
- slave->state = BOND_STATE_ACTIVE;
+ bond_set_active_slave(slave);
} else if (slave != bond->primary_slave) {
/* prevent it from being the active one */
- slave->state = BOND_STATE_BACKUP;
+ bond_set_backup_slave(slave);
}
bond_update_speed_duplex(slave);
@@ -2599,11 +2675,10 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
- int i, vlan_id, rv;
+ int i, vlan_id;
__be32 *targets = bond->params.arp_targets;
struct vlan_entry *vlan;
struct net_device *vlan_dev;
- struct flowi fl;
struct rtable *rt;
for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
@@ -2622,15 +2697,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
* determine which VLAN interface would be used, so we
* can tag the ARP with the proper VLAN tag.
*/
- memset(&fl, 0, sizeof(fl));
- fl.fl4_dst = targets[i];
- fl.fl4_tos = RTO_ONLINK;
-
- rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
- if (rv) {
+ rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
+ RTO_ONLINK, 0);
+ if (IS_ERR(rt)) {
if (net_ratelimit()) {
pr_warning("%s: no route to arp_ip_target %pI4\n",
- bond->dev->name, &fl.fl4_dst);
+ bond->dev->name, &targets[i]);
}
continue;
}
@@ -2666,7 +2738,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
if (net_ratelimit()) {
pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
- bond->dev->name, &fl.fl4_dst,
+ bond->dev->name, &targets[i],
rt->dst.dev ? rt->dst.dev->name : "NULL");
}
ip_rt_put(rt);
@@ -2761,6 +2833,10 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
if (!slave || !slave_do_arp_validate(bond, slave))
goto out_unlock;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out_unlock;
+
if (!pskb_may_pull(skb, arp_hdr_len(dev)))
goto out_unlock;
@@ -2780,7 +2856,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
memcpy(&tip, arp_ptr, 4);
pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
- bond->dev->name, slave->dev->name, slave->state,
+ bond->dev->name, slave->dev->name, bond_slave_state(slave),
bond->params.arp_validate, slave_do_arp_validate(bond, slave),
&sip, &tip);
@@ -2792,7 +2868,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
* the active, through one switch, the router, then the other
* switch before reaching the backup.
*/
- if (slave->state == BOND_STATE_ACTIVE)
+ if (bond_is_active_slave(slave))
bond_validate_arp(bond, slave, sip, tip);
else
bond_validate_arp(bond, slave, tip, sip);
@@ -2854,7 +2930,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
slave->dev->last_rx + delta_in_ticks)) {
slave->link = BOND_LINK_UP;
- slave->state = BOND_STATE_ACTIVE;
+ bond_set_active_slave(slave);
/* primary_slave has no meaning in round-robin
* mode. the window of a slave being up and
@@ -2887,7 +2963,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
slave->dev->last_rx + 2 * delta_in_ticks)) {
slave->link = BOND_LINK_DOWN;
- slave->state = BOND_STATE_BACKUP;
+ bond_set_backup_slave(slave);
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
@@ -2981,7 +3057,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
* gives each slave a chance to tx/rx traffic
* before being taken out
*/
- if (slave->state == BOND_STATE_BACKUP &&
+ if (!bond_is_active_slave(slave) &&
!bond->current_arp_slave &&
!time_in_range(jiffies,
slave_last_rx(bond, slave) - delta_in_ticks,
@@ -2998,7 +3074,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
* the bond has an IP address)
*/
trans_start = dev_trans_start(slave->dev);
- if ((slave->state == BOND_STATE_ACTIVE) &&
+ if (bond_is_active_slave(slave) &&
(!time_in_range(jiffies,
trans_start - delta_in_ticks,
trans_start + 2 * delta_in_ticks) ||
@@ -3206,299 +3282,6 @@ out:
read_unlock(&bond->lock);
}
-/*------------------------------ proc/seq_file-------------------------------*/
-
-#ifdef CONFIG_PROC_FS
-
-static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(&dev_base_lock)
- __acquires(&bond->lock)
-{
- struct bonding *bond = seq->private;
- loff_t off = 0;
- struct slave *slave;
- int i;
-
- /* make sure the bond won't be taken away */
- read_lock(&dev_base_lock);
- read_lock(&bond->lock);
-
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- bond_for_each_slave(bond, slave, i) {
- if (++off == *pos)
- return slave;
- }
-
- return NULL;
-}
-
-static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- struct bonding *bond = seq->private;
- struct slave *slave = v;
-
- ++*pos;
- if (v == SEQ_START_TOKEN)
- return bond->first_slave;
-
- slave = slave->next;
-
- return (slave == bond->first_slave) ? NULL : slave;
-}
-
-static void bond_info_seq_stop(struct seq_file *seq, void *v)
- __releases(&bond->lock)
- __releases(&dev_base_lock)
-{
- struct bonding *bond = seq->private;
-
- read_unlock(&bond->lock);
- read_unlock(&dev_base_lock);
-}
-
-static void bond_info_show_master(struct seq_file *seq)
-{
- struct bonding *bond = seq->private;
- struct slave *curr;
- int i;
-
- read_lock(&bond->curr_slave_lock);
- curr = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
-
- seq_printf(seq, "Bonding Mode: %s",
- bond_mode_name(bond->params.mode));
-
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
- bond->params.fail_over_mac)
- seq_printf(seq, " (fail_over_mac %s)",
- fail_over_mac_tbl[bond->params.fail_over_mac].modename);
-
- seq_printf(seq, "\n");
-
- if (bond->params.mode == BOND_MODE_XOR ||
- bond->params.mode == BOND_MODE_8023AD) {
- seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
- xmit_hashtype_tbl[bond->params.xmit_policy].modename,
- bond->params.xmit_policy);
- }
-
- if (USES_PRIMARY(bond->params.mode)) {
- seq_printf(seq, "Primary Slave: %s",
- (bond->primary_slave) ?
- bond->primary_slave->dev->name : "None");
- if (bond->primary_slave)
- seq_printf(seq, " (primary_reselect %s)",
- pri_reselect_tbl[bond->params.primary_reselect].modename);
-
- seq_printf(seq, "\nCurrently Active Slave: %s\n",
- (curr) ? curr->dev->name : "None");
- }
-
- seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ?
- "up" : "down");
- seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
- seq_printf(seq, "Up Delay (ms): %d\n",
- bond->params.updelay * bond->params.miimon);
- seq_printf(seq, "Down Delay (ms): %d\n",
- bond->params.downdelay * bond->params.miimon);
-
-
- /* ARP information */
- if (bond->params.arp_interval > 0) {
- int printed = 0;
- seq_printf(seq, "ARP Polling Interval (ms): %d\n",
- bond->params.arp_interval);
-
- seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
-
- for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
- if (!bond->params.arp_targets[i])
- break;
- if (printed)
- seq_printf(seq, ",");
- seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
- printed = 1;
- }
- seq_printf(seq, "\n");
- }
-
- if (bond->params.mode == BOND_MODE_8023AD) {
- struct ad_info ad_info;
-
- seq_puts(seq, "\n802.3ad info\n");
- seq_printf(seq, "LACP rate: %s\n",
- (bond->params.lacp_fast) ? "fast" : "slow");
- seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
- ad_select_tbl[bond->params.ad_select].modename);
-
- if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
- seq_printf(seq, "bond %s has no active aggregator\n",
- bond->dev->name);
- } else {
- seq_printf(seq, "Active Aggregator Info:\n");
-
- seq_printf(seq, "\tAggregator ID: %d\n",
- ad_info.aggregator_id);
- seq_printf(seq, "\tNumber of ports: %d\n",
- ad_info.ports);
- seq_printf(seq, "\tActor Key: %d\n",
- ad_info.actor_key);
- seq_printf(seq, "\tPartner Key: %d\n",
- ad_info.partner_key);
- seq_printf(seq, "\tPartner Mac Address: %pM\n",
- ad_info.partner_system);
- }
- }
-}
-
-static void bond_info_show_slave(struct seq_file *seq,
- const struct slave *slave)
-{
- struct bonding *bond = seq->private;
-
- seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
- seq_printf(seq, "MII Status: %s\n",
- (slave->link == BOND_LINK_UP) ? "up" : "down");
- seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
- seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
- seq_printf(seq, "Link Failure Count: %u\n",
- slave->link_failure_count);
-
- seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
-
- if (bond->params.mode == BOND_MODE_8023AD) {
- const struct aggregator *agg
- = SLAVE_AD_INFO(slave).port.aggregator;
-
- if (agg)
- seq_printf(seq, "Aggregator ID: %d\n",
- agg->aggregator_identifier);
- else
- seq_puts(seq, "Aggregator ID: N/A\n");
- }
- seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
-}
-
-static int bond_info_seq_show(struct seq_file *seq, void *v)
-{
- if (v == SEQ_START_TOKEN) {
- seq_printf(seq, "%s\n", version);
- bond_info_show_master(seq);
- } else
- bond_info_show_slave(seq, v);
-
- return 0;
-}
-
-static const struct seq_operations bond_info_seq_ops = {
- .start = bond_info_seq_start,
- .next = bond_info_seq_next,
- .stop = bond_info_seq_stop,
- .show = bond_info_seq_show,
-};
-
-static int bond_info_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- struct proc_dir_entry *proc;
- int res;
-
- res = seq_open(file, &bond_info_seq_ops);
- if (!res) {
- /* recover the pointer buried in proc_dir_entry data */
- seq = file->private_data;
- proc = PDE(inode);
- seq->private = proc->data;
- }
-
- return res;
-}
-
-static const struct file_operations bond_info_fops = {
- .owner = THIS_MODULE,
- .open = bond_info_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-static void bond_create_proc_entry(struct bonding *bond)
-{
- struct net_device *bond_dev = bond->dev;
- struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
-
- if (bn->proc_dir) {
- bond->proc_entry = proc_create_data(bond_dev->name,
- S_IRUGO, bn->proc_dir,
- &bond_info_fops, bond);
- if (bond->proc_entry == NULL)
- pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
- DRV_NAME, bond_dev->name);
- else
- memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
- }
-}
-
-static void bond_remove_proc_entry(struct bonding *bond)
-{
- struct net_device *bond_dev = bond->dev;
- struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
-
- if (bn->proc_dir && bond->proc_entry) {
- remove_proc_entry(bond->proc_file_name, bn->proc_dir);
- memset(bond->proc_file_name, 0, IFNAMSIZ);
- bond->proc_entry = NULL;
- }
-}
-
-/* Create the bonding directory under /proc/net, if doesn't exist yet.
- * Caller must hold rtnl_lock.
- */
-static void __net_init bond_create_proc_dir(struct bond_net *bn)
-{
- if (!bn->proc_dir) {
- bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
- if (!bn->proc_dir)
- pr_warning("Warning: cannot create /proc/net/%s\n",
- DRV_NAME);
- }
-}
-
-/* Destroy the bonding directory under /proc/net, if empty.
- * Caller must hold rtnl_lock.
- */
-static void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
-{
- if (bn->proc_dir) {
- remove_proc_entry(DRV_NAME, bn->net->proc_net);
- bn->proc_dir = NULL;
- }
-}
-
-#else /* !CONFIG_PROC_FS */
-
-static void bond_create_proc_entry(struct bonding *bond)
-{
-}
-
-static void bond_remove_proc_entry(struct bonding *bond)
-{
-}
-
-static inline void bond_create_proc_dir(struct bond_net *bn)
-{
-}
-
-static inline void bond_destroy_proc_dir(struct bond_net *bn)
-{
-}
-
-#endif /* CONFIG_PROC_FS */
-
-
/*-------------------------- netdev event handling --------------------------*/
/*
@@ -3509,6 +3292,8 @@ static int bond_event_changename(struct bonding *bond)
bond_remove_proc_entry(bond);
bond_create_proc_entry(bond);
+ bond_debug_reregister(bond);
+
return NOTIFY_DONE;
}
@@ -4353,7 +4138,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
bond_for_each_slave_from(bond, slave, i, start_at) {
if (IS_UP(slave->dev) &&
(slave->link == BOND_LINK_UP) &&
- (slave->state == BOND_STATE_ACTIVE)) {
+ bond_is_active_slave(slave)) {
res = bond_dev_queue_xmit(bond, skb, slave->dev);
break;
}
@@ -4430,7 +4215,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
bond_for_each_slave_from(bond, slave, i, start_at) {
if (IS_UP(slave->dev) &&
(slave->link == BOND_LINK_UP) &&
- (slave->state == BOND_STATE_ACTIVE)) {
+ bond_is_active_slave(slave)) {
res = bond_dev_queue_xmit(bond, skb, slave->dev);
break;
}
@@ -4471,7 +4256,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
bond_for_each_slave_from(bond, slave, i, start_at) {
if (IS_UP(slave->dev) &&
(slave->link == BOND_LINK_UP) &&
- (slave->state == BOND_STATE_ACTIVE)) {
+ bond_is_active_slave(slave)) {
if (tx_dev) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
@@ -4559,11 +4344,18 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
{
/*
* This helper function exists to help dev_pick_tx get the correct
- * destination queue. Using a helper function skips the a call to
+ * destination queue. Using a helper function skips a call to
* skb_tx_hash and will put the skbs in the queue we expect on their
* way down to the bonding driver.
*/
- return skb->queue_mapping;
+ u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
+
+ if (unlikely(txq >= dev->real_num_tx_queues)) {
+ do
+ txq -= dev->real_num_tx_queues;
+ while (txq >= dev->real_num_tx_queues);
+ }
+ return txq;
}
static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
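As an aside, here is a minimal user-space sketch (hypothetical helper and test values, not part of the patch) of the reduction the reworked bond_select_queue() above performs: an out-of-range recorded rx queue is folded back into the range [0, real_num_tx_queues) by repeated subtraction, so the common in-range case costs only a single comparison.

#include <assert.h>

/* Fold a possibly out-of-range queue index back into [0, nqueues). */
static unsigned int fold_queue_index(unsigned int txq, unsigned int nqueues)
{
        while (txq >= nqueues)
                txq -= nqueues;
        return txq;
}

int main(void)
{
        assert(fold_queue_index(3, 8) == 3);    /* already in range */
        assert(fold_queue_index(9, 4) == 1);    /* wrapped once */
        assert(fold_queue_index(17, 4) == 1);   /* wrapped several times */
        return 0;
}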
@@ -4625,11 +4417,9 @@ void bond_set_mode_ops(struct bonding *bond, int mode)
case BOND_MODE_BROADCAST:
break;
case BOND_MODE_8023AD:
- bond_set_master_3ad_flags(bond);
bond_set_xmit_hash_policy(bond);
break;
case BOND_MODE_ALB:
- bond_set_master_alb_flags(bond);
/* FALLTHRU */
case BOND_MODE_TLB:
break;
@@ -4676,9 +4466,12 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_netpoll_setup = bond_netpoll_setup,
.ndo_netpoll_cleanup = bond_netpoll_cleanup,
.ndo_poll_controller = bond_poll_controller,
#endif
+ .ndo_add_slave = bond_enslave,
+ .ndo_del_slave = bond_release,
};
static void bond_destructor(struct net_device *bond_dev)
@@ -4717,9 +4510,6 @@ static void bond_setup(struct net_device *bond_dev)
bond_dev->priv_flags |= IFF_BONDING;
bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
- if (bond->params.arp_interval)
- bond_dev->priv_flags |= IFF_MASTER_ARPMON;
-
/* At first, we block adding VLANs. That's the only way to
* prevent problems that occur when adding VLANs over an
* empty bond. The block will be removed once non-challenged
@@ -4791,6 +4581,8 @@ static void bond_uninit(struct net_device *bond_dev)
bond_remove_proc_entry(bond);
+ bond_debug_unregister(bond);
+
__hw_addr_flush(&bond->mc_list);
list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
@@ -5186,13 +4978,13 @@ static int bond_init(struct net_device *bond_dev)
bond_set_lockdep_class(bond_dev);
- netif_carrier_off(bond_dev);
-
bond_create_proc_entry(bond);
list_add_tail(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
+ bond_debug_register(bond);
+
__hw_addr_init(&bond->mc_list);
return 0;
}
@@ -5255,6 +5047,8 @@ int bond_create(struct net *net, const char *name)
res = register_netdevice(bond_dev);
+ netif_carrier_off(bond_dev);
+
out:
rtnl_unlock();
if (res < 0)
@@ -5293,19 +5087,12 @@ static int __init bonding_init(void)
int i;
int res;
- pr_info("%s", version);
+ pr_info("%s", bond_version);
res = bond_check_params(&bonding_defaults);
if (res)
goto out;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- if (!alloc_cpumask_var(&netpoll_block_tx, GFP_KERNEL)) {
- res = -ENOMEM;
- goto out;
- }
-#endif
-
res = register_pernet_subsys(&bond_net_ops);
if (res)
goto out;
@@ -5314,6 +5101,8 @@ static int __init bonding_init(void)
if (res)
goto err_link;
+ bond_create_debugfs();
+
for (i = 0; i < max_bonds; i++) {
res = bond_create(&init_net, NULL);
if (res)
@@ -5324,7 +5113,6 @@ static int __init bonding_init(void)
if (res)
goto err;
-
register_netdevice_notifier(&bond_netdev_notifier);
register_inetaddr_notifier(&bond_inetaddr_notifier);
bond_register_ipv6_notifier();
@@ -5334,9 +5122,6 @@ err:
rtnl_link_unregister(&bond_link_ops);
err_link:
unregister_pernet_subsys(&bond_net_ops);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- free_cpumask_var(netpoll_block_tx);
-#endif
goto out;
}
@@ -5348,12 +5133,16 @@ static void __exit bonding_exit(void)
bond_unregister_ipv6_notifier();
bond_destroy_sysfs();
+ bond_destroy_debugfs();
rtnl_link_unregister(&bond_link_ops);
unregister_pernet_subsys(&bond_net_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER
- free_cpumask_var(netpoll_block_tx);
+ /*
+ * Make sure we don't have an imbalance on our netpoll blocking
+ */
+ WARN_ON(atomic_read(&netpoll_block_tx));
#endif
}
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
new file mode 100644
index 000000000000..c32ff55a34c1
--- /dev/null
+++ b/drivers/net/bonding/bond_procfs.c
@@ -0,0 +1,275 @@
+#include <linux/proc_fs.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include "bonding.h"
+
+
+extern const char *bond_mode_name(int mode);
+
+static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(RCU)
+ __acquires(&bond->lock)
+{
+ struct bonding *bond = seq->private;
+ loff_t off = 0;
+ struct slave *slave;
+ int i;
+
+ /* make sure the bond won't be taken away */
+ rcu_read_lock();
+ read_lock(&bond->lock);
+
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ bond_for_each_slave(bond, slave, i) {
+ if (++off == *pos)
+ return slave;
+ }
+
+ return NULL;
+}
+
+static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct bonding *bond = seq->private;
+ struct slave *slave = v;
+
+ ++*pos;
+ if (v == SEQ_START_TOKEN)
+ return bond->first_slave;
+
+ slave = slave->next;
+
+ return (slave == bond->first_slave) ? NULL : slave;
+}
+
+static void bond_info_seq_stop(struct seq_file *seq, void *v)
+ __releases(&bond->lock)
+ __releases(RCU)
+{
+ struct bonding *bond = seq->private;
+
+ read_unlock(&bond->lock);
+ rcu_read_unlock();
+}
+
+static void bond_info_show_master(struct seq_file *seq)
+{
+ struct bonding *bond = seq->private;
+ struct slave *curr;
+ int i;
+
+ read_lock(&bond->curr_slave_lock);
+ curr = bond->curr_active_slave;
+ read_unlock(&bond->curr_slave_lock);
+
+ seq_printf(seq, "Bonding Mode: %s",
+ bond_mode_name(bond->params.mode));
+
+ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
+ bond->params.fail_over_mac)
+ seq_printf(seq, " (fail_over_mac %s)",
+ fail_over_mac_tbl[bond->params.fail_over_mac].modename);
+
+ seq_printf(seq, "\n");
+
+ if (bond->params.mode == BOND_MODE_XOR ||
+ bond->params.mode == BOND_MODE_8023AD) {
+ seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
+ xmit_hashtype_tbl[bond->params.xmit_policy].modename,
+ bond->params.xmit_policy);
+ }
+
+ if (USES_PRIMARY(bond->params.mode)) {
+ seq_printf(seq, "Primary Slave: %s",
+ (bond->primary_slave) ?
+ bond->primary_slave->dev->name : "None");
+ if (bond->primary_slave)
+ seq_printf(seq, " (primary_reselect %s)",
+ pri_reselect_tbl[bond->params.primary_reselect].modename);
+
+ seq_printf(seq, "\nCurrently Active Slave: %s\n",
+ (curr) ? curr->dev->name : "None");
+ }
+
+ seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ?
+ "up" : "down");
+ seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon);
+ seq_printf(seq, "Up Delay (ms): %d\n",
+ bond->params.updelay * bond->params.miimon);
+ seq_printf(seq, "Down Delay (ms): %d\n",
+ bond->params.downdelay * bond->params.miimon);
+
+
+ /* ARP information */
+ if (bond->params.arp_interval > 0) {
+ int printed = 0;
+ seq_printf(seq, "ARP Polling Interval (ms): %d\n",
+ bond->params.arp_interval);
+
+ seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
+
+ for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
+ if (!bond->params.arp_targets[i])
+ break;
+ if (printed)
+ seq_printf(seq, ",");
+ seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
+ printed = 1;
+ }
+ seq_printf(seq, "\n");
+ }
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ struct ad_info ad_info;
+
+ seq_puts(seq, "\n802.3ad info\n");
+ seq_printf(seq, "LACP rate: %s\n",
+ (bond->params.lacp_fast) ? "fast" : "slow");
+ seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
+ ad_select_tbl[bond->params.ad_select].modename);
+
+ if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
+ seq_printf(seq, "bond %s has no active aggregator\n",
+ bond->dev->name);
+ } else {
+ seq_printf(seq, "Active Aggregator Info:\n");
+
+ seq_printf(seq, "\tAggregator ID: %d\n",
+ ad_info.aggregator_id);
+ seq_printf(seq, "\tNumber of ports: %d\n",
+ ad_info.ports);
+ seq_printf(seq, "\tActor Key: %d\n",
+ ad_info.actor_key);
+ seq_printf(seq, "\tPartner Key: %d\n",
+ ad_info.partner_key);
+ seq_printf(seq, "\tPartner Mac Address: %pM\n",
+ ad_info.partner_system);
+ }
+ }
+}
+
+static void bond_info_show_slave(struct seq_file *seq,
+ const struct slave *slave)
+{
+ struct bonding *bond = seq->private;
+
+ seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
+ seq_printf(seq, "MII Status: %s\n",
+ (slave->link == BOND_LINK_UP) ? "up" : "down");
+ seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
+ seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
+ seq_printf(seq, "Link Failure Count: %u\n",
+ slave->link_failure_count);
+
+ seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+ const struct aggregator *agg
+ = SLAVE_AD_INFO(slave).port.aggregator;
+
+ if (agg)
+ seq_printf(seq, "Aggregator ID: %d\n",
+ agg->aggregator_identifier);
+ else
+ seq_puts(seq, "Aggregator ID: N/A\n");
+ }
+ seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
+}
+
+static int bond_info_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq, "%s\n", bond_version);
+ bond_info_show_master(seq);
+ } else
+ bond_info_show_slave(seq, v);
+
+ return 0;
+}
+
+static const struct seq_operations bond_info_seq_ops = {
+ .start = bond_info_seq_start,
+ .next = bond_info_seq_next,
+ .stop = bond_info_seq_stop,
+ .show = bond_info_seq_show,
+};
+
+static int bond_info_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ struct proc_dir_entry *proc;
+ int res;
+
+ res = seq_open(file, &bond_info_seq_ops);
+ if (!res) {
+ /* recover the pointer buried in proc_dir_entry data */
+ seq = file->private_data;
+ proc = PDE(inode);
+ seq->private = proc->data;
+ }
+
+ return res;
+}
+
+static const struct file_operations bond_info_fops = {
+ .owner = THIS_MODULE,
+ .open = bond_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+void bond_create_proc_entry(struct bonding *bond)
+{
+ struct net_device *bond_dev = bond->dev;
+ struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
+
+ if (bn->proc_dir) {
+ bond->proc_entry = proc_create_data(bond_dev->name,
+ S_IRUGO, bn->proc_dir,
+ &bond_info_fops, bond);
+ if (bond->proc_entry == NULL)
+ pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
+ DRV_NAME, bond_dev->name);
+ else
+ memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
+ }
+}
+
+void bond_remove_proc_entry(struct bonding *bond)
+{
+ struct net_device *bond_dev = bond->dev;
+ struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
+
+ if (bn->proc_dir && bond->proc_entry) {
+ remove_proc_entry(bond->proc_file_name, bn->proc_dir);
+ memset(bond->proc_file_name, 0, IFNAMSIZ);
+ bond->proc_entry = NULL;
+ }
+}
+
+/* Create the bonding directory under /proc/net, if it doesn't exist yet.
+ * Caller must hold rtnl_lock.
+ */
+void __net_init bond_create_proc_dir(struct bond_net *bn)
+{
+ if (!bn->proc_dir) {
+ bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
+ if (!bn->proc_dir)
+ pr_warning("Warning: cannot create /proc/net/%s\n",
+ DRV_NAME);
+ }
+}
+
+/* Destroy the bonding directory under /proc/net, if empty.
+ * Caller must hold rtnl_lock.
+ */
+void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
+{
+ if (bn->proc_dir) {
+ remove_proc_entry(DRV_NAME, bn->net->proc_net);
+ bn->proc_dir = NULL;
+ }
+}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 8fd0174c5380..de87aea6d01a 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -118,7 +118,10 @@ static ssize_t bonding_store_bonds(struct class *cls,
pr_info("%s is being created...\n", ifname);
rv = bond_create(net, ifname);
if (rv) {
- pr_info("Bond creation failed.\n");
+ if (rv == -EEXIST)
+ pr_info("%s already exists.\n", ifname);
+ else
+ pr_info("%s creation failed.\n", ifname);
res = rv;
}
} else if (command[0] == '-') {
@@ -322,11 +325,6 @@ static ssize_t bonding_store_mode(struct device *d,
ret = -EINVAL;
goto out;
}
- if (bond->params.mode == BOND_MODE_8023AD)
- bond_unset_master_3ad_flags(bond);
-
- if (bond->params.mode == BOND_MODE_ALB)
- bond_unset_master_alb_flags(bond);
bond->params.mode = new_value;
bond_set_mode_ops(bond, bond->params.mode);
@@ -527,8 +525,6 @@ static ssize_t bonding_store_arp_interval(struct device *d,
pr_info("%s: Setting ARP monitoring interval to %d.\n",
bond->dev->name, new_value);
bond->params.arp_interval = new_value;
- if (bond->params.arp_interval)
- bond->dev->priv_flags |= IFF_MASTER_ARPMON;
if (bond->params.miimon) {
pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
bond->dev->name, bond->dev->name);
@@ -1004,7 +1000,6 @@ static ssize_t bonding_store_miimon(struct device *d,
pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
bond->dev->name);
bond->params.arp_interval = 0;
- bond->dev->priv_flags &= ~IFF_MASTER_ARPMON;
if (bond->params.arp_validate) {
bond_unregister_arp(bond);
bond->params.arp_validate =
@@ -1198,7 +1193,7 @@ static ssize_t bonding_store_carrier(struct device *d,
bond->dev->name, new_value);
}
out:
- return count;
+ return ret;
}
static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
bonding_show_carrier, bonding_store_carrier);
@@ -1587,15 +1582,15 @@ static ssize_t bonding_store_slaves_active(struct device *d,
}
bond_for_each_slave(bond, slave, i) {
- if (slave->state == BOND_STATE_BACKUP) {
+ if (!bond_is_active_slave(slave)) {
if (new_value)
- slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE;
+ slave->inactive = 0;
else
- slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
+ slave->inactive = 1;
}
}
out:
- return count;
+ return ret;
}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
bonding_show_slaves_active, bonding_store_slaves_active);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4eedb12df6ca..6b26962fd0ec 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -18,9 +18,9 @@
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/if_bonding.h>
-#include <linux/kobject.h>
#include <linux/cpumask.h>
#include <linux/in6.h>
+#include <linux/netpoll.h>
#include "bond_3ad.h"
#include "bond_alb.h"
@@ -29,6 +29,8 @@
#define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
+#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+
#define BOND_MAX_ARP_TARGETS 16
#define IS_UP(dev) \
@@ -53,7 +55,7 @@
(((slave)->dev->flags & IFF_UP) && \
netif_running((slave)->dev) && \
((slave)->link == BOND_LINK_UP) && \
- ((slave)->state == BOND_STATE_ACTIVE))
+ bond_is_active_slave(slave))
#define USES_PRIMARY(mode) \
@@ -119,26 +121,22 @@
#ifdef CONFIG_NET_POLL_CONTROLLER
-extern cpumask_var_t netpoll_block_tx;
+extern atomic_t netpoll_block_tx;
static inline void block_netpoll_tx(void)
{
- preempt_disable();
- BUG_ON(cpumask_test_and_set_cpu(smp_processor_id(),
- netpoll_block_tx));
+ atomic_inc(&netpoll_block_tx);
}
static inline void unblock_netpoll_tx(void)
{
- BUG_ON(!cpumask_test_and_clear_cpu(smp_processor_id(),
- netpoll_block_tx));
- preempt_enable();
+ atomic_dec(&netpoll_block_tx);
}
static inline int is_netpoll_tx_blocked(struct net_device *dev)
{
- if (unlikely(dev->priv_flags & IFF_IN_NETPOLL))
- return cpumask_test_cpu(smp_processor_id(), netpoll_block_tx);
+ if (unlikely(netpoll_tx_running(dev)))
+ return atomic_read(&netpoll_block_tx);
return 0;
}
#else
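For illustration, a stand-alone sketch of the counter scheme that replaces the per-CPU cpumask here, using C11 atomics in place of the kernel's atomic_t (the names mirror the patch, but the program itself is invented): block/unblock pairs may nest freely, and a non-zero count at teardown is the imbalance that the WARN_ON() added to bonding_exit() would catch.

#include <assert.h>
#include <stdatomic.h>

static atomic_int netpoll_block_tx;

static void block_netpoll_tx(void)
{
        atomic_fetch_add(&netpoll_block_tx, 1);
}

static void unblock_netpoll_tx(void)
{
        atomic_fetch_sub(&netpoll_block_tx, 1);
}

int main(void)
{
        block_netpoll_tx();
        block_netpoll_tx();             /* nesting is fine with a counter */
        unblock_netpoll_tx();
        unblock_netpoll_tx();
        assert(atomic_load(&netpoll_block_tx) == 0);    /* balanced */
        return 0;
}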
@@ -194,7 +192,9 @@ struct slave {
unsigned long last_arp_rx;
s8 link; /* one of BOND_LINK_XXXX */
s8 new_link;
- s8 state; /* one of BOND_STATE_XXXX */
+ u8 backup:1, /* indicates backup slave. Value corresponds with
+ BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
+ inactive:1; /* indicates inactive slave */
u32 original_mtu;
u32 link_failure_count;
u8 perm_hwaddr[ETH_ALEN];
@@ -203,6 +203,9 @@ struct slave {
u16 queue_id;
struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
struct tlb_slave_info tlb_info;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *np;
+#endif
};
/*
@@ -259,25 +262,33 @@ struct bonding {
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_addr master_ipv6;
#endif
+#ifdef CONFIG_DEBUG_FS
+ /* debugging support via debugfs */
+ struct dentry *debug_dir;
+#endif /* CONFIG_DEBUG_FS */
};
+#define bond_slave_get_rcu(dev) \
+ ((struct slave *) rcu_dereference(dev->rx_handler_data))
+
/**
* Returns NULL if the net_device does not belong to any of the bond's slaves
*
* Caller must hold bond lock for read
*/
-static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev)
+static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
+ struct net_device *slave_dev)
{
struct slave *slave = NULL;
int i;
bond_for_each_slave(bond, slave, i) {
if (slave->dev == slave_dev) {
- break;
+ return slave;
}
}
- return slave;
+ return NULL;
}
static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -286,7 +297,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
return NULL;
}
- return (struct bonding *)netdev_priv(slave->dev->master);
+ return netdev_priv(slave->dev->master);
}
static inline bool bond_is_lb(const struct bonding *bond)
@@ -295,6 +306,26 @@ static inline bool bond_is_lb(const struct bonding *bond)
bond->params.mode == BOND_MODE_ALB);
}
+static inline void bond_set_active_slave(struct slave *slave)
+{
+ slave->backup = 0;
+}
+
+static inline void bond_set_backup_slave(struct slave *slave)
+{
+ slave->backup = 1;
+}
+
+static inline int bond_slave_state(struct slave *slave)
+{
+ return slave->backup;
+}
+
+static inline bool bond_is_active_slave(struct slave *slave)
+{
+ return !bond_slave_state(slave);
+}
+
#define BOND_PRI_RESELECT_ALWAYS 0
#define BOND_PRI_RESELECT_BETTER 1
#define BOND_PRI_RESELECT_FAILURE 2
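A small stand-alone check (the struct and test program are invented; the BOND_STATE_* values 0 and 1 are those from linux/if_bonding.h) showing that the single backup bit introduced a few lines up carries the same information as the old state byte, so bond_slave_state() still yields values comparable with BOND_STATE_ACTIVE/BOND_STATE_BACKUP:

#include <assert.h>

#define BOND_STATE_ACTIVE       0       /* values as in linux/if_bonding.h */
#define BOND_STATE_BACKUP       1

struct slave {
        unsigned char backup:1;
};

static void bond_set_backup_slave(struct slave *s) { s->backup = 1; }
static void bond_set_active_slave(struct slave *s) { s->backup = 0; }
static int bond_slave_state(struct slave *s)       { return s->backup; }

int main(void)
{
        struct slave s = { .backup = 0 };

        bond_set_backup_slave(&s);
        assert(bond_slave_state(&s) == BOND_STATE_BACKUP);
        bond_set_active_slave(&s);
        assert(bond_slave_state(&s) == BOND_STATE_ACTIVE);
        return 0;
}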
@@ -312,7 +343,7 @@ static inline bool bond_is_lb(const struct bonding *bond)
static inline int slave_do_arp_validate(struct bonding *bond,
struct slave *slave)
{
- return bond->params.arp_validate & (1 << slave->state);
+ return bond->params.arp_validate & (1 << bond_slave_state(slave));
}
static inline unsigned long slave_last_rx(struct bonding *bond,
@@ -324,41 +355,40 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
return slave->dev->last_rx;
}
-static inline void bond_set_slave_inactive_flags(struct slave *slave)
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static inline void bond_netpoll_send_skb(const struct slave *slave,
+ struct sk_buff *skb)
{
- struct bonding *bond = netdev_priv(slave->dev->master);
- if (!bond_is_lb(bond))
- slave->state = BOND_STATE_BACKUP;
- if (!bond->params.all_slaves_active)
- slave->dev->priv_flags |= IFF_SLAVE_INACTIVE;
- if (slave_do_arp_validate(bond, slave))
- slave->dev->priv_flags |= IFF_SLAVE_NEEDARP;
-}
+ struct netpoll *np = slave->np;
-static inline void bond_set_slave_active_flags(struct slave *slave)
-{
- slave->state = BOND_STATE_ACTIVE;
- slave->dev->priv_flags &= ~(IFF_SLAVE_INACTIVE | IFF_SLAVE_NEEDARP);
+ if (np)
+ netpoll_send_skb(np, skb);
}
-
-static inline void bond_set_master_3ad_flags(struct bonding *bond)
+#else
+static inline void bond_netpoll_send_skb(const struct slave *slave,
+ struct sk_buff *skb)
{
- bond->dev->priv_flags |= IFF_MASTER_8023AD;
}
+#endif
-static inline void bond_unset_master_3ad_flags(struct bonding *bond)
+static inline void bond_set_slave_inactive_flags(struct slave *slave)
{
- bond->dev->priv_flags &= ~IFF_MASTER_8023AD;
+ struct bonding *bond = netdev_priv(slave->dev->master);
+ if (!bond_is_lb(bond))
+ bond_set_backup_slave(slave);
+ if (!bond->params.all_slaves_active)
+ slave->inactive = 1;
}
-static inline void bond_set_master_alb_flags(struct bonding *bond)
+static inline void bond_set_slave_active_flags(struct slave *slave)
{
- bond->dev->priv_flags |= IFF_MASTER_ALB;
+ bond_set_active_slave(slave);
+ slave->inactive = 0;
}
-static inline void bond_unset_master_alb_flags(struct bonding *bond)
+static inline bool bond_is_slave_inactive(struct slave *slave)
{
- bond->dev->priv_flags &= ~IFF_MASTER_ALB;
+ return slave->inactive;
}
struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
@@ -380,6 +410,11 @@ void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
void bond_register_arp(struct bonding *);
void bond_unregister_arp(struct bonding *);
+void bond_create_debugfs(void);
+void bond_destroy_debugfs(void);
+void bond_debug_register(struct bonding *bond);
+void bond_debug_unregister(struct bonding *bond);
+void bond_debug_reregister(struct bonding *bond);
struct bond_net {
struct net * net; /* Associated network namespace */
@@ -389,6 +424,30 @@ struct bond_net {
#endif
};
+#ifdef CONFIG_PROC_FS
+void bond_create_proc_entry(struct bonding *bond);
+void bond_remove_proc_entry(struct bonding *bond);
+void bond_create_proc_dir(struct bond_net *bn);
+void bond_destroy_proc_dir(struct bond_net *bn);
+#else
+static inline void bond_create_proc_entry(struct bonding *bond)
+{
+}
+
+static inline void bond_remove_proc_entry(struct bonding *bond)
+{
+}
+
+static inline void bond_create_proc_dir(struct bond_net *bn)
+{
+}
+
+static inline void bond_destroy_proc_dir(struct bond_net *bn)
+{
+}
+#endif
+
+
/* exported from bond_main.c */
extern int bond_net_id;
extern const struct bond_parm_tbl bond_lacp_tbl[];
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index b38d987da67d..9560b9d624bd 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -1,6 +1,4 @@
-ifeq ($(CONFIG_CAIF_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
# Serial interface
obj-$(CONFIG_CAIF_TTY) += caif_serial.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
index 1cd90da86f13..5f771ab712c4 100644
--- a/drivers/net/caif/caif_shm_u5500.c
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -5,13 +5,13 @@
* License terms: GNU General Public License (GPL) version 2
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <mach/mbox.h>
+#include <mach/mbox-db5500.h>
#include <net/caif/caif_shm.h>
MODULE_LICENSE("GPL");
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 19f9c0656667..80511167f35b 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -6,7 +6,7 @@
* License terms: GNU General Public License (GPL) version 2
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
#include <linux/spinlock.h>
#include <linux/sched.h>
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 080574b0fff0..1d699e3df547 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -12,6 +12,27 @@ config CAN_VCAN
This driver can also be built as a module. If so, the module
will be called vcan.
+config CAN_SLCAN
+ tristate "Serial / USB serial CAN Adaptors (slcan)"
+ depends on CAN
+ default n
+ ---help---
+ CAN driver for several 'low cost' CAN interfaces that are attached
+ via serial lines or via USB-to-serial adapters using the LAWICEL
+ ASCII protocol. The driver implements the tty line discipline N_SLCAN.
+
+ As only the sending and receiving of CAN frames is implemented, this
+ driver should work with the (serial/USB) CAN hardware from:
+ www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
+
+ Userspace tools to attach the SLCAN line discipline (slcan_attach,
+ slcand) can be found in the can-utils at the SocketCAN SVN, see
+ http://developer.berlios.de/projects/socketcan for details.
+
+ The slcan driver supports up to 10 CAN netdevices by default which
+ can be changed by the 'maxdev=xx' module option. This driver can
+ also be built as a module. If so, the module will be called slcan.
+
config CAN_DEV
tristate "Platform CAN drivers with Netlink support"
depends on CAN
@@ -94,8 +115,12 @@ source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
+source "drivers/net/can/c_can/Kconfig"
+
source "drivers/net/can/usb/Kconfig"
+source "drivers/net/can/softing/Kconfig"
+
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 90af15a4f106..24ebfe8d758a 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -3,14 +3,17 @@
#
obj-$(CONFIG_CAN_VCAN) += vcan.o
+obj-$(CONFIG_CAN_SLCAN) += slcan.o
obj-$(CONFIG_CAN_DEV) += can-dev.o
can-dev-y := dev.o
obj-y += usb/
+obj-y += softing/
obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_MSCAN) += mscan/
+obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 7ef83d06f7ed..57d2ffbbb433 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -2,7 +2,7 @@
* at91_can.c - CAN network driver for AT91 SoC CAN controller
*
* (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
- * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
*
* This software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -40,22 +41,23 @@
#include <mach/board.h>
-#define AT91_NAPI_WEIGHT 12
+#define AT91_NAPI_WEIGHT 11
/*
* RX/TX Mailbox split
* don't dare to touch
*/
-#define AT91_MB_RX_NUM 12
+#define AT91_MB_RX_NUM 11
#define AT91_MB_TX_SHIFT 2
-#define AT91_MB_RX_FIRST 0
+#define AT91_MB_RX_FIRST 1
#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1)
#define AT91_MB_RX_SPLIT 8
#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1)
-#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))
+#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \
+ ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST))
#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT)
#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1)
@@ -168,6 +170,8 @@ struct at91_priv {
struct clk *clk;
struct at91_can_data *pdata;
+
+ canid_t mb0_id;
};
static struct can_bittiming_const at91_bittiming_const = {
@@ -220,6 +224,18 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
set_mb_mode_prio(priv, mb, mode, 0);
}
+static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
+{
+ u32 reg_mid;
+
+ if (can_id & CAN_EFF_FLAG)
+ reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
+ else
+ reg_mid = (can_id & CAN_SFF_MASK) << 18;
+
+ return reg_mid;
+}
+
/*
 * Switch transceiver on or off
*/
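For reference, a stand-alone sketch of the MID encoding done by at91_can_id_to_reg_mid() above (the CAN_* masks are copied from linux/can.h; the MIDE bit position is assumed to be bit 29 here purely for illustration): standard 11-bit identifiers are shifted into bits 28..18, while extended identifiers keep all 29 bits and set the MIDE flag.

#include <assert.h>
#include <stdint.h>

#define CAN_EFF_FLAG    0x80000000U     /* as in linux/can.h */
#define CAN_EFF_MASK    0x1FFFFFFFU
#define CAN_SFF_MASK    0x000007FFU
#define MID_MIDE        (1U << 29)      /* extended-frame bit, assumed position */

static uint32_t can_id_to_reg_mid(uint32_t can_id)
{
        if (can_id & CAN_EFF_FLAG)
                return (can_id & CAN_EFF_MASK) | MID_MIDE;
        return (can_id & CAN_SFF_MASK) << 18;
}

int main(void)
{
        /* the default mb0_id of 0x7ff: a standard id lands in bits 28..18 */
        assert(can_id_to_reg_mid(0x7ff) == (0x7ffU << 18));
        /* an extended id keeps all 29 bits and sets the MIDE flag */
        assert(can_id_to_reg_mid(CAN_EFF_FLAG | 0x1234567) ==
               (0x1234567U | MID_MIDE));
        return 0;
}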
@@ -233,12 +249,22 @@ static void at91_setup_mailboxes(struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
unsigned int i;
+ u32 reg_mid;
/*
- * The first 12 mailboxes are used as a reception FIFO. The
- * last mailbox is configured with overwrite option. The
- * overwrite flag indicates a FIFO overflow.
+ * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
+ * mailbox is disabled. The next 11 mailboxes are used as a
+ * reception FIFO. The last mailbox is configured with
+ * overwrite option. The overwrite flag indicates a FIFO
+ * overflow.
*/
+ reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
+ for (i = 0; i < AT91_MB_RX_FIRST; i++) {
+ set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
+ at91_write(priv, AT91_MID(i), reg_mid);
+ at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */
+ }
+
for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
set_mb_mode(priv, i, AT91_MB_MODE_RX);
set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
@@ -254,7 +280,8 @@ static void at91_setup_mailboxes(struct net_device *dev)
set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
/* Reset tx and rx helper pointers */
- priv->tx_next = priv->tx_echo = priv->rx_next = 0;
+ priv->tx_next = priv->tx_echo = 0;
+ priv->rx_next = AT91_MB_RX_FIRST;
}
static int at91_set_bittiming(struct net_device *dev)
@@ -372,12 +399,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
return NETDEV_TX_BUSY;
}
-
- if (cf->can_id & CAN_EFF_FLAG)
- reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
- else
- reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
-
+ reg_mid = at91_can_id_to_reg_mid(cf->can_id);
reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
(cf->can_dlc << 16) | AT91_MCR_MTCR;
@@ -539,27 +561,31 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
*
* Theory of Operation:
*
- * 12 of the 16 mailboxes on the chip are reserved for RX. we split
- * them into 2 groups. The lower group holds 8 and upper 4 mailboxes.
+ * 11 of the 16 mailboxes on the chip are reserved for RX. we split
+ * them into 2 groups. The lower group holds 7 and upper 4 mailboxes.
*
* Like it or not, but the chip always saves a received CAN message
* into the first free mailbox it finds (starting with the
* lowest). This makes it very difficult to read the messages in the
* right order from the chip. This is how we work around that problem:
*
- * The first message goes into mb nr. 0 and issues an interrupt. All
+ * The first message goes into mb nr. 1 and issues an interrupt. All
* rx ints are disabled in the interrupt handler and a napi poll is
* scheduled. We read the mailbox, but do _not_ reenable the mb (to
* receive another message).
*
* lower mbxs upper
- * ______^______ __^__
- * / \ / \
+ * ____^______ __^__
+ * / \ / \
* +-+-+-+-+-+-+-+-++-+-+-+-+
- * |x|x|x|x|x|x|x|x|| | | | |
+ * | |x|x|x|x|x|x|x|| | | | |
* +-+-+-+-+-+-+-+-++-+-+-+-+
* 0 0 0 0 0 0 0 0 0 0 1 1 \ mail
* 0 1 2 3 4 5 6 7 8 9 0 1 / box
+ * ^
+ * |
+ * \
+ * unused, due to chip bug
*
* The variable priv->rx_next points to the next mailbox to read a
* message from. As long we're in the lower mailboxes we just read the
@@ -590,10 +616,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
"order of incoming frames cannot be guaranteed\n");
again:
- for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
- mb < AT91_MB_RX_NUM && quota > 0;
+ for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
+ mb < AT91_MB_RX_LAST + 1 && quota > 0;
reg_sr = at91_read(priv, AT91_SR),
- mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) {
+ mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
at91_read_msg(dev, mb);
/* reactivate mailboxes */
@@ -610,8 +636,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
/* upper group completed, look again in lower */
if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
- quota > 0 && mb >= AT91_MB_RX_NUM) {
- priv->rx_next = 0;
+ quota > 0 && mb > AT91_MB_RX_LAST) {
+ priv->rx_next = AT91_MB_RX_FIRST;
goto again;
}
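The two hunks above implement the scan order described in the driver's theory-of-operation comment. A stand-alone model of that order follows (mailbox numbering as in the patch, everything else invented): pending mailboxes are consumed from rx_next upward, and only once the scan has passed the lower group does it wrap back to the first usable mailbox.

#include <assert.h>

#define MB_RX_FIRST     1       /* mailbox 0 is unused due to the chip bug */
#define MB_RX_LAST      11
#define MB_RX_LOW_LAST  7

/* Return the next pending rx mailbox to read, or -1 if none are pending. */
static int next_rx_mailbox(unsigned long pending, int rx_next)
{
        int mb;

        for (mb = rx_next; mb <= MB_RX_LAST; mb++)
                if (pending & (1UL << mb))
                        return mb;

        /* upper group completed, look again in the lower group */
        if (rx_next > MB_RX_LOW_LAST)
                for (mb = MB_RX_FIRST; mb < rx_next; mb++)
                        if (pending & (1UL << mb))
                                return mb;

        return -1;
}

int main(void)
{
        /* mailboxes 2 and 9 pending, scan currently past the low group */
        unsigned long pending = (1UL << 2) | (1UL << 9);

        assert(next_rx_mailbox(pending, 8) == 9);       /* upper group first */
        assert(next_rx_mailbox(pending, 10) == 2);      /* then wrap to lower */
        return 0;
}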
@@ -1037,6 +1063,64 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_start_xmit = at91_start_xmit,
};
+static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct at91_priv *priv = netdev_priv(to_net_dev(dev));
+
+ if (priv->mb0_id & CAN_EFF_FLAG)
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
+ else
+ return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
+}
+
+static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct at91_priv *priv = netdev_priv(ndev);
+ unsigned long can_id;
+ ssize_t ret;
+ int err;
+
+ rtnl_lock();
+
+ if (ndev->flags & IFF_UP) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ err = strict_strtoul(buf, 0, &can_id);
+ if (err) {
+ ret = err;
+ goto out;
+ }
+
+ if (can_id & CAN_EFF_FLAG)
+ can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+ else
+ can_id &= CAN_SFF_MASK;
+
+ priv->mb0_id = can_id;
+ ret = count;
+
+ out:
+ rtnl_unlock();
+ return ret;
+}
+
+static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
+ at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
+
+static struct attribute *at91_sysfs_attrs[] = {
+ &dev_attr_mb0_id.attr,
+ NULL,
+};
+
+static struct attribute_group at91_sysfs_attr_group = {
+ .attrs = at91_sysfs_attrs,
+};
+
static int __devinit at91_can_probe(struct platform_device *pdev)
{
struct net_device *dev;
@@ -1082,6 +1166,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
dev->netdev_ops = &at91_netdev_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
+ dev->sysfs_groups[0] = &at91_sysfs_attr_group;
priv = netdev_priv(dev);
priv->can.clock.freq = clk_get_rate(clk);
@@ -1093,6 +1178,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
priv->dev = dev;
priv->clk = clk;
priv->pdata = pdev->dev.platform_data;
+ priv->mb0_id = 0x7ff;
netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
new file mode 100644
index 000000000000..ffb9773d102d
--- /dev/null
+++ b/drivers/net/can/c_can/Kconfig
@@ -0,0 +1,15 @@
+menuconfig CAN_C_CAN
+ tristate "Bosch C_CAN devices"
+ depends on CAN_DEV && HAS_IOMEM
+
+if CAN_C_CAN
+
+config CAN_C_CAN_PLATFORM
+ tristate "Generic Platform Bus based C_CAN driver"
+ ---help---
+ This driver adds support for the C_CAN chips connected to
+ the "platform bus" (the Linux abstraction for devices attached
+ directly to the processor), which can be found on various
+ boards from ST Microelectronics (http://www.st.com)
+ such as the SPEAr1310 and SPEAr320 evaluation boards.
+endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
new file mode 100644
index 000000000000..9273f6d5c4b7
--- /dev/null
+++ b/drivers/net/can/c_can/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Bosch C_CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_C_CAN) += c_can.o
+obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
new file mode 100644
index 000000000000..14050786218a
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.c
@@ -0,0 +1,1158 @@
+/*
+ * CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * TX and RX NAPI implementation has been borrowed from at91 CAN driver
+ * written by:
+ * Copyright
+ * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
+ * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include "c_can.h"
+
+/* control register */
+#define CONTROL_TEST BIT(7)
+#define CONTROL_CCE BIT(6)
+#define CONTROL_DISABLE_AR BIT(5)
+#define CONTROL_ENABLE_AR (0 << 5)
+#define CONTROL_EIE BIT(3)
+#define CONTROL_SIE BIT(2)
+#define CONTROL_IE BIT(1)
+#define CONTROL_INIT BIT(0)
+
+/* test register */
+#define TEST_RX BIT(7)
+#define TEST_TX1 BIT(6)
+#define TEST_TX2 BIT(5)
+#define TEST_LBACK BIT(4)
+#define TEST_SILENT BIT(3)
+#define TEST_BASIC BIT(2)
+
+/* status register */
+#define STATUS_BOFF BIT(7)
+#define STATUS_EWARN BIT(6)
+#define STATUS_EPASS BIT(5)
+#define STATUS_RXOK BIT(4)
+#define STATUS_TXOK BIT(3)
+
+/* error counter register */
+#define ERR_CNT_TEC_MASK 0xff
+#define ERR_CNT_TEC_SHIFT 0
+#define ERR_CNT_REC_SHIFT 8
+#define ERR_CNT_REC_MASK (0x7f << ERR_CNT_REC_SHIFT)
+#define ERR_CNT_RP_SHIFT 15
+#define ERR_CNT_RP_MASK (0x1 << ERR_CNT_RP_SHIFT)
+
+/* bit-timing register */
+#define BTR_BRP_MASK 0x3f
+#define BTR_BRP_SHIFT 0
+#define BTR_SJW_SHIFT 6
+#define BTR_SJW_MASK (0x3 << BTR_SJW_SHIFT)
+#define BTR_TSEG1_SHIFT 8
+#define BTR_TSEG1_MASK (0xf << BTR_TSEG1_SHIFT)
+#define BTR_TSEG2_SHIFT 12
+#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
+
+/* brp extension register */
+#define BRP_EXT_BRPE_MASK 0x0f
+#define BRP_EXT_BRPE_SHIFT 0
+
+/* IFx command request */
+#define IF_COMR_BUSY BIT(15)
+
+/* IFx command mask */
+#define IF_COMM_WR BIT(7)
+#define IF_COMM_MASK BIT(6)
+#define IF_COMM_ARB BIT(5)
+#define IF_COMM_CONTROL BIT(4)
+#define IF_COMM_CLR_INT_PND BIT(3)
+#define IF_COMM_TXRQST BIT(2)
+#define IF_COMM_DATAA BIT(1)
+#define IF_COMM_DATAB BIT(0)
+#define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \
+ IF_COMM_CONTROL | IF_COMM_TXRQST | \
+ IF_COMM_DATAA | IF_COMM_DATAB)
+
+/* IFx arbitration */
+#define IF_ARB_MSGVAL BIT(15)
+#define IF_ARB_MSGXTD BIT(14)
+#define IF_ARB_TRANSMIT BIT(13)
+
+/* IFx message control */
+#define IF_MCONT_NEWDAT BIT(15)
+#define IF_MCONT_MSGLST BIT(14)
+#define IF_MCONT_CLR_MSGLST (0 << 14)
+#define IF_MCONT_INTPND BIT(13)
+#define IF_MCONT_UMASK BIT(12)
+#define IF_MCONT_TXIE BIT(11)
+#define IF_MCONT_RXIE BIT(10)
+#define IF_MCONT_RMTEN BIT(9)
+#define IF_MCONT_TXRQST BIT(8)
+#define IF_MCONT_EOB BIT(7)
+#define IF_MCONT_DLC_MASK 0xf
+
+/*
+ * IFx register masks:
+ * allow easy operation on 16-bit registers when the
+ * argument is 32-bit instead
+ */
+#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
+#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
+
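A quick stand-alone check (the test program is invented) of what these two helpers do: a 32-bit value is split into the halves written to a pair of 16-bit IFx registers, and recombining the halves, as c_can_read_reg32() does on the read path, gives back the original value.

#include <assert.h>
#include <stdint.h>

#define IFX_WRITE_LOW_16BIT(x)  ((x) & 0xFFFF)
#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)

int main(void)
{
        uint32_t id = 0x1234ABCD;
        uint16_t lo = IFX_WRITE_LOW_16BIT(id);  /* written to the low register */
        uint16_t hi = IFX_WRITE_HIGH_16BIT(id); /* written to the high register */

        assert(lo == 0xABCD && hi == 0x1234);
        assert(((uint32_t)hi << 16 | lo) == id);        /* read path reassembles */
        return 0;
}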
+/* message object split */
+#define C_CAN_NO_OF_OBJECTS 32
+#define C_CAN_MSG_OBJ_RX_NUM 16
+#define C_CAN_MSG_OBJ_TX_NUM 16
+
+#define C_CAN_MSG_OBJ_RX_FIRST 1
+#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
+ C_CAN_MSG_OBJ_RX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
+#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
+ C_CAN_MSG_OBJ_TX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_RX_SPLIT 9
+#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
+
+#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
+#define RECEIVE_OBJECT_BITS 0x0000ffff
+
+/* status interrupt */
+#define STATUS_INTERRUPT 0x8000
+
+/* global interrupt masks */
+#define ENABLE_ALL_INTERRUPTS 1
+#define DISABLE_ALL_INTERRUPTS 0
+
+/* minimum timeout for checking BUSY status */
+#define MIN_TIMEOUT_VALUE 6
+
+/* napi related */
+#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM
+
+/* c_can lec values */
+enum c_can_lec_type {
+ LEC_NO_ERROR = 0,
+ LEC_STUFF_ERROR,
+ LEC_FORM_ERROR,
+ LEC_ACK_ERROR,
+ LEC_BIT1_ERROR,
+ LEC_BIT0_ERROR,
+ LEC_CRC_ERROR,
+ LEC_UNUSED,
+};
+
+/*
+ * c_can error types:
+ * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
+ */
+enum c_can_bus_error_types {
+ C_CAN_NO_ERROR = 0,
+ C_CAN_BUS_OFF,
+ C_CAN_ERROR_WARNING,
+ C_CAN_ERROR_PASSIVE,
+};
+
+static struct can_bittiming_const c_can_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
+ .tseg1_max = 16,
+ .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024, /* 6-bit BRP field + 4-bit BRPE field*/
+ .brp_inc = 1,
+};
+
+static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
+{
+ return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
+ C_CAN_MSG_OBJ_TX_FIRST;
+}
+
+static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
+{
+ return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) +
+ C_CAN_MSG_OBJ_TX_FIRST;
+}
+
+static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
+{
+ u32 val = priv->read_reg(priv, reg);
+ val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
+ return val;
+}
+
+static void c_can_enable_all_interrupts(struct c_can_priv *priv,
+ int enable)
+{
+ unsigned int cntrl_save = priv->read_reg(priv,
+ &priv->regs->control);
+
+ if (enable)
+ cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
+ else
+ cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
+
+ priv->write_reg(priv, &priv->regs->control, cntrl_save);
+}
+
+static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
+{
+ int count = MIN_TIMEOUT_VALUE;
+
+ while (count && priv->read_reg(priv,
+ &priv->regs->ifregs[iface].com_req) &
+ IF_COMR_BUSY) {
+ count--;
+ udelay(1);
+ }
+
+ if (!count)
+ return 1;
+
+ return 0;
+}
+
+static inline void c_can_object_get(struct net_device *dev,
+ int iface, int objno, int mask)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /*
+ * As per the specs, after writing the message object number into
+ * the IF command request register, the transfer between the
+ * interface registers and message RAM must complete within
+ * 6 CAN-CLK periods.
+ */
+ priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+ IFX_WRITE_LOW_16BIT(mask));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+ IFX_WRITE_LOW_16BIT(objno));
+
+ if (c_can_msg_obj_is_busy(priv, iface))
+ netdev_err(dev, "timed out in object get\n");
+}
+
+static inline void c_can_object_put(struct net_device *dev,
+ int iface, int objno, int mask)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /*
+ * As per the specs, after writing the message object number into
+ * the IF command request register, the transfer between the
+ * interface registers and message RAM must complete within
+ * 6 CAN-CLK periods.
+ */
+ priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+ (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+ IFX_WRITE_LOW_16BIT(objno));
+
+ if (c_can_msg_obj_is_busy(priv, iface))
+ netdev_err(dev, "timed out in object put\n");
+}
+
+static void c_can_write_msg_object(struct net_device *dev,
+ int iface, struct can_frame *frame, int objno)
+{
+ int i;
+ u16 flags = 0;
+ unsigned int id;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ if (!(frame->can_id & CAN_RTR_FLAG))
+ flags |= IF_ARB_TRANSMIT;
+
+ if (frame->can_id & CAN_EFF_FLAG) {
+ id = frame->can_id & CAN_EFF_MASK;
+ flags |= IF_ARB_MSGXTD;
+ } else
+ id = ((frame->can_id & CAN_SFF_MASK) << 18);
+
+ flags |= IF_ARB_MSGVAL;
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ IFX_WRITE_LOW_16BIT(id));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
+ IFX_WRITE_HIGH_16BIT(id));
+
+ for (i = 0; i < frame->can_dlc; i += 2) {
+ priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
+ frame->data[i] | (frame->data[i + 1] << 8));
+ }
+
+ /* enable interrupt for this message object */
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
+ frame->can_dlc);
+ c_can_object_put(dev, iface, objno, IF_COMM_ALL);
+}
+
+static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
+ int iface, int ctrl_mask,
+ int obj)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
+ c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
+
+}
+
+static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
+ int iface,
+ int ctrl_mask)
+{
+ int i;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ ctrl_mask & ~(IF_MCONT_MSGLST |
+ IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+ c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
+ }
+}
+
+static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
+ int iface, int ctrl_mask,
+ int obj)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ ctrl_mask & ~(IF_MCONT_MSGLST |
+ IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+ c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
+}
+
+static void c_can_handle_lost_msg_obj(struct net_device *dev,
+ int iface, int objno)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ struct can_frame *frame;
+
+ netdev_err(dev, "msg lost in buffer %d\n", objno);
+
+ c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ IF_MCONT_CLR_MSGLST);
+
+ c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);
+
+ /* create an error msg */
+ skb = alloc_can_err_skb(dev, &frame);
+ if (unlikely(!skb))
+ return;
+
+ frame->can_id |= CAN_ERR_CRTL;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+
+ netif_receive_skb(skb);
+}
+
+static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
+{
+ u16 flags, data;
+ int i;
+ unsigned int val;
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ struct can_frame *frame;
+
+ skb = alloc_can_skb(dev, &frame);
+ if (!skb) {
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+
+ frame->can_dlc = get_can_dlc(ctrl & 0x0F);
+
+ flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
+ val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
+ (flags << 16);
+
+ if (flags & IF_ARB_MSGXTD)
+ frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ frame->can_id = (val >> 18) & CAN_SFF_MASK;
+
+ if (flags & IF_ARB_TRANSMIT)
+ frame->can_id |= CAN_RTR_FLAG;
+ else {
+ for (i = 0; i < frame->can_dlc; i += 2) {
+ data = priv->read_reg(priv,
+ &priv->regs->ifregs[iface].data[i / 2]);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ }
+ }
+
+ netif_receive_skb(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += frame->can_dlc;
+
+ return 0;
+}
+
+static void c_can_setup_receive_object(struct net_device *dev, int iface,
+ int objno, unsigned int mask,
+ unsigned int id, unsigned int mcont)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
+ IFX_WRITE_LOW_16BIT(mask));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
+ IFX_WRITE_HIGH_16BIT(mask));
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ IFX_WRITE_LOW_16BIT(id));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
+ (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
+ c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
+
+ netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
+ c_can_read_reg32(priv, &priv->regs->msgval1));
+}
+
+static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
+
+ c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
+
+ netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
+ c_can_read_reg32(priv, &priv->regs->msgval1));
+}
+
+static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
+{
+ int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+
+ /*
+ * Bit n-1 of the transmission request register corresponds to
+ * message object n, so we must account for that offset here.
+ */
+ if (val & (1 << (objno - 1)))
+ return 1;
+
+ return 0;
+}
+
+static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ u32 msg_obj_no;
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct can_frame *frame = (struct can_frame *)skb->data;
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ msg_obj_no = get_tx_next_msg_obj(priv);
+
+ /* prepare message object for transmission */
+ c_can_write_msg_object(dev, 0, frame, msg_obj_no);
+ can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+
+ /*
+ * we have to stop the queue in case of a wrap around or
+ * if the next TX message object is still in use
+ */
+ priv->tx_next++;
+ if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
+ (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
+ netif_stop_queue(dev);
+
+ return NETDEV_TX_OK;
+}
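To make the wrap-around handling above concrete, here is a stand-alone sketch (constants taken from the defines earlier in this file; the test program is invented): because the number of tx objects is a power of two, masking the free-running tx_next counter maps it onto message objects 17..32, and a counter value that is a multiple of 16 is exactly the wrap-around case in which the queue is stopped.

#include <assert.h>

#define MSG_OBJ_TX_NUM          16
#define MSG_OBJ_TX_FIRST        17      /* C_CAN_MSG_OBJ_RX_LAST + 1 */
#define NEXT_MSG_OBJ_MASK       (MSG_OBJ_TX_NUM - 1)

static int tx_obj(unsigned int tx_next)
{
        return (tx_next & NEXT_MSG_OBJ_MASK) + MSG_OBJ_TX_FIRST;
}

int main(void)
{
        assert(tx_obj(0) == 17);        /* first tx message object */
        assert(tx_obj(15) == 32);       /* last tx message object */
        assert(tx_obj(16) == 17);       /* free-running counter wraps around */
        assert((16 & NEXT_MSG_OBJ_MASK) == 0);  /* wrap: queue is stopped */
        return 0;
}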
+
+static int c_can_set_bittiming(struct net_device *dev)
+{
+ unsigned int reg_btr, reg_brpe, ctrl_save;
+ u8 brp, brpe, sjw, tseg1, tseg2;
+ u32 ten_bit_brp;
+ struct c_can_priv *priv = netdev_priv(dev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+
+ /* c_can provides a 6-bit brp and 4-bit brpe fields */
+ ten_bit_brp = bt->brp - 1;
+ brp = ten_bit_brp & BTR_BRP_MASK;
+ brpe = ten_bit_brp >> 6;
+
+ sjw = bt->sjw - 1;
+ tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
+ tseg2 = bt->phase_seg2 - 1;
+ reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
+ (tseg2 << BTR_TSEG2_SHIFT);
+ reg_brpe = brpe & BRP_EXT_BRPE_MASK;
+
+ netdev_info(dev,
+ "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
+
+ ctrl_save = priv->read_reg(priv, &priv->regs->control);
+ priv->write_reg(priv, &priv->regs->control,
+ ctrl_save | CONTROL_CCE | CONTROL_INIT);
+ priv->write_reg(priv, &priv->regs->btr, reg_btr);
+ priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
+ priv->write_reg(priv, &priv->regs->control, ctrl_save);
+
+ return 0;
+}
+
+/*
+ * Configure C_CAN message objects for Tx and Rx purposes:
+ * C_CAN provides a total of 32 message objects that can be configured
+ * either for Tx or Rx purposes. Here the first 16 message objects are used as
+ * a reception FIFO. The end of the reception FIFO is signified by the EoB
+ * bit being SET. The remaining 16 message objects are reserved for Tx purposes.
+ * See user guide document for further details on configuring message
+ * objects.
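+ *
+ * Assuming objects are numbered 1..32, the resulting layout is:
+ *   objects  1..15  Rx, EoB clear (members of the reception FIFO)
+ *   object      16  Rx, EoB set   (end of the reception FIFO)
+ *   objects 17..32  Tx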
+ */
+static void c_can_configure_msg_objects(struct net_device *dev)
+{
+ int i;
+
+ /* first invalidate all message objects */
+ for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
+ c_can_inval_msg_object(dev, 0, i);
+
+ /* setup receive message objects */
+ for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
+ c_can_setup_receive_object(dev, 0, i, 0, 0,
+ (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
+
+ c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
+ IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
+}
+
+/*
+ * Configure C_CAN chip:
+ * - enable/disable auto-retransmission
+ * - set operating mode
+ * - configure message objects
+ */
+static void c_can_chip_config(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ /* disable automatic retransmission */
+ priv->write_reg(priv, &priv->regs->control,
+ CONTROL_DISABLE_AR);
+ else
+ /* enable automatic retransmission */
+ priv->write_reg(priv, &priv->regs->control,
+ CONTROL_ENABLE_AR);
+
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+ (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
+ /* loopback + silent mode : useful for hot self-test */
+ priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+ priv->write_reg(priv, &priv->regs->test,
+ TEST_LBACK | TEST_SILENT);
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ /* loopback mode : useful for self-test function */
+ priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+ priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ /* silent mode : bus-monitoring mode */
+ priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+ priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
+ } else
+ /* normal mode*/
+ priv->write_reg(priv, &priv->regs->control,
+ CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
+
+ /* configure message objects */
+ c_can_configure_msg_objects(dev);
+
+ /* set a `lec` value so that we can check for updates later */
+ priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+
+ /* set bittiming params */
+ c_can_set_bittiming(dev);
+}
+
+static void c_can_start(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /* enable status change, error and module interrupts */
+ c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+
+ /* basic c_can configuration */
+ c_can_chip_config(dev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* reset tx helper pointers */
+ priv->tx_next = priv->tx_echo = 0;
+}
+
+static void c_can_stop(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /* disable all interrupts */
+ c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+
+ /* set the state as STOPPED */
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ c_can_start(dev);
+ netif_wake_queue(dev);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int c_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ unsigned int reg_err_counter;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+ bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
+ ERR_CNT_REC_SHIFT;
+ bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
+
+ return 0;
+}
+
+/*
+ * theory of operation:
+ *
+ * priv->tx_echo holds the number of the oldest can_frame put for
+ * transmission into the hardware, but not yet ACKed by the CAN tx
+ * complete IRQ.
+ *
+ * We iterate from priv->tx_echo to priv->tx_next and, for each packet
+ * that has been transmitted, echo it back to the CAN framework.
+ * If we discover a not yet transmitted packet, stop looking for more.
+ */
+static void c_can_do_tx(struct net_device *dev)
+{
+ u32 val;
+ u32 msg_obj_no;
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+
+ for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+ msg_obj_no = get_tx_echo_msg_obj(priv);
+ c_can_inval_msg_object(dev, 0, msg_obj_no);
+ val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+ if (!(val & (1 << msg_obj_no))) {
+ can_get_echo_skb(dev,
+ msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+ stats->tx_bytes += priv->read_reg(priv,
+ &priv->regs->ifregs[0].msg_cntrl)
+ & IF_MCONT_DLC_MASK;
+ stats->tx_packets++;
+ }
+ }
+
+ /* restart queue if wrap-up or if queue stalled on last pkt */
+ if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
+ ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
+ netif_wake_queue(dev);
+}
+
+/*
+ * theory of operation:
+ *
+ * the c_can core saves a received CAN message into the lowest-numbered
+ * free message object and sets the NEWDAT and INTPND bits of that
+ * object to indicate that a new message has arrived. To keep reception
+ * in order despite this, we keep two groups of message objects whose
+ * partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
+ *
+ * To ensure in-order frame reception we use the following
+ * approach while re-activating a message object to receive further
+ * frames:
+ * - if the current message object number is lower than
+ * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
+ * the INTPND bit.
+ * - if the current message object number is equal to
+ * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
+ * receive message objects.
+ * - if the current message object number is greater than
+ * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
+ * only this message object.
+ */
+static int c_can_do_rx_poll(struct net_device *dev, int quota)
+{
+ u32 num_rx_pkts = 0;
+ unsigned int msg_obj, msg_ctrl_save;
+ struct c_can_priv *priv = netdev_priv(dev);
+ u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
+
+ for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
+ msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
+ val = c_can_read_reg32(priv, &priv->regs->intpnd1),
+ msg_obj++) {
+ /*
+ * bit n-1 of the interrupt pending register corresponds to
+ * message object n, so shift the object number down by one.
+ */
+ if (val & (1 << (msg_obj - 1))) {
+ c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
+ ~IF_COMM_TXRQST);
+ msg_ctrl_save = priv->read_reg(priv,
+ &priv->regs->ifregs[0].msg_cntrl);
+
+ if (msg_ctrl_save & IF_MCONT_EOB)
+ return num_rx_pkts;
+
+ if (msg_ctrl_save & IF_MCONT_MSGLST) {
+ c_can_handle_lost_msg_obj(dev, 0, msg_obj);
+ num_rx_pkts++;
+ quota--;
+ continue;
+ }
+
+ if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
+ continue;
+
+ /* read the data from the message object */
+ c_can_read_msg_object(dev, 0, msg_ctrl_save);
+
+ if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
+ c_can_mark_rx_msg_obj(dev, 0,
+ msg_ctrl_save, msg_obj);
+ else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
+ /* activate this msg obj */
+ c_can_activate_rx_msg_obj(dev, 0,
+ msg_ctrl_save, msg_obj);
+ else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
+ /* activate all lower message objects */
+ c_can_activate_all_lower_rx_msg_obj(dev,
+ 0, msg_ctrl_save);
+
+ num_rx_pkts++;
+ quota--;
+ }
+ }
+
+ return num_rx_pkts;
+}
+
+static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
+{
+ return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+ (priv->current_status & LEC_UNUSED);
+}
+
+static int c_can_handle_state_change(struct net_device *dev,
+ enum c_can_bus_error_types error_type)
+{
+ unsigned int reg_err_counter;
+ unsigned int rx_err_passive;
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct can_berr_counter bec;
+
+ /* propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (unlikely(!skb))
+ return 0;
+
+ c_can_get_berr_counter(dev, &bec);
+ reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+ rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
+ ERR_CNT_RP_SHIFT;
+
+ switch (error_type) {
+ case C_CAN_ERROR_WARNING:
+ /* error warning state */
+ priv->can.can_stats.error_warning++;
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ break;
+ case C_CAN_ERROR_PASSIVE:
+ /* error passive state */
+ priv->can.can_stats.error_passive++;
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ cf->can_id |= CAN_ERR_CRTL;
+ if (rx_err_passive)
+ cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+ if (bec.txerr > 127)
+ cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
+ case C_CAN_BUS_OFF:
+ /* bus-off state */
+ priv->can.state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ /*
+ * disable all interrupts in bus-off mode to ensure that
+ * the CPU is not hogged down
+ */
+ c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+ can_bus_off(dev);
+ break;
+ default:
+ break;
+ }
+
+ netif_receive_skb(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ return 1;
+}
+
+static int c_can_handle_bus_err(struct net_device *dev,
+ enum c_can_lec_type lec_type)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /*
+ * early exit if no lec update or no error.
+ * no LEC update means that no CAN bus event has been detected
+ * since the CPU last wrote 0x7 (LEC_UNUSED) to the status register.
+ */
+ if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
+ return 0;
+
+ /* propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (unlikely(!skb))
+ return 0;
+
+ /*
+ * check for 'last error code' which tells us the
+ * type of the last error to occur on the CAN bus
+ */
+
+ /* common for all type of bus errors */
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+
+ switch (lec_type) {
+ case LEC_STUFF_ERROR:
+ netdev_dbg(dev, "stuff error\n");
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case LEC_FORM_ERROR:
+ netdev_dbg(dev, "form error\n");
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case LEC_ACK_ERROR:
+ netdev_dbg(dev, "ack error\n");
+ cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
+ CAN_ERR_PROT_LOC_ACK_DEL);
+ break;
+ case LEC_BIT1_ERROR:
+ netdev_dbg(dev, "bit1 error\n");
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ break;
+ case LEC_BIT0_ERROR:
+ netdev_dbg(dev, "bit0 error\n");
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ break;
+ case LEC_CRC_ERROR:
+ netdev_dbg(dev, "CRC error\n");
+ cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL);
+ break;
+ default:
+ break;
+ }
+
+ /* set a `lec` value so that we can check for updates later */
+ priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+
+ netif_receive_skb(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ return 1;
+}
+
+static int c_can_poll(struct napi_struct *napi, int quota)
+{
+ u16 irqstatus;
+ int lec_type = 0;
+ int work_done = 0;
+ struct net_device *dev = napi->dev;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+ if (!irqstatus)
+ goto end;
+
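+ /*
+ * per the C_CAN interrupt register semantics, irqstatus holds
+ * STATUS_INTERRUPT for a status/error interrupt, or the number
+ * (1..32) of the pending message object; the checks below rely
+ * on this encoding
+ */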
+ /* status events have the highest priority */
+ if (irqstatus == STATUS_INTERRUPT) {
+ priv->current_status = priv->read_reg(priv,
+ &priv->regs->status);
+
+ /* handle Tx/Rx events */
+ if (priv->current_status & STATUS_TXOK)
+ priv->write_reg(priv, &priv->regs->status,
+ priv->current_status & ~STATUS_TXOK);
+
+ if (priv->current_status & STATUS_RXOK)
+ priv->write_reg(priv, &priv->regs->status,
+ priv->current_status & ~STATUS_RXOK);
+
+ /* handle state changes */
+ if ((priv->current_status & STATUS_EWARN) &&
+ (!(priv->last_status & STATUS_EWARN))) {
+ netdev_dbg(dev, "entered error warning state\n");
+ work_done += c_can_handle_state_change(dev,
+ C_CAN_ERROR_WARNING);
+ }
+ if ((priv->current_status & STATUS_EPASS) &&
+ (!(priv->last_status & STATUS_EPASS))) {
+ netdev_dbg(dev, "entered error passive state\n");
+ work_done += c_can_handle_state_change(dev,
+ C_CAN_ERROR_PASSIVE);
+ }
+ if ((priv->current_status & STATUS_BOFF) &&
+ (!(priv->last_status & STATUS_BOFF))) {
+ netdev_dbg(dev, "entered bus off state\n");
+ work_done += c_can_handle_state_change(dev,
+ C_CAN_BUS_OFF);
+ }
+
+ /* handle bus recovery events */
+ if ((!(priv->current_status & STATUS_BOFF)) &&
+ (priv->last_status & STATUS_BOFF)) {
+ netdev_dbg(dev, "left bus off state\n");
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+ if ((!(priv->current_status & STATUS_EPASS)) &&
+ (priv->last_status & STATUS_EPASS)) {
+ netdev_dbg(dev, "left error passive state\n");
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ priv->last_status = priv->current_status;
+
+ /* handle lec errors on the bus */
+ lec_type = c_can_has_and_handle_berr(priv);
+ if (lec_type)
+ work_done += c_can_handle_bus_err(dev, lec_type);
+ } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
+ (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
+ /* handle events corresponding to receive message objects */
+ work_done += c_can_do_rx_poll(dev, (quota - work_done));
+ } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
+ (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
+ /* handle events corresponding to transmit message objects */
+ c_can_do_tx(dev);
+ }
+
+end:
+ if (work_done < quota) {
+ napi_complete(napi);
+ /* enable all IRQs */
+ c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+ }
+
+ return work_done;
+}
+
+static irqreturn_t c_can_isr(int irq, void *dev_id)
+{
+ u16 irqstatus;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+ if (!irqstatus)
+ return IRQ_NONE;
+
+ /* disable all interrupts and schedule the NAPI */
+ c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int c_can_open(struct net_device *dev)
+{
+ int err;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /* open the can device */
+ err = open_candev(dev);
+ if (err) {
+ netdev_err(dev, "failed to open can device\n");
+ return err;
+ }
+
+ /* register interrupt handler */
+ err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
+ dev);
+ if (err < 0) {
+ netdev_err(dev, "failed to request interrupt\n");
+ goto exit_irq_fail;
+ }
+
+ /* start the c_can controller */
+ c_can_start(dev);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+
+exit_irq_fail:
+ close_candev(dev);
+ return err;
+}
+
+static int c_can_close(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+ c_can_stop(dev);
+ free_irq(dev->irq, dev);
+ close_candev(dev);
+
+ return 0;
+}
+
+struct net_device *alloc_c_can_dev(void)
+{
+ struct net_device *dev;
+ struct c_can_priv *priv;
+
+ dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+ netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
+
+ priv->dev = dev;
+ priv->can.bittiming_const = &c_can_bittiming_const;
+ priv->can.do_set_mode = c_can_set_mode;
+ priv->can.do_get_berr_counter = c_can_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT |
+ CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING;
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_c_can_dev);
+
+void free_c_can_dev(struct net_device *dev)
+{
+ free_candev(dev);
+}
+EXPORT_SYMBOL_GPL(free_c_can_dev);
+
+static const struct net_device_ops c_can_netdev_ops = {
+ .ndo_open = c_can_open,
+ .ndo_stop = c_can_close,
+ .ndo_start_xmit = c_can_start_xmit,
+};
+
+int register_c_can_dev(struct net_device *dev)
+{
+ dev->flags |= IFF_ECHO; /* we support local echo */
+ dev->netdev_ops = &c_can_netdev_ops;
+
+ return register_candev(dev);
+}
+EXPORT_SYMBOL_GPL(register_c_can_dev);
+
+void unregister_c_can_dev(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /* disable all interrupts */
+ c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+
+ unregister_candev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_c_can_dev);
+
+MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
new file mode 100644
index 000000000000..9b7fbef3d09a
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.h
@@ -0,0 +1,86 @@
+/*
+ * CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef C_CAN_H
+#define C_CAN_H
+
+/* c_can IF registers */
+struct c_can_if_regs {
+ u16 com_req;
+ u16 com_mask;
+ u16 mask1;
+ u16 mask2;
+ u16 arb1;
+ u16 arb2;
+ u16 msg_cntrl;
+ u16 data[4];
+ u16 _reserved[13];
+};
+
+/* c_can hardware registers */
+struct c_can_regs {
+ u16 control;
+ u16 status;
+ u16 err_cnt;
+ u16 btr;
+ u16 interrupt;
+ u16 test;
+ u16 brp_ext;
+ u16 _reserved1;
+ struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
+ u16 _reserved2[8];
+ u16 txrqst1;
+ u16 txrqst2;
+ u16 _reserved3[6];
+ u16 newdat1;
+ u16 newdat2;
+ u16 _reserved4[6];
+ u16 intpnd1;
+ u16 intpnd2;
+ u16 _reserved5[6];
+ u16 msgval1;
+ u16 msgval2;
+ u16 _reserved6[6];
+};
+
+/* c_can private data structure */
+struct c_can_priv {
+ struct can_priv can; /* must be the first member */
+ struct napi_struct napi;
+ struct net_device *dev;
+ int tx_object;
+ int current_status;
+ int last_status;
+ u16 (*read_reg) (struct c_can_priv *priv, void *reg);
+ void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
+ struct c_can_regs __iomem *regs;
+ unsigned long irq_flags; /* for request_irq() */
+ unsigned int tx_next;
+ unsigned int tx_echo;
+ void *priv; /* for board-specific data */
+};
+
+struct net_device *alloc_c_can_dev(void);
+void free_c_can_dev(struct net_device *dev);
+int register_c_can_dev(struct net_device *dev);
+void unregister_c_can_dev(struct net_device *dev);
+
+#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
new file mode 100644
index 000000000000..e629b961ae2d
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -0,0 +1,215 @@
+/*
+ * Platform CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <linux/can/dev.h>
+
+#include "c_can.h"
+
+/*
+ * 16-bit c_can registers can be arranged differently in the memory
+ * architecture of different implementations. For example: 16-bit
+ * registers can be aligned to a 16-bit boundary or to a 32-bit boundary.
+ * Handle this by providing a common read/write interface.
+ */
+static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+ void *reg)
+{
+ return readw(reg);
+}
+
+static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+ void *reg, u16 val)
+{
+ writew(val, reg);
+}
+
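+ /*
+ * for 32-bit aligned register maps, reg + (reg - priv->regs) equals
+ * priv->regs + 2 * offset, i.e. the n-th 16-bit register sits at a
+ * 32-bit-aligned address
+ */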
+static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+ void *reg)
+{
+ return readw(reg + (long)reg - (long)priv->regs);
+}
+
+static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+ void *reg, u16 val)
+{
+ writew(val, reg + (long)reg - (long)priv->regs);
+}
+
+static int __devinit c_can_plat_probe(struct platform_device *pdev)
+{
+ int ret;
+ void __iomem *addr;
+ struct net_device *dev;
+ struct c_can_priv *priv;
+ struct resource *mem, *irq;
+#ifdef CONFIG_HAVE_CLK
+ struct clk *clk;
+
+ /* get the appropriate clk */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "no clock defined\n");
+ ret = -ENODEV;
+ goto exit;
+ }
+#endif
+
+ /* get the platform data */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!mem || !irq) {
+ ret = -ENODEV;
+ goto exit_free_clk;
+ }
+
+ if (!request_mem_region(mem->start, resource_size(mem),
+ KBUILD_MODNAME)) {
+ dev_err(&pdev->dev, "resource unavailable\n");
+ ret = -ENODEV;
+ goto exit_free_clk;
+ }
+
+ addr = ioremap(mem->start, resource_size(mem));
+ if (!addr) {
+ dev_err(&pdev->dev, "failed to map can port\n");
+ ret = -ENOMEM;
+ goto exit_release_mem;
+ }
+
+ /* allocate the c_can device */
+ dev = alloc_c_can_dev();
+ if (!dev) {
+ ret = -ENOMEM;
+ goto exit_iounmap;
+ }
+
+ priv = netdev_priv(dev);
+
+ dev->irq = irq->start;
+ priv->regs = addr;
+#ifdef CONFIG_HAVE_CLK
+ priv->can.clock.freq = clk_get_rate(clk);
+ priv->priv = clk;
+#endif
+
+ switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ case IORESOURCE_MEM_32BIT:
+ priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+ break;
+ case IORESOURCE_MEM_16BIT:
+ default:
+ priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ break;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ret = register_c_can_dev(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+ KBUILD_MODNAME, ret);
+ goto exit_free_device;
+ }
+
+ dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+ KBUILD_MODNAME, priv->regs, dev->irq);
+ return 0;
+
+exit_free_device:
+ platform_set_drvdata(pdev, NULL);
+ free_c_can_dev(dev);
+exit_iounmap:
+ iounmap(addr);
+exit_release_mem:
+ release_mem_region(mem->start, resource_size(mem));
+exit_free_clk:
+#ifdef CONFIG_HAVE_CLK
+ clk_put(clk);
+exit:
+#endif
+ dev_err(&pdev->dev, "probe failed\n");
+
+ return ret;
+}
+
+static int __devexit c_can_plat_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct resource *mem;
+
+ unregister_c_can_dev(dev);
+ platform_set_drvdata(pdev, NULL);
+
+ free_c_can_dev(dev);
+ iounmap(priv->regs);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+#ifdef CONFIG_HAVE_CLK
+ clk_put(priv->priv);
+#endif
+
+ return 0;
+}
+
+static struct platform_driver c_can_plat_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = c_can_plat_probe,
+ .remove = __devexit_p(c_can_plat_remove),
+};
+
+static int __init c_can_plat_init(void)
+{
+ return platform_driver_register(&c_can_plat_driver);
+}
+module_init(c_can_plat_init);
+
+static void __exit c_can_plat_exit(void)
+{
+ platform_driver_unregister(&c_can_plat_driver);
+}
+module_exit(c_can_plat_exit);
+
+MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 6e533dcc36c0..366f5cc050ae 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1114,11 +1114,6 @@ static bool ican3_txok(struct ican3_dev *mod)
/*
* Recieve one CAN frame from the hardware
*
- * This works like the core of a NAPI function, but is intended to be called
- * from workqueue context instead. This driver already needs a workqueue to
- * process control messages, so we use the workqueue instead of using NAPI.
- * This was done to simplify locking.
- *
* CONTEXT: must be called from user context
*/
static int ican3_recv_skb(struct ican3_dev *mod)
@@ -1251,7 +1246,6 @@ static irqreturn_t ican3_irq(int irq, void *dev_id)
* Reset an ICAN module to its power-on state
*
* CONTEXT: no network device registered
- * LOCKING: work function disabled
*/
static int ican3_reset_module(struct ican3_dev *mod)
{
@@ -1262,9 +1256,6 @@ static int ican3_reset_module(struct ican3_dev *mod)
/* disable interrupts so no more work is scheduled */
iowrite8(1 << mod->num, &mod->ctrl->int_disable);
- /* flush any pending work */
- flush_scheduled_work();
-
/* the first unallocated page in the DPM is #9 */
mod->free_page = DPM_FREE_START;
@@ -1627,7 +1618,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
return count;
}
-static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term,
+static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
ican3_sysfs_set_term);
static struct attribute *ican3_sysfs_attrs[] = {
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7ab534aee452..7513c4523ac4 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net)
goto open_unlock;
}
- priv->wq = create_freezeable_workqueue("mcp251x_wq");
+ priv->wq = create_freezable_workqueue("mcp251x_wq");
INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index 27d1d398e25e..d38706958af6 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
config CAN_MSCAN
- depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
+ depends on CAN_DEV && (PPC || M68K)
tristate "Support for Freescale MSCAN based chips"
---help---
The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 312b9c8f4f3b..c0a1bc5b1435 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -247,10 +247,9 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
}
#endif /* CONFIG_PPC_MPC512x */
-static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev,
- const struct of_device_id *id)
+static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
{
- struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
+ struct mpc5xxx_can_data *data;
struct device_node *np = ofdev->dev.of_node;
struct net_device *dev;
struct mscan_priv *priv;
@@ -259,6 +258,10 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev,
int irq, mscan_clksrc = 0;
int err = -ENOMEM;
+ if (!ofdev->dev.of_match)
+ return -EINVAL;
+ data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
+
base = of_iomap(np, 0);
if (!base) {
dev_err(&ofdev->dev, "couldn't ioremap\n");
@@ -391,7 +394,7 @@ static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
{},
};
-static struct of_platform_driver mpc5xxx_can_driver = {
+static struct platform_driver mpc5xxx_can_driver = {
.driver = {
.name = "mpc5xxx_can",
.owner = THIS_MODULE,
@@ -407,13 +410,13 @@ static struct of_platform_driver mpc5xxx_can_driver = {
static int __init mpc5xxx_can_init(void)
{
- return of_register_platform_driver(&mpc5xxx_can_driver);
+ return platform_driver_register(&mpc5xxx_can_driver);
}
module_init(mpc5xxx_can_init);
static void __exit mpc5xxx_can_exit(void)
{
- return of_unregister_platform_driver(&mpc5xxx_can_driver);
+ platform_driver_unregister(&mpc5xxx_can_driver);
};
module_exit(mpc5xxx_can_exit);
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 64c378cd0c34..74cd880c7e06 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -182,7 +182,7 @@ static int mscan_restart(struct net_device *dev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
- "bus-off state expected");
+ "bus-off state expected\n");
out_8(&regs->canmisc, MSCAN_BOHOLD);
/* Re-enable receive interrupts. */
out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 672718261c68..e54712b22c27 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -32,106 +32,115 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#define MAX_MSG_OBJ 32
-#define MSG_OBJ_RX 0 /* The receive message object flag. */
-#define MSG_OBJ_TX 1 /* The transmit message object flag. */
-
-#define ENABLE 1 /* The enable flag */
-#define DISABLE 0 /* The disable flag */
-#define CAN_CTRL_INIT 0x0001 /* The INIT bit of CANCONT register. */
-#define CAN_CTRL_IE 0x0002 /* The IE bit of CAN control register */
-#define CAN_CTRL_IE_SIE_EIE 0x000e
-#define CAN_CTRL_CCE 0x0040
-#define CAN_CTRL_OPT 0x0080 /* The OPT bit of CANCONT register. */
-#define CAN_OPT_SILENT 0x0008 /* The Silent bit of CANOPT reg. */
-#define CAN_OPT_LBACK 0x0010 /* The LoopBack bit of CANOPT reg. */
-#define CAN_CMASK_RX_TX_SET 0x00f3
-#define CAN_CMASK_RX_TX_GET 0x0073
-#define CAN_CMASK_ALL 0xff
-#define CAN_CMASK_RDWR 0x80
-#define CAN_CMASK_ARB 0x20
-#define CAN_CMASK_CTRL 0x10
-#define CAN_CMASK_MASK 0x40
-#define CAN_CMASK_NEWDAT 0x04
-#define CAN_CMASK_CLRINTPND 0x08
-
-#define CAN_IF_MCONT_NEWDAT 0x8000
-#define CAN_IF_MCONT_INTPND 0x2000
-#define CAN_IF_MCONT_UMASK 0x1000
-#define CAN_IF_MCONT_TXIE 0x0800
-#define CAN_IF_MCONT_RXIE 0x0400
-#define CAN_IF_MCONT_RMTEN 0x0200
-#define CAN_IF_MCONT_TXRQXT 0x0100
-#define CAN_IF_MCONT_EOB 0x0080
-#define CAN_IF_MCONT_DLC 0x000f
-#define CAN_IF_MCONT_MSGLOST 0x4000
-#define CAN_MASK2_MDIR_MXTD 0xc000
-#define CAN_ID2_DIR 0x2000
-#define CAN_ID_MSGVAL 0x8000
-
-#define CAN_STATUS_INT 0x8000
-#define CAN_IF_CREQ_BUSY 0x8000
-#define CAN_ID2_XTD 0x4000
-
-#define CAN_REC 0x00007f00
-#define CAN_TEC 0x000000ff
-
-#define PCH_RX_OK 0x00000010
-#define PCH_TX_OK 0x00000008
-#define PCH_BUS_OFF 0x00000080
-#define PCH_EWARN 0x00000040
-#define PCH_EPASSIV 0x00000020
-#define PCH_LEC0 0x00000001
-#define PCH_LEC1 0x00000002
-#define PCH_LEC2 0x00000004
-#define PCH_LEC_ALL (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
-#define PCH_STUF_ERR PCH_LEC0
-#define PCH_FORM_ERR PCH_LEC1
-#define PCH_ACK_ERR (PCH_LEC0 | PCH_LEC1)
-#define PCH_BIT1_ERR PCH_LEC2
-#define PCH_BIT0_ERR (PCH_LEC0 | PCH_LEC2)
-#define PCH_CRC_ERR (PCH_LEC1 | PCH_LEC2)
+#define PCH_CTRL_INIT BIT(0) /* The INIT bit of CANCONT register. */
+#define PCH_CTRL_IE BIT(1) /* The IE bit of CAN control register */
+#define PCH_CTRL_IE_SIE_EIE (BIT(3) | BIT(2) | BIT(1))
+#define PCH_CTRL_CCE BIT(6)
+#define PCH_CTRL_OPT BIT(7) /* The OPT bit of CANCONT register. */
+#define PCH_OPT_SILENT BIT(3) /* The Silent bit of CANOPT reg. */
+#define PCH_OPT_LBACK BIT(4) /* The LoopBack bit of CANOPT reg. */
+
+#define PCH_CMASK_RX_TX_SET 0x00f3
+#define PCH_CMASK_RX_TX_GET 0x0073
+#define PCH_CMASK_ALL 0xff
+#define PCH_CMASK_NEWDAT BIT(2)
+#define PCH_CMASK_CLRINTPND BIT(3)
+#define PCH_CMASK_CTRL BIT(4)
+#define PCH_CMASK_ARB BIT(5)
+#define PCH_CMASK_MASK BIT(6)
+#define PCH_CMASK_RDWR BIT(7)
+#define PCH_IF_MCONT_NEWDAT BIT(15)
+#define PCH_IF_MCONT_MSGLOST BIT(14)
+#define PCH_IF_MCONT_INTPND BIT(13)
+#define PCH_IF_MCONT_UMASK BIT(12)
+#define PCH_IF_MCONT_TXIE BIT(11)
+#define PCH_IF_MCONT_RXIE BIT(10)
+#define PCH_IF_MCONT_RMTEN BIT(9)
+#define PCH_IF_MCONT_TXRQXT BIT(8)
+#define PCH_IF_MCONT_EOB BIT(7)
+#define PCH_IF_MCONT_DLC (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define PCH_MASK2_MDIR_MXTD (BIT(14) | BIT(15))
+#define PCH_ID2_DIR BIT(13)
+#define PCH_ID2_XTD BIT(14)
+#define PCH_ID_MSGVAL BIT(15)
+#define PCH_IF_CREQ_BUSY BIT(15)
+
+#define PCH_STATUS_INT 0x8000
+#define PCH_REC 0x00007f00
+#define PCH_TEC 0x000000ff
+
+#define PCH_TX_OK BIT(3)
+#define PCH_RX_OK BIT(4)
+#define PCH_EPASSIV BIT(5)
+#define PCH_EWARN BIT(6)
+#define PCH_BUS_OFF BIT(7)
/* bit position of certain controller bits. */
-#define BIT_BITT_BRP 0
-#define BIT_BITT_SJW 6
-#define BIT_BITT_TSEG1 8
-#define BIT_BITT_TSEG2 12
-#define BIT_IF1_MCONT_RXIE 10
-#define BIT_IF2_MCONT_TXIE 11
-#define BIT_BRPE_BRPE 6
-#define BIT_ES_TXERRCNT 0
-#define BIT_ES_RXERRCNT 8
-#define MSK_BITT_BRP 0x3f
-#define MSK_BITT_SJW 0xc0
-#define MSK_BITT_TSEG1 0xf00
-#define MSK_BITT_TSEG2 0x7000
-#define MSK_BRPE_BRPE 0x3c0
-#define MSK_BRPE_GET 0x0f
-#define MSK_CTRL_IE_SIE_EIE 0x07
-#define MSK_MCONT_TXIE 0x08
-#define MSK_MCONT_RXIE 0x10
-#define PCH_CAN_NO_TX_BUFF 1
-#define COUNTER_LIMIT 10
+#define PCH_BIT_BRP_SHIFT 0
+#define PCH_BIT_SJW_SHIFT 6
+#define PCH_BIT_TSEG1_SHIFT 8
+#define PCH_BIT_TSEG2_SHIFT 12
+#define PCH_BIT_BRPE_BRPE_SHIFT 6
+
+#define PCH_MSK_BITT_BRP 0x3f
+#define PCH_MSK_BRPE_BRPE 0x3c0
+#define PCH_MSK_CTRL_IE_SIE_EIE 0x07
+#define PCH_COUNTER_LIMIT 10
#define PCH_CAN_CLK 50000000 /* 50MHz */
-/* Define the number of message object.
+/*
+ * Define the number of message objects.
* PCH CAN communications are done via Message RAM.
- * The Message RAM consists of 32 message objects. */
-#define PCH_RX_OBJ_NUM 26 /* 1~ PCH_RX_OBJ_NUM is Rx*/
-#define PCH_TX_OBJ_NUM 6 /* PCH_RX_OBJ_NUM is RX ~ Tx*/
-#define PCH_OBJ_NUM (PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM)
+ * The Message RAM consists of 32 message objects.
+ */
+#define PCH_RX_OBJ_NUM 26
+#define PCH_TX_OBJ_NUM 6
+#define PCH_RX_OBJ_START 1
+#define PCH_RX_OBJ_END PCH_RX_OBJ_NUM
+#define PCH_TX_OBJ_START (PCH_RX_OBJ_END + 1)
+#define PCH_TX_OBJ_END (PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)
#define PCH_FIFO_THRESH 16
+/* TxRqst2 shows the status of message objects 17~32 */
+#define PCH_TREQ2_TX_MASK (((1 << PCH_TX_OBJ_NUM) - 1) <<\
+ (PCH_RX_OBJ_END - 16))
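+/*
+ * (with PCH_RX_OBJ_END = 26 and PCH_TX_OBJ_NUM = 6 the mask above
+ * evaluates to 0x3f << 10 = 0xfc00, i.e. the TxRqst2 bits of message
+ * objects 27..32)
+ */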
+
+enum pch_ifreg {
+ PCH_RX_IFREG,
+ PCH_TX_IFREG,
+};
+
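+/*
+ * these values reproduce the old PCH_LEC0/1/2-based error codes, i.e.
+ * the 3-bit LEC field of the status register: 1 stuff, 2 form, 3 ack,
+ * 4 bit1, 5 bit0, 6 crc, 7 no-change/unused
+ */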
+enum pch_can_err {
+ PCH_STUF_ERR = 1,
+ PCH_FORM_ERR,
+ PCH_ACK_ERR,
+ PCH_BIT1_ERR,
+ PCH_BIT0_ERR,
+ PCH_CRC_ERR,
+ PCH_LEC_ALL,
+};
+
enum pch_can_mode {
PCH_CAN_ENABLE,
PCH_CAN_DISABLE,
PCH_CAN_ALL,
PCH_CAN_NONE,
PCH_CAN_STOP,
- PCH_CAN_RUN
+ PCH_CAN_RUN,
+};
+
+struct pch_can_if_regs {
+ u32 creq;
+ u32 cmask;
+ u32 mask1;
+ u32 mask2;
+ u32 id1;
+ u32 id2;
+ u32 mcont;
+ u32 data[4];
+ u32 rsv[13];
};
struct pch_can_regs {
@@ -142,62 +151,41 @@ struct pch_can_regs {
u32 intr;
u32 opt;
u32 brpe;
- u32 reserve1;
- u32 if1_creq;
- u32 if1_cmask;
- u32 if1_mask1;
- u32 if1_mask2;
- u32 if1_id1;
- u32 if1_id2;
- u32 if1_mcont;
- u32 if1_dataa1;
- u32 if1_dataa2;
- u32 if1_datab1;
- u32 if1_datab2;
- u32 reserve2;
- u32 reserve3[12];
- u32 if2_creq;
- u32 if2_cmask;
- u32 if2_mask1;
- u32 if2_mask2;
- u32 if2_id1;
- u32 if2_id2;
- u32 if2_mcont;
- u32 if2_dataa1;
- u32 if2_dataa2;
- u32 if2_datab1;
- u32 if2_datab2;
- u32 reserve4;
- u32 reserve5[20];
+ u32 reserve;
+ struct pch_can_if_regs ifregs[2]; /* [0]=if1 [1]=if2 */
+ u32 reserve1[8];
u32 treq1;
u32 treq2;
- u32 reserve6[2];
- u32 reserve7[56];
- u32 reserve8[3];
+ u32 reserve2[6];
+ u32 data1;
+ u32 data2;
+ u32 reserve3[6];
+ u32 canipend1;
+ u32 canipend2;
+ u32 reserve4[6];
+ u32 canmval1;
+ u32 canmval2;
+ u32 reserve5[37];
u32 srst;
};
struct pch_can_priv {
struct can_priv can;
- unsigned int can_num;
struct pci_dev *dev;
- unsigned int tx_enable[MAX_MSG_OBJ];
- unsigned int rx_enable[MAX_MSG_OBJ];
- unsigned int rx_link[MAX_MSG_OBJ];
- unsigned int int_enables;
- unsigned int int_stat;
+ u32 tx_enable[PCH_TX_OBJ_END];
+ u32 rx_enable[PCH_TX_OBJ_END];
+ u32 rx_link[PCH_TX_OBJ_END];
+ u32 int_enables;
struct net_device *ndev;
- spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/
- unsigned int msg_obj[MAX_MSG_OBJ];
struct pch_can_regs __iomem *regs;
struct napi_struct napi;
- unsigned int tx_obj; /* Point next Tx Obj index */
- unsigned int use_msi;
+ int tx_obj; /* Point next Tx Obj index */
+ int use_msi;
};
static struct can_bittiming_const pch_can_bittiming_const = {
.name = KBUILD_MODNAME,
- .tseg1_min = 1,
+ .tseg1_min = 2,
.tseg1_max = 16,
.tseg2_min = 1,
.tseg2_max = 8,
@@ -228,15 +216,15 @@ static void pch_can_set_run_mode(struct pch_can_priv *priv,
{
switch (mode) {
case PCH_CAN_RUN:
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
break;
case PCH_CAN_STOP:
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
+ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
break;
default:
- dev_err(&priv->ndev->dev, "%s -> Invalid Mode.\n", __func__);
+ netdev_err(priv->ndev, "%s -> Invalid Mode.\n", __func__);
break;
}
}
@@ -246,357 +234,184 @@ static void pch_can_set_optmode(struct pch_can_priv *priv)
u32 reg_val = ioread32(&priv->regs->opt);
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- reg_val |= CAN_OPT_SILENT;
+ reg_val |= PCH_OPT_SILENT;
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
- reg_val |= CAN_OPT_LBACK;
+ reg_val |= PCH_OPT_LBACK;
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT);
+ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
iowrite32(reg_val, &priv->regs->opt);
}
-static void pch_can_set_int_custom(struct pch_can_priv *priv)
+static void pch_can_rw_msg_obj(void __iomem *creq_addr, u32 num)
{
- /* Clearing the IE, SIE and EIE bits of Can control register. */
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
-
- /* Appropriately setting them. */
- pch_can_bit_set(&priv->regs->cont,
- ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
-}
+ int counter = PCH_COUNTER_LIMIT;
+ u32 ifx_creq;
-/* This function retrieves interrupt enabled for the CAN device. */
-static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
-{
- /* Obtaining the status of IE, SIE and EIE interrupt bits. */
- *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1);
+ iowrite32(num, creq_addr);
+ while (counter) {
+ ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
+ if (!ifx_creq)
+ break;
+ counter--;
+ udelay(1);
+ }
+ if (!counter)
+ pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
}
static void pch_can_set_int_enables(struct pch_can_priv *priv,
enum pch_can_mode interrupt_no)
{
switch (interrupt_no) {
- case PCH_CAN_ENABLE:
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
- break;
-
case PCH_CAN_DISABLE:
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE);
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
break;
case PCH_CAN_ALL:
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
break;
case PCH_CAN_NONE:
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
break;
default:
- dev_err(&priv->ndev->dev, "Invalid interrupt number.\n");
+ netdev_err(priv->ndev, "Invalid interrupt number.\n");
break;
}
}
-static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
+static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num,
+ int set, enum pch_ifreg dir)
{
- u32 counter = COUNTER_LIMIT;
- u32 ifx_creq;
+ u32 ie;
- iowrite32(num, creq_addr);
- while (counter) {
- ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
- if (!ifx_creq)
- break;
- counter--;
- udelay(1);
- }
- if (!counter)
- pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
-}
-
-static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
- u32 set)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- /* Reading the receive buffer data from RAM to Interface1 registers */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
-
- /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
- &priv->regs->if1_cmask);
-
- if (set == ENABLE) {
- /* Setting the MsgVal and RxIE bits */
- pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
- pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
-
- } else if (set == DISABLE) {
- /* Resetting the MsgVal and RxIE bits */
- pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
- pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
- }
-
- pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
-}
-
-static void pch_can_rx_enable_all(struct pch_can_priv *priv)
-{
- int i;
-
- /* Traversing to obtain the object configured as receivers. */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX)
- pch_can_set_rx_enable(priv, i + 1, ENABLE);
- }
-}
+ if (dir)
+ ie = PCH_IF_MCONT_TXIE;
+ else
+ ie = PCH_IF_MCONT_RXIE;
-static void pch_can_rx_disable_all(struct pch_can_priv *priv)
-{
- int i;
+ /* Reading the Msg buffer from Message RAM to IF1/2 registers. */
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
- /* Traversing to obtain the object configured as receivers. */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX)
- pch_can_set_rx_enable(priv, i + 1, DISABLE);
- }
-}
+ /* Setting the IF1/2MASK1 register to access MsgVal and RxIE bits */
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
+ &priv->regs->ifregs[dir].cmask);
-static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
- u32 set)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- /* Reading the Msg buffer from Message RAM to Interface2 registers. */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
-
- /* Setting the IF2CMASK register for accessing the
- MsgVal and TxIE bits */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
- &priv->regs->if2_cmask);
-
- if (set == ENABLE) {
- /* Setting the MsgVal and TxIE bits */
- pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
- pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
- } else if (set == DISABLE) {
- /* Resetting the MsgVal and TxIE bits. */
- pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
- pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+ if (set) {
+ /* Setting the MsgVal and RxIE/TxIE bits */
+ pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie);
+ pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
+ } else {
+ /* Clearing the MsgVal and RxIE/TxIE bits */
+ pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie);
+ pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
}
- pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
}
-static void pch_can_tx_enable_all(struct pch_can_priv *priv)
+static void pch_can_set_rx_all(struct pch_can_priv *priv, int set)
{
int i;
- /* Traversing to obtain the object configured as transmit object. */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_TX)
- pch_can_set_tx_enable(priv, i + 1, ENABLE);
- }
+ /* Traversing to obtain the object configured as receivers. */
+ for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++)
+ pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG);
}
-static void pch_can_tx_disable_all(struct pch_can_priv *priv)
+static void pch_can_set_tx_all(struct pch_can_priv *priv, int set)
{
int i;
/* Traversing to obtain the object configured as transmit object. */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_TX)
- pch_can_set_tx_enable(priv, i + 1, DISABLE);
- }
+ for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
+ pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG);
}
-static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
- u32 *enable)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
-
- if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
- ((ioread32(&priv->regs->if1_mcont)) &
- CAN_IF_MCONT_RXIE))
- *enable = ENABLE;
- else
- *enable = DISABLE;
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
-}
-
-static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
- u32 *enable)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
-
- if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
- ((ioread32(&priv->regs->if2_mcont)) &
- CAN_IF_MCONT_TXIE)) {
- *enable = ENABLE;
- } else {
- *enable = DISABLE;
- }
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
-}
-
-static int pch_can_int_pending(struct pch_can_priv *priv)
+static u32 pch_can_int_pending(struct pch_can_priv *priv)
{
return ioread32(&priv->regs->intr) & 0xffff;
}
-static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
- u32 buffer_num, u32 set)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
- if (set == ENABLE)
- pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
- else
- pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
-
- pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
-}
-
-static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
- u32 buffer_num, u32 *link)
+static void pch_can_clear_if_buffers(struct pch_can_priv *priv)
{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
-
- if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
- *link = DISABLE;
- else
- *link = ENABLE;
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
-}
-
-static void pch_can_clear_buffers(struct pch_can_priv *priv)
-{
- int i;
-
- for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
- iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
- iowrite32(0xffff, &priv->regs->if1_mask1);
- iowrite32(0xffff, &priv->regs->if1_mask2);
- iowrite32(0x0, &priv->regs->if1_id1);
- iowrite32(0x0, &priv->regs->if1_id2);
- iowrite32(0x0, &priv->regs->if1_mcont);
- iowrite32(0x0, &priv->regs->if1_dataa1);
- iowrite32(0x0, &priv->regs->if1_dataa2);
- iowrite32(0x0, &priv->regs->if1_datab1);
- iowrite32(0x0, &priv->regs->if1_datab2);
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
- CAN_CMASK_ARB | CAN_CMASK_CTRL,
- &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
- }
-
- for (i = i; i < PCH_OBJ_NUM; i++) {
- iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
- iowrite32(0xffff, &priv->regs->if2_mask1);
- iowrite32(0xffff, &priv->regs->if2_mask2);
- iowrite32(0x0, &priv->regs->if2_id1);
- iowrite32(0x0, &priv->regs->if2_id2);
- iowrite32(0x0, &priv->regs->if2_mcont);
- iowrite32(0x0, &priv->regs->if2_dataa1);
- iowrite32(0x0, &priv->regs->if2_dataa2);
- iowrite32(0x0, &priv->regs->if2_datab1);
- iowrite32(0x0, &priv->regs->if2_datab2);
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
- CAN_CMASK_ARB | CAN_CMASK_CTRL,
- &priv->regs->if2_cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+ int i; /* Msg Obj ID (1~32) */
+
+ for (i = PCH_RX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
+ iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask);
+ iowrite32(0xffff, &priv->regs->ifregs[0].mask1);
+ iowrite32(0xffff, &priv->regs->ifregs[0].mask2);
+ iowrite32(0x0, &priv->regs->ifregs[0].id1);
+ iowrite32(0x0, &priv->regs->ifregs[0].id2);
+ iowrite32(0x0, &priv->regs->ifregs[0].mcont);
+ iowrite32(0x0, &priv->regs->ifregs[0].data[0]);
+ iowrite32(0x0, &priv->regs->ifregs[0].data[1]);
+ iowrite32(0x0, &priv->regs->ifregs[0].data[2]);
+ iowrite32(0x0, &priv->regs->ifregs[0].data[3]);
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+ PCH_CMASK_ARB | PCH_CMASK_CTRL,
+ &priv->regs->ifregs[0].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
}
}
static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
{
int i;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX) {
- iowrite32(CAN_CMASK_RX_TX_GET,
- &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+ for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
- iowrite32(0x0, &priv->regs->if1_id1);
- iowrite32(0x0, &priv->regs->if1_id2);
+ iowrite32(0x0, &priv->regs->ifregs[0].id1);
+ iowrite32(0x0, &priv->regs->ifregs[0].id2);
- pch_can_bit_set(&priv->regs->if1_mcont,
- CAN_IF_MCONT_UMASK);
+ pch_can_bit_set(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_UMASK);
- /* Set FIFO mode set to 0 except last Rx Obj*/
- pch_can_bit_clear(&priv->regs->if1_mcont,
- CAN_IF_MCONT_EOB);
- /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
- if (i == (PCH_RX_OBJ_NUM - 1))
- pch_can_bit_set(&priv->regs->if1_mcont,
- CAN_IF_MCONT_EOB);
+ /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
+ if (i == PCH_RX_OBJ_END)
+ pch_can_bit_set(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_EOB);
+ else
+ pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_EOB);
- iowrite32(0, &priv->regs->if1_mask1);
- pch_can_bit_clear(&priv->regs->if1_mask2,
- 0x1fff | CAN_MASK2_MDIR_MXTD);
+ iowrite32(0, &priv->regs->ifregs[0].mask1);
+ pch_can_bit_clear(&priv->regs->ifregs[0].mask2,
+ 0x1fff | PCH_MASK2_MDIR_MXTD);
- /* Setting CMASK for writing */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
- CAN_CMASK_ARB | CAN_CMASK_CTRL,
- &priv->regs->if1_cmask);
+ /* Setting CMASK for writing */
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
+ PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
- } else if (priv->msg_obj[i] == MSG_OBJ_TX) {
- iowrite32(CAN_CMASK_RX_TX_GET,
- &priv->regs->if2_cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
+ }
- /* Resetting DIR bit for reception */
- iowrite32(0x0, &priv->regs->if2_id1);
- iowrite32(0x0, &priv->regs->if2_id2);
- pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
+ for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
- /* Setting EOB bit for transmitter */
- iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont);
+ /* Resetting DIR bit for reception */
+ iowrite32(0x0, &priv->regs->ifregs[1].id1);
+ iowrite32(PCH_ID2_DIR, &priv->regs->ifregs[1].id2);
- pch_can_bit_set(&priv->regs->if2_mcont,
- CAN_IF_MCONT_UMASK);
+ /* Setting EOB bit for transmitter */
+ iowrite32(PCH_IF_MCONT_EOB | PCH_IF_MCONT_UMASK,
+ &priv->regs->ifregs[1].mcont);
- iowrite32(0, &priv->regs->if2_mask1);
- pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff);
+ iowrite32(0, &priv->regs->ifregs[1].mask1);
+ pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);
- /* Setting CMASK for writing */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
- CAN_CMASK_ARB | CAN_CMASK_CTRL,
- &priv->regs->if2_cmask);
+ /* Setting CMASK for writing */
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
+ PCH_CMASK_CTRL, &priv->regs->ifregs[1].cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
- }
+ pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
}
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
}
static void pch_can_init(struct pch_can_priv *priv)
@@ -605,7 +420,7 @@ static void pch_can_init(struct pch_can_priv *priv)
pch_can_set_run_mode(priv, PCH_CAN_STOP);
/* Clearing all the message object buffers. */
- pch_can_clear_buffers(priv);
+ pch_can_clear_if_buffers(priv);
/* Configuring the respective message object as either rx/tx object. */
pch_can_config_rx_tx_buffers(priv);
@@ -623,57 +438,47 @@ static void pch_can_release(struct pch_can_priv *priv)
pch_can_set_int_enables(priv, PCH_CAN_NONE);
/* Disabling all the receive object. */
- pch_can_rx_disable_all(priv);
+ pch_can_set_rx_all(priv, 0);
/* Disabling all the transmit object. */
- pch_can_tx_disable_all(priv);
+ pch_can_set_tx_all(priv, 0);
}
/* This function clears interrupt(s) from the CAN device. */
static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
{
- if (mask == CAN_STATUS_INT) {
- ioread32(&priv->regs->stat);
- return;
- }
-
/* Clear interrupt for transmit object */
- if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) {
- /* Setting CMASK for clearing interrupts for
- frame transmission. */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
- &priv->regs->if2_cmask);
-
- /* Resetting the ID registers. */
- pch_can_bit_set(&priv->regs->if2_id2,
- CAN_ID2_DIR | (0x7ff << 2));
- iowrite32(0x0, &priv->regs->if2_id1);
-
- /* Claring NewDat, TxRqst & IntPnd */
- pch_can_bit_clear(&priv->regs->if2_mcont,
- CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
- CAN_IF_MCONT_TXRQXT);
- pch_can_check_if_busy(&priv->regs->if2_creq, mask);
- } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
+ if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
/* Setting CMASK for clearing the reception interrupts. */
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
- &priv->regs->if1_cmask);
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
+ &priv->regs->ifregs[0].cmask);
/* Clearing the Dir bit. */
- pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+ pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
/* Clearing NewDat & IntPnd */
- pch_can_bit_clear(&priv->regs->if1_mcont,
- CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND);
+ pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);
- pch_can_check_if_busy(&priv->regs->if1_creq, mask);
- }
-}
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, mask);
+ } else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
+ /*
+ * Setting CMASK for clearing interrupts for frame transmission.
+ */
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
+ &priv->regs->ifregs[1].cmask);
-static int pch_can_get_buffer_status(struct pch_can_priv *priv)
-{
- return (ioread32(&priv->regs->treq1) & 0xffff) |
- ((ioread32(&priv->regs->treq2) & 0xffff) << 16);
+ /* Resetting the ID registers. */
+ pch_can_bit_set(&priv->regs->ifregs[1].id2,
+ PCH_ID2_DIR | (0x7ff << 2));
+ iowrite32(0x0, &priv->regs->ifregs[1].id1);
+
+	/* Clearing NewDat, TxRqst & IntPnd */
+ pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
+ PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
+ PCH_IF_MCONT_TXRQXT);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, mask);
+ }
}
static void pch_can_reset(struct pch_can_priv *priv)
@@ -688,7 +493,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
struct sk_buff *skb;
struct pch_can_priv *priv = netdev_priv(ndev);
struct can_frame *cf;
- u32 errc;
+ u32 errc, lec;
struct net_device_stats *stats = &(priv->ndev->stats);
enum can_state state = priv->can.state;
@@ -697,26 +502,24 @@ static void pch_can_error(struct net_device *ndev, u32 status)
return;
if (status & PCH_BUS_OFF) {
- pch_can_tx_disable_all(priv);
- pch_can_rx_disable_all(priv);
+ pch_can_set_tx_all(priv, 0);
+ pch_can_set_rx_all(priv, 0);
state = CAN_STATE_BUS_OFF;
cf->can_id |= CAN_ERR_BUSOFF;
can_bus_off(ndev);
- pch_can_set_run_mode(priv, PCH_CAN_RUN);
- dev_err(&ndev->dev, "%s -> Bus Off occurres.\n", __func__);
}
+ errc = ioread32(&priv->regs->errc);
/* Warning interrupt. */
if (status & PCH_EWARN) {
state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
cf->can_id |= CAN_ERR_CRTL;
- errc = ioread32(&priv->regs->errc);
- if (((errc & CAN_REC) >> 8) > 96)
+ if (((errc & PCH_REC) >> 8) > 96)
cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
- if ((errc & CAN_TEC) > 96)
+ if ((errc & PCH_TEC) > 96)
cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
- dev_warn(&ndev->dev,
+ netdev_dbg(ndev,
"%s -> Error Counter is more than 96.\n", __func__);
}
/* Error passive interrupt. */
@@ -724,46 +527,52 @@ static void pch_can_error(struct net_device *ndev, u32 status)
priv->can.can_stats.error_passive++;
state = CAN_STATE_ERROR_PASSIVE;
cf->can_id |= CAN_ERR_CRTL;
- errc = ioread32(&priv->regs->errc);
- if (((errc & CAN_REC) >> 8) > 127)
+ if (((errc & PCH_REC) >> 8) > 127)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
- if ((errc & CAN_TEC) > 127)
+ if ((errc & PCH_TEC) > 127)
cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
- dev_err(&ndev->dev,
+ netdev_dbg(ndev,
"%s -> CAN controller is ERROR PASSIVE .\n", __func__);
}
- if (status & PCH_LEC_ALL) {
+ lec = status & PCH_LEC_ALL;
+ switch (lec) {
+ case PCH_STUF_ERR:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
priv->can.can_stats.bus_error++;
stats->rx_errors++;
- switch (status & PCH_LEC_ALL) {
- case PCH_STUF_ERR:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- break;
- case PCH_FORM_ERR:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- break;
- case PCH_ACK_ERR:
- cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
- CAN_ERR_PROT_LOC_ACK_DEL;
- break;
- case PCH_BIT1_ERR:
- case PCH_BIT0_ERR:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- break;
- case PCH_CRC_ERR:
- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
- CAN_ERR_PROT_LOC_CRC_DEL;
- break;
- default:
- iowrite32(status | PCH_LEC_ALL, &priv->regs->stat);
- break;
- }
-
+ break;
+ case PCH_FORM_ERR:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ break;
+ case PCH_ACK_ERR:
+ cf->can_id |= CAN_ERR_ACK;
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ break;
+ case PCH_BIT1_ERR:
+ case PCH_BIT0_ERR:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ break;
+ case PCH_CRC_ERR:
+ cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ break;
+ case PCH_LEC_ALL: /* Written by CPU. No error status */
+ break;
}
+ cf->data[6] = errc & PCH_TEC;
+ cf->data[7] = (errc & PCH_REC) >> 8;
+
priv->can.state = state;
- netif_rx(skb);
+ netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
@@ -774,204 +583,202 @@ static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
struct net_device *ndev = (struct net_device *)dev_id;
struct pch_can_priv *priv = netdev_priv(ndev);
- pch_can_set_int_enables(priv, PCH_CAN_NONE);
+ if (!pch_can_int_pending(priv))
+ return IRQ_NONE;
+ pch_can_set_int_enables(priv, PCH_CAN_NONE);
napi_schedule(&priv->napi);
-
return IRQ_HANDLED;
}
-static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
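+/*
+ * Clear the receive interrupt for obj_id; once the FIFO threshold is reached,
+ * the interrupts of objects 1..PCH_FIFO_THRESH are cleared in one batch.
+ */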
+static void pch_fifo_thresh(struct pch_can_priv *priv, int obj_id)
+{
+ if (obj_id < PCH_FIFO_THRESH) {
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
+ PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);
+
+ /* Clearing the Dir bit. */
+ pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
+
+ /* Clearing NewDat & IntPnd */
+ pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_INTPND);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
+ } else if (obj_id > PCH_FIFO_THRESH) {
+ pch_can_int_clr(priv, obj_id);
+ } else if (obj_id == PCH_FIFO_THRESH) {
+ int cnt;
+ for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
+ pch_can_int_clr(priv, cnt + 1);
+ }
+}
+
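+/*
+ * A receive object was overwritten before it could be read: clear the
+ * MsgLost flag and report an rx-overflow error frame to the stack.
+ */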
+static void pch_can_rx_msg_lost(struct net_device *ndev, int obj_id)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &(priv->ndev->stats);
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ netdev_dbg(priv->ndev, "Msg Obj is overwritten.\n");
+ pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_MSGLOST);
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
+ &priv->regs->ifregs[0].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ netif_receive_skb(skb);
+}
+
+static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
{
u32 reg;
canid_t id;
- u32 ide;
- u32 rtr;
- int i, j, k;
int rcv_pkts = 0;
struct sk_buff *skb;
struct can_frame *cf;
struct pch_can_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &(priv->ndev->stats);
+ int i;
+ u32 id2;
+ u16 data_reg;
- /* Reading the messsage object from the Message RAM */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
+ do {
+		/* Reading the message object from the Message RAM */
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_num);
+
+ /* Reading the MCONT register. */
+ reg = ioread32(&priv->regs->ifregs[0].mcont);
- /* Reading the MCONT register. */
- reg = ioread32(&priv->regs->if1_mcont);
- reg &= 0xffff;
+ if (reg & PCH_IF_MCONT_EOB)
+ break;
- for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
/* If MsgLost bit set. */
- if (reg & CAN_IF_MCONT_MSGLOST) {
- dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
- pch_can_bit_clear(&priv->regs->if1_mcont,
- CAN_IF_MCONT_MSGLOST);
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
- &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, k);
-
- skb = alloc_can_err_skb(ndev, &cf);
- if (!skb)
- return -ENOMEM;
-
- priv->can.can_stats.error_passive++;
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
-
- netif_receive_skb(skb);
+ if (reg & PCH_IF_MCONT_MSGLOST) {
+ pch_can_rx_msg_lost(ndev, obj_num);
rcv_pkts++;
- goto RX_NEXT;
+ quota--;
+ obj_num++;
+ continue;
+ } else if (!(reg & PCH_IF_MCONT_NEWDAT)) {
+ obj_num++;
+ continue;
}
- if (!(reg & CAN_IF_MCONT_NEWDAT))
- goto RX_NEXT;
skb = alloc_can_skb(priv->ndev, &cf);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ netdev_err(ndev, "alloc_can_skb Failed\n");
+ return rcv_pkts;
+ }
/* Get Received data */
- ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14;
- if (ide) {
- id = (ioread32(&priv->regs->if1_id1) & 0xffff);
- id |= (((ioread32(&priv->regs->if1_id2)) &
- 0x1fff) << 16);
- cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ id2 = ioread32(&priv->regs->ifregs[0].id2);
+ if (id2 & PCH_ID2_XTD) {
+ id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
+ id |= (((id2) & 0x1fff) << 16);
+ cf->can_id = id | CAN_EFF_FLAG;
} else {
- id = (((ioread32(&priv->regs->if1_id2)) &
- (CAN_SFF_MASK << 2)) >> 2);
- cf->can_id = (id & CAN_SFF_MASK);
+ id = (id2 >> 2) & CAN_SFF_MASK;
+ cf->can_id = id;
}
- rtr = (ioread32(&priv->regs->if1_id2) & CAN_ID2_DIR);
- if (rtr) {
- cf->can_dlc = 0;
+ if (id2 & PCH_ID2_DIR)
cf->can_id |= CAN_RTR_FLAG;
- } else {
- cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) &
- 0x0f);
- }
- for (i = 0, j = 0; i < cf->can_dlc; j++) {
- reg = ioread32(&priv->regs->if1_dataa1 + j*4);
- cf->data[i++] = cpu_to_le32(reg & 0xff);
- if (i == cf->can_dlc)
- break;
- cf->data[i++] = cpu_to_le32((reg >> 8) & 0xff);
+ cf->can_dlc = get_can_dlc((ioread32(&priv->regs->
+ ifregs[0].mcont)) & 0xF);
+
+ for (i = 0; i < cf->can_dlc; i += 2) {
+ data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
+ cf->data[i] = data_reg;
+ cf->data[i + 1] = data_reg >> 8;
}
netif_receive_skb(skb);
rcv_pkts++;
stats->rx_packets++;
+ quota--;
stats->rx_bytes += cf->can_dlc;
- if (k < PCH_FIFO_THRESH) {
- iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL |
- CAN_CMASK_ARB, &priv->regs->if1_cmask);
-
- /* Clearing the Dir bit. */
- pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
-
- /* Clearing NewDat & IntPnd */
- pch_can_bit_clear(&priv->regs->if1_mcont,
- CAN_IF_MCONT_INTPND);
- pch_can_check_if_busy(&priv->regs->if1_creq, k);
- } else if (k > PCH_FIFO_THRESH) {
- pch_can_int_clr(priv, k);
- } else if (k == PCH_FIFO_THRESH) {
- int cnt;
- for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
- pch_can_int_clr(priv, cnt+1);
- }
-RX_NEXT:
- /* Reading the messsage object from the Message RAM */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
- pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
- reg = ioread32(&priv->regs->if1_mcont);
- }
+ pch_fifo_thresh(priv, obj_num);
+ obj_num++;
+ } while (quota > 0);
return rcv_pkts;
}
-static int pch_can_rx_poll(struct napi_struct *napi, int quota)
+
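+/*
+ * Transmission of a message object has completed: release the echoed skb,
+ * update the tx statistics and restart the queue after the last tx object.
+ */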
+static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
{
- struct net_device *ndev = napi->dev;
struct pch_can_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &(priv->ndev->stats);
u32 dlc;
+
+ can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
+ iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
+ &priv->regs->ifregs[1].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
+ dlc = get_can_dlc(ioread32(&priv->regs->ifregs[1].mcont) &
+ PCH_IF_MCONT_DLC);
+ stats->tx_bytes += dlc;
+ stats->tx_packets++;
+ if (int_stat == PCH_TX_OBJ_END)
+ netif_wake_queue(ndev);
+}
+
+static int pch_can_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct pch_can_priv *priv = netdev_priv(ndev);
u32 int_stat;
- int rcv_pkts = 0;
u32 reg_stat;
- unsigned long flags;
+ int quota_save = quota;
int_stat = pch_can_int_pending(priv);
if (!int_stat)
- return 0;
+ goto end;
-INT_STAT:
- if (int_stat == CAN_STATUS_INT) {
+ if (int_stat == PCH_STATUS_INT) {
reg_stat = ioread32(&priv->regs->stat);
- if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
- if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
- pch_can_error(ndev, reg_stat);
- }
- if (reg_stat & PCH_TX_OK) {
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq,
- ioread32(&priv->regs->intr));
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
- pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
+ if ((reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) &&
+ ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)) {
+ pch_can_error(ndev, reg_stat);
+ quota--;
}
- if (reg_stat & PCH_RX_OK)
- pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
+ if (reg_stat & (PCH_TX_OK | PCH_RX_OK))
+ pch_can_bit_clear(&priv->regs->stat,
+ reg_stat & (PCH_TX_OK | PCH_RX_OK));
int_stat = pch_can_int_pending(priv);
- if (int_stat == CAN_STATUS_INT)
- goto INT_STAT;
}
-MSG_OBJ:
- if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) {
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- rcv_pkts = pch_can_rx_normal(ndev, int_stat);
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
- if (rcv_pkts < 0)
- return 0;
- } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
- if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
- /* Handle transmission interrupt */
- can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
- iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
- &priv->regs->if2_cmask);
- dlc = ioread32(&priv->regs->if2_mcont) &
- CAN_IF_MCONT_DLC;
- pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
- if (dlc > 8)
- dlc = 8;
- stats->tx_bytes += dlc;
- stats->tx_packets++;
- }
- }
+ if (quota == 0)
+ goto end;
- int_stat = pch_can_int_pending(priv);
- if (int_stat == CAN_STATUS_INT)
- goto INT_STAT;
- else if (int_stat >= 1 && int_stat <= 32)
- goto MSG_OBJ;
+ if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) {
+ quota -= pch_can_rx_normal(ndev, int_stat, quota);
+ } else if ((int_stat >= PCH_TX_OBJ_START) &&
+ (int_stat <= PCH_TX_OBJ_END)) {
+ /* Handle transmission interrupt */
+ pch_can_tx_complete(ndev, int_stat);
+ }
+end:
napi_complete(napi);
pch_can_set_int_enables(priv, PCH_CAN_ALL);
- return rcv_pkts;
+ return quota_save - quota;
}
static int pch_set_bittiming(struct net_device *ndev)
@@ -980,20 +787,18 @@ static int pch_set_bittiming(struct net_device *ndev)
const struct can_bittiming *bt = &priv->can.bittiming;
u32 canbit;
u32 bepe;
- u32 brp;
/* Setting the CCE bit for accessing the Can Timing register. */
- pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE);
-
- brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
- canbit = brp & MSK_BITT_BRP;
- canbit |= (bt->sjw - 1) << BIT_BITT_SJW;
- canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1;
- canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2;
- bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
+ pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);
+
+ canbit = (bt->brp - 1) & PCH_MSK_BITT_BRP;
+ canbit |= (bt->sjw - 1) << PCH_BIT_SJW_SHIFT;
+ canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1_SHIFT;
+ canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2_SHIFT;
+ bepe = ((bt->brp - 1) & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE_SHIFT;
iowrite32(canbit, &priv->regs->bitt);
iowrite32(bepe, &priv->regs->brpe);
- pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE);
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);
return 0;
}
@@ -1008,8 +813,8 @@ static void pch_can_start(struct net_device *ndev)
pch_set_bittiming(ndev);
pch_can_set_optmode(priv);
- pch_can_tx_enable_all(priv);
- pch_can_rx_enable_all(priv);
+ pch_can_set_tx_all(priv, 1);
+ pch_can_set_rx_all(priv, 1);
/* Setting the CAN to run mode. */
pch_can_set_run_mode(priv, PCH_CAN_RUN);
@@ -1041,27 +846,18 @@ static int pch_can_open(struct net_device *ndev)
struct pch_can_priv *priv = netdev_priv(ndev);
int retval;
- retval = pci_enable_msi(priv->dev);
- if (retval) {
- dev_info(&ndev->dev, "PCH CAN opened without MSI\n");
- priv->use_msi = 0;
- } else {
- dev_info(&ndev->dev, "PCH CAN opened with MSI\n");
- priv->use_msi = 1;
- }
-
- /* Regsitering the interrupt. */
+	/* Registering the interrupt. */
retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
ndev->name, ndev);
if (retval) {
- dev_err(&ndev->dev, "request_irq failed.\n");
+ netdev_err(ndev, "request_irq failed.\n");
goto req_irq_err;
}
/* Open common can device */
retval = open_candev(ndev);
if (retval) {
- dev_err(ndev->dev.parent, "open_candev() failed %d\n", retval);
+ netdev_err(ndev, "open_candev() failed %d\n", retval);
goto err_open_candev;
}
@@ -1075,9 +871,6 @@ static int pch_can_open(struct net_device *ndev)
err_open_candev:
free_irq(priv->dev->irq, ndev);
req_irq_err:
- if (priv->use_msi)
- pci_disable_msi(priv->dev);
-
pch_can_release(priv);
return retval;
@@ -1091,102 +884,65 @@ static int pch_close(struct net_device *ndev)
napi_disable(&priv->napi);
pch_can_release(priv);
free_irq(priv->dev->irq, ndev);
- if (priv->use_msi)
- pci_disable_msi(priv->dev);
close_candev(ndev);
priv->can.state = CAN_STATE_STOPPED;
return 0;
}
-static int pch_get_msg_obj_sts(struct net_device *ndev, u32 obj_id)
-{
- u32 buffer_status = 0;
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- /* Getting the message object status. */
- buffer_status = (u32) pch_can_get_buffer_status(priv);
-
- return buffer_status & obj_id;
-}
-
-
static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
{
- int i, j;
- unsigned long flags;
struct pch_can_priv *priv = netdev_priv(ndev);
struct can_frame *cf = (struct can_frame *)skb->data;
- int tx_buffer_avail = 0;
+ int tx_obj_no;
+ int i;
+ u32 id2;
if (can_dropped_invalid_skb(ndev, skb))
return NETDEV_TX_OK;
- if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* Point tail Obj */
- while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) <<
- PCH_RX_OBJ_NUM)))
- udelay(500);
+ tx_obj_no = priv->tx_obj;
+ if (priv->tx_obj == PCH_TX_OBJ_END) {
+ if (ioread32(&priv->regs->treq2) & PCH_TREQ2_TX_MASK)
+ netif_stop_queue(ndev);
- priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj ID */
- tx_buffer_avail = priv->tx_obj; /* Point Tail of Tx Obj */
+ priv->tx_obj = PCH_TX_OBJ_START;
} else {
- tx_buffer_avail = priv->tx_obj;
+ priv->tx_obj++;
}
- priv->tx_obj++;
-
- /* Attaining the lock. */
- spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-
- /* Reading the Msg Obj from the Msg RAM to the Interface register. */
- iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
- pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
/* Setting the CMASK register. */
- pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL);
+ pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL);
/* If ID extended is set. */
- pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
- pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
if (cf->can_id & CAN_EFF_FLAG) {
- pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff);
- pch_can_bit_set(&priv->regs->if2_id2,
- ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
+ iowrite32(cf->can_id & 0xffff, &priv->regs->ifregs[1].id1);
+ id2 = ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD;
} else {
- pch_can_bit_set(&priv->regs->if2_id1, 0);
- pch_can_bit_set(&priv->regs->if2_id2,
- (cf->can_id & CAN_SFF_MASK) << 2);
+ iowrite32(0, &priv->regs->ifregs[1].id1);
+ id2 = (cf->can_id & CAN_SFF_MASK) << 2;
}
- /* If remote frame has to be transmitted.. */
- if (cf->can_id & CAN_RTR_FLAG)
- pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
+ id2 |= PCH_ID_MSGVAL;
- for (i = 0, j = 0; i < cf->can_dlc; j++) {
- iowrite32(le32_to_cpu(cf->data[i++]),
- (&priv->regs->if2_dataa1) + j*4);
- if (i == cf->can_dlc)
- break;
- iowrite32(le32_to_cpu(cf->data[i++] << 8),
- (&priv->regs->if2_dataa1) + j*4);
- }
-
- can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1);
+ /* If remote frame has to be transmitted.. */
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ id2 |= PCH_ID2_DIR;
- /* Updating the size of the data. */
- pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f);
- pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc);
+ iowrite32(id2, &priv->regs->ifregs[1].id2);
- /* Clearing IntPend, NewDat & TxRqst */
- pch_can_bit_clear(&priv->regs->if2_mcont,
- CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
- CAN_IF_MCONT_TXRQXT);
+ /* Copy data to register */
+ for (i = 0; i < cf->can_dlc; i += 2) {
+ iowrite16(cf->data[i] | (cf->data[i + 1] << 8),
+ &priv->regs->ifregs[1].data[i / 2]);
+ }
- /* Setting NewDat, TxRqst bits */
- pch_can_bit_set(&priv->regs->if2_mcont,
- CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
+ can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1);
- pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+	/* Set the size of the data. Update the IF2 mcont register */
+ iowrite32(cf->can_dlc | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
+ PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont);
- spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no);
return NETDEV_TX_OK;
}
@@ -1203,21 +959,98 @@ static void __devexit pch_can_remove(struct pci_dev *pdev)
struct pch_can_priv *priv = netdev_priv(ndev);
unregister_candev(priv->ndev);
- free_candev(priv->ndev);
- pci_iounmap(pdev, priv->regs);
+ if (priv->use_msi)
+ pci_disable_msi(priv->dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
pch_can_reset(priv);
+ pci_iounmap(pdev, priv->regs);
+ free_candev(priv->ndev);
}
#ifdef CONFIG_PM
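+/* Re-apply the IE/SIE/EIE interrupt enables saved in priv->int_enables. */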
+static void pch_can_set_int_custom(struct pch_can_priv *priv)
+{
+	/* Clearing the IE, SIE and EIE bits of the CAN control register. */
+ pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
+
+ /* Appropriately setting them. */
+ pch_can_bit_set(&priv->regs->cont,
+ ((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
+}
+
+/* This function retrieves the enabled interrupts for the CAN device. */
+static u32 pch_can_get_int_enables(struct pch_can_priv *priv)
+{
+ /* Obtaining the status of IE, SIE and EIE interrupt bits. */
+ return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1;
+}
+
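+/* Return 1 if the message object is valid and its rx/tx interrupt is enabled. */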
+static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
+ enum pch_ifreg dir)
+{
+ u32 ie, enable;
+
+ if (dir)
+ ie = PCH_IF_MCONT_RXIE;
+ else
+ ie = PCH_IF_MCONT_TXIE;
+
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
+
+ if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) &&
+ ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie))
+ enable = 1;
+ else
+ enable = 0;
+
+ return enable;
+}
+
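+/* Link (set) or unlink a receive object in the FIFO chain via its EOB bit. */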
+static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num, int set)
+{
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
+ iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
+ &priv->regs->ifregs[0].cmask);
+ if (set)
+ pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+ PCH_IF_MCONT_EOB);
+ else
+ pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB);
+
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
+}
+
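+/* Return 1 if the receive object is linked into the FIFO chain (EOB clear). */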
+static u32 pch_can_get_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num)
+{
+ u32 link;
+
+ iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
+ pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
+
+ if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB)
+ link = 0;
+ else
+ link = 1;
+ return link;
+}
+
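+/* Return the pending transmission request bits (TREQ1/TREQ2) of all objects. */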
+static int pch_can_get_buffer_status(struct pch_can_priv *priv)
+{
+ return (ioread32(&priv->regs->treq1) & 0xffff) |
+ (ioread32(&priv->regs->treq2) << 16);
+}
+
static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
{
- int i; /* Counter variable. */
- int retval; /* Return value. */
+ int i;
+ int retval;
u32 buf_stat; /* Variable for reading the transmit buffer status. */
- u32 counter = 0xFFFFFF;
+ int counter = PCH_COUNTER_LIMIT;
struct net_device *dev = pci_get_drvdata(pdev);
struct pch_can_priv *priv = netdev_priv(dev);
@@ -1226,7 +1059,7 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
pch_can_set_run_mode(priv, PCH_CAN_STOP);
	/* Indicate that we are about to/in suspend */
- priv->can.state = CAN_STATE_SLEEPING;
+ priv->can.state = CAN_STATE_STOPPED;
/* Waiting for all transmission to complete. */
while (counter) {
@@ -1240,31 +1073,26 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);
/* Save interrupt configuration and then disable them */
- pch_can_get_int_enables(priv, &(priv->int_enables));
+ priv->int_enables = pch_can_get_int_enables(priv);
pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
/* Save Tx buffer enable state */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_TX)
- pch_can_get_tx_enable(priv, i + 1,
- &(priv->tx_enable[i]));
- }
+ for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
+ priv->tx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
+ PCH_TX_IFREG);
/* Disable all Transmit buffers */
- pch_can_tx_disable_all(priv);
+ pch_can_set_tx_all(priv, 0);
/* Save Rx buffer enable state */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX) {
- pch_can_get_rx_enable(priv, i + 1,
- &(priv->rx_enable[i]));
- pch_can_get_rx_buffer_link(priv, i + 1,
- &(priv->rx_link[i]));
- }
+ for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
+ priv->rx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
+ PCH_RX_IFREG);
+ priv->rx_link[i - 1] = pch_can_get_rx_buffer_link(priv, i);
}
/* Disable all Receive buffers */
- pch_can_rx_disable_all(priv);
+ pch_can_set_rx_all(priv, 0);
retval = pci_save_state(pdev);
if (retval) {
dev_err(&pdev->dev, "pci_save_state failed.\n");
@@ -1279,8 +1107,8 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
static int pch_can_resume(struct pci_dev *pdev)
{
- int i; /* Counter variable. */
- int retval; /* Return variable. */
+ int i;
+ int retval;
struct net_device *dev = pci_get_drvdata(pdev);
struct pch_can_priv *priv = netdev_priv(dev);
@@ -1312,23 +1140,16 @@ static int pch_can_resume(struct pci_dev *pdev)
pch_can_set_optmode(priv);
/* Enabling the transmit buffer. */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_TX) {
- pch_can_set_tx_enable(priv, i + 1,
- priv->tx_enable[i]);
- }
- }
+ for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
+ pch_can_set_rxtx(priv, i, priv->tx_enable[i - 1], PCH_TX_IFREG);
/* Configuring the receive buffer and enabling them. */
- for (i = 0; i < PCH_OBJ_NUM; i++) {
- if (priv->msg_obj[i] == MSG_OBJ_RX) {
- /* Restore buffer link */
- pch_can_set_rx_buffer_link(priv, i + 1,
- priv->rx_link[i]);
-
- /* Restore buffer enables */
- pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
- }
+ for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
+ /* Restore buffer link */
+ pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i - 1]);
+
+ /* Restore buffer enables */
+ pch_can_set_rxtx(priv, i, priv->rx_enable[i - 1], PCH_RX_IFREG);
}
/* Enable CAN Interrupts */
@@ -1348,9 +1169,10 @@ static int pch_can_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
{
struct pch_can_priv *priv = netdev_priv(dev);
+ u32 errc = ioread32(&priv->regs->errc);
- bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC;
- bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8;
+ bec->txerr = errc & PCH_TEC;
+ bec->rxerr = (errc & PCH_REC) >> 8;
return 0;
}
@@ -1361,7 +1183,6 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
struct net_device *ndev;
struct pch_can_priv *priv;
int rc;
- int index;
void __iomem *addr;
rc = pci_enable_device(pdev);
@@ -1383,7 +1204,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
goto probe_exit_ipmap;
}
- ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM);
+ ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END);
if (!ndev) {
rc = -ENOMEM;
dev_err(&pdev->dev, "Failed alloc_candev\n");
@@ -1399,7 +1220,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
priv->can.do_get_berr_counter = pch_can_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_LOOPBACK;
- priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj */
+ priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj */
ndev->irq = pdev->irq;
ndev->flags |= IFF_ECHO;
@@ -1407,15 +1228,19 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &pch_can_netdev_ops;
-
priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
- for (index = 0; index < PCH_RX_OBJ_NUM;)
- priv->msg_obj[index++] = MSG_OBJ_RX;
- for (index = index; index < PCH_OBJ_NUM;)
- priv->msg_obj[index++] = MSG_OBJ_TX;
+ netif_napi_add(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);
- netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM);
+ rc = pci_enable_msi(priv->dev);
+ if (rc) {
+ netdev_err(ndev, "PCH CAN opened without MSI\n");
+ priv->use_msi = 0;
+ } else {
+ netdev_err(ndev, "PCH CAN opened with MSI\n");
+ pci_set_master(pdev);
+ priv->use_msi = 1;
+ }
rc = register_candev(ndev);
if (rc) {
@@ -1426,6 +1251,8 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
return 0;
probe_exit_reg_candev:
+ if (priv->use_msi)
+ pci_disable_msi(priv->dev);
free_candev(ndev);
probe_exit_alloc_candev:
pci_iounmap(pdev, addr);
@@ -1458,6 +1285,6 @@ static void __exit pch_can_pci_exit(void)
}
module_exit(pch_can_pci_exit);
-MODULE_DESCRIPTION("Controller Area Network Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.94");
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 437b5c716a24..231385b8e08f 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -383,7 +383,7 @@ static void plx_pci_reset_marathon(struct pci_dev *pdev)
{
void __iomem *reset_addr;
int i;
- int reset_bar[2] = {3, 5};
+ static const int reset_bar[2] = {3, 5};
plx_pci_reset_common(pdev);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 5bfccfdf3bbb..9793df6e3455 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -87,8 +87,7 @@ static int __devexit sja1000_ofp_remove(struct platform_device *ofdev)
return 0;
}
-static int __devinit sja1000_ofp_probe(struct platform_device *ofdev,
- const struct of_device_id *id)
+static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct net_device *dev;
@@ -107,17 +106,13 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev,
res_size = resource_size(&res);
if (!request_mem_region(res.start, res_size, DRV_NAME)) {
- dev_err(&ofdev->dev, "couldn't request %#llx..%#llx\n",
- (unsigned long long)res.start,
- (unsigned long long)res.end);
+ dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
return -EBUSY;
}
base = ioremap_nocache(res.start, res_size);
if (!base) {
- dev_err(&ofdev->dev, "couldn't ioremap %#llx..%#llx\n",
- (unsigned long long)res.start,
- (unsigned long long)res.end);
+ dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
err = -ENOMEM;
goto exit_release_mem;
}
@@ -214,7 +209,7 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
};
MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
-static struct of_platform_driver sja1000_ofp_driver = {
+static struct platform_driver sja1000_ofp_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
@@ -226,12 +221,12 @@ static struct of_platform_driver sja1000_ofp_driver = {
static int __init sja1000_ofp_init(void)
{
- return of_register_platform_driver(&sja1000_ofp_driver);
+ return platform_driver_register(&sja1000_ofp_driver);
}
module_init(sja1000_ofp_init);
static void __exit sja1000_ofp_exit(void)
{
- return of_unregister_platform_driver(&sja1000_ofp_driver);
+ return platform_driver_unregister(&sja1000_ofp_driver);
};
module_exit(sja1000_ofp_exit);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
new file mode 100644
index 000000000000..b423965a78d1
--- /dev/null
+++ b/drivers/net/can/slcan.c
@@ -0,0 +1,756 @@
+/*
+ * slcan.c - serial line CAN interface driver (using tty line discipline)
+ *
+ * This file is derived from linux/drivers/net/slip.c
+ *
+ * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307. You can also get it
+ * at http://www.gnu.org/licenses/gpl.html
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * Send feedback to <socketcan-users@lists.berlios.de>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <asm/system.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/can.h>
+
+static __initdata const char banner[] =
+ KERN_INFO "slcan: serial line CAN interface driver\n";
+
+MODULE_ALIAS_LDISC(N_SLCAN);
+MODULE_DESCRIPTION("serial line CAN interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
+
+#define SLCAN_MAGIC 0x53CA
+
+static int maxdev = 10; /* MAX number of SLCAN channels;
+ This can be overridden with
+ insmod slcan.ko maxdev=nnn */
+module_param(maxdev, int, 0);
+MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
+
+/* maximum rx buffer len: extended CAN frame with timestamp */
+#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
+
+struct slcan {
+ int magic;
+
+ /* Various fields. */
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* easy for intr handling */
+ spinlock_t lock;
+
+ /* These are pointers to the malloc()ed frame buffers. */
+ unsigned char rbuff[SLC_MTU]; /* receiver buffer */
+ int rcount; /* received chars counter */
+ unsigned char xbuff[SLC_MTU]; /* transmitter buffer */
+ unsigned char *xhead; /* pointer to next XMIT byte */
+ int xleft; /* bytes left in XMIT queue */
+
+ unsigned long flags; /* Flag values/ mode etc */
+#define SLF_INUSE 0 /* Channel in use */
+#define SLF_ERROR 1 /* Parity, etc. error */
+
+ unsigned char leased;
+ dev_t line;
+ pid_t pid;
+};
+
+static struct net_device **slcan_devs;
+
+ /************************************************************************
+ * SLCAN ENCAPSULATION FORMAT *
+ ************************************************************************/
+
+/*
+ * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
+ * frame format), a data length code (can_dlc) which can be from 0 to 8
+ * and up to <can_dlc> data bytes as payload.
+ * Additionally a CAN frame may become a remote transmission frame if the
+ * RTR-bit is set. This causes another ECU to send a CAN frame with the
+ * given can_id.
+ *
+ * The SLCAN ASCII representation of these different frame types is:
+ * <type> <id> <dlc> <data>*
+ *
+ * Extended frames (29 bit) are defined by capital characters in the type.
+ * RTR frames are defined as 'r' types - normal frames have 't' type:
+ * t => 11 bit data frame
+ * r => 11 bit RTR frame
+ * T => 29 bit data frame
+ * R => 29 bit RTR frame
+ *
+ * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base 16).
+ * The <dlc> is a one byte ASCII number ('0' - '8')
+ * The <data> section has as many ASCII Hex bytes as defined by the <dlc>
+ *
+ * Examples:
+ *
+ * t1230 : can_id 0x123, can_dlc 0, no data
+ * t4563112233 : can_id 0x456, can_dlc 3, data 0x11 0x22 0x33
+ * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, can_dlc 2, data 0xAA 0x55
+ * r1230 : can_id 0x123, can_dlc 0, no data, remote transmission request
+ *
+ */
+
+ /************************************************************************
+ * STANDARD SLCAN DECAPSULATION *
+ ************************************************************************/
+
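+/* Convert an ASCII hex digit to its value; returns 16 for a non-hex char. */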
+static int asc2nibble(char c)
+{
+
+ if ((c >= '0') && (c <= '9'))
+ return c - '0';
+
+ if ((c >= 'A') && (c <= 'F'))
+ return c - 'A' + 10;
+
+ if ((c >= 'a') && (c <= 'f'))
+ return c - 'a' + 10;
+
+ return 16; /* error */
+}
+
+/* Send one completely decapsulated can_frame to the network layer */
+static void slc_bump(struct slcan *sl)
+{
+ struct sk_buff *skb;
+ struct can_frame cf;
+ int i, dlc_pos, tmp;
+ unsigned long ultmp;
+ char cmd = sl->rbuff[0];
+
+ if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R'))
+ return;
+
+ if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */
+ dlc_pos = 4; /* dlc position tiiid */
+ else
+ dlc_pos = 9; /* dlc position Tiiiiiiiid */
+
+ if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
+ return;
+
+ cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */
+
+ sl->rbuff[dlc_pos] = 0; /* terminate can_id string */
+
+ if (strict_strtoul(sl->rbuff+1, 16, &ultmp))
+ return;
+
+ cf.can_id = ultmp;
+
+ if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */
+ cf.can_id |= CAN_EFF_FLAG;
+
+ if ((cmd | 0x20) == 'r') /* RTR frame */
+ cf.can_id |= CAN_RTR_FLAG;
+
+ *(u64 *) (&cf.data) = 0; /* clear payload */
+
+ for (i = 0, dlc_pos++; i < cf.can_dlc; i++) {
+
+ tmp = asc2nibble(sl->rbuff[dlc_pos++]);
+ if (tmp > 0x0F)
+ return;
+ cf.data[i] = (tmp << 4);
+ tmp = asc2nibble(sl->rbuff[dlc_pos++]);
+ if (tmp > 0x0F)
+ return;
+ cf.data[i] |= tmp;
+ }
+
+
+ skb = dev_alloc_skb(sizeof(struct can_frame));
+ if (!skb)
+ return;
+
+ skb->dev = sl->dev;
+ skb->protocol = htons(ETH_P_CAN);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ memcpy(skb_put(skb, sizeof(struct can_frame)),
+ &cf, sizeof(struct can_frame));
+ netif_rx(skb);
+
+ sl->dev->stats.rx_packets++;
+ sl->dev->stats.rx_bytes += cf.can_dlc;
+}
+
+/* parse tty input stream */
+static void slcan_unesc(struct slcan *sl, unsigned char s)
+{
+
+ if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+ (sl->rcount > 4)) {
+ slc_bump(sl);
+ }
+ sl->rcount = 0;
+ } else {
+ if (!test_bit(SLF_ERROR, &sl->flags)) {
+ if (sl->rcount < SLC_MTU) {
+ sl->rbuff[sl->rcount++] = s;
+ return;
+ } else {
+ sl->dev->stats.rx_over_errors++;
+ set_bit(SLF_ERROR, &sl->flags);
+ }
+ }
+ }
+}
+
+ /************************************************************************
+ * STANDARD SLCAN ENCAPSULATION *
+ ************************************************************************/
+
+/* Encapsulate one can_frame and stuff into a TTY queue. */
+static void slc_encaps(struct slcan *sl, struct can_frame *cf)
+{
+ int actual, idx, i;
+ char cmd;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ cmd = 'R'; /* becomes 'r' in standard frame format */
+ else
+ cmd = 'T'; /* becomes 't' in standard frame format */
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ sprintf(sl->xbuff, "%c%08X%d", cmd,
+ cf->can_id & CAN_EFF_MASK, cf->can_dlc);
+ else
+ sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20,
+ cf->can_id & CAN_SFF_MASK, cf->can_dlc);
+
+ idx = strlen(sl->xbuff);
+
+ for (i = 0; i < cf->can_dlc; i++)
+ sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]);
+
+ strcat(sl->xbuff, "\r"); /* add terminating character */
+
+ /* Order of next two lines is *very* important.
+ * When we are sending a little amount of data,
+ * the transfer may be completed inside the ops->write()
+ * routine, because it's running with interrupts enabled.
+ * In this case we *never* got WRITE_WAKEUP event,
+ * if we did not request it before write operation.
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff));
+ sl->xleft = strlen(sl->xbuff) - actual;
+ sl->xhead = sl->xbuff + actual;
+ sl->dev->stats.tx_bytes += cf->can_dlc;
+}
+
+/*
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ */
+static void slcan_write_wakeup(struct tty_struct *tty)
+{
+ int actual;
+ struct slcan *sl = (struct slcan *) tty->disc_data;
+
+ /* First make sure we're connected. */
+ if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
+ return;
+
+ if (sl->xleft <= 0) {
+ /* Now serial buffer is almost free & we can start
+ * transmission of another packet */
+ sl->dev->stats.tx_packets++;
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ netif_wake_queue(sl->dev);
+ return;
+ }
+
+ actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+ sl->xleft -= actual;
+ sl->xhead += actual;
+}
+
+/* Send a can_frame to a TTY queue. */
+static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+
+ if (skb->len != sizeof(struct can_frame))
+ goto out;
+
+ spin_lock(&sl->lock);
+ if (!netif_running(dev)) {
+ spin_unlock(&sl->lock);
+ printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name);
+ goto out;
+ }
+ if (sl->tty == NULL) {
+ spin_unlock(&sl->lock);
+ goto out;
+ }
+
+ netif_stop_queue(sl->dev);
+ slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */
+ spin_unlock(&sl->lock);
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+
+/******************************************
+ * Routines looking at netdevice side.
+ ******************************************/
+
+/* Netdevice UP -> DOWN routine */
+static int slc_close(struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+
+ spin_lock_bh(&sl->lock);
+ if (sl->tty) {
+ /* TTY discipline is running. */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ }
+ netif_stop_queue(dev);
+ sl->rcount = 0;
+ sl->xleft = 0;
+ spin_unlock_bh(&sl->lock);
+
+ return 0;
+}
+
+/* Netdevice DOWN -> UP routine */
+static int slc_open(struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+
+ if (sl->tty == NULL)
+ return -ENODEV;
+
+ sl->flags &= (1 << SLF_INUSE);
+ netif_start_queue(dev);
+ return 0;
+}
+
+/* Hook the destructor so we can free slcan devs at the right point in time */
+static void slc_free_netdev(struct net_device *dev)
+{
+ int i = dev->base_addr;
+ free_netdev(dev);
+ slcan_devs[i] = NULL;
+}
+
+static const struct net_device_ops slc_netdev_ops = {
+ .ndo_open = slc_open,
+ .ndo_stop = slc_close,
+ .ndo_start_xmit = slc_xmit,
+};
+
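+/* Initialize a freshly allocated netdevice as a CAN interface over a tty. */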
+static void slc_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &slc_netdev_ops;
+ dev->destructor = slc_free_netdev;
+
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 10;
+
+ dev->mtu = sizeof(struct can_frame);
+ dev->type = ARPHRD_CAN;
+
+ /* New-style flags. */
+ dev->flags = IFF_NOARP;
+ dev->features = NETIF_F_NO_CSUM;
+}
+
+/******************************************
+ Routines looking at TTY side.
+ ******************************************/
+
+/*
+ * Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of SLCAN data has been received, which can now be decapsulated
+ * and sent on to the CAN layer for further processing. This will not
+ * be re-entered while running but other ldisc functions may be called
+ * in parallel
+ */
+
+static void slcan_receive_buf(struct tty_struct *tty,
+ const unsigned char *cp, char *fp, int count)
+{
+ struct slcan *sl = (struct slcan *) tty->disc_data;
+
+ if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
+ return;
+
+ /* Read the characters out of the buffer */
+ while (count--) {
+ if (fp && *fp++) {
+ if (!test_and_set_bit(SLF_ERROR, &sl->flags))
+ sl->dev->stats.rx_errors++;
+ cp++;
+ continue;
+ }
+ slcan_unesc(sl, *cp++);
+ }
+}
+
+/************************************
+ * slcan_open helper routines.
+ ************************************/
+
+/* Collect hung-up channels */
+static void slc_sync(void)
+{
+ int i;
+ struct net_device *dev;
+ struct slcan *sl;
+
+ for (i = 0; i < maxdev; i++) {
+ dev = slcan_devs[i];
+ if (dev == NULL)
+ break;
+
+ sl = netdev_priv(dev);
+ if (sl->tty || sl->leased)
+ continue;
+ if (dev->flags & IFF_UP)
+ dev_close(dev);
+ }
+}
+
+/* Find a free SLCAN channel, and link in this `tty' line. */
+static struct slcan *slc_alloc(dev_t line)
+{
+ int i;
+ struct net_device *dev = NULL;
+ struct slcan *sl;
+
+ if (slcan_devs == NULL)
+ return NULL; /* Master array missing ! */
+
+ for (i = 0; i < maxdev; i++) {
+ dev = slcan_devs[i];
+ if (dev == NULL)
+ break;
+
+ }
+
+ /* Sorry, too many, all slots in use */
+ if (i >= maxdev)
+ return NULL;
+
+ if (dev) {
+ sl = netdev_priv(dev);
+ if (test_bit(SLF_INUSE, &sl->flags)) {
+ unregister_netdevice(dev);
+ dev = NULL;
+ slcan_devs[i] = NULL;
+ }
+ }
+
+ if (!dev) {
+ char name[IFNAMSIZ];
+ sprintf(name, "slcan%d", i);
+
+ dev = alloc_netdev(sizeof(*sl), name, slc_setup);
+ if (!dev)
+ return NULL;
+ dev->base_addr = i;
+ }
+
+ sl = netdev_priv(dev);
+
+ /* Initialize channel control data */
+ sl->magic = SLCAN_MAGIC;
+ sl->dev = dev;
+ spin_lock_init(&sl->lock);
+ slcan_devs[i] = dev;
+
+ return sl;
+}
+
+/*
+ * Open the high-level part of the SLCAN channel.
+ * This function is called by the TTY module when the
+ * SLCAN line discipline is called for. Because we are
+ * sure the tty line exists, we only have to link it to
+ * a free SLCAN channel...
+ *
+ * Called in process context serialized from other ldisc calls.
+ */
+
+static int slcan_open(struct tty_struct *tty)
+{
+ struct slcan *sl;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (tty->ops->write == NULL)
+ return -EOPNOTSUPP;
+
+ /* RTnetlink lock is misused here to serialize concurrent
+ opens of slcan channels. There are better ways, but it is
+ the simplest one.
+ */
+ rtnl_lock();
+
+	/* Collect hung-up channels. */
+ slc_sync();
+
+ sl = tty->disc_data;
+
+ err = -EEXIST;
+ /* First make sure we're not already connected. */
+ if (sl && sl->magic == SLCAN_MAGIC)
+ goto err_exit;
+
+ /* OK. Find a free SLCAN channel to use. */
+ err = -ENFILE;
+ sl = slc_alloc(tty_devnum(tty));
+ if (sl == NULL)
+ goto err_exit;
+
+ sl->tty = tty;
+ tty->disc_data = sl;
+ sl->line = tty_devnum(tty);
+ sl->pid = current->pid;
+
+ if (!test_bit(SLF_INUSE, &sl->flags)) {
+ /* Perform the low-level SLCAN initialization. */
+ sl->rcount = 0;
+ sl->xleft = 0;
+
+ set_bit(SLF_INUSE, &sl->flags);
+
+ err = register_netdevice(sl->dev);
+ if (err)
+ goto err_free_chan;
+ }
+
+ /* Done. We have linked the TTY line to a channel. */
+ rtnl_unlock();
+ tty->receive_room = 65536; /* We don't flow control */
+ return sl->dev->base_addr;
+
+err_free_chan:
+ sl->tty = NULL;
+ tty->disc_data = NULL;
+ clear_bit(SLF_INUSE, &sl->flags);
+
+err_exit:
+ rtnl_unlock();
+
+ /* Count references from TTY module */
+ return err;
+}
+
+/*
+ * Close down a SLCAN channel.
+ * This means flushing out any pending queues, and then returning. This
+ * call is serialized against other ldisc functions.
+ *
+ * We also use this method for a hangup event.
+ */
+
+static void slcan_close(struct tty_struct *tty)
+{
+ struct slcan *sl = (struct slcan *) tty->disc_data;
+
+ /* First make sure we're connected. */
+ if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
+ return;
+
+ tty->disc_data = NULL;
+ sl->tty = NULL;
+ if (!sl->leased)
+ sl->line = 0;
+
+ /* Flush network side */
+ unregister_netdev(sl->dev);
+	/* This will complete via slc_free_netdev */
+}
+
+static int slcan_hangup(struct tty_struct *tty)
+{
+ slcan_close(tty);
+ return 0;
+}
+
+/* Perform I/O control on an active SLCAN channel. */
+static int slcan_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct slcan *sl = (struct slcan *) tty->disc_data;
+ unsigned int tmp;
+
+ /* First make sure we're connected. */
+ if (!sl || sl->magic != SLCAN_MAGIC)
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGIFNAME:
+ tmp = strlen(sl->dev->name) + 1;
+ if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ return -EINVAL;
+
+ default:
+ return tty_mode_ioctl(tty, file, cmd, arg);
+ }
+}
+
+static struct tty_ldisc_ops slc_ldisc = {
+ .owner = THIS_MODULE,
+ .magic = TTY_LDISC_MAGIC,
+ .name = "slcan",
+ .open = slcan_open,
+ .close = slcan_close,
+ .hangup = slcan_hangup,
+ .ioctl = slcan_ioctl,
+ .receive_buf = slcan_receive_buf,
+ .write_wakeup = slcan_write_wakeup,
+};
+
+static int __init slcan_init(void)
+{
+ int status;
+
+ if (maxdev < 4)
+ maxdev = 4; /* Sanity */
+
+ printk(banner);
+ printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev);
+
+ slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL);
+ if (!slcan_devs) {
+ printk(KERN_ERR "slcan: can't allocate slcan device array!\n");
+ return -ENOMEM;
+ }
+
+ /* Fill in our line protocol discipline, and register it */
+ status = tty_register_ldisc(N_SLCAN, &slc_ldisc);
+ if (status) {
+ printk(KERN_ERR "slcan: can't register line discipline\n");
+ kfree(slcan_devs);
+ }
+ return status;
+}
+
+static void __exit slcan_exit(void)
+{
+ int i;
+ struct net_device *dev;
+ struct slcan *sl;
+ unsigned long timeout = jiffies + HZ;
+ int busy = 0;
+
+ if (slcan_devs == NULL)
+ return;
+
+	/* First of all: check for active disciplines and hang them up.
+	 */
+ do {
+ if (busy)
+ msleep_interruptible(100);
+
+ busy = 0;
+ for (i = 0; i < maxdev; i++) {
+ dev = slcan_devs[i];
+ if (!dev)
+ continue;
+ sl = netdev_priv(dev);
+ spin_lock_bh(&sl->lock);
+ if (sl->tty) {
+ busy++;
+ tty_hangup(sl->tty);
+ }
+ spin_unlock_bh(&sl->lock);
+ }
+ } while (busy && time_before(jiffies, timeout));
+
+ /* FIXME: hangup is async so we should wait when doing this second
+ phase */
+
+ for (i = 0; i < maxdev; i++) {
+ dev = slcan_devs[i];
+ if (!dev)
+ continue;
+ slcan_devs[i] = NULL;
+
+ sl = netdev_priv(dev);
+ if (sl->tty) {
+ printk(KERN_ERR "%s: tty discipline still running\n",
+ dev->name);
+ /* Intentionally leak the control block. */
+ dev->destructor = NULL;
+ }
+
+ unregister_netdev(dev);
+ }
+
+ kfree(slcan_devs);
+ slcan_devs = NULL;
+
+ i = tty_unregister_ldisc(N_SLCAN);
+ if (i)
+ printk(KERN_ERR "slcan: can't unregister ldisc (err %d)\n", i);
+}
+
+module_init(slcan_init);
+module_exit(slcan_exit);
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
new file mode 100644
index 000000000000..5de46a9a77bb
--- /dev/null
+++ b/drivers/net/can/softing/Kconfig
@@ -0,0 +1,30 @@
+config CAN_SOFTING
+	tristate "Softing GmbH CAN generic support"
+ depends on CAN_DEV && HAS_IOMEM
+ ---help---
+	  Support for CAN cards from Softing GmbH & some cards
+	  from Vector GmbH.
+	  Softing GmbH CAN cards come with 1 or 2 physical buses.
+ Those cards typically use Dual Port RAM to communicate
+ with the host CPU. The interface is then identical for PCI
+ and PCMCIA cards. This driver operates on a platform device,
+	  which has been created by the softing_cs or softing_pci driver.
+ Warning:
+ The API of the card does not allow fine control per bus, but
+	  controls the 2 buses on the card together.
+	  As such, some actions (start/stop/busoff recovery) on 1 bus
+	  must temporarily bring down the other bus as well.
+
+config CAN_SOFTING_CS
+	tristate "Softing GmbH CAN PCMCIA cards"
+ depends on PCMCIA
+ depends on CAN_SOFTING
+ ---help---
+	  Support for PCMCIA cards from Softing GmbH & some cards
+	  from Vector GmbH.
+ You need firmware for these, which you can get at
+ http://developer.berlios.de/projects/socketcan/
+ This version of the driver is written against
+	  firmware version 4.6 (softing-fw-4.6-binaries.tar.gz).
+	  In order to use the card as a CAN device, you need the Softing generic
+	  support too.
diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile
new file mode 100644
index 000000000000..c5e5016c742e
--- /dev/null
+++ b/drivers/net/can/softing/Makefile
@@ -0,0 +1,6 @@
+
+softing-y := softing_main.o softing_fw.o
+obj-$(CONFIG_CAN_SOFTING) += softing.o
+obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
new file mode 100644
index 000000000000..7ec9f4db3d52
--- /dev/null
+++ b/drivers/net/can/softing/softing.h
@@ -0,0 +1,167 @@
+/*
+ * softing common interfaces
+ *
+ * by Kurt Van Dijck, 2008-2010
+ */
+
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#include "softing_platform.h"
+
+struct softing;
+
+struct softing_priv {
+ struct can_priv can; /* must be the first member! */
+ struct net_device *netdev;
+ struct softing *card;
+ struct {
+ int pending;
+		/* variables which hold the circular buffer */
+ int echo_put;
+ int echo_get;
+ } tx;
+ struct can_bittiming_const btr_const;
+ int index;
+ uint8_t output;
+ uint16_t chip;
+};
+#define netdev2softing(netdev) ((struct softing_priv *)netdev_priv(netdev))
+
+struct softing {
+ const struct softing_platform_data *pdat;
+ struct platform_device *pdev;
+ struct net_device *net[2];
+ spinlock_t spin; /* protect this structure & DPRAM access */
+ ktime_t ts_ref;
+ ktime_t ts_overflow; /* timestamp overflow value, in ktime */
+
+ struct {
+ /* indication of firmware status */
+ int up;
+ /* protection of the 'up' variable */
+ struct mutex lock;
+ } fw;
+ struct {
+ int nr;
+ int requested;
+ int svc_count;
+ unsigned int dpram_position;
+ } irq;
+ struct {
+ int pending;
+ int last_bus;
+ /*
+ * keep the bus that last tx'd a message,
+ * in order to let every netdev queue resume
+ */
+ } tx;
+ __iomem uint8_t *dpram;
+ unsigned long dpram_phys;
+ unsigned long dpram_size;
+ struct {
+ uint16_t fw_version, hw_version, license, serial;
+ uint16_t chip[2];
+ unsigned int freq; /* remote cpu's operating frequency */
+ } id;
+};
+
+extern int softing_default_output(struct net_device *netdev);
+
+extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
+
+extern int softing_chip_poweron(struct softing *card);
+
+extern int softing_bootloader_command(struct softing *card, int16_t cmd,
+ const char *msg);
+
+/* Load firmware after reset */
+extern int softing_load_fw(const char *file, struct softing *card,
+ __iomem uint8_t *virt, unsigned int size, int offset);
+
+/* Load final application firmware after bootloader */
+extern int softing_load_app_fw(const char *file, struct softing *card);
+
+/*
+ * enable or disable irq
+ * only called with fw.lock locked
+ */
+extern int softing_enable_irq(struct softing *card, int enable);
+
+/* start/stop 1 bus on card */
+extern int softing_startstop(struct net_device *netdev, int up);
+
+/* netif_rx() */
+extern int softing_netdev_rx(struct net_device *netdev,
+ const struct can_frame *msg, ktime_t ktime);
+
+/* SOFTING DPRAM mappings */
+#define DPRAM_RX 0x0000
+ #define DPRAM_RX_SIZE 32
+ #define DPRAM_RX_CNT 16
+#define DPRAM_RX_RD 0x0201 /* uint8_t */
+#define DPRAM_RX_WR 0x0205 /* uint8_t */
+#define DPRAM_RX_LOST 0x0207 /* uint8_t */
+
+#define DPRAM_FCT_PARAM 0x0300 /* int16_t [20] */
+#define DPRAM_FCT_RESULT 0x0328 /* int16_t */
+#define DPRAM_FCT_HOST 0x032b /* uint16_t */
+
+#define DPRAM_INFO_BUSSTATE 0x0331 /* uint16_t */
+#define DPRAM_INFO_BUSSTATE2 0x0335 /* uint16_t */
+#define DPRAM_INFO_ERRSTATE 0x0339 /* uint16_t */
+#define DPRAM_INFO_ERRSTATE2 0x033d /* uint16_t */
+#define DPRAM_RESET 0x0341 /* uint16_t */
+#define DPRAM_CLR_RECV_FIFO 0x0345 /* uint16_t */
+#define DPRAM_RESET_TIME 0x034d /* uint16_t */
+#define DPRAM_TIME 0x0350 /* uint64_t */
+#define DPRAM_WR_START 0x0358 /* uint8_t */
+#define DPRAM_WR_END 0x0359 /* uint8_t */
+#define DPRAM_RESET_RX_FIFO 0x0361 /* uint16_t */
+#define DPRAM_RESET_TX_FIFO 0x0364 /* uint8_t */
+#define DPRAM_READ_FIFO_LEVEL 0x0365 /* uint8_t */
+#define DPRAM_RX_FIFO_LEVEL 0x0366 /* uint16_t */
+#define DPRAM_TX_FIFO_LEVEL 0x0366 /* uint16_t */
+
+#define DPRAM_TX 0x0400 /* uint16_t */
+ #define DPRAM_TX_SIZE 16
+ #define DPRAM_TX_CNT 32
+#define DPRAM_TX_RD 0x0601 /* uint8_t */
+#define DPRAM_TX_WR 0x0605 /* uint8_t */
+
+#define DPRAM_COMMAND 0x07e0 /* uint16_t */
+#define DPRAM_RECEIPT 0x07f0 /* uint16_t */
+#define DPRAM_IRQ_TOHOST 0x07fe /* uint8_t */
+#define DPRAM_IRQ_TOCARD 0x07ff /* uint8_t */
+
+#define DPRAM_V2_RESET 0x0e00 /* uint8_t */
+#define DPRAM_V2_IRQ_TOHOST 0x0e02 /* uint8_t */
+
+#define TXMAX (DPRAM_TX_CNT - 1)
+
+/* DPRAM return codes */
+#define RES_NONE 0
+#define RES_OK 1
+#define RES_NOK 2
+#define RES_UNKNOWN 3
+/* DPRAM flags */
+#define CMD_TX 0x01
+#define CMD_ACK 0x02
+#define CMD_XTD 0x04
+#define CMD_RTR 0x08
+#define CMD_ERR 0x10
+#define CMD_BUS2 0x80
+
+/* returned fifo entry bus state masks */
+#define SF_MASK_BUSOFF 0x80
+#define SF_MASK_EPASSIVE 0x60
+
+/* bus states */
+#define STATE_BUSOFF 2
+#define STATE_EPASSIVE 1
+#define STATE_EACTIVE 0
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
new file mode 100644
index 000000000000..c11bb4de8630
--- /dev/null
+++ b/drivers/net/can/softing/softing_cs.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include "softing_platform.h"
+
+static int softingcs_index;
+static spinlock_t softingcs_index_lock;
+
+static int softingcs_reset(struct platform_device *pdev, int v);
+static int softingcs_enable_irq(struct platform_device *pdev, int v);
+
+/*
+ * platform_data descriptions
+ */
+#define MHZ (1000*1000)
+static const struct softing_platform_data softingcs_platform_data[] = {
+{
+ .name = "CANcard",
+ .manf = 0x0168, .prod = 0x001,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "CANcard-NEC",
+ .manf = 0x0168, .prod = 0x002,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "CANcard-SJA",
+ .manf = 0x0168, .prod = 0x004,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "CANcard-2",
+ .manf = 0x0168, .prod = 0x005,
+ .generation = 2,
+ .nbus = 2,
+ .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x1000,
+ .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = NULL,
+}, {
+ .name = "Vector-CANcard",
+ .manf = 0x0168, .prod = 0x081,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "Vector-CANcard-SJA",
+ .manf = 0x0168, .prod = 0x084,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "Vector-CANcard-2",
+ .manf = 0x0168, .prod = 0x085,
+ .generation = 2,
+ .nbus = 2,
+ .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x1000,
+ .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = NULL,
+}, {
+ .name = "EDICcard-NEC",
+ .manf = 0x0168, .prod = 0x102,
+ .generation = 1,
+ .nbus = 2,
+ .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x0800,
+ .boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = softingcs_enable_irq,
+}, {
+ .name = "EDICcard-2",
+ .manf = 0x0168, .prod = 0x105,
+ .generation = 2,
+ .nbus = 2,
+ .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+ .dpram_size = 0x1000,
+ .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+ .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+ .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+ .reset = softingcs_reset,
+ .enable_irq = NULL,
+}, {
+ 0, 0,
+},
+};
+
+MODULE_FIRMWARE(fw_dir "bcard.bin");
+MODULE_FIRMWARE(fw_dir "ldcard.bin");
+MODULE_FIRMWARE(fw_dir "cancard.bin");
+MODULE_FIRMWARE(fw_dir "cansja.bin");
+
+MODULE_FIRMWARE(fw_dir "bcard2.bin");
+MODULE_FIRMWARE(fw_dir "ldcard2.bin");
+MODULE_FIRMWARE(fw_dir "cancrd2.bin");
+
+static __devinit const struct softing_platform_data
+*softingcs_find_platform_data(unsigned int manf, unsigned int prod)
+{
+ const struct softing_platform_data *lp;
+
+ for (lp = softingcs_platform_data; lp->manf; ++lp) {
+ if ((lp->manf == manf) && (lp->prod == prod))
+ return lp;
+ }
+ return NULL;
+}
+
+/*
+ * platform_data callbacks
+ */
+static int softingcs_reset(struct platform_device *pdev, int v)
+{
+ struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
+
+ dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20);
+ return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20);
+}
+
+static int softingcs_enable_irq(struct platform_device *pdev, int v)
+{
+ struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
+
+ dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0);
+ return pcmcia_write_config_byte(pcmcia, 0, v ? 0x60 : 0);
+}
+
+/*
+ * pcmcia check
+ */
+static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
+ void *priv_data)
+{
+ struct softing_platform_data *pdat = priv_data;
+ struct resource *pres;
+ int memspeed = 0;
+
+ WARN_ON(!pdat);
+ pres = pcmcia->resource[PCMCIA_IOMEM_0];
+ if (resource_size(pres) < 0x1000)
+ return -ERANGE;
+
+ pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
+ if (pdat->generation < 2) {
+ pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8;
+ memspeed = 3;
+ } else {
+ pres->flags |= WIN_DATA_WIDTH_16;
+ }
+ return pcmcia_request_window(pcmcia, pres, memspeed);
+}
+
+static __devexit void softingcs_remove(struct pcmcia_device *pcmcia)
+{
+ struct platform_device *pdev = pcmcia->priv;
+
+ /* free bits */
+ platform_device_unregister(pdev);
+ /* release pcmcia stuff */
+ pcmcia_disable_device(pcmcia);
+}
+
+/*
+ * platform_device wrapper
+ * pdev->resource has 2 entries: io & irq
+ */
+static void softingcs_pdev_release(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ kfree(pdev);
+}
+
+static __devinit int softingcs_probe(struct pcmcia_device *pcmcia)
+{
+ int ret;
+ struct platform_device *pdev;
+ const struct softing_platform_data *pdat;
+ struct resource *pres;
+ struct dev {
+ struct platform_device pdev;
+ struct resource res[2];
+ } *dev;
+
+ /* find matching platform_data */
+ pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id);
+ if (!pdat)
+ return -ENOTTY;
+
+ /* setup pcmcia device */
+ pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM |
+ CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
+ ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat);
+ if (ret)
+ goto pcmcia_failed;
+
+ ret = pcmcia_enable_device(pcmcia);
+ if (ret < 0)
+ goto pcmcia_failed;
+
+ pres = pcmcia->resource[PCMCIA_IOMEM_0];
+ if (!pres) {
+ ret = -EBADF;
+ goto pcmcia_bad;
+ }
+
+ /* create softing platform device */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto mem_failed;
+ }
+ dev->pdev.resource = dev->res;
+ dev->pdev.num_resources = ARRAY_SIZE(dev->res);
+ dev->pdev.dev.release = softingcs_pdev_release;
+
+ pdev = &dev->pdev;
+ pdev->dev.platform_data = (void *)pdat;
+ pdev->dev.parent = &pcmcia->dev;
+ pcmcia->priv = pdev;
+
+ /* platform device resources */
+ pdev->resource[0].flags = IORESOURCE_MEM;
+ pdev->resource[0].start = pres->start;
+ pdev->resource[0].end = pres->end;
+
+ pdev->resource[1].flags = IORESOURCE_IRQ;
+ pdev->resource[1].start = pcmcia->irq;
+ pdev->resource[1].end = pdev->resource[1].start;
+
+ /* platform device setup */
+ spin_lock(&softingcs_index_lock);
+ pdev->id = softingcs_index++;
+ spin_unlock(&softingcs_index_lock);
+ pdev->name = "softing";
+ dev_set_name(&pdev->dev, "softingcs.%i", pdev->id);
+ ret = platform_device_register(pdev);
+ if (ret < 0)
+ goto platform_failed;
+
+ dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev));
+ return 0;
+
+platform_failed:
+ kfree(dev);
+mem_failed:
+pcmcia_bad:
+pcmcia_failed:
+ pcmcia_disable_device(pcmcia);
+ pcmcia->priv = NULL;
+ return ret ?: -ENODEV;
+}
+
+static /*const*/ struct pcmcia_device_id softingcs_ids[] = {
+ /* softing */
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005),
+ /* vector, manufacturer? */
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085),
+ /* EDIC */
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102),
+ PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105),
+ PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, softingcs_ids);
+
+static struct pcmcia_driver softingcs_driver = {
+ .owner = THIS_MODULE,
+ .name = "softingcs",
+ .id_table = softingcs_ids,
+ .probe = softingcs_probe,
+ .remove = __devexit_p(softingcs_remove),
+};
+
+static int __init softingcs_start(void)
+{
+ spin_lock_init(&softingcs_index_lock);
+ return pcmcia_register_driver(&softingcs_driver);
+}
+
+static void __exit softingcs_stop(void)
+{
+ pcmcia_unregister_driver(&softingcs_driver);
+}
+
+module_init(softingcs_start);
+module_exit(softingcs_stop);
+
+MODULE_DESCRIPTION("softing CANcard driver"
+ ", links PCMCIA card to softing driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
new file mode 100644
index 000000000000..b520784fb197
--- /dev/null
+++ b/drivers/net/can/softing/softing_fw.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <asm/div64.h>
+
+#include "softing.h"
+
+/*
+ * low level DPRAM command.
+ * Make sure that card->dpram[DPRAM_FCT_HOST] is preset
+ */
+static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector,
+ const char *msg)
+{
+ int ret;
+ unsigned long stamp;
+
+ iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]);
+ iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]);
+ iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]);
+ /* be sure to flush this to the card */
+ wmb();
+ stamp = jiffies + 1 * HZ;
+ /* wait for card */
+ do {
+ /* DPRAM_FCT_HOST is _not_ aligned */
+ ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) +
+ (ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8);
+ /* don't have any cached variables */
+ rmb();
+ if (ret == RES_OK)
+ /* read return-value now */
+ return ioread16(&card->dpram[DPRAM_FCT_RESULT]);
+
+ if ((ret != vector) || time_after(jiffies, stamp))
+ break;
+ /* process context => relax */
+ usleep_range(500, 10000);
+ } while (1);
+
+ ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
+ dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret);
+ return ret;
+}
+
+static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg)
+{
+ int ret;
+
+ ret = _softing_fct_cmd(card, cmd, 0, msg);
+ if (ret > 0) {
+ dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret);
+ ret = -EIO;
+ }
+ return ret;
+}
+
+int softing_bootloader_command(struct softing *card, int16_t cmd,
+ const char *msg)
+{
+ int ret;
+ unsigned long stamp;
+
+ iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]);
+ iowrite16(cmd, &card->dpram[DPRAM_COMMAND]);
+ /* be sure to flush this to the card */
+ wmb();
+ stamp = jiffies + 3 * HZ;
+ /* wait for card */
+ do {
+ ret = ioread16(&card->dpram[DPRAM_RECEIPT]);
+ /* don't have any cached variables */
+ rmb();
+ if (ret == RES_OK)
+ return 0;
+ if (time_after(jiffies, stamp))
+ break;
+ /* process context => relax */
+ usleep_range(500, 10000);
+ } while (!signal_pending(current));
+
+ ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
+ dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret);
+ return ret;
+}
+
+static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr,
+ uint16_t *plen, const uint8_t **pdat)
+{
+ uint16_t checksum[2];
+ const uint8_t *mem;
+ const uint8_t *end;
+
+ /*
+ * firmware records are a binary, unaligned stream composed of:
+ * uint16_t type;
+ * uint32_t addr;
+ * uint16_t len;
+ * uint8_t dat[len];
+ * uint16_t checksum;
+ * all values in little endian.
+ * We could define a struct for this, with __attribute__((packed)),
+ * but would that solve the alignment in _all_ cases (cfr. the
+ * struct itself may be an odd address)?
+ *
+ * I chose to use leXX_to_cpup() since this solves both
+ * endianness & alignment.
+ */
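+ /*
+ * Illustrative sketch only (not used by the driver): as a packed
+ * struct, one record would look roughly like
+ * struct fw_record {
+ * __le16 type;
+ * __le32 addr;
+ * __le16 len;
+ * uint8_t dat[];
+ * } __packed;
+ * followed by a __le16 checksum: the low 16 bits of the byte-wise
+ * sum over everything from 'type' up to the last data byte.
+ */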
+ mem = *pmem;
+ *ptype = le16_to_cpup((void *)&mem[0]);
+ *paddr = le32_to_cpup((void *)&mem[2]);
+ *plen = le16_to_cpup((void *)&mem[6]);
+ *pdat = &mem[8];
+ /* verify checksum */
+ end = &mem[8 + *plen];
+ checksum[0] = le16_to_cpup((void *)end);
+ for (checksum[1] = 0; mem < end; ++mem)
+ checksum[1] += *mem;
+ if (checksum[0] != checksum[1])
+ return -EINVAL;
+ /* increment */
+ *pmem += 10 + *plen;
+ return 0;
+}
+
+int softing_load_fw(const char *file, struct softing *card,
+ __iomem uint8_t *dpram, unsigned int size, int offset)
+{
+ const struct firmware *fw;
+ int ret;
+ const uint8_t *mem, *end, *dat;
+ uint16_t type, len;
+ uint32_t addr;
+ uint8_t *buf = NULL;
+ int buflen = 0;
+ int8_t type_end = 0;
+
+ ret = request_firmware(&fw, file, &card->pdev->dev);
+ if (ret < 0)
+ return ret;
+ dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes"
+ ", offset %c0x%04x\n",
+ card->pdat->name, file, (unsigned int)fw->size,
+ (offset >= 0) ? '+' : '-', (unsigned int)abs(offset));
+ /* parse the firmware */
+ mem = fw->data;
+ end = &mem[fw->size];
+ /* look for header record */
+ ret = fw_parse(&mem, &type, &addr, &len, &dat);
+ if (ret < 0)
+ goto failed;
+ if (type != 0xffff)
+ goto failed;
+ if (strncmp("Structured Binary Format, Softing GmbH" , dat, len)) {
+ ret = -EINVAL;
+ goto failed;
+ }
+ /* ok, we had a header */
+ while (mem < end) {
+ ret = fw_parse(&mem, &type, &addr, &len, &dat);
+ if (ret < 0)
+ goto failed;
+ if (type == 3) {
+ /* start address, not used here */
+ continue;
+ } else if (type == 1) {
+ /* eof */
+ type_end = 1;
+ break;
+ } else if (type != 0) {
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if ((addr + len + offset) > size)
+ goto failed;
+ memcpy_toio(&dpram[addr + offset], dat, len);
+ /* be sure to flush caches from IO space */
+ mb();
+ if (len > buflen) {
+ /* align buflen */
+ buflen = (len + (1024-1)) & ~(1024-1);
+ buf = krealloc(buf, buflen, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+ }
+ /* verify record data */
+ memcpy_fromio(buf, &dpram[addr + offset], len);
+ if (memcmp(buf, dat, len)) {
+ /* is not ok */
+ dev_alert(&card->pdev->dev, "DPRAM readback failed\n");
+ ret = -EIO;
+ goto failed;
+ }
+ }
+ if (!type_end)
+ /* no end record seen */
+ goto failed;
+ ret = 0;
+failed:
+ kfree(buf);
+ release_firmware(fw);
+ if (ret < 0)
+ dev_info(&card->pdev->dev, "firmware %s failed\n", file);
+ return ret;
+}
+
+int softing_load_app_fw(const char *file, struct softing *card)
+{
+ const struct firmware *fw;
+ const uint8_t *mem, *end, *dat;
+ int ret, j;
+ uint16_t type, len;
+ uint32_t addr, start_addr = 0;
+ unsigned int sum, rx_sum;
+ int8_t type_end = 0, type_entrypoint = 0;
+
+ ret = request_firmware(&fw, file, &card->pdev->dev);
+ if (ret) {
+ dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n",
+ file, ret);
+ return ret;
+ }
+ dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n",
+ file, (unsigned long)fw->size);
+ /* parse the firmware */
+ mem = fw->data;
+ end = &mem[fw->size];
+ /* look for header record */
+ ret = fw_parse(&mem, &type, &addr, &len, &dat);
+ if (ret)
+ goto failed;
+ ret = -EINVAL;
+ if (type != 0xffff) {
+ dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n",
+ type);
+ goto failed;
+ }
+ if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
+ dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n",
+ len, dat);
+ goto failed;
+ }
+ /* ok, we had a header */
+ while (mem < end) {
+ ret = fw_parse(&mem, &type, &addr, &len, &dat);
+ if (ret)
+ goto failed;
+
+ if (type == 3) {
+ /* start address */
+ start_addr = addr;
+ type_entrypoint = 1;
+ continue;
+ } else if (type == 1) {
+ /* eof */
+ type_end = 1;
+ break;
+ } else if (type != 0) {
+ dev_alert(&card->pdev->dev,
+ "unknown record type 0x%04x\n", type);
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ /* regular data */
+ for (sum = 0, j = 0; j < len; ++j)
+ sum += dat[j];
+ /* work in 16bit (target) */
+ sum &= 0xffff;
+
+ memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len);
+ iowrite32(card->pdat->app.offs + card->pdat->app.addr,
+ &card->dpram[DPRAM_COMMAND + 2]);
+ iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]);
+ iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]);
+ iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]);
+ ret = softing_bootloader_command(card, 1, "loading app.");
+ if (ret < 0)
+ goto failed;
+ /* verify checksum */
+ rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]);
+ if (rx_sum != sum) {
+ dev_alert(&card->pdev->dev, "SRAM seems to be damaged"
+ ", wanted 0x%04x, got 0x%04x\n", sum, rx_sum);
+ ret = -EIO;
+ goto failed;
+ }
+ }
+ if (!type_end || !type_entrypoint)
+ goto failed;
+ /* start application in card */
+ iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]);
+ iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]);
+ ret = softing_bootloader_command(card, 3, "start app.");
+ if (ret < 0)
+ goto failed;
+ ret = 0;
+failed:
+ release_firmware(fw);
+ if (ret < 0)
+ dev_info(&card->pdev->dev, "firmware %s failed\n", file);
+ return ret;
+}
+
+static int softing_reset_chip(struct softing *card)
+{
+ int ret;
+
+ do {
+ /* reset chip */
+ iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]);
+ iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]);
+ iowrite8(1, &card->dpram[DPRAM_RESET]);
+ iowrite8(0, &card->dpram[DPRAM_RESET+1]);
+
+ ret = softing_fct_cmd(card, 0, "reset_can");
+ if (!ret)
+ break;
+ if (signal_pending(current))
+ /* don't wait any longer */
+ break;
+ } while (1);
+ card->tx.pending = 0;
+ return ret;
+}
+
+int softing_chip_poweron(struct softing *card)
+{
+ int ret;
+ /* sync */
+ ret = _softing_fct_cmd(card, 99, 0x55, "sync-a");
+ if (ret < 0)
+ goto failed;
+
+ ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b");
+ if (ret < 0)
+ goto failed;
+
+ ret = softing_reset_chip(card);
+ if (ret < 0)
+ goto failed;
+ /* get_serial */
+ ret = softing_fct_cmd(card, 43, "get_serial_number");
+ if (ret < 0)
+ goto failed;
+ card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]);
+ /* get_version */
+ ret = softing_fct_cmd(card, 12, "get_version");
+ if (ret < 0)
+ goto failed;
+ card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]);
+ card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]);
+ card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]);
+ card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]);
+ card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]);
+ return 0;
+failed:
+ return ret;
+}
+
+static void softing_initialize_timestamp(struct softing *card)
+{
+ uint64_t ovf;
+
+ card->ts_ref = ktime_get();
+
+ /* 16MHz is the reference */
+ ovf = 0x100000000ULL * 16;
+ do_div(ovf, card->pdat->freq ?: 16);
+
+ card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf);
+}
+
+ktime_t softing_raw2ktime(struct softing *card, u32 raw)
+{
+ uint64_t rawl;
+ ktime_t now, real_offset;
+ ktime_t target;
+ ktime_t tmp;
+
+ now = ktime_get();
+ real_offset = ktime_sub(ktime_get_real(), now);
+
+ /* find nsec from card */
+ rawl = raw * 16;
+ do_div(rawl, card->pdat->freq ?: 16);
+ target = ktime_add_us(card->ts_ref, rawl);
+ /* test for overflows */
+ tmp = ktime_add(target, card->ts_overflow);
+ while (unlikely(ktime_to_ns(tmp) > ktime_to_ns(now))) {
+ card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow);
+ target = tmp;
+ tmp = ktime_add(target, card->ts_overflow);
+ }
+ return ktime_add(target, real_offset);
+}
+
+static inline int softing_error_reporting(struct net_device *netdev)
+{
+ struct softing_priv *priv = netdev_priv(netdev);
+
+ return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ ? 1 : 0;
+}
+
+int softing_startstop(struct net_device *dev, int up)
+{
+ int ret;
+ struct softing *card;
+ struct softing_priv *priv;
+ struct net_device *netdev;
+ int bus_bitmask_start;
+ int j, error_reporting;
+ struct can_frame msg;
+ const struct can_bittiming *bt;
+
+ priv = netdev_priv(dev);
+ card = priv->card;
+
+ if (!card->fw.up)
+ return -EIO;
+
+ ret = mutex_lock_interruptible(&card->fw.lock);
+ if (ret)
+ return ret;
+
+ bus_bitmask_start = 0;
+ if (dev && up)
+ /* prepare to start this bus as well */
+ bus_bitmask_start |= (1 << priv->index);
+ /* bring netdevs down */
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ netdev = card->net[j];
+ if (!netdev)
+ continue;
+ priv = netdev_priv(netdev);
+
+ if (dev != netdev)
+ netif_stop_queue(netdev);
+
+ if (netif_running(netdev)) {
+ if (dev != netdev)
+ bus_bitmask_start |= (1 << j);
+ priv->tx.pending = 0;
+ priv->tx.echo_put = 0;
+ priv->tx.echo_get = 0;
+ /*
+ * this bus may just have called open_candev(),
+ * which makes calling close_candev() here look redundant,
+ * but we may also get here from busoff recovery,
+ * in which case the echo_skb _needs_ flushing too;
+ * just be sure to call open_candev() again afterwards
+ */
+ close_candev(netdev);
+ }
+ priv->can.state = CAN_STATE_STOPPED;
+ }
+ card->tx.pending = 0;
+
+ softing_enable_irq(card, 0);
+ ret = softing_reset_chip(card);
+ if (ret)
+ goto failed;
+ if (!bus_bitmask_start)
+ /* no busses to be brought up */
+ goto card_done;
+
+ if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
+ && (softing_error_reporting(card->net[0])
+ != softing_error_reporting(card->net[1]))) {
+ dev_alert(&card->pdev->dev,
+ "err_reporting flag differs for busses\n");
+ goto invalid;
+ }
+ error_reporting = 0;
+ if (bus_bitmask_start & 1) {
+ netdev = card->net[0];
+ priv = netdev_priv(netdev);
+ error_reporting += softing_error_reporting(netdev);
+ /* init chip 1 */
+ bt = &priv->can.bittiming;
+ iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ iowrite16(bt->phase_seg1 + bt->prop_seg,
+ &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
+ &card->dpram[DPRAM_FCT_PARAM + 10]);
+ ret = softing_fct_cmd(card, 1, "initialize_chip[0]");
+ if (ret < 0)
+ goto failed;
+ /* set mode */
+ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ ret = softing_fct_cmd(card, 3, "set_mode[0]");
+ if (ret < 0)
+ goto failed;
+ /* set filter */
+ /* 11bit id & mask */
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ /* 29bit id.lo & mask.lo & id.hi & mask.hi */
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
+ iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
+ ret = softing_fct_cmd(card, 7, "set_filter[0]");
+ if (ret < 0)
+ goto failed;
+ /* set output control */
+ iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ ret = softing_fct_cmd(card, 5, "set_output[0]");
+ if (ret < 0)
+ goto failed;
+ }
+ if (bus_bitmask_start & 2) {
+ netdev = card->net[1];
+ priv = netdev_priv(netdev);
+ error_reporting += softing_error_reporting(netdev);
+ /* init chip2 */
+ bt = &priv->can.bittiming;
+ iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ iowrite16(bt->phase_seg1 + bt->prop_seg,
+ &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
+ &card->dpram[DPRAM_FCT_PARAM + 10]);
+ ret = softing_fct_cmd(card, 2, "initialize_chip[1]");
+ if (ret < 0)
+ goto failed;
+ /* set mode2 */
+ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ ret = softing_fct_cmd(card, 4, "set_mode[1]");
+ if (ret < 0)
+ goto failed;
+ /* set filter2 */
+ /* 11bit id & mask */
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ /* 29bit id.lo & mask.lo & id.hi & mask.hi */
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
+ iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
+ ret = softing_fct_cmd(card, 8, "set_filter[1]");
+ if (ret < 0)
+ goto failed;
+ /* set output control2 */
+ iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ ret = softing_fct_cmd(card, 6, "set_output[1]");
+ if (ret < 0)
+ goto failed;
+ }
+ /* enable_error_frame */
+ /*
+ * Error reporting is switched off at the moment since
+ * receiving error frames is not yet 100% verified.
+ * This should be enabled sooner or later.
+ *
+ if (error_reporting) {
+ ret = softing_fct_cmd(card, 51, "enable_error_frame");
+ if (ret < 0)
+ goto failed;
+ }
+ */
+ /* initialize interface */
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]);
+ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]);
+ ret = softing_fct_cmd(card, 17, "initialize_interface");
+ if (ret < 0)
+ goto failed;
+ /* enable_fifo */
+ ret = softing_fct_cmd(card, 36, "enable_fifo");
+ if (ret < 0)
+ goto failed;
+ /* enable fifo tx ack */
+ ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]");
+ if (ret < 0)
+ goto failed;
+ /* enable fifo tx ack2 */
+ ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]");
+ if (ret < 0)
+ goto failed;
+ /* start_chip */
+ ret = softing_fct_cmd(card, 11, "start_chip");
+ if (ret < 0)
+ goto failed;
+ iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]);
+ iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]);
+ if (card->pdat->generation < 2) {
+ iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
+ /* flush the DPRAM caches */
+ wmb();
+ }
+
+ softing_initialize_timestamp(card);
+
+ /*
+ * do socketcan notifications/status changes
+ * from here on, no errors should occur, or the failed: path
+ * must be reviewed
+ */
+ memset(&msg, 0, sizeof(msg));
+ msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
+ msg.can_dlc = CAN_ERR_DLC;
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (!(bus_bitmask_start & (1 << j)))
+ continue;
+ netdev = card->net[j];
+ if (!netdev)
+ continue;
+ priv = netdev_priv(netdev);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ open_candev(netdev);
+ if (dev != netdev) {
+ /* notify other busses on the restart */
+ softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+ ++priv->can.can_stats.restarts;
+ }
+ netif_wake_queue(netdev);
+ }
+
+ /* enable interrupts */
+ ret = softing_enable_irq(card, 1);
+ if (ret)
+ goto failed;
+card_done:
+ mutex_unlock(&card->fw.lock);
+ return 0;
+invalid:
+ ret = -EINVAL;
+failed:
+ softing_enable_irq(card, 0);
+ softing_reset_chip(card);
+ mutex_unlock(&card->fw.lock);
+ /* bring all other interfaces down */
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ netdev = card->net[j];
+ if (!netdev)
+ continue;
+ dev_close(netdev);
+ }
+ return ret;
+}
+
+int softing_default_output(struct net_device *netdev)
+{
+ struct softing_priv *priv = netdev_priv(netdev);
+ struct softing *card = priv->card;
+
+ switch (priv->chip) {
+ case 1000:
+ return (card->pdat->generation < 2) ? 0xfb : 0xfa;
+ case 5:
+ return 0x60;
+ default:
+ return 0x40;
+ }
+}
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
new file mode 100644
index 000000000000..aeea9f9ff6e8
--- /dev/null
+++ b/drivers/net/can/softing/softing_main.c
@@ -0,0 +1,894 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include "softing.h"
+
+#define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1)
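+
+/*
+ * Worked numbers, based on the DPRAM layout in softing.h: DPRAM_TX_CNT
+ * is 32, so TXMAX is 31 and TX_ECHO_SKB_MAX is 15; presumably each of
+ * the two busses may then keep roughly half of the card's tx fifo
+ * filled with not-yet-acknowledged (echo) frames.
+ */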
+
+/*
+ * test whether a specific CAN netdev
+ * is online (i.e. up and running, not sleeping, not bus-off)
+ */
+static inline int canif_is_active(struct net_device *netdev)
+{
+ struct can_priv *can = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return 0;
+ return (can->state <= CAN_STATE_ERROR_PASSIVE);
+}
+
+/* reset DPRAM */
+static inline void softing_set_reset_dpram(struct softing *card)
+{
+ if (card->pdat->generation >= 2) {
+ spin_lock_bh(&card->spin);
+ iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1,
+ &card->dpram[DPRAM_V2_RESET]);
+ spin_unlock_bh(&card->spin);
+ }
+}
+
+static inline void softing_clr_reset_dpram(struct softing *card)
+{
+ if (card->pdat->generation >= 2) {
+ spin_lock_bh(&card->spin);
+ iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1,
+ &card->dpram[DPRAM_V2_RESET]);
+ spin_unlock_bh(&card->spin);
+ }
+}
+
+/* trigger the tx queue-ing */
+static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct softing_priv *priv = netdev_priv(dev);
+ struct softing *card = priv->card;
+ int ret;
+ uint8_t *ptr;
+ uint8_t fifo_wr, fifo_rd;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ uint8_t buf[DPRAM_TX_SIZE];
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ spin_lock(&card->spin);
+
+ ret = NETDEV_TX_BUSY;
+ if (!card->fw.up ||
+ (card->tx.pending >= TXMAX) ||
+ (priv->tx.pending >= TX_ECHO_SKB_MAX))
+ goto xmit_done;
+ fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]);
+ fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]);
+ if (fifo_wr == fifo_rd)
+ /* fifo full */
+ goto xmit_done;
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+ *ptr = CMD_TX;
+ if (cf->can_id & CAN_RTR_FLAG)
+ *ptr |= CMD_RTR;
+ if (cf->can_id & CAN_EFF_FLAG)
+ *ptr |= CMD_XTD;
+ if (priv->index)
+ *ptr |= CMD_BUS2;
+ ++ptr;
+ *ptr++ = cf->can_dlc;
+ *ptr++ = (cf->can_id >> 0);
+ *ptr++ = (cf->can_id >> 8);
+ if (cf->can_id & CAN_EFF_FLAG) {
+ *ptr++ = (cf->can_id >> 16);
+ *ptr++ = (cf->can_id >> 24);
+ } else {
+ /* increment 1, not 2 as you might think */
+ ptr += 1;
+ }
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ memcpy(ptr, &cf->data[0], cf->can_dlc);
+ memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr],
+ buf, DPRAM_TX_SIZE);
+ if (++fifo_wr >= DPRAM_TX_CNT)
+ fifo_wr = 0;
+ iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]);
+ card->tx.last_bus = priv->index;
+ ++card->tx.pending;
+ ++priv->tx.pending;
+ can_put_echo_skb(skb, dev, priv->tx.echo_put);
+ ++priv->tx.echo_put;
+ if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
+ priv->tx.echo_put = 0;
+ /* can_put_echo_skb() saves the skb, safe to return TX_OK */
+ ret = NETDEV_TX_OK;
+xmit_done:
+ spin_unlock(&card->spin);
+ if (card->tx.pending >= TXMAX) {
+ int j;
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (card->net[j])
+ netif_stop_queue(card->net[j]);
+ }
+ }
+ if (ret != NETDEV_TX_OK)
+ netif_stop_queue(dev);
+
+ return ret;
+}
+
+/*
+ * shortcut for skb delivery
+ */
+int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
+ ktime_t ktime)
+{
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ skb = alloc_can_skb(netdev, &cf);
+ if (!skb)
+ return -ENOMEM;
+ memcpy(cf, msg, sizeof(*msg));
+ skb->tstamp = ktime;
+ return netif_rx(skb);
+}
+
+/*
+ * softing_handle_1
+ * pop 1 entry from the DPRAM queue and process it
+ */
+static int softing_handle_1(struct softing *card)
+{
+ struct net_device *netdev;
+ struct softing_priv *priv;
+ ktime_t ktime;
+ struct can_frame msg;
+ int cnt = 0, lost_msg;
+ uint8_t fifo_rd, fifo_wr, cmd;
+ uint8_t *ptr;
+ uint32_t tmp_u32;
+ uint8_t buf[DPRAM_RX_SIZE];
+
+ memset(&msg, 0, sizeof(msg));
+ /* test for lost msgs */
+ lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]);
+ if (lost_msg) {
+ int j;
+ /* reset condition */
+ iowrite8(0, &card->dpram[DPRAM_RX_LOST]);
+ /* prepare msg */
+ msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+ msg.can_dlc = CAN_ERR_DLC;
+ msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ /*
+ * service all busses; we don't know which one it applied to,
+ * but only service busses that are online
+ */
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ netdev = card->net[j];
+ if (!netdev)
+ continue;
+ if (!canif_is_active(netdev))
+ /* a dead bus has no overflows */
+ continue;
+ ++netdev->stats.rx_over_errors;
+ softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+ }
+ /* prepare for other use */
+ memset(&msg, 0, sizeof(msg));
+ ++cnt;
+ }
+
+ fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]);
+ fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]);
+
+ if (++fifo_rd >= DPRAM_RX_CNT)
+ fifo_rd = 0;
+ if (fifo_wr == fifo_rd)
+ return cnt;
+
+ memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd],
+ DPRAM_RX_SIZE);
+ mb();
+ /* trigger dual port RAM */
+ iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]);
+
+ ptr = buf;
+ cmd = *ptr++;
+ if (cmd == 0xff)
+ /* not quite useful, probably the card has gone away */
+ return 0;
+ netdev = card->net[0];
+ if (cmd & CMD_BUS2)
+ netdev = card->net[1];
+ priv = netdev_priv(netdev);
+
+ if (cmd & CMD_ERR) {
+ uint8_t can_state, state;
+
+ state = *ptr++;
+
+ msg.can_id = CAN_ERR_FLAG;
+ msg.can_dlc = CAN_ERR_DLC;
+
+ if (state & SF_MASK_BUSOFF) {
+ can_state = CAN_STATE_BUS_OFF;
+ msg.can_id |= CAN_ERR_BUSOFF;
+ state = STATE_BUSOFF;
+ } else if (state & SF_MASK_EPASSIVE) {
+ can_state = CAN_STATE_ERROR_PASSIVE;
+ msg.can_id |= CAN_ERR_CRTL;
+ msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE;
+ state = STATE_EPASSIVE;
+ } else {
+ can_state = CAN_STATE_ERROR_ACTIVE;
+ msg.can_id |= CAN_ERR_CRTL;
+ state = STATE_EACTIVE;
+ }
+ /* update DPRAM */
+ iowrite8(state, &card->dpram[priv->index ?
+ DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]);
+ /* timestamp */
+ tmp_u32 = le32_to_cpup((void *)ptr);
+ ptr += 4;
+ ktime = softing_raw2ktime(card, tmp_u32);
+
+ ++netdev->stats.rx_errors;
+ /* update internal status */
+ if (can_state != priv->can.state) {
+ priv->can.state = can_state;
+ if (can_state == CAN_STATE_ERROR_PASSIVE)
+ ++priv->can.can_stats.error_passive;
+ else if (can_state == CAN_STATE_BUS_OFF) {
+ /* this calls can_close_cleanup() */
+ can_bus_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ /* trigger socketcan */
+ softing_netdev_rx(netdev, &msg, ktime);
+ }
+
+ } else {
+ if (cmd & CMD_RTR)
+ msg.can_id |= CAN_RTR_FLAG;
+ msg.can_dlc = get_can_dlc(*ptr++);
+ if (cmd & CMD_XTD) {
+ msg.can_id |= CAN_EFF_FLAG;
+ msg.can_id |= le32_to_cpup((void *)ptr);
+ ptr += 4;
+ } else {
+ msg.can_id |= le16_to_cpup((void *)ptr);
+ ptr += 2;
+ }
+ /* timestamp */
+ tmp_u32 = le32_to_cpup((void *)ptr);
+ ptr += 4;
+ ktime = softing_raw2ktime(card, tmp_u32);
+ if (!(msg.can_id & CAN_RTR_FLAG))
+ memcpy(&msg.data[0], ptr, 8);
+ ptr += 8;
+ /* update socket */
+ if (cmd & CMD_ACK) {
+ /* acknowledge, was tx msg */
+ struct sk_buff *skb;
+ skb = priv->can.echo_skb[priv->tx.echo_get];
+ if (skb)
+ skb->tstamp = ktime;
+ can_get_echo_skb(netdev, priv->tx.echo_get);
+ ++priv->tx.echo_get;
+ if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
+ priv->tx.echo_get = 0;
+ if (priv->tx.pending)
+ --priv->tx.pending;
+ if (card->tx.pending)
+ --card->tx.pending;
+ ++netdev->stats.tx_packets;
+ if (!(msg.can_id & CAN_RTR_FLAG))
+ netdev->stats.tx_bytes += msg.can_dlc;
+ } else {
+ int ret;
+
+ ret = softing_netdev_rx(netdev, &msg, ktime);
+ if (ret == NET_RX_SUCCESS) {
+ ++netdev->stats.rx_packets;
+ if (!(msg.can_id & CAN_RTR_FLAG))
+ netdev->stats.rx_bytes += msg.can_dlc;
+ } else {
+ ++netdev->stats.rx_dropped;
+ }
+ }
+ }
+ ++cnt;
+ return cnt;
+}
+
+/*
+ * real interrupt handler
+ */
+static irqreturn_t softing_irq_thread(int irq, void *dev_id)
+{
+ struct softing *card = (struct softing *)dev_id;
+ struct net_device *netdev;
+ struct softing_priv *priv;
+ int j, offset, work_done;
+
+ work_done = 0;
+ spin_lock_bh(&card->spin);
+ while (softing_handle_1(card) > 0) {
+ ++card->irq.svc_count;
+ ++work_done;
+ }
+ spin_unlock_bh(&card->spin);
+ /* resume tx queues */
+ offset = card->tx.last_bus;
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (card->tx.pending >= TXMAX)
+ break;
+ netdev = card->net[(j + offset + 1) % card->pdat->nbus];
+ if (!netdev)
+ continue;
+ priv = netdev_priv(netdev);
+ if (!canif_is_active(netdev))
+ /* it makes no sense to wake dead busses */
+ continue;
+ if (priv->tx.pending >= TX_ECHO_SKB_MAX)
+ continue;
+ ++work_done;
+ netif_wake_queue(netdev);
+ }
+ return work_done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * interrupt routines:
+ * schedule the 'real interrupt handler'
+ */
+static irqreturn_t softing_irq_v2(int irq, void *dev_id)
+{
+ struct softing *card = (struct softing *)dev_id;
+ uint8_t ir;
+
+ ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]);
+ iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
+ return (1 == ir) ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+static irqreturn_t softing_irq_v1(int irq, void *dev_id)
+{
+ struct softing *card = (struct softing *)dev_id;
+ uint8_t ir;
+
+ ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]);
+ iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]);
+ return ir ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+/*
+ * netdev/candev inter-operability
+ */
+static int softing_netdev_open(struct net_device *ndev)
+{
+ int ret;
+
+ /* check or determine and set bittime */
+ ret = open_candev(ndev);
+ if (!ret)
+ ret = softing_startstop(ndev, 1);
+ return ret;
+}
+
+static int softing_netdev_stop(struct net_device *ndev)
+{
+ int ret;
+
+ netif_stop_queue(ndev);
+
+ /* the softing start/stop cycle does close_candev() */
+ ret = softing_startstop(ndev, 0);
+ return ret;
+}
+
+static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ /* softing_startstop does close_candev() */
+ ret = softing_startstop(ndev, 1);
+ return ret;
+ case CAN_MODE_STOP:
+ case CAN_MODE_SLEEP:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
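+
+/*
+ * Note: CAN_MODE_START is what the socketcan core requests on a bus-off
+ * restart (manual restart is typically triggered from userspace with
+ * "ip link set canX type can restart"); stop/sleep are not supported.
+ */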
+
+/*
+ * Softing device management helpers
+ */
+int softing_enable_irq(struct softing *card, int enable)
+{
+ int ret;
+
+ if (!card->irq.nr) {
+ return 0;
+ } else if (card->irq.requested && !enable) {
+ free_irq(card->irq.nr, card);
+ card->irq.requested = 0;
+ } else if (!card->irq.requested && enable) {
+ ret = request_threaded_irq(card->irq.nr,
+ (card->pdat->generation >= 2) ?
+ softing_irq_v2 : softing_irq_v1,
+ softing_irq_thread, IRQF_SHARED,
+ dev_name(&card->pdev->dev), card);
+ if (ret) {
+ dev_alert(&card->pdev->dev,
+ "request_threaded_irq(%u) failed\n",
+ card->irq.nr);
+ return ret;
+ }
+ card->irq.requested = 1;
+ }
+ return 0;
+}
+
+static void softing_card_shutdown(struct softing *card)
+{
+ int fw_up = 0;
+
+ if (mutex_lock_interruptible(&card->fw.lock))
+ /* return -ERESTARTSYS */;
+ fw_up = card->fw.up;
+ card->fw.up = 0;
+
+ if (card->irq.requested && card->irq.nr) {
+ free_irq(card->irq.nr, card);
+ card->irq.requested = 0;
+ }
+ if (fw_up) {
+ if (card->pdat->enable_irq)
+ card->pdat->enable_irq(card->pdev, 0);
+ softing_set_reset_dpram(card);
+ if (card->pdat->reset)
+ card->pdat->reset(card->pdev, 1);
+ }
+ mutex_unlock(&card->fw.lock);
+}
+
+static __devinit int softing_card_boot(struct softing *card)
+{
+ int ret, j;
+ static const uint8_t stream[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, };
+ unsigned char back[sizeof(stream)];
+
+ if (mutex_lock_interruptible(&card->fw.lock))
+ return -ERESTARTSYS;
+ if (card->fw.up) {
+ mutex_unlock(&card->fw.lock);
+ return 0;
+ }
+ /* reset board */
+ if (card->pdat->enable_irq)
+ card->pdat->enable_irq(card->pdev, 1);
+ /* boot card */
+ softing_set_reset_dpram(card);
+ if (card->pdat->reset)
+ card->pdat->reset(card->pdev, 1);
+ for (j = 0; (j + sizeof(stream)) < card->dpram_size;
+ j += sizeof(stream)) {
+
+ memcpy_toio(&card->dpram[j], stream, sizeof(stream));
+ /* flush IO cache */
+ mb();
+ memcpy_fromio(back, &card->dpram[j], sizeof(stream));
+
+ if (!memcmp(back, stream, sizeof(stream)))
+ continue;
+ /* memory is not equal */
+ dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j);
+ ret = -EIO;
+ goto failed;
+ }
+ wmb();
+ /* load boot firmware */
+ ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram,
+ card->dpram_size,
+ card->pdat->boot.offs - card->pdat->boot.addr);
+ if (ret < 0)
+ goto failed;
+ /* load loader firmware */
+ ret = softing_load_fw(card->pdat->load.fw, card, card->dpram,
+ card->dpram_size,
+ card->pdat->load.offs - card->pdat->load.addr);
+ if (ret < 0)
+ goto failed;
+
+ if (card->pdat->reset)
+ card->pdat->reset(card->pdev, 0);
+ softing_clr_reset_dpram(card);
+ ret = softing_bootloader_command(card, 0, "card boot");
+ if (ret < 0)
+ goto failed;
+ ret = softing_load_app_fw(card->pdat->app.fw, card);
+ if (ret < 0)
+ goto failed;
+
+ ret = softing_chip_poweron(card);
+ if (ret < 0)
+ goto failed;
+
+ card->fw.up = 1;
+ mutex_unlock(&card->fw.lock);
+ return 0;
+failed:
+ card->fw.up = 0;
+ if (card->pdat->enable_irq)
+ card->pdat->enable_irq(card->pdev, 0);
+ softing_set_reset_dpram(card);
+ if (card->pdat->reset)
+ card->pdat->reset(card->pdev, 1);
+ mutex_unlock(&card->fw.lock);
+ return ret;
+}
+
+/*
+ * netdev sysfs
+ */
+static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct softing_priv *priv = netdev2softing(ndev);
+
+ return sprintf(buf, "%i\n", priv->index);
+}
+
+static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct softing_priv *priv = netdev2softing(ndev);
+
+ return sprintf(buf, "%i\n", priv->chip);
+}
+
+static ssize_t show_output(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct softing_priv *priv = netdev2softing(ndev);
+
+ return sprintf(buf, "0x%02x\n", priv->output);
+}
+
+static ssize_t store_output(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct softing_priv *priv = netdev2softing(ndev);
+ struct softing *card = priv->card;
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ val &= 0xFF;
+
+ ret = mutex_lock_interruptible(&card->fw.lock);
+ if (ret)
+ return -ERESTARTSYS;
+ if (netif_running(ndev)) {
+ mutex_unlock(&card->fw.lock);
+ return -EBUSY;
+ }
+ priv->output = val;
+ mutex_unlock(&card->fw.lock);
+ return count;
+}
+
+static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
+static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
+static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
+
+static const struct attribute *const netdev_sysfs_attrs[] = {
+ &dev_attr_channel.attr,
+ &dev_attr_chip.attr,
+ &dev_attr_output.attr,
+ NULL,
+};
+static const struct attribute_group netdev_sysfs_group = {
+ .name = NULL,
+ .attrs = (struct attribute **)netdev_sysfs_attrs,
+};
+
+static const struct net_device_ops softing_netdev_ops = {
+ .ndo_open = softing_netdev_open,
+ .ndo_stop = softing_netdev_stop,
+ .ndo_start_xmit = softing_netdev_start_xmit,
+};
+
+static const struct can_bittiming_const softing_btr_const = {
+ .name = "softing",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4, /* overruled */
+ .brp_min = 1,
+ .brp_max = 32, /* overruled */
+ .brp_inc = 1,
+};
+
+
+static __devinit struct net_device *softing_netdev_create(struct softing *card,
+ uint16_t chip_id)
+{
+ struct net_device *netdev;
+ struct softing_priv *priv;
+
+ netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
+ if (!netdev) {
+ dev_alert(&card->pdev->dev, "alloc_candev failed\n");
+ return NULL;
+ }
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->card = card;
+ memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const));
+ priv->btr_const.brp_max = card->pdat->max_brp;
+ priv->btr_const.sjw_max = card->pdat->max_sjw;
+ priv->can.bittiming_const = &priv->btr_const;
+ priv->can.clock.freq = 8000000;
+ priv->chip = chip_id;
+ priv->output = softing_default_output(netdev);
+ SET_NETDEV_DEV(netdev, &card->pdev->dev);
+
+ netdev->flags |= IFF_ECHO;
+ netdev->netdev_ops = &softing_netdev_ops;
+ priv->can.do_set_mode = softing_candev_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+ return netdev;
+}
+
+static __devinit int softing_netdev_register(struct net_device *netdev)
+{
+ int ret;
+
+ netdev->sysfs_groups[0] = &netdev_sysfs_group;
+ ret = register_candev(netdev);
+ if (ret) {
+ dev_alert(&netdev->dev, "register failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static void softing_netdev_cleanup(struct net_device *netdev)
+{
+ unregister_candev(netdev);
+ free_candev(netdev);
+}
+
+/*
+ * sysfs for Platform device
+ */
+#define DEV_ATTR_RO(name, member) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+ return sprintf(buf, "%u\n", card->member); \
+} \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+#define DEV_ATTR_RO_STR(name, member) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+ return sprintf(buf, "%s\n", card->member); \
+} \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+DEV_ATTR_RO(serial, id.serial);
+DEV_ATTR_RO_STR(firmware, pdat->app.fw);
+DEV_ATTR_RO(firmware_version, id.fw_version);
+DEV_ATTR_RO_STR(hardware, pdat->name);
+DEV_ATTR_RO(hardware_version, id.hw_version);
+DEV_ATTR_RO(license, id.license);
+DEV_ATTR_RO(frequency, id.freq);
+DEV_ATTR_RO(txpending, tx.pending);
+
+static struct attribute *softing_pdev_attrs[] = {
+ &dev_attr_serial.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_hardware.attr,
+ &dev_attr_hardware_version.attr,
+ &dev_attr_license.attr,
+ &dev_attr_frequency.attr,
+ &dev_attr_txpending.attr,
+ NULL,
+};
+
+static const struct attribute_group softing_pdev_group = {
+ .name = NULL,
+ .attrs = softing_pdev_attrs,
+};
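+
+/*
+ * These attributes appear in sysfs below the platform device, e.g.
+ * (hypothetical device name, following softing_cs.c's naming):
+ * cat /sys/bus/platform/devices/softingcs.0/firmware_version
+ */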
+
+/*
+ * platform driver
+ */
+static __devexit int softing_pdev_remove(struct platform_device *pdev)
+{
+ struct softing *card = platform_get_drvdata(pdev);
+ int j;
+
+ /* first, disable the card */
+ softing_card_shutdown(card);
+
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (!card->net[j])
+ continue;
+ softing_netdev_cleanup(card->net[j]);
+ card->net[j] = NULL;
+ }
+ sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
+
+ iounmap(card->dpram);
+ kfree(card);
+ return 0;
+}
+
+static __devinit int softing_pdev_probe(struct platform_device *pdev)
+{
+ const struct softing_platform_data *pdat = pdev->dev.platform_data;
+ struct softing *card;
+ struct net_device *netdev;
+ struct softing_priv *priv;
+ struct resource *pres;
+ int ret;
+ int j;
+
+ if (!pdat) {
+ dev_warn(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+ if (pdat->nbus > ARRAY_SIZE(card->net)) {
+ dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus);
+ return -EINVAL;
+ }
+
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+ card->pdat = pdat;
+ card->pdev = pdev;
+ platform_set_drvdata(pdev, card);
+ mutex_init(&card->fw.lock);
+ spin_lock_init(&card->spin);
+
+ ret = -EINVAL;
+ pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pres)
+ goto platform_resource_failed;
+ card->dpram_phys = pres->start;
+ card->dpram_size = pres->end - pres->start + 1;
+ card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
+ if (!card->dpram) {
+ dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
+ goto ioremap_failed;
+ }
+
+ pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (pres)
+ card->irq.nr = pres->start;
+
+ /* reset card */
+ ret = softing_card_boot(card);
+ if (ret < 0) {
+ dev_alert(&pdev->dev, "failed to boot\n");
+ goto boot_failed;
+ }
+
+ /* only now the chips are known */
+ card->id.freq = card->pdat->freq;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group);
+ if (ret < 0) {
+ dev_alert(&card->pdev->dev, "sysfs failed\n");
+ goto sysfs_failed;
+ }
+
+ ret = -ENOMEM;
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ card->net[j] = netdev =
+ softing_netdev_create(card, card->id.chip[j]);
+ if (!netdev) {
+ dev_alert(&pdev->dev, "failed to make can[%i]", j);
+ goto netdev_failed;
+ }
+ priv = netdev_priv(card->net[j]);
+ priv->index = j;
+ ret = softing_netdev_register(netdev);
+ if (ret) {
+ free_candev(netdev);
+ card->net[j] = NULL;
+ dev_alert(&card->pdev->dev,
+ "failed to register can[%i]\n", j);
+ goto netdev_failed;
+ }
+ }
+ dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name);
+ return 0;
+
+netdev_failed:
+ for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+ if (!card->net[j])
+ continue;
+ softing_netdev_cleanup(card->net[j]);
+ }
+ sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
+sysfs_failed:
+ softing_card_shutdown(card);
+boot_failed:
+ iounmap(card->dpram);
+ioremap_failed:
+platform_resource_failed:
+ kfree(card);
+ return ret;
+}
+
+static struct platform_driver softing_driver = {
+ .driver = {
+ .name = "softing",
+ .owner = THIS_MODULE,
+ },
+ .probe = softing_pdev_probe,
+ .remove = __devexit_p(softing_pdev_remove),
+};
+
+MODULE_ALIAS("platform:softing");
+
+static int __init softing_start(void)
+{
+ return platform_driver_register(&softing_driver);
+}
+
+static void __exit softing_stop(void)
+{
+ platform_driver_unregister(&softing_driver);
+}
+
+module_init(softing_start);
+module_exit(softing_stop);
+
+MODULE_DESCRIPTION("Softing DPRAM CAN driver");
+MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
new file mode 100644
index 000000000000..ebbf69815623
--- /dev/null
+++ b/drivers/net/can/softing/softing_platform.h
@@ -0,0 +1,40 @@
+
+#include <linux/platform_device.h>
+
+#ifndef _SOFTING_DEVICE_H_
+#define _SOFTING_DEVICE_H_
+
+/* softing firmware directory prefix */
+#define fw_dir "softing-4.6/"
+
+struct softing_platform_data {
+ unsigned int manf;
+ unsigned int prod;
+ /*
+ * generation
+ * 1st with NEC or SJA1000
+ * 8bit, exclusive interrupt, ...
+ * 2nd only SJA1000
+ * 16bit, shared interrupt
+ */
+ int generation;
+ int nbus; /* # busses on device */
+ unsigned int freq; /* operating frequency in Hz */
+ unsigned int max_brp;
+ unsigned int max_sjw;
+ unsigned long dpram_size;
+ const char *name;
+ struct {
+ unsigned long offs;
+ unsigned long addr;
+ const char *fw;
+ } boot, load, app;
+ /*
+ * reset() function
+ * bring pdev in or out of reset, depending on value
+ */
+ int (*reset)(struct platform_device *pdev, int value);
+ int (*enable_irq)(struct platform_device *pdev, int value);
+};
+
+#endif
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 05a52754f486..dc53c831ea95 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -659,7 +659,7 @@ failed:
static void unlink_all_urbs(struct esd_usb2 *dev)
{
struct esd_usb2_net_priv *priv;
- int i;
+ int i, j;
usb_kill_anchored_urbs(&dev->rx_submitted);
for (i = 0; i < dev->net_count; i++) {
@@ -668,8 +668,8 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
usb_kill_anchored_urbs(&priv->tx_submitted);
atomic_set(&priv->active_tx_jobs, 0);
- for (i = 0; i < MAX_TX_URBS; i++)
- priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+ for (j = 0; j < MAX_TX_URBS; j++)
+ priv->tx_contexts[j].echo_index = MAX_TX_URBS;
}
}
}
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index d6b6d6aa565a..3437613f0454 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2788,7 +2788,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
ctrl = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const u64 csum_start_off = skb_transport_offset(skb);
+ const u64 csum_start_off = skb_checksum_start_offset(skb);
const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
ctrl = TX_DESC_CSUM_EN |
@@ -3203,6 +3203,10 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
int mac_off = 0;
+#if defined(CONFIG_SPARC)
+ const unsigned char *addr;
+#endif
+
/* give us access to the PROM */
writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
cp->regs + REG_BIM_LOCAL_DEV_EN);
@@ -3350,6 +3354,14 @@ use_random_mac_addr:
if (found & VPD_FOUND_MAC)
goto done;
+#if defined(CONFIG_SPARC)
+ addr = of_get_property(cp->of_node, "local-mac-address", NULL);
+ if (addr != NULL) {
+ memcpy(dev_addr, addr, 6);
+ goto done;
+ }
+#endif
+
/* Sun MAC prefix then 3 random bytes. */
pr_info("MAC address not found in ROM VPD\n");
dev_addr[0] = 0x08;
@@ -3880,7 +3892,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
schedule_work(&cp->reset_task);
#endif
- flush_scheduled_work();
+ flush_work_sync(&cp->reset_task);
return 0;
}
@@ -5019,6 +5031,10 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
cassini_debug;
+#if defined(CONFIG_SPARC)
+ cp->of_node = pci_device_to_OF_node(pdev);
+#endif
+
cp->link_transition = LINK_TRANSITION_UNKNOWN;
cp->link_transition_jiffies_valid = 0;
@@ -5177,7 +5193,7 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
vfree(cp->fw_data);
mutex_lock(&cp->pm_mutex);
- flush_scheduled_work();
+ cancel_work_sync(&cp->reset_task);
if (cp->hw_running)
cas_shutdown(cp);
mutex_unlock(&cp->pm_mutex);
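Both cassini hunks replace flush_scheduled_work(), which flushes the entire system workqueue and can stall or deadlock on unrelated work, with operations on the single work item the driver owns. A minimal sketch of the pattern with the workqueue API of this era (flush_work_sync() was later folded into flush_work()); names are made up:

#include <linux/workqueue.h>

struct foo {
        struct work_struct reset_task;
};

static void foo_reset(struct work_struct *work)
{
        struct foo *f = container_of(work, struct foo, reset_task);

        /* reset the hardware owned by 'f' */
        (void)f;
}

static void foo_init(struct foo *f)
{
        INIT_WORK(&f->reset_task, foo_reset);
}

static void foo_change_mtu(struct foo *f)
{
        schedule_work(&f->reset_task);
        flush_work_sync(&f->reset_task);   /* wait for this item, and only this item */
}

static void foo_remove(struct foo *f)
{
        cancel_work_sync(&f->reset_task);  /* stop it before freeing 'f' */
}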
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
index dbc47878d83b..faf4746a0f3e 100644
--- a/drivers/net/cassini.h
+++ b/drivers/net/cassini.h
@@ -2868,6 +2868,9 @@ struct cas {
dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS];
struct pci_dev *pdev;
struct net_device *dev;
+#if defined(CONFIG_OF)
+ struct device_node *of_node;
+#endif
/* Firmware Info */
u16 fw_load_addr;
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
index 4c6028512d10..a683fd3bb624 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/chelsio/my3126.c
@@ -22,7 +22,7 @@ static int my3126_interrupt_enable(struct cphy *cphy)
static int my3126_interrupt_disable(struct cphy *cphy)
{
- cancel_rearming_delayed_work(&cphy->phy_update);
+ cancel_delayed_work_sync(&cphy->phy_update);
return 0;
}
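cancel_rearming_delayed_work() was a deprecated helper; cancel_delayed_work_sync() provides the guarantee actually needed, that the callback is neither queued nor running on return, even when the callback rearms itself. A sketch of the self-rearming pattern with made-up names:

#include <linux/workqueue.h>

struct phy_priv {
        struct delayed_work phy_update;
};

static void phy_update_fn(struct work_struct *work)
{
        struct phy_priv *p = container_of(to_delayed_work(work),
                                          struct phy_priv, phy_update);

        /* ... poll the PHY ... then rearm */
        schedule_delayed_work(&p->phy_update, HZ / 4);
}

static void phy_start(struct phy_priv *p)
{
        INIT_DELAYED_WORK(&p->phy_update, phy_update_fn);
        schedule_delayed_work(&p->phy_update, HZ / 4);
}

static void phy_stop(struct phy_priv *p)
{
        /* safe against the self-rearm: nothing queued or running afterwards */
        cancel_delayed_work_sync(&p->phy_update);
}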
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 70221ca32683..f778b15ad3fd 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -273,6 +273,10 @@ struct sge {
struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};
+static const u8 ch_mac_addr[ETH_ALEN] = {
+ 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
+};
+
/*
* stop tasklet and free all pending skb's
*/
@@ -2012,10 +2016,6 @@ static void espibug_workaround_t204(unsigned long data)
continue;
if (!skb->cb[0]) {
- u8 ch_mac_addr[ETH_ALEN] = {
- 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
- };
-
skb_copy_to_linear_data_offset(skb,
sizeof(struct cpl_tx_pkt),
ch_mac_addr,
@@ -2048,8 +2048,6 @@ static void espibug_workaround(unsigned long data)
if ((seop & 0xfff0fff) == 0xfff && skb) {
if (!skb->cb[0]) {
- u8 ch_mac_addr[ETH_ALEN] =
- {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
skb_copy_to_linear_data_offset(skb,
sizeof(struct cpl_tx_pkt),
ch_mac_addr,
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 63ebf76d2390..8a43c7e19701 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -556,7 +556,7 @@ struct chelsio_vpd_t {
#define EEPROM_MAX_POLL 4
/*
- * Read SEEPROM. A zero is written to the flag register when the addres is
+ * Read SEEPROM. A zero is written to the flag register when the address is
* written to the Control register. The hardware device will set the flag to a
* one when 4B have been transferred to the Data register.
*/
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 92bac19ad60a..8cca60e43444 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -59,12 +59,20 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
+/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);
-static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+/* helper function, assuming cnic_lock is held */
+static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
+{
+ return rcu_dereference_protected(cnic_ulp_tbl[type],
+ lockdep_is_held(&cnic_lock));
+}
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
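The __rcu annotation and the cnic_ulp_tbl_prot() helper added above let sparse and lockdep verify that the table is only dereferenced either under rcu_read_lock() or, on the update side, with cnic_lock held. A minimal sketch of that split, with illustrative names rather than cnic's:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct ops;

static DEFINE_MUTEX(tbl_lock);
static struct ops __rcu *tbl[4];

/* update side: caller holds tbl_lock, so the "protected" form is allowed */
static struct ops *tbl_prot(int i)
{
        return rcu_dereference_protected(tbl[i], lockdep_is_held(&tbl_lock));
}

static void tbl_set(int i, struct ops *o)
{
        mutex_lock(&tbl_lock);
        rcu_assign_pointer(tbl[i], o);
        mutex_unlock(&tbl_lock);
}

/* read side: plain rcu_dereference(), caller holds rcu_read_lock() */
static struct ops *tbl_get(int i)
{
        return rcu_dereference(tbl[i]);
}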
@@ -278,6 +286,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
u32 msg_type = ISCSI_KEVENT_IF_DOWN;
struct cnic_ulp_ops *ulp_ops;
struct cnic_uio_dev *udev = cp->udev;
+ int rc = 0, retry = 0;
if (!udev || udev->uio_dev == -1)
return -ENODEV;
@@ -302,14 +311,26 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
path_req.pmtu = csk->mtu;
}
- rcu_read_lock();
- ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
- if (ulp_ops)
- ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
- rcu_read_unlock();
+ while (retry < 3) {
+ rc = 0;
+ rcu_read_lock();
+ ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
+ if (ulp_ops)
+ rc = ulp_ops->iscsi_nl_send_msg(
+ cp->ulp_handle[CNIC_ULP_ISCSI],
+ msg_type, buf, len);
+ rcu_read_unlock();
+ if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
+ break;
+
+ msleep(100);
+ retry++;
+ }
return 0;
}
+static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
+
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
char *buf, u16 len)
{
@@ -339,7 +360,9 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
}
csk = &cp->csk_tbl[l5_cid];
csk_hold(csk);
- if (cnic_in_use(csk)) {
+ if (cnic_in_use(csk) &&
+ test_bit(SK_F_CONNECT_START, &csk->flags)) {
+
memcpy(csk->ha, path_resp->mac_addr, 6);
if (test_bit(SK_F_IPV6, &csk->flags))
memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
@@ -347,8 +370,16 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
else
memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
sizeof(struct in_addr));
- if (is_valid_ether_addr(csk->ha))
+
+ if (is_valid_ether_addr(csk->ha)) {
cnic_cm_set_pg(csk);
+ } else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
+ !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+
+ cnic_cm_upcall(cp, csk,
+ L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+ clear_bit(SK_F_CONNECT_START, &csk->flags);
+ }
}
csk_put(csk);
rcu_read_unlock();
@@ -402,19 +433,6 @@ static int cnic_abort_prep(struct cnic_sock *csk)
return 0;
}
-static void cnic_uio_stop(void)
-{
- struct cnic_dev *dev;
-
- read_lock(&cnic_dev_lock);
- list_for_each_entry(dev, &cnic_dev_list, list) {
- struct cnic_local *cp = dev->cnic_priv;
-
- cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
- }
- read_unlock(&cnic_dev_lock);
-}
-
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
struct cnic_dev *dev;
@@ -424,7 +442,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
return -EINVAL;
}
mutex_lock(&cnic_lock);
- if (cnic_ulp_tbl[ulp_type]) {
+ if (cnic_ulp_tbl_prot(ulp_type)) {
pr_err("%s: Type %d has already been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
@@ -445,14 +463,12 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
/* Prevent race conditions with netdev_event */
rtnl_lock();
- read_lock(&cnic_dev_lock);
list_for_each_entry(dev, &cnic_dev_list, list) {
struct cnic_local *cp = dev->cnic_priv;
if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
ulp_ops->cnic_init(dev);
}
- read_unlock(&cnic_dev_lock);
rtnl_unlock();
return 0;
@@ -469,7 +485,7 @@ int cnic_unregister_driver(int ulp_type)
return -EINVAL;
}
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl[ulp_type];
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
if (!ulp_ops) {
pr_err("%s: Type %d has not been registered\n",
__func__, ulp_type);
@@ -488,9 +504,6 @@ int cnic_unregister_driver(int ulp_type)
}
read_unlock(&cnic_dev_lock);
- if (ulp_type == CNIC_ULP_ISCSI)
- cnic_uio_stop();
-
rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
mutex_unlock(&cnic_lock);
@@ -523,7 +536,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
return -EINVAL;
}
mutex_lock(&cnic_lock);
- if (cnic_ulp_tbl[ulp_type] == NULL) {
+ if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
pr_err("%s: Driver with type %d has not been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
@@ -538,7 +551,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
cp->ulp_handle[ulp_type] = ulp_ctx;
- ulp_ops = cnic_ulp_tbl[ulp_type];
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
cnic_hold(dev);
@@ -574,6 +587,9 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
}
mutex_unlock(&cnic_lock);
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
synchronize_rcu();
while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
@@ -690,13 +706,13 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
- u32 *page_table = dma->pgtbl;
+ __le32 *page_table = (__le32 *) dma->pgtbl;
for (i = 0; i < dma->num_pages; i++) {
/* Each entry needs to be in big endian format. */
- *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+ *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
page_table++;
- *page_table = (u32) dma->pg_map_arr[i];
+ *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
page_table++;
}
}
@@ -704,13 +720,13 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
- u32 *page_table = dma->pgtbl;
+ __le32 *page_table = (__le32 *) dma->pgtbl;
for (i = 0; i < dma->num_pages; i++) {
/* Each entry needs to be in little endian format. */
- *page_table = dma->pg_map_arr[i] & 0xffffffff;
+ *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
page_table++;
- *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+ *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
page_table++;
}
}
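Typing the page table as __le32 and converting with cpu_to_le32() makes the byte order of each store explicit and lets sparse's endianness checking flag any access that skips the conversion. A small self-contained sketch of the same annotation on a made-up descriptor:

#include <linux/types.h>
#include <asm/byteorder.h>

/* illustration only: a two-word DMA descriptor with checked endianness */
struct desc {
        __le32 addr_lo;
        __le32 addr_hi;
};

static void desc_fill(struct desc *d, u64 dma)
{
        d->addr_lo = cpu_to_le32(dma & 0xffffffff);
        d->addr_hi = cpu_to_le32(dma >> 32);
}

static u64 desc_read(const struct desc *d)
{
        return ((u64)le32_to_cpu(d->addr_hi) << 32) | le32_to_cpu(d->addr_lo);
}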
@@ -821,12 +837,14 @@ static void cnic_free_resc(struct cnic_dev *dev)
cnic_free_dma(dev, &cp->conn_buf_info);
cnic_free_dma(dev, &cp->kwq_info);
cnic_free_dma(dev, &cp->kwq_16_data_info);
+ cnic_free_dma(dev, &cp->kcq2.dma);
cnic_free_dma(dev, &cp->kcq1.dma);
kfree(cp->iscsi_tbl);
cp->iscsi_tbl = NULL;
kfree(cp->ctx_tbl);
cp->ctx_tbl = NULL;
+ cnic_free_id_tbl(&cp->fcoe_cid_tbl);
cnic_free_id_tbl(&cp->cid_tbl);
}
@@ -940,7 +958,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
&udev->l2_ring_map,
GFP_KERNEL | __GFP_COMP);
if (!udev->l2_ring)
- return -ENOMEM;
+ goto err_udev;
udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
@@ -948,7 +966,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
&udev->l2_buf_map,
GFP_KERNEL | __GFP_COMP);
if (!udev->l2_buf)
- return -ENOMEM;
+ goto err_dma;
write_lock(&cnic_dev_lock);
list_add(&udev->list, &cnic_udev_list);
@@ -959,6 +977,12 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
cp->udev = udev;
return 0;
+ err_dma:
+ dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
+ udev->l2_ring, udev->l2_ring_map);
+ err_udev:
+ kfree(udev);
+ return -ENOMEM;
}
static int cnic_init_uio(struct cnic_dev *dev)
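The new err_dma/err_udev labels give cnic_alloc_uio_rings() a proper unwind path instead of leaking the earlier allocations when a later one fails. The usual shape of that pattern, sketched with made-up names:

#include <linux/slab.h>
#include <linux/dma-mapping.h>

/* sketch: undo in reverse order of acquisition on the error path */
static int alloc_two(struct device *dev, size_t len,
                     void **ring, dma_addr_t *ring_map,
                     void **buf, dma_addr_t *buf_map)
{
        *ring = dma_alloc_coherent(dev, len, ring_map, GFP_KERNEL);
        if (!*ring)
                goto err;

        *buf = dma_alloc_coherent(dev, len, buf_map, GFP_KERNEL);
        if (!*buf)
                goto err_ring;

        return 0;

err_ring:
        dma_free_coherent(dev, len, *ring, *ring_map);
err:
        return -ENOMEM;
}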
@@ -1114,12 +1138,22 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
cp->iro_arr = ethdev->iro_arr;
- cp->max_cid_space = MAX_ISCSI_TBL_SZ;
+ cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
cp->iscsi_start_cid = start_cid;
+ cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
+
+ if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
+ cp->fcoe_init_cid = ethdev->fcoe_init_cid;
+ if (!cp->fcoe_init_cid)
+ cp->fcoe_init_cid = 0x10;
+ }
+
if (start_cid < BNX2X_ISCSI_START_CID) {
u32 delta = BNX2X_ISCSI_START_CID - start_cid;
cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
+ cp->fcoe_start_cid += delta;
cp->max_cid_space += delta;
}
@@ -1138,6 +1172,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
}
+ for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
+ cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
+
pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
PAGE_SIZE;
@@ -1161,6 +1198,12 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
if (ret)
goto error;
+ if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ ret = cnic_alloc_kcq(dev, &cp->kcq2);
+ if (ret)
+ goto error;
+ }
+
pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
@@ -1254,12 +1297,18 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
struct cnic_local *cp = dev->cnic_priv;
struct l5cm_spe kwqe;
struct kwqe_16 *kwq[1];
+ u16 type_16;
int ret;
kwqe.hdr.conn_and_cmd_data =
cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
BNX2X_HW_CID(cp, cid)));
- kwqe.hdr.type = cpu_to_le16(type);
+
+ type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
+ type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID;
+
+ kwqe.hdr.type = cpu_to_le16(type_16);
kwqe.hdr.reserved1 = 0;
kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
@@ -1425,8 +1474,11 @@ static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
cnic_free_dma(dev, &iscsi->hq_info);
cnic_free_dma(dev, &iscsi->r2tq_info);
cnic_free_dma(dev, &iscsi->task_array_info);
+ cnic_free_id(&cp->cid_tbl, ctx->cid);
+ } else {
+ cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
}
- cnic_free_id(&cp->cid_tbl, ctx->cid);
+
ctx->cid = 0;
}
@@ -1438,6 +1490,16 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
struct cnic_iscsi *iscsi = ctx->proto.iscsi;
+ if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
+ cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
+ if (cid == -1) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ ctx->cid = cid;
+ return 0;
+ }
+
cid = cnic_alloc_new_id(&cp->cid_tbl);
if (cid == -1) {
ret = -ENOMEM;
@@ -1695,7 +1757,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
*work = num;
return -EINVAL;
}
- *work = 2 + req2->num_additional_wqes;;
+ *work = 2 + req2->num_additional_wqes;
l5_cid = req1->iscsi_conn_id;
if (l5_cid >= MAX_ISCSI_TBL_SZ)
@@ -1770,19 +1832,15 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
union l5cm_specific_data l5_data;
int ret;
- u32 hw_cid, type;
+ u32 hw_cid;
init_waitqueue_head(&ctx->waitq);
ctx->wait_cond = 0;
memset(&l5_data, 0, sizeof(l5_data));
hw_cid = BNX2X_HW_CID(cp, ctx->cid);
- type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
- & SPE_HDR_CONN_TYPE;
- type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
- SPE_HDR_FUNCTION_ID);
ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
- hw_cid, type, &l5_data);
+ hw_cid, NONE_CONNECTION_TYPE, &l5_data);
if (ret == 0)
wait_event(ctx->waitq, ctx->wait_cond);
@@ -2078,8 +2136,306 @@ static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
return 0;
}
-static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
- u32 num_wqes)
+static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct fcoe_kwqe_stat *req;
+ struct fcoe_stat_ramrod_params *fcoe_stat;
+ union l5cm_specific_data l5_data;
+ struct cnic_local *cp = dev->cnic_priv;
+ int ret;
+ u32 cid;
+
+ req = (struct fcoe_kwqe_stat *) kwqe;
+ cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+
+ fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+ if (!fcoe_stat)
+ return -ENOMEM;
+
+ memset(fcoe_stat, 0, sizeof(*fcoe_stat));
+ memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
+
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ return ret;
+}
+
+static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
+ u32 num, int *work)
+{
+ int ret;
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 cid;
+ struct fcoe_init_ramrod_params *fcoe_init;
+ struct fcoe_kwqe_init1 *req1;
+ struct fcoe_kwqe_init2 *req2;
+ struct fcoe_kwqe_init3 *req3;
+ union l5cm_specific_data l5_data;
+
+ if (num < 3) {
+ *work = num;
+ return -EINVAL;
+ }
+ req1 = (struct fcoe_kwqe_init1 *) wqes[0];
+ req2 = (struct fcoe_kwqe_init2 *) wqes[1];
+ req3 = (struct fcoe_kwqe_init3 *) wqes[2];
+ if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
+ *work = 1;
+ return -EINVAL;
+ }
+ if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
+ *work = 2;
+ return -EINVAL;
+ }
+
+ if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
+ netdev_err(dev->netdev, "fcoe_init size too big\n");
+ return -ENOMEM;
+ }
+ fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+ if (!fcoe_init)
+ return -ENOMEM;
+
+ memset(fcoe_init, 0, sizeof(*fcoe_init));
+ memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
+ memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
+ memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
+ fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
+ fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
+ fcoe_init->eq_next_page_addr.lo =
+ cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
+ fcoe_init->eq_next_page_addr.hi =
+ (u64) cp->kcq2.dma.pg_map_arr[1] >> 32;
+
+ fcoe_init->sb_num = cp->status_blk_num;
+ fcoe_init->eq_prod = MAX_KCQ_IDX;
+ fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
+ cp->kcq2.sw_prod_idx = 0;
+
+ cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ *work = 3;
+ return ret;
+}
+
+static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
+ u32 num, int *work)
+{
+ int ret = 0;
+ u32 cid = -1, l5_cid;
+ struct cnic_local *cp = dev->cnic_priv;
+ struct fcoe_kwqe_conn_offload1 *req1;
+ struct fcoe_kwqe_conn_offload2 *req2;
+ struct fcoe_kwqe_conn_offload3 *req3;
+ struct fcoe_kwqe_conn_offload4 *req4;
+ struct fcoe_conn_offload_ramrod_params *fcoe_offload;
+ struct cnic_context *ctx;
+ struct fcoe_context *fctx;
+ struct regpair ctx_addr;
+ union l5cm_specific_data l5_data;
+ struct fcoe_kcqe kcqe;
+ struct kcqe *cqes[1];
+
+ if (num < 4) {
+ *work = num;
+ return -EINVAL;
+ }
+ req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
+ req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
+ req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
+ req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
+
+ *work = 4;
+
+ l5_cid = req1->fcoe_conn_id;
+ if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+ goto err_reply;
+
+ l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+ ctx = &cp->ctx_tbl[l5_cid];
+ if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+ goto err_reply;
+
+ ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
+ if (ret) {
+ ret = 0;
+ goto err_reply;
+ }
+ cid = ctx->cid;
+
+ fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
+ if (fctx) {
+ u32 hw_cid = BNX2X_HW_CID(cp, cid);
+ u32 val;
+
+ val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
+ FCOE_CONNECTION_TYPE);
+ fctx->xstorm_ag_context.cdu_reserved = val;
+ val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
+ FCOE_CONNECTION_TYPE);
+ fctx->ustorm_ag_context.cdu_usage = val;
+ }
+ if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
+ netdev_err(dev->netdev, "fcoe_offload size too big\n");
+ goto err_reply;
+ }
+ fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+ if (!fcoe_offload)
+ goto err_reply;
+
+ memset(fcoe_offload, 0, sizeof(*fcoe_offload));
+ memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
+ memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
+ memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
+ memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
+
+ cid = BNX2X_HW_CID(cp, cid);
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ if (!ret)
+ set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+
+ return ret;
+
+err_reply:
+ if (cid != -1)
+ cnic_free_bnx2x_conn_resc(dev, l5_cid);
+
+ memset(&kcqe, 0, sizeof(kcqe));
+ kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
+ kcqe.fcoe_conn_id = req1->fcoe_conn_id;
+ kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
+
+ cqes[0] = (struct kcqe *) &kcqe;
+ cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+ return ret;
+}
+
+static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct fcoe_kwqe_conn_enable_disable *req;
+ struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
+ union l5cm_specific_data l5_data;
+ int ret;
+ u32 cid, l5_cid;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+ cid = req->context_id;
+ l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
+
+ if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
+ netdev_err(dev->netdev, "fcoe_enable size too big\n");
+ return -ENOMEM;
+ }
+ fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+ if (!fcoe_enable)
+ return -ENOMEM;
+
+ memset(fcoe_enable, 0, sizeof(*fcoe_enable));
+ memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ return ret;
+}
+
+static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct fcoe_kwqe_conn_enable_disable *req;
+ struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
+ union l5cm_specific_data l5_data;
+ int ret;
+ u32 cid, l5_cid;
+ struct cnic_local *cp = dev->cnic_priv;
+
+ req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+ cid = req->context_id;
+ l5_cid = req->conn_id;
+ if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+ return -EINVAL;
+
+ l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+ if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
+ netdev_err(dev->netdev, "fcoe_disable size too big\n");
+ return -ENOMEM;
+ }
+ fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+ if (!fcoe_disable)
+ return -ENOMEM;
+
+ memset(fcoe_disable, 0, sizeof(*fcoe_disable));
+ memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ return ret;
+}
+
+static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct fcoe_kwqe_conn_destroy *req;
+ union l5cm_specific_data l5_data;
+ int ret;
+ u32 cid, l5_cid;
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_context *ctx;
+ struct fcoe_kcqe kcqe;
+ struct kcqe *cqes[1];
+
+ req = (struct fcoe_kwqe_conn_destroy *) kwqe;
+ cid = req->context_id;
+ l5_cid = req->conn_id;
+ if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+ return -EINVAL;
+
+ l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+ ctx = &cp->ctx_tbl[l5_cid];
+
+ init_waitqueue_head(&ctx->waitq);
+ ctx->wait_cond = 0;
+
+ memset(&l5_data, 0, sizeof(l5_data));
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ if (ret == 0) {
+ wait_event(ctx->waitq, ctx->wait_cond);
+ set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
+ queue_delayed_work(cnic_wq, &cp->delete_task,
+ msecs_to_jiffies(2000));
+ }
+
+ memset(&kcqe, 0, sizeof(kcqe));
+ kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
+ kcqe.fcoe_conn_id = req->conn_id;
+ kcqe.fcoe_conn_context_id = cid;
+
+ cqes[0] = (struct kcqe *) &kcqe;
+ cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+ return ret;
+}
+
+static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct fcoe_kwqe_destroy *req;
+ union l5cm_specific_data l5_data;
+ struct cnic_local *cp = dev->cnic_priv;
+ int ret;
+ u32 cid;
+
+ req = (struct fcoe_kwqe_destroy *) kwqe;
+ cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+
+ memset(&l5_data, 0, sizeof(l5_data));
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
+ FCOE_CONNECTION_TYPE, &l5_data);
+ return ret;
+}
+
+static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
+ struct kwqe *wqes[], u32 num_wqes)
{
int i, work, ret;
u32 opcode;
@@ -2143,6 +2499,98 @@ static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
return 0;
}
+static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
+ struct kwqe *wqes[], u32 num_wqes)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int i, work, ret;
+ u32 opcode;
+ struct kwqe *kwqe;
+
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EAGAIN; /* bnx2 is down */
+
+ if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
+ return -EINVAL;
+
+ for (i = 0; i < num_wqes; ) {
+ kwqe = wqes[i];
+ opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+ work = 1;
+
+ switch (opcode) {
+ case FCOE_KWQE_OPCODE_INIT1:
+ ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
+ num_wqes - i, &work);
+ break;
+ case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
+ ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
+ num_wqes - i, &work);
+ break;
+ case FCOE_KWQE_OPCODE_ENABLE_CONN:
+ ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
+ break;
+ case FCOE_KWQE_OPCODE_DISABLE_CONN:
+ ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
+ break;
+ case FCOE_KWQE_OPCODE_DESTROY_CONN:
+ ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
+ break;
+ case FCOE_KWQE_OPCODE_DESTROY:
+ ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
+ break;
+ case FCOE_KWQE_OPCODE_STAT:
+ ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
+ break;
+ default:
+ ret = 0;
+ netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
+ opcode);
+ break;
+ }
+ if (ret < 0)
+ netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
+ opcode);
+ i += work;
+ }
+ return 0;
+}
+
+static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+ u32 num_wqes)
+{
+ int ret = -EINVAL;
+ u32 layer_code;
+
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EAGAIN; /* bnx2x is down */
+
+ if (!num_wqes)
+ return 0;
+
+ layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
+ switch (layer_code) {
+ case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
+ case KWQE_FLAGS_LAYER_MASK_L4:
+ case KWQE_FLAGS_LAYER_MASK_L2:
+ ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
+ break;
+
+ case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
+ ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
+ break;
+ }
+ return ret;
+}
+
+static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
+{
+ if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
+ return KCQE_FLAGS_LAYER_MASK_L4;
+
+ return opflag & KCQE_FLAGS_LAYER_MASK;
+}
+
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -2154,7 +2602,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
struct cnic_ulp_ops *ulp_ops;
int ulp_type;
u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
- u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
+ u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
comp++;
@@ -2162,7 +2610,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
while (j < num_cqes) {
u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
- if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
+ if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
break;
if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
@@ -2174,6 +2622,8 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
ulp_type = CNIC_ULP_RDMA;
else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
ulp_type = CNIC_ULP_ISCSI;
+ else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
+ ulp_type = CNIC_ULP_FCOE;
else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
ulp_type = CNIC_ULP_L4;
else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
@@ -2317,6 +2767,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
int kcqe_cnt;
+ /* status block index must be read before reading other fields */
+ rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
@@ -2327,6 +2779,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
barrier();
if (status_idx != *cp->kcq1.status_idx_ptr) {
status_idx = (u16) *cp->kcq1.status_idx_ptr;
+ /* status block index must be read first */
+ rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
} else
break;
@@ -2342,11 +2796,12 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
static int cnic_service_bnx2(void *data, void *status_blk)
{
struct cnic_dev *dev = data;
- struct cnic_local *cp = dev->cnic_priv;
- u32 status_idx = *cp->kcq1.status_idx_ptr;
- if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
- return status_idx;
+ if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+ struct status_block *sblk = status_blk;
+
+ return sblk->status_idx;
+ }
return cnic_service_bnx2_queues(dev);
}
@@ -2365,9 +2820,10 @@ static void cnic_service_bnx2_msix(unsigned long data)
static void cnic_doirq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+ u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
+
prefetch(cp->status_blk.gen);
prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
@@ -2443,6 +2899,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
u32 last_status = *info->status_idx_ptr;
int kcqe_cnt;
+ /* status block index must be read before reading the KCQ */
+ rmb();
while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
service_kcqes(dev, kcqe_cnt);
@@ -2453,6 +2911,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
break;
last_status = *info->status_idx_ptr;
+ /* status block index must be read before reading the KCQ */
+ rmb();
}
return last_status;
}
@@ -2461,20 +2921,36 @@ static void cnic_service_bnx2x_bh(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
- u32 status_idx;
+ u32 status_idx, new_status_idx;
if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
return;
- status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+ while (1) {
+ status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+
+ CNIC_WR16(dev, cp->kcq1.io_addr,
+ cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
+
+ if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
+ status_idx, IGU_INT_ENABLE, 1);
+ break;
+ }
+
+ new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+ if (new_status_idx != status_idx)
+ continue;
+
+ CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
+ MAX_KCQ_IDX);
- CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
- if (BNX2X_CHIP_IS_E2(cp->chip_id))
cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
status_idx, IGU_INT_ENABLE, 1);
- else
- cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
- status_idx, IGU_INT_ENABLE, 1);
+
+ break;
+ }
}
static int cnic_service_bnx2x(void *data, void *status_blk)
@@ -2501,7 +2977,8 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cp->ulp_ops[if_type];
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
if (!ulp_ops) {
mutex_unlock(&cnic_lock);
continue;
@@ -2525,7 +3002,8 @@ static void cnic_ulp_start(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cp->ulp_ops[if_type];
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
if (!ulp_ops || !ulp_ops->cnic_start) {
mutex_unlock(&cnic_lock);
continue;
@@ -2589,7 +3067,7 @@ static void cnic_ulp_init(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl[i];
+ ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_init) {
mutex_unlock(&cnic_lock);
continue;
@@ -2613,7 +3091,7 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl[i];
+ ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_exit) {
mutex_unlock(&cnic_lock);
continue;
@@ -2883,7 +3361,7 @@ static void cnic_cm_cleanup(struct cnic_sock *csk)
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
- cnic_free_id(&cp->csk_port_tbl, csk->src_port);
+ cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
csk->src_port = 0;
}
}
@@ -2929,17 +3407,14 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
struct dst_entry **dst)
{
#if defined(CONFIG_INET)
- struct flowi fl;
- int err;
struct rtable *rt;
- memset(&fl, 0, sizeof(fl));
- fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
-
- err = ip_route_output_key(&init_net, &rt, &fl);
- if (!err)
+ rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
+ if (!IS_ERR(rt)) {
*dst = &rt->dst;
- return err;
+ return 0;
+ }
+ return PTR_ERR(rt);
#else
return -ENETUNREACH;
#endif
@@ -2949,14 +3424,14 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
- struct flowi fl;
+ struct flowi6 fl6;
- memset(&fl, 0, sizeof(fl));
- ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
- if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
- fl.oif = dst_addr->sin6_scope_id;
+ memset(&fl6, 0, sizeof(fl6));
+ ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = dst_addr->sin6_scope_id;
- *dst = ip6_route_output(&init_net, NULL, &fl);
+ *dst = ip6_route_output(&init_net, NULL, &fl6);
if (*dst)
return 0;
#endif
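The routing hunks track API churn in this kernel cycle: ip_route_output() now returns the rtable (or an ERR_PTR) directly instead of being driven through a struct flowi, and the IPv6 lookup moves to struct flowi6. A sketch of the IPv4 call as used above; note that the helper's argument list has changed again in later kernels:

#include <linux/err.h>
#include <net/route.h>

static int get_v4_dst(__be32 daddr, struct dst_entry **dst)
{
        struct rtable *rt;

        /* net, daddr, saddr, tos, oif: matching the call in the hunk above */
        rt = ip_route_output(&init_net, daddr, 0, 0, 0);
        if (IS_ERR(rt))
                return PTR_ERR(rt);

        *dst = &rt->dst;
        return 0;
}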
@@ -3014,7 +3489,8 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
int is_v6, rc = 0;
struct dst_entry *dst = NULL;
struct net_device *realdev;
- u32 local_port;
+ __be16 local_port;
+ u32 port_id;
if (saddr->local.v6.sin6_family == AF_INET6 &&
saddr->remote.v6.sin6_family == AF_INET6)
@@ -3054,19 +3530,21 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
}
}
- if (local_port >= CNIC_LOCAL_PORT_MIN &&
- local_port < CNIC_LOCAL_PORT_MAX) {
- if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
- local_port = 0;
+ port_id = be16_to_cpu(local_port);
+ if (port_id >= CNIC_LOCAL_PORT_MIN &&
+ port_id < CNIC_LOCAL_PORT_MAX) {
+ if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
+ port_id = 0;
} else
- local_port = 0;
+ port_id = 0;
- if (!local_port) {
- local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
- if (local_port == -1) {
+ if (!port_id) {
+ port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
+ if (port_id == -1) {
rc = -ENOMEM;
goto err_out;
}
+ local_port = cpu_to_be16(port_id);
}
csk->src_port = local_port;
@@ -3208,6 +3686,18 @@ done:
csk_put(csk);
}
+static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
+ u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
+ struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+ ctx->timestamp = jiffies;
+ ctx->wait_cond = 1;
+ wake_up(&ctx->waitq);
+}
+
static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -3216,6 +3706,10 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
u32 l5_cid;
struct cnic_sock *csk;
+ if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
+ cnic_process_fcoe_term_conn(dev, kcqe);
+ return;
+ }
if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
cnic_cm_process_offld_pg(dev, l4kcqe);
@@ -3699,6 +4193,14 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev)
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
+static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
+{
+ u32 max_conn;
+
+ max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
+ dev->max_iscsi_conn = max_conn;
+}
+
static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -3852,7 +4354,7 @@ static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
memset(&l2kwqe, 0, sizeof(l2kwqe));
wqes[0] = &l2kwqe;
- l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
+ l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
(L2_KWQE_OPCODE_VALUE_FLUSH <<
KWQE_OPCODE_SHIFT) | 2;
dev->submit_kwqes(dev, wqes, 1);
@@ -4023,6 +4525,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
return err;
}
+ cnic_get_bnx2_iscsi_info(dev);
+
return 0;
}
@@ -4106,7 +4610,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int port = CNIC_PORT(cp);
int i;
- int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
+ u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 val;
memset(txbd, 0, BCM_PAGE_SIZE);
@@ -4167,7 +4671,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
int port = CNIC_PORT(cp);
- int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
+ u32 cli = cp->ethdev->iscsi_l2_client_id;
int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
u32 val;
dma_addr_t ring_map = udev->l2_ring_map;
@@ -4231,77 +4735,44 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
cp->rx_cons_ptr =
&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
+ cp->rx_cons = *cp->rx_cons_ptr;
}
-static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
+static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- u32 base, base2, addr, val;
- int port = CNIC_PORT(cp);
-
- dev->max_iscsi_conn = 0;
- base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
- if (base == 0)
- return;
-
- base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
- MISC_REG_GENERIC_CR_0));
- addr = BNX2X_SHMEM_ADDR(base,
- dev_info.port_hw_config[port].iscsi_mac_upper);
-
- val = CNIC_RD(dev, addr);
-
- dev->mac_addr[0] = (u8) (val >> 8);
- dev->mac_addr[1] = (u8) val;
-
- addr = BNX2X_SHMEM_ADDR(base,
- dev_info.port_hw_config[port].iscsi_mac_lower);
-
- val = CNIC_RD(dev, addr);
-
- dev->mac_addr[2] = (u8) (val >> 24);
- dev->mac_addr[3] = (u8) (val >> 16);
- dev->mac_addr[4] = (u8) (val >> 8);
- dev->mac_addr[5] = (u8) val;
+ u32 pfid = cp->pfid;
- addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
- val = CNIC_RD(dev, addr);
+ cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
+ cp->kcq1.sw_prod_idx = 0;
- if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
- u16 val16;
+ if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
- addr = BNX2X_SHMEM_ADDR(base,
- drv_lic_key[port].max_iscsi_init_conn);
- val16 = CNIC_RD16(dev, addr);
+ cp->kcq1.hw_prod_idx_ptr =
+ &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+ cp->kcq1.status_idx_ptr =
+ &sb->sb.running_index[SM_RX_ID];
+ } else {
+ struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
- if (val16)
- val16 ^= 0x1e1e;
- dev->max_iscsi_conn = val16;
+ cp->kcq1.hw_prod_idx_ptr =
+ &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+ cp->kcq1.status_idx_ptr =
+ &sb->sb.running_index[SM_RX_ID];
}
- if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
- int func = CNIC_FUNC(cp);
- u32 mf_cfg_addr;
- if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
- mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
- mf_cfg_addr));
- else
- mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
-
- addr = mf_cfg_addr +
- offsetof(struct mf_cfg, func_mf_config[func].e1hov_tag);
-
- val = CNIC_RD(dev, addr);
- val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
- if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
- addr = mf_cfg_addr +
- offsetof(struct mf_cfg,
- func_mf_config[func].config);
- val = CNIC_RD(dev, addr);
- val &= FUNC_MF_CFG_PROTOCOL_MASK;
- if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
- dev->max_iscsi_conn = 0;
- }
+ if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
+
+ cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
+ USTORM_FCOE_EQ_PROD_OFFSET(pfid);
+ cp->kcq2.sw_prod_idx = 0;
+ cp->kcq2.hw_prod_idx_ptr =
+ &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
+ cp->kcq2.status_idx_ptr =
+ &sb->sb.running_index[SM_RX_ID];
}
}
@@ -4335,29 +4806,18 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
if (ret)
return -ENOMEM;
- cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
-
- cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
- cp->kcq1.sw_prod_idx = 0;
-
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
- struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
+ ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
+ BNX2X_FCOE_NUM_CONNECTIONS,
+ cp->fcoe_start_cid);
- cp->kcq1.hw_prod_idx_ptr =
- &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
- cp->kcq1.status_idx_ptr =
- &sb->sb.running_index[SM_RX_ID];
- } else {
- struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
-
- cp->kcq1.hw_prod_idx_ptr =
- &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
- cp->kcq1.status_idx_ptr =
- &sb->sb.running_index[SM_RX_ID];
+ if (ret)
+ return -ENOMEM;
}
- cnic_get_bnx2x_iscsi_info(dev);
+ cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
+
+ cnic_init_bnx2x_kcq(dev);
/* Only 1 EQ */
CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
@@ -4424,8 +4884,9 @@ static void cnic_init_rings(struct cnic_dev *dev)
cnic_init_bnx2_rx_ring(dev);
set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
- u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
- u32 cl_qzone_id, type;
+ u32 cli = cp->ethdev->iscsi_l2_client_id;
+ u32 cid = cp->ethdev->iscsi_l2_cid;
+ u32 cl_qzone_id;
struct client_init_ramrod_data *data;
union l5cm_specific_data l5_data;
struct ustorm_eth_rx_producers rx_prods = {0};
@@ -4457,15 +4918,10 @@ static void cnic_init_rings(struct cnic_dev *dev)
l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
- type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
- & SPE_HDR_CONN_TYPE;
- type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
- SPE_HDR_FUNCTION_ID);
-
set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
- BNX2X_ISCSI_L2_CID, type, &l5_data);
+ cid, ETH_CONNECTION_TYPE, &l5_data);
i = 0;
while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
@@ -4476,7 +4932,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
netdev_err(dev->netdev,
"iSCSI CLIENT_SETUP did not complete\n");
cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
- cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
+ cnic_ring_ctl(dev, cid, cli, 1);
}
}
@@ -4491,19 +4947,19 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
cnic_shutdown_bnx2_rx_ring(dev);
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
struct cnic_local *cp = dev->cnic_priv;
- u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
+ u32 cli = cp->ethdev->iscsi_l2_client_id;
+ u32 cid = cp->ethdev->iscsi_l2_cid;
union l5cm_specific_data l5_data;
int i;
- u32 type;
- cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
+ cnic_ring_ctl(dev, cid, cli, 0);
set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
l5_data.phy_address.lo = cli;
l5_data.phy_address.hi = 0;
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
- BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
+ cid, ETH_CONNECTION_TYPE, &l5_data);
i = 0;
while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
++i < 10)
@@ -4515,12 +4971,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
memset(&l5_data, 0, sizeof(l5_data));
- type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
- & SPE_HDR_CONN_TYPE;
- type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
- SPE_HDR_FUNCTION_ID);
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
- BNX2X_ISCSI_L2_CID, type, &l5_data);
+ cid, NONE_CONNECTION_TYPE, &l5_data);
msleep(10);
}
clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
@@ -4720,15 +5172,11 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
dev_hold(dev);
pci_dev_get(pdev);
- if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
- pdev->device == PCI_DEVICE_ID_NX2_5709S) {
- u8 rev;
-
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
- if (rev < 0x10) {
- pci_dev_put(pdev);
- goto cnic_err;
- }
+ if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
+ pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
+ (pdev->revision < 0x10)) {
+ pci_dev_put(pdev);
+ goto cnic_err;
}
pci_dev_put(pdev);
@@ -4799,6 +5247,14 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
+ if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
+ cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+ if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
+ !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+ cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+
+ memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+
cp->cnic_ops = &cnic_bnx2x_ops;
cp->start_hw = cnic_start_bnx2x_hw;
cp->stop_hw = cnic_stop_bnx2x_hw;
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 6a4a0ae5cfe3..4456260c653c 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -82,7 +82,7 @@ struct cnic_redirect_entry {
#define MAX_ISCSI_TBL_SZ 256
#define CNIC_LOCAL_PORT_MIN 60000
-#define CNIC_LOCAL_PORT_MAX 61000
+#define CNIC_LOCAL_PORT_MAX 61024
#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
@@ -220,7 +220,7 @@ struct cnic_local {
#define ULP_F_INIT 0
#define ULP_F_START 1
#define ULP_F_CALL_PENDING 2
- struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
+ struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
unsigned long cnic_local_flags;
#define CNIC_LCL_FL_KWQ_INIT 0x0
@@ -258,6 +258,7 @@ struct cnic_local {
u16 kwq_con_idx;
struct kcq_info kcq1;
+ struct kcq_info kcq2;
union {
void *gen;
@@ -290,6 +291,10 @@ struct cnic_local {
atomic_t iscsi_conn;
u32 iscsi_start_cid;
+ u32 fcoe_init_cid;
+ u32 fcoe_start_cid;
+ struct cnic_id_tbl fcoe_cid_tbl;
+
u32 max_cid_space;
/* per connection parameters */
@@ -356,11 +361,6 @@ struct bnx2x_bd_chain_next {
#define BNX2X_CONTEXT_MEM_SIZE 1024
#define BNX2X_FCOE_CID 16
-/* iSCSI client IDs are 17, 19, 21, 23 */
-#define BNX2X_ISCSI_BASE_CL_ID 17
-#define BNX2X_ISCSI_CL_ID(vn) (BNX2X_ISCSI_BASE_CL_ID + ((vn) << 1))
-
-#define BNX2X_ISCSI_L2_CID 17
#define BNX2X_ISCSI_START_CID 18
#define BNX2X_ISCSI_NUM_CONNECTIONS 128
#define BNX2X_ISCSI_TASK_CONTEXT_SIZE 128
@@ -372,6 +372,10 @@ struct bnx2x_bd_chain_next {
#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff
#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff
+#define BNX2X_FCOE_NUM_CONNECTIONS 128
+
+#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ
+
#define BNX2X_CHIP_NUM_57710 0x164e
#define BNX2X_CHIP_NUM_57711 0x164f
#define BNX2X_CHIP_NUM_57711E 0x1650
@@ -427,6 +431,13 @@ struct bnx2x_bd_chain_next {
(CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) > \
offsetof(struct shmem2_region, field)))
+#define BNX2X_MF_CFG_ADDR(base, field) \
+ ((base) + offsetof(struct mf_cfg, field))
+
+#ifndef ETH_MAX_RX_CLIENTS_E2
+#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
+#endif
+
#define CNIC_PORT(cp) ((cp)->pfid & 1)
#define CNIC_FUNC(cp) ((cp)->func)
#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2(cp->chip_id) ? 0 :\
@@ -439,7 +450,9 @@ struct bnx2x_bd_chain_next {
#define BNX2X_SW_CID(x) (x & 0x1ffff)
#define BNX2X_CL_QZONE_ID(cp, cli) \
- (cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
+ (cli + (CNIC_PORT(cp) * (BNX2X_CHIP_IS_E2(cp->chip_id) ?\
+ ETH_MAX_RX_CLIENTS_E2 : \
+ ETH_MAX_RX_CLIENTS_E1H)))
#define TCP_TSTORM_OOO_DROP_AND_PROC_ACK (0<<4)
#endif
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
index 328e8b2765a3..fdbc00415603 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/cnic_defs.h
@@ -35,6 +35,40 @@
#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
+#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10)
+#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11)
+#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12)
+#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15)
+#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16)
+#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17)
+#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18)
+#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21)
+
+#define FCOE_RAMROD_CMD_ID_INIT (FCOE_KCQE_OPCODE_INIT_FUNC)
+#define FCOE_RAMROD_CMD_ID_DESTROY (FCOE_KCQE_OPCODE_DESTROY_FUNC)
+#define FCOE_RAMROD_CMD_ID_OFFLOAD_CONN (FCOE_KCQE_OPCODE_OFFLOAD_CONN)
+#define FCOE_RAMROD_CMD_ID_ENABLE_CONN (FCOE_KCQE_OPCODE_ENABLE_CONN)
+#define FCOE_RAMROD_CMD_ID_DISABLE_CONN (FCOE_KCQE_OPCODE_DISABLE_CONN)
+#define FCOE_RAMROD_CMD_ID_DESTROY_CONN (FCOE_KCQE_OPCODE_DESTROY_CONN)
+#define FCOE_RAMROD_CMD_ID_STAT (FCOE_KCQE_OPCODE_STAT_FUNC)
+#define FCOE_RAMROD_CMD_ID_TERMINATE_CONN (0x81)
+
+#define FCOE_KWQE_OPCODE_INIT1 (0)
+#define FCOE_KWQE_OPCODE_INIT2 (1)
+#define FCOE_KWQE_OPCODE_INIT3 (2)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6)
+#define FCOE_KWQE_OPCODE_ENABLE_CONN (7)
+#define FCOE_KWQE_OPCODE_DISABLE_CONN (8)
+#define FCOE_KWQE_OPCODE_DESTROY_CONN (9)
+#define FCOE_KWQE_OPCODE_DESTROY (10)
+#define FCOE_KWQE_OPCODE_STAT (11)
+
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
+
/* KCQ (kernel completion queue) response op codes */
#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
@@ -683,6 +717,1496 @@ struct cstorm_iscsi_ag_context {
};
/*
+ * Parameters initialized during offload according to FLOGI/PLOGI/PRLI and used in the FCoE context section
+ */
+struct ustorm_fcoe_params {
+#if defined(__BIG_ENDIAN)
+ u16 fcoe_conn_id;
+ u16 flags;
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
+#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
+#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
+#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
+#define USTORM_FCOE_PARAMS_B_C2_VALID (0x1<<7)
+#define USTORM_FCOE_PARAMS_B_C2_VALID_SHIFT 7
+#define USTORM_FCOE_PARAMS_B_ACK_0 (0x1<<8)
+#define USTORM_FCOE_PARAMS_B_ACK_0_SHIFT 8
+#define USTORM_FCOE_PARAMS_RSRV0 (0x7F<<9)
+#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 9
+#elif defined(__LITTLE_ENDIAN)
+ u16 flags;
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
+#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
+#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
+#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
+#define USTORM_FCOE_PARAMS_B_C2_VALID (0x1<<7)
+#define USTORM_FCOE_PARAMS_B_C2_VALID_SHIFT 7
+#define USTORM_FCOE_PARAMS_B_ACK_0 (0x1<<8)
+#define USTORM_FCOE_PARAMS_B_ACK_0_SHIFT 8
+#define USTORM_FCOE_PARAMS_RSRV0 (0x7F<<9)
+#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 9
+ u16 fcoe_conn_id;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 hc_csdm_byte_en;
+ u8 func_id;
+ u8 port_id;
+ u8 vnic_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 vnic_id;
+ u8 port_id;
+ u8 func_id;
+ u8 hc_csdm_byte_en;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 rx_total_conc_seqs;
+ u16 rx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_max_fc_pay_len;
+ u16 rx_total_conc_seqs;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 ox_id;
+ u16 rx_max_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_max_conc_seqs;
+ u16 ox_id;
+#endif
+};
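Each of these firmware-interface structures is declared twice, once per host endianness. The usual reason for the idiom in these HSI headers is that the chip consumes the context as 32-bit words and a 32-bit byte swap is applied for big-endian hosts, so mirroring the declaration order of the sub-fields keeps each one in the half of the word the firmware expects. A bare illustration, not a cnic structure:

#include <linux/types.h>

/* illustration only: one 32-bit context word holding two 16-bit fields */
struct hsi_word {
#if defined(__BIG_ENDIAN)
        u16 hi_field;   /* bits 31..16 of the word the firmware sees */
        u16 lo_field;   /* bits 15..0 */
#elif defined(__LITTLE_ENDIAN)
        u16 lo_field;
        u16 hi_field;
#endif
};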
+
+/*
+ * FCoE 16-bits index structure
+ */
+struct fcoe_idx16_fields {
+ u16 fields;
+#define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0)
+#define FCOE_IDX16_FIELDS_IDX_SHIFT 0
+#define FCOE_IDX16_FIELDS_MSB (0x1<<15)
+#define FCOE_IDX16_FIELDS_MSB_SHIFT 15
+};
+
+/*
+ * FCoE 16-bits index union
+ */
+union fcoe_idx16_field_union {
+ struct fcoe_idx16_fields fields;
+ u16 val;
+};
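The *_MASK/*_SHIFT pairs used throughout these definitions carry the mask already shifted into position, so packing and unpacking a field is a mask-then-shift operation. A small illustrative accessor pair for the 15-bit index above; these helpers do not exist in cnic:

#include <linux/types.h>

static u16 idx16_get(u16 fields)
{
        return (fields & FCOE_IDX16_FIELDS_IDX) >> FCOE_IDX16_FIELDS_IDX_SHIFT;
}

static u16 idx16_set(u16 fields, u16 idx)
{
        fields &= ~FCOE_IDX16_FIELDS_IDX;
        fields |= (idx << FCOE_IDX16_FIELDS_IDX_SHIFT) & FCOE_IDX16_FIELDS_IDX;
        return fields;
}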
+
+/*
+ * 4 regs size
+ */
+struct fcoe_bd_ctx {
+ u32 buf_addr_hi;
+ u32 buf_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 rsrv0;
+ u16 buf_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 buf_len;
+ u16 rsrv0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 rsrv1;
+ u16 flags;
+#elif defined(__LITTLE_ENDIAN)
+ u16 flags;
+ u16 rsrv1;
+#endif
+};
+
+/*
+ * Parameters required for placement according to SGL
+ */
+struct ustorm_fcoe_data_place {
+#if defined(__BIG_ENDIAN)
+ u16 cached_sge_off;
+ u8 cached_num_sges;
+ u8 cached_sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+ u8 cached_sge_idx;
+ u8 cached_num_sges;
+ u16 cached_sge_off;
+#endif
+ struct fcoe_bd_ctx cached_sge[3];
+};
+
+struct fcoe_task_ctx_entry_txwr_rxrd {
+#if defined(__BIG_ENDIAN)
+ u16 verify_tx_seq;
+ u8 init_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
+ u8 tx_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
+#elif defined(__LITTLE_ENDIAN)
+ u8 tx_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
+ u8 init_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
+ u16 verify_tx_seq;
+#endif
+};
+
+struct fcoe_fcp_cmd_payload {
+ u32 opaque[8];
+};
+
+struct fcoe_fc_hdr {
+#if defined(__BIG_ENDIAN)
+ u8 cs_ctl;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 cs_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 r_ctl;
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 r_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 seq_id;
+ u8 df_ctl;
+ u16 seq_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u16 seq_cnt;
+ u8 df_ctl;
+ u8 seq_id;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 type;
+ u8 f_ctl[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 f_ctl[3];
+ u8 type;
+#endif
+ u32 parameters;
+#if defined(__BIG_ENDIAN)
+ u16 ox_id;
+ u16 rx_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_id;
+ u16 ox_id;
+#endif
+};
+
+struct fcoe_fc_frame {
+ struct fcoe_fc_hdr fc_hdr;
+ u32 reserved0[2];
+};
+
+union fcoe_cmd_flow_info {
+ struct fcoe_fcp_cmd_payload fcp_cmd_payload;
+ struct fcoe_fc_frame mp_fc_frame;
+};
+
+struct fcoe_read_flow_info {
+ struct fcoe_fc_hdr fc_data_in_hdr;
+ u32 reserved[2];
+};
+
+struct fcoe_fcp_xfr_rdy_payload {
+ u32 burst_len;
+ u32 data_ro;
+};
+
+struct fcoe_write_flow_info {
+ struct fcoe_fc_hdr fc_data_out_hdr;
+ struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload;
+};
+
+struct fcoe_fcp_rsp_flags {
+ u8 flags;
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
+};
+
+struct fcoe_fcp_rsp_payload {
+ struct regpair reserved0;
+ u32 fcp_resid;
+#if defined(__BIG_ENDIAN)
+ u16 retry_delay_timer;
+ struct fcoe_fcp_rsp_flags fcp_flags;
+ u8 scsi_status_code;
+#elif defined(__LITTLE_ENDIAN)
+ u8 scsi_status_code;
+ struct fcoe_fcp_rsp_flags fcp_flags;
+ u16 retry_delay_timer;
+#endif
+ u32 fcp_rsp_len;
+ u32 fcp_sns_len;
+};
+
+/*
+ * Fixed size structure in order to plant it in Union structure
+ */
+struct fcoe_fcp_rsp_union {
+ struct fcoe_fcp_rsp_payload payload;
+ struct regpair reserved0;
+};
+
+/*
+ * Fixed size structure in order to plant it in Union structure
+ */
+struct fcoe_abts_rsp_union {
+ u32 r_ctl;
+ u32 abts_rsp_payload[7];
+};
+
+union fcoe_rsp_flow_info {
+ struct fcoe_fcp_rsp_union fcp_rsp;
+ struct fcoe_abts_rsp_union abts_rsp;
+};
+
+struct fcoe_cleanup_flow_info {
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u16 task_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 task_id;
+ u16 reserved1;
+#endif
+ u32 reserved2[7];
+};
+
+/*
+ * 32 bytes used for general purposes
+ */
+union fcoe_general_task_ctx {
+ union fcoe_cmd_flow_info cmd_info;
+ struct fcoe_read_flow_info read_info;
+ struct fcoe_write_flow_info write_info;
+ union fcoe_rsp_flow_info rsp_info;
+ struct fcoe_cleanup_flow_info cleanup_info;
+ u32 comp_info[8];
+};
+
+struct fcoe_s_stat_ctx {
+ u8 flags;
+#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
+#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
+#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
+#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
+#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
+#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
+#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
+};
+
+/*
+ * Common section. Both TX and RX processing may read from and write to it in different flows
+ */
+struct fcoe_task_ctx_entry_tx_rx_cmn {
+ u32 data_2_trns;
+ union fcoe_general_task_ctx general;
+#if defined(__BIG_ENDIAN)
+ u16 tx_low_seq_cnt;
+ struct fcoe_s_stat_ctx tx_s_stat;
+ u8 tx_seq_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 tx_seq_id;
+ struct fcoe_s_stat_ctx tx_s_stat;
+ u16 tx_low_seq_cnt;
+#endif
+ u32 common_flags;
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29
+};
+
+struct fcoe_task_ctx_entry_rxwr_txrd {
+#if defined(__BIG_ENDIAN)
+ u16 rx_id;
+ u16 rx_flags;
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_flags;
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
+ u16 rx_id;
+#endif
+};
+
+struct fcoe_seq_ctx {
+#if defined(__BIG_ENDIAN)
+ u16 low_seq_cnt;
+ struct fcoe_s_stat_ctx s_stat;
+ u8 seq_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 seq_id;
+ struct fcoe_s_stat_ctx s_stat;
+ u16 low_seq_cnt;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 err_seq_cnt;
+ u16 high_seq_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u16 high_seq_cnt;
+ u16 err_seq_cnt;
+#endif
+ u32 low_exp_ro;
+ u32 high_exp_ro;
+};
+
+struct fcoe_single_sge_ctx {
+ struct regpair cur_buf_addr;
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 cur_buf_rem;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cur_buf_rem;
+ u16 reserved0;
+#endif
+};
+
+struct fcoe_mul_sges_ctx {
+ struct regpair cur_sge_addr;
+#if defined(__BIG_ENDIAN)
+ u8 sgl_size;
+ u8 cur_sge_idx;
+ u16 cur_sge_off;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cur_sge_off;
+ u8 cur_sge_idx;
+ u8 sgl_size;
+#endif
+};
+
+union fcoe_sgl_ctx {
+ struct fcoe_single_sge_ctx single_sge;
+ struct fcoe_mul_sges_ctx mul_sges;
+};
+
+struct fcoe_task_ctx_entry_rx_only {
+ struct fcoe_seq_ctx seq_ctx;
+ struct fcoe_seq_ctx ooo_seq_ctx;
+ u32 rsrv3;
+ union fcoe_sgl_ctx sgl_ctx;
+};
+
+struct ustorm_fcoe_task_ctx_entry_rd {
+ struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
+ struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
+ struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
+ struct fcoe_task_ctx_entry_rx_only rx_wr;
+ u32 reserved;
+};
+
+/*
+ * Ustorm FCoE Storm Context
+ */
+struct ustorm_fcoe_st_context {
+ struct ustorm_fcoe_params fcoe_params;
+ struct regpair task_addr;
+ struct regpair cq_base_addr;
+ struct regpair rq_pbl_base;
+ struct regpair rq_cur_page_addr;
+ struct regpair confq_pbl_base_addr;
+ struct regpair conn_db_base;
+ struct regpair xfrq_base_addr;
+ struct regpair lcq_base_addr;
+#if defined(__BIG_ENDIAN)
+ union fcoe_idx16_field_union rq_cons;
+ union fcoe_idx16_field_union rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ union fcoe_idx16_field_union rq_prod;
+ union fcoe_idx16_field_union rq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 xfrq_prod;
+ u16 cq_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cq_cons;
+ u16 xfrq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 lcq_cons;
+ u16 hc_cram_address;
+#elif defined(__LITTLE_ENDIAN)
+ u16 hc_cram_address;
+ u16 lcq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 sq_xfrq_lcq_confq_size;
+ u16 confq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 confq_prod;
+ u16 sq_xfrq_lcq_confq_size;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 hc_csdm_agg_int;
+ u8 flags;
+#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG (0x1<<0)
+#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG_SHIFT 0
+#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG (0x1<<1)
+#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG_SHIFT 1
+#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG (0x1<<2)
+#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG_SHIFT 2
+#define USTORM_FCOE_ST_CONTEXT_RSRV1 (0x1F<<3)
+#define USTORM_FCOE_ST_CONTEXT_RSRV1_SHIFT 3
+ u8 available_rqes;
+ u8 sp_q_flush_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u8 sp_q_flush_cnt;
+ u8 available_rqes;
+ u8 flags;
+#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG (0x1<<0)
+#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG_SHIFT 0
+#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG (0x1<<1)
+#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG_SHIFT 1
+#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG (0x1<<2)
+#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG_SHIFT 2
+#define USTORM_FCOE_ST_CONTEXT_RSRV1 (0x1F<<3)
+#define USTORM_FCOE_ST_CONTEXT_RSRV1_SHIFT 3
+ u8 hc_csdm_agg_int;
+#endif
+ struct ustorm_fcoe_data_place data_place;
+ struct ustorm_fcoe_task_ctx_entry_rd tce;
+};
+
+/*
+ * The FCoE non-aggregative context of Tstorm
+ */
+struct tstorm_fcoe_st_context {
+ struct regpair reserved0;
+ struct regpair reserved1;
+};
+
+/*
+ * The FCoE aggregative context section of Xstorm
+ */
+struct xstorm_fcoe_extra_ag_context_section {
+#if defined(__BIG_ENDIAN)
+ u8 tcp_agg_vars1;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF (0x3<<4)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7
+ u8 __reserved_da_cnt;
+ u16 __mtu;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __mtu;
+ u8 __reserved_da_cnt;
+ u8 tcp_agg_vars1;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF (0x3<<4)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7
+#endif
+ u32 __task_addr_lo;
+ u32 __task_addr_hi;
+ u32 __reserved55;
+ u32 __tx_prods;
+#if defined(__BIG_ENDIAN)
+ u8 __agg_val8_th;
+ u8 __agg_val8;
+ u16 tcp_agg_vars2;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN (0x1<<7)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN_SHIFT 7
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 tcp_agg_vars2;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN (0x1<<7)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN_SHIFT 7
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
+ u8 __agg_val8;
+ u8 __agg_val8_th;
+#endif
+ u32 __sq_base_addr_lo;
+ u32 __sq_base_addr_hi;
+ u32 __xfrq_base_addr_lo;
+ u32 __xfrq_base_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 __xfrq_cons;
+ u16 __xfrq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __xfrq_prod;
+ u16 __xfrq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 __tcp_agg_vars5;
+ u8 __tcp_agg_vars4;
+ u8 __tcp_agg_vars3;
+ u8 __reserved_force_pure_ack_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __reserved_force_pure_ack_cnt;
+ u8 __tcp_agg_vars3;
+ u8 __tcp_agg_vars4;
+ u8 __tcp_agg_vars5;
+#endif
+ u32 __tcp_agg_vars6;
+#if defined(__BIG_ENDIAN)
+ u16 __agg_misc6;
+ u16 __tcp_agg_vars7;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __tcp_agg_vars7;
+ u16 __agg_misc6;
+#endif
+ u32 __agg_val10;
+ u32 __agg_val10_th;
+#if defined(__BIG_ENDIAN)
+ u16 __reserved3;
+ u8 __reserved2;
+ u8 __da_only_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __da_only_cnt;
+ u8 __reserved2;
+ u16 __reserved3;
+#endif
+};
+
+/*
+ * The FCoE aggregative context of Xstorm
+ */
+struct xstorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+ u16 agg_val1;
+ u8 agg_vars1;
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7
+ u8 __state;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __state;
+ u8 agg_vars1;
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7
+ u16 agg_val1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 cdu_reserved;
+ u8 __agg_vars4;
+ u8 agg_vars3;
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6
+ u8 agg_vars2;
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_vars2;
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+ u8 agg_vars3;
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6
+ u8 __agg_vars4;
+ u8 cdu_reserved;
+#endif
+ u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+ u16 agg_vars5;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14
+ u16 sq_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_cons;
+ u16 agg_vars5;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14
+#endif
+ struct xstorm_fcoe_extra_ag_context_section __extra_section;
+#if defined(__BIG_ENDIAN)
+ u16 agg_vars7;
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15
+ u8 agg_val3_th;
+ u8 agg_vars6;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_vars6;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6
+ u8 agg_val3_th;
+ u16 agg_vars7;
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val11_th;
+ u16 __agg_val11;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_val11;
+ u16 __agg_val11_th;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 __reserved1;
+ u8 __agg_val6_th;
+ u16 __confq_tx_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __confq_tx_prod;
+ u8 __agg_val6_th;
+ u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 confq_cons;
+ u16 confq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 confq_prod;
+ u16 confq_cons;
+#endif
+ u32 agg_vars8;
+#define __XSTORM_FCOE_AG_CONTEXT_CACHE_WQE_IDX (0xFFFFFF<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_CACHE_WQE_IDX_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+ u16 ox_id;
+ u16 sq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_prod;
+ u16 ox_id;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 agg_val3;
+ u8 agg_val6;
+ u8 agg_val5_th;
+ u8 agg_val5;
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_val5;
+ u8 agg_val5_th;
+ u8 agg_val6;
+ u8 agg_val3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __pbf_tx_seq_ack;
+ u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_limit1;
+ u16 __pbf_tx_seq_ack;
+#endif
+ u32 completion_seq;
+ u32 confq_pbl_base_lo;
+ u32 confq_pbl_base_hi;
+};
+
+/*
+ * The FCoE extra aggregative context section of Tstorm
+ */
+struct tstorm_fcoe_extra_ag_context_section {
+ u32 __agg_val1;
+#if defined(__BIG_ENDIAN)
+ u8 __tcp_agg_vars2;
+ u8 __agg_val3;
+ u16 __agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_val2;
+ u8 __agg_val3;
+ u8 __tcp_agg_vars2;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val5;
+ u8 __agg_val6;
+ u8 __tcp_agg_vars3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __tcp_agg_vars3;
+ u8 __agg_val6;
+ u16 __agg_val5;
+#endif
+ u32 __lcq_prod;
+ u32 rtt_seq;
+ u32 rtt_time;
+ u32 __reserved66;
+ u32 wnd_right_edge;
+ u32 tcp_agg_vars1;
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
+ u32 snd_max;
+ u32 __lcq_cons;
+ u32 __reserved2;
+};
+
+/*
+ * The FCoE aggregative context of Tstorm
+ */
+struct tstorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+ u16 ulp_credit;
+ u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+ u8 state;
+#elif defined(__LITTLE_ENDIAN)
+ u8 state;
+ u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+ u16 ulp_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val4;
+ u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+ u16 __agg_val4;
+#endif
+ struct tstorm_fcoe_extra_ag_context_section __extra_section;
+};
+
+/*
+ * The FCoE aggregative context of Ustorm
+ */
+struct ustorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+ u8 __aux_counter_flags;
+ u8 agg_vars2;
+#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+ u8 agg_vars1;
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+ u8 state;
+#elif defined(__LITTLE_ENDIAN)
+ u8 state;
+ u8 agg_vars1;
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+ u8 agg_vars2;
+#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+ u8 __aux_counter_flags;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 cdu_usage;
+ u8 agg_misc2;
+ u16 pbf_tx_seq_ack;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pbf_tx_seq_ack;
+ u8 agg_misc2;
+ u8 cdu_usage;
+#endif
+ u32 agg_misc4;
+#if defined(__BIG_ENDIAN)
+ u8 agg_val3_th;
+ u8 agg_val3;
+ u16 agg_misc3;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_misc3;
+ u8 agg_val3;
+ u8 agg_val3_th;
+#endif
+ u32 expired_task_id;
+ u32 agg_misc4_th;
+#if defined(__BIG_ENDIAN)
+ u16 cq_prod;
+ u16 cq_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cq_cons;
+ u16 cq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __reserved2;
+ u8 decision_rules;
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
+ u8 decision_rule_enable_bits;
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+ u8 decision_rule_enable_bits;
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+ u8 decision_rules;
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
+ u16 __reserved2;
+#endif
+};
+
+/*
+ * Ethernet context section
+ */
+struct xstorm_fcoe_eth_context_section {
+#if defined(__BIG_ENDIAN)
+ u8 remote_addr_4;
+ u8 remote_addr_5;
+ u8 local_addr_0;
+ u8 local_addr_1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 local_addr_1;
+ u8 local_addr_0;
+ u8 remote_addr_5;
+ u8 remote_addr_4;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 remote_addr_0;
+ u8 remote_addr_1;
+ u8 remote_addr_2;
+ u8 remote_addr_3;
+#elif defined(__LITTLE_ENDIAN)
+ u8 remote_addr_3;
+ u8 remote_addr_2;
+ u8 remote_addr_1;
+ u8 remote_addr_0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved_vlan_type;
+ u16 params;
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+ u16 params;
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+ u16 reserved_vlan_type;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 local_addr_2;
+ u8 local_addr_3;
+ u8 local_addr_4;
+ u8 local_addr_5;
+#elif defined(__LITTLE_ENDIAN)
+ u8 local_addr_5;
+ u8 local_addr_4;
+ u8 local_addr_3;
+ u8 local_addr_2;
+#endif
+};
+
+/*
+ * Flags used in FCoE context section - 1 byte
+ */
+struct xstorm_fcoe_context_flags {
+ u8 flags;
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q (0x3<<0)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q_SHIFT 0
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ (0x1<<2)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ_SHIFT 2
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_EXCHANGE_CLEANUP_DEFFERED (0x1<<3)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_EXCHANGE_CLEANUP_DEFFERED_SHIFT 3
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT (0x1<<4)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT_SHIFT 4
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE (0x1<<5)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT 5
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE (0x1<<6)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE_SHIFT 6
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_ABTS_DEFFERED (0x1<<7)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_ABTS_DEFFERED_SHIFT 7
+};
+
+/*
+ * FCoE SQ element
+ */
+struct fcoe_sqe {
+ u16 wqe;
+#define FCOE_SQE_TASK_ID (0x7FFF<<0)
+#define FCOE_SQE_TASK_ID_SHIFT 0
+#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
+};
+
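The SQ element packs a 15-bit task id and a toggle bit into one 16-bit word. Assuming a helper that is not part of this patch, a consumer could compose it with the macros above roughly as follows:

	/* Illustrative only -- not part of this patch. */
	static inline void fcoe_sqe_fill(struct fcoe_sqe *sqe, u16 task_id, u8 toggle)
	{
		sqe->wqe  = (task_id << FCOE_SQE_TASK_ID_SHIFT) & FCOE_SQE_TASK_ID;
		sqe->wqe |= (toggle << FCOE_SQE_TOGGLE_BIT_SHIFT) & FCOE_SQE_TOGGLE_BIT;
	}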
+/*
+ * FCoE XFRQ element
+ */
+struct fcoe_xfrqe {
+ u16 wqe;
+#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
+#define FCOE_XFRQE_TASK_ID_SHIFT 0
+#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
+};
+
+/*
+ * FCoE SQ/XFRQ element
+ */
+struct fcoe_cached_wqe {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_xfrqe xfrqe;
+ struct fcoe_sqe sqe;
+#elif defined(__LITTLE_ENDIAN)
+ struct fcoe_sqe sqe;
+ struct fcoe_xfrqe xfrqe;
+#endif
+};
+
+struct fcoe_task_ctx_entry_tx_only {
+ union fcoe_sgl_ctx sgl_ctx;
+};
+
+struct xstorm_fcoe_task_ctx_entry_rd {
+ struct fcoe_task_ctx_entry_tx_only tx_wr;
+ struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
+ struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
+ struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
+};
+
+/*
+ * Cached SGEs
+ */
+struct common_fcoe_sgl {
+ struct fcoe_bd_ctx sge[2];
+};
+
+/*
+ * FCP_DATA parameters required for transmission
+ */
+struct xstorm_fcoe_fcp_data {
+ u32 io_rem;
+#if defined(__BIG_ENDIAN)
+ u16 cached_sge_off;
+ u8 cached_num_sges;
+ u8 cached_sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+ u8 cached_sge_idx;
+ u8 cached_num_sges;
+ u16 cached_sge_off;
+#endif
+ struct common_fcoe_sgl cached_sgl;
+};
+
+/*
+ * FCoE context section
+ */
+struct xstorm_fcoe_context_section {
+#if defined(__BIG_ENDIAN)
+ u8 vlan_flag;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 vlan_flag;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 func_id;
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 func_id;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 sq_xfrq_lcq_confq_size;
+ u16 tx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tx_max_fc_pay_len;
+ u16 sq_xfrq_lcq_confq_size;
+#endif
+ u32 lcq_prod;
+#if defined(__BIG_ENDIAN)
+ u8 port_id;
+ u8 tx_max_conc_seqs_c3;
+ u8 seq_id;
+ struct xstorm_fcoe_context_flags tx_flags;
+#elif defined(__LITTLE_ENDIAN)
+ struct xstorm_fcoe_context_flags tx_flags;
+ u8 seq_id;
+ u8 tx_max_conc_seqs_c3;
+ u8 port_id;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 verify_tx_seq;
+ u8 func_mode;
+ u8 vnic_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 vnic_id;
+ u8 func_mode;
+ u16 verify_tx_seq;
+#endif
+ struct regpair confq_curr_page_addr;
+ struct fcoe_cached_wqe cached_wqe[8];
+ struct regpair lcq_base_addr;
+ struct xstorm_fcoe_task_ctx_entry_rd tce;
+ struct xstorm_fcoe_fcp_data fcp_data;
+#if defined(__BIG_ENDIAN)
+ u16 fcoe_tx_stat_params_ram_addr;
+ u16 cmng_port_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cmng_port_ram_addr;
+ u16 fcoe_tx_stat_params_ram_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 fcp_cmd_pb_cmd_size;
+ u8 eth_hdr_size;
+ u16 pbf_addr;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pbf_addr;
+ u8 eth_hdr_size;
+ u8 fcp_cmd_pb_cmd_size;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 reserved2[2];
+ u8 cos;
+ u8 dcb_version;
+#elif defined(__LITTLE_ENDIAN)
+ u8 dcb_version;
+ u8 cos;
+ u8 reserved2[2];
+#endif
+ u32 reserved3;
+ struct regpair reserved4[2];
+};
+
+/*
+ * Xstorm FCoE Storm Context
+ */
+struct xstorm_fcoe_st_context {
+ struct xstorm_fcoe_eth_context_section eth;
+ struct xstorm_fcoe_context_section fcoe;
+};
+
+/*
+ * FCoE connection context
+ */
+struct fcoe_context {
+ struct ustorm_fcoe_st_context ustorm_st_context;
+ struct tstorm_fcoe_st_context tstorm_st_context;
+ struct xstorm_fcoe_ag_context xstorm_ag_context;
+ struct tstorm_fcoe_ag_context tstorm_ag_context;
+ struct ustorm_fcoe_ag_context ustorm_ag_context;
+ struct timers_block_context timers_context;
+ struct xstorm_fcoe_st_context xstorm_st_context;
+};
+
+/*
* iSCSI context region, used only in iSCSI
*/
struct ustorm_iscsi_rq_db {
@@ -2268,6 +3792,577 @@ struct iscsi_context {
};
/*
+ * FCoE KCQ CQE parameters
+ */
+union fcoe_kcqe_params {
+ u32 reserved0[4];
+};
+
+/*
+ * FCoE KCQ CQE
+ */
+struct fcoe_kcqe {
+ u32 fcoe_conn_id;
+ u32 completion_status;
+ u32 fcoe_conn_context_id;
+ union fcoe_kcqe_params params;
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE KWQE header
+ */
+struct fcoe_kwqe_header {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+ u8 op_code;
+#elif defined(__LITTLE_ENDIAN)
+ u8 op_code;
+ u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+#endif
+};
+
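Each KWQE starts with this two-byte header carrying an opcode and a 3-bit layer code. A hedged sketch of an initializer follows; the helper name and the choice to pass the layer code as a parameter are assumptions, not taken from this patch:

	/* Illustrative only -- not part of this patch. */
	static inline void fcoe_kwqe_hdr_init(struct fcoe_kwqe_header *hdr,
					      u8 op_code, u8 layer_code)
	{
		hdr->op_code = op_code;
		hdr->flags = (layer_code << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT) &
			     FCOE_KWQE_HEADER_LAYER_CODE;
	}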
+/*
+ * FCoE firmware init request 1
+ */
+struct fcoe_kwqe_init1 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 num_tasks;
+#elif defined(__LITTLE_ENDIAN)
+ u16 num_tasks;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 task_list_pbl_addr_lo;
+ u32 task_list_pbl_addr_hi;
+ u32 dummy_buffer_addr_lo;
+ u32 dummy_buffer_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 rq_num_wqes;
+ u16 sq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_num_wqes;
+ u16 rq_num_wqes;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 cq_num_wqes;
+ u16 rq_buffer_log_size;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_buffer_log_size;
+ u16 cq_num_wqes;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+ u8 num_sessions_log;
+ u16 mtu;
+#elif defined(__LITTLE_ENDIAN)
+ u16 mtu;
+ u8 num_sessions_log;
+ u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE firmware init request 2
+ */
+struct fcoe_kwqe_init2 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 hash_tbl_pbl_addr_lo;
+ u32 hash_tbl_pbl_addr_hi;
+ u32 t2_hash_tbl_addr_lo;
+ u32 t2_hash_tbl_addr_hi;
+ u32 t2_ptr_hash_tbl_addr_lo;
+ u32 t2_ptr_hash_tbl_addr_hi;
+ u32 free_list_count;
+};
+
+/*
+ * FCoE firmware init request 3
+ */
+struct fcoe_kwqe_init3 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 error_bit_map_lo;
+ u32 error_bit_map_hi;
+#if defined(__BIG_ENDIAN)
+ u8 reserved21[3];
+ u8 cached_session_enable;
+#elif defined(__LITTLE_ENDIAN)
+ u8 cached_session_enable;
+ u8 reserved21[3];
+#endif
+ u32 reserved2[4];
+};
+
+/*
+ * FCoE connection offload request 1
+ */
+struct fcoe_kwqe_conn_offload1 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 fcoe_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 fcoe_conn_id;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 sq_addr_lo;
+ u32 sq_addr_hi;
+ u32 rq_pbl_addr_lo;
+ u32 rq_pbl_addr_hi;
+ u32 rq_first_pbe_addr_lo;
+ u32 rq_first_pbe_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_prod;
+ u16 reserved0;
+#endif
+};
+
+/*
+ * FCoE connection offload request 2
+ */
+struct fcoe_kwqe_conn_offload2 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 tx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tx_max_fc_pay_len;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 cq_addr_lo;
+ u32 cq_addr_hi;
+ u32 xferq_addr_lo;
+ u32 xferq_addr_hi;
+ u32 conn_db_addr_lo;
+ u32 conn_db_addr_hi;
+ u32 reserved1;
+};
+
+/*
+ * FCoE connection offload request 3
+ */
+struct fcoe_kwqe_conn_offload3 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+ struct fcoe_kwqe_header hdr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 tx_max_conc_seqs_c3;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 tx_max_conc_seqs_c3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+#endif
+ u32 reserved;
+ u32 confq_first_pbe_addr_lo;
+ u32 confq_first_pbe_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 rx_max_fc_pay_len;
+ u16 tx_total_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tx_total_conc_seqs;
+ u16 rx_max_fc_pay_len;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 rx_open_seqs_exch_c3;
+ u8 rx_max_conc_seqs_c3;
+ u16 rx_total_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_total_conc_seqs;
+ u8 rx_max_conc_seqs_c3;
+ u8 rx_open_seqs_exch_c3;
+#endif
+};
+
+/*
+ * FCoE connection offload request 4
+ */
+struct fcoe_kwqe_conn_offload4 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u8 reserved2;
+ u8 e_d_tov_timer_val;
+#elif defined(__LITTLE_ENDIAN)
+ u8 e_d_tov_timer_val;
+ u8 reserved2;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u8 src_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u8 dst_mac_addr_hi16[2];
+ u8 src_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 src_mac_addr_hi16[2];
+ u8 dst_mac_addr_hi16[2];
+#endif
+ u8 dst_mac_addr_lo32[4];
+ u32 lcq_addr_lo;
+ u32 lcq_addr_hi;
+ u32 confq_pbl_base_addr_lo;
+ u32 confq_pbl_base_addr_hi;
+};
+
+/*
+ * FCoE connection enable/disable request
+ */
+struct fcoe_kwqe_conn_enable_disable {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u8 src_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+ u8 src_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 src_mac_addr_hi16[2];
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+#endif
+ u8 dst_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 dst_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 dst_mac_addr_hi16[2];
+ u16 reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 vlan_flag;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 vlan_flag;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 reserved3;
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 reserved3;
+#endif
+ u32 context_id;
+ u32 conn_id;
+ u32 reserved4;
+};
+
+/*
+ * FCoE connection destroy request
+ */
+struct fcoe_kwqe_conn_destroy {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 context_id;
+ u32 conn_id;
+ u32 reserved1[5];
+};
+
+/*
+ * FCoE destroy request
+ */
+struct fcoe_kwqe_destroy {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 reserved1[7];
+};
+
+/*
+ * FCoE statistics request
+ */
+struct fcoe_kwqe_stat {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 stat_params_addr_lo;
+ u32 stat_params_addr_hi;
+ u32 reserved1[5];
+};
+
+/*
+ * FCoE KWQ WQE
+ */
+union fcoe_kwqe {
+ struct fcoe_kwqe_init1 init1;
+ struct fcoe_kwqe_init2 init2;
+ struct fcoe_kwqe_init3 init3;
+ struct fcoe_kwqe_conn_offload1 conn_offload1;
+ struct fcoe_kwqe_conn_offload2 conn_offload2;
+ struct fcoe_kwqe_conn_offload3 conn_offload3;
+ struct fcoe_kwqe_conn_offload4 conn_offload4;
+ struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
+ struct fcoe_kwqe_conn_destroy conn_destroy;
+ struct fcoe_kwqe_destroy destroy;
+ struct fcoe_kwqe_stat statistics;
+};
+
+struct fcoe_task_ctx_entry {
+ struct fcoe_task_ctx_entry_tx_only tx_wr_only;
+ struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
+ struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
+ struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
+ struct fcoe_task_ctx_entry_rx_only rx_wr_only;
+ u32 reserved[4];
+};
+
+/*
+ * FCoE connection enable/disable params passed by driver to FW in FCoE enable ramrod
+ */
+struct fcoe_conn_enable_disable_ramrod_params {
+ struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe;
+};
+
+
+/*
+ * FCoE connection offload params passed by driver to FW in FCoE offload ramrod
+ */
+struct fcoe_conn_offload_ramrod_params {
+ struct fcoe_kwqe_conn_offload1 offload_kwqe1;
+ struct fcoe_kwqe_conn_offload2 offload_kwqe2;
+ struct fcoe_kwqe_conn_offload3 offload_kwqe3;
+ struct fcoe_kwqe_conn_offload4 offload_kwqe4;
+};
+
+/*
+ * FCoE init params passed by driver to FW in FCoE init ramrod
+ */
+struct fcoe_init_ramrod_params {
+ struct fcoe_kwqe_init1 init_kwqe1;
+ struct fcoe_kwqe_init2 init_kwqe2;
+ struct fcoe_kwqe_init3 init_kwqe3;
+ struct regpair eq_addr;
+ struct regpair eq_next_page_addr;
+#if defined(__BIG_ENDIAN)
+ u16 sb_num;
+ u16 eq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 eq_prod;
+ u16 sb_num;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 reserved0;
+ u8 sb_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 sb_id;
+ u8 reserved0;
+ u16 reserved1;
+#endif
+};
+
+
+/*
+ * FCoE statistics params buffer passed by driver to FW in FCoE statistics ramrod
+ */
+struct fcoe_stat_ramrod_params {
+ struct fcoe_kwqe_stat stat_kwqe;
+};
+
+
+/*
+ * FCoE 16-bits vlan structure
+ */
+struct fcoe_vlan_fields {
+ u16 fields;
+#define FCOE_VLAN_FIELDS_VID (0xFFF<<0)
+#define FCOE_VLAN_FIELDS_VID_SHIFT 0
+#define FCOE_VLAN_FIELDS_CLI (0x1<<12)
+#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
+#define FCOE_VLAN_FIELDS_PRI (0x7<<13)
+#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+};
+
+
+/*
+ * FCoE 16-bits vlan union
+ */
+union fcoe_vlan_field_union {
+ struct fcoe_vlan_fields fields;
+ u16 val;
+};
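The mask/shift pairs in fcoe_vlan_fields follow the usual 802.1Q split of a 16-bit tag into VID (bits 11:0), CFI (bit 12) and priority (bits 15:13). A hedged illustration of packing a tag with those macros (the helper below is illustrative, not part of the driver):

	static u16 fcoe_pack_vlan(u16 vid, u16 cfi, u16 pri)
	{
		return ((vid << FCOE_VLAN_FIELDS_VID_SHIFT) & FCOE_VLAN_FIELDS_VID) |
		       ((cfi << FCOE_VLAN_FIELDS_CLI_SHIFT) & FCOE_VLAN_FIELDS_CLI) |
		       ((pri << FCOE_VLAN_FIELDS_PRI_SHIFT) & FCOE_VLAN_FIELDS_PRI);
	}

	/* e.g. fcoe_pack_vlan(100, 0, 3) == 0x6064; readable back through
	 * union fcoe_vlan_field_union as either .val or .fields.fields. */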
+
+/*
+ * Parameters used for Class 2 verifications
+ */
+struct ustorm_fcoe_c2_params {
+#if defined(__BIG_ENDIAN)
+ u16 e2e_credit;
+ u16 con_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 con_seq;
+ u16 e2e_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 ackq_prod;
+ u16 open_seq_per_exch;
+#elif defined(__LITTLE_ENDIAN)
+ u16 open_seq_per_exch;
+ u16 ackq_prod;
+#endif
+ struct regpair ackq_pbl_base;
+ struct regpair ackq_cur_seg;
+};
+
+/*
+ * Parameters used for Class 2 verifications
+ */
+struct xstorm_fcoe_c2_params {
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u8 ackq_x_prod;
+ u8 max_conc_seqs_c2;
+#elif defined(__LITTLE_ENDIAN)
+ u8 max_conc_seqs_c2;
+ u8 ackq_x_prod;
+ u16 reserved0;
+#endif
+ struct regpair ackq_pbl_base;
+ struct regpair ackq_cur_seg;
+};
+
+/*
* Buffer per connection, used in Tstorm
*/
struct iscsi_conn_buf {
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 0dbeaec4f03a..e01b49ee3591 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2010 Broadcom Corporation
+ * Copyright (c) 2006-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,22 +12,31 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.2.6"
-#define CNIC_MODULE_RELDATE "Oct 12, 2010"
+#define CNIC_MODULE_VERSION "2.2.13"
+#define CNIC_MODULE_RELDATE "Jan 31, 2011"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
-#define CNIC_ULP_L4 2
-#define MAX_CNIC_ULP_TYPE_EXT 2
-#define MAX_CNIC_ULP_TYPE 3
+#define CNIC_ULP_FCOE 2
+#define CNIC_ULP_L4 3
+#define MAX_CNIC_ULP_TYPE_EXT 3
+#define MAX_CNIC_ULP_TYPE 4
struct kwqe {
u32 kwqe_op_flag;
+#define KWQE_QID_SHIFT 8
#define KWQE_OPCODE_MASK 0x00ff0000
#define KWQE_OPCODE_SHIFT 16
-#define KWQE_FLAGS_LAYER_SHIFT 28
#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
+#define KWQE_LAYER_MASK 0x70000000
+#define KWQE_LAYER_SHIFT 28
+#define KWQE_FLAGS_LAYER_MASK_L2 (2<<28)
+#define KWQE_FLAGS_LAYER_MASK_L3 (3<<28)
+#define KWQE_FLAGS_LAYER_MASK_L4 (4<<28)
+#define KWQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
+#define KWQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
+#define KWQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28)
u32 kwqe_info0;
u32 kwqe_info1;
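The new KWQE_LAYER_* constants split kwqe_op_flag into a layer code in bits 30:28 (FCoE now being layer 7) alongside the existing opcode byte in bits 23:16. As a rough sketch of how a consumer might classify a work-queue entry with these macros (the helper names are made up for illustration):

	static inline int kwqe_is_fcoe(const struct kwqe *wqe)
	{
		return (wqe->kwqe_op_flag & KWQE_LAYER_MASK) ==
		       KWQE_FLAGS_LAYER_MASK_L5_FCOE;
	}

	static inline u8 kwqe_opcode(const struct kwqe *wqe)
	{
		return KWQE_OPCODE(wqe->kwqe_op_flag);
	}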
@@ -62,6 +71,7 @@ struct kcqe {
#define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
+ #define KCQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28)
#define KCQE_FLAGS_NEXT (1<<31)
#define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
#define KCQE_FLAGS_OPCODE_SHIFT (16)
@@ -149,6 +159,9 @@ struct cnic_eth_dev {
u32 drv_state;
#define CNIC_DRV_STATE_REGD 0x00000001
#define CNIC_DRV_STATE_USING_MSIX 0x00000002
+#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
+#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
+#define CNIC_DRV_STATE_NO_FCOE 0x00000010
u32 chip_id;
u32 max_kwqe_pending;
struct pci_dev *pdev;
@@ -166,6 +179,7 @@ struct cnic_eth_dev {
u32 fcoe_init_cid;
u16 iscsi_l2_client_id;
u16 iscsi_l2_cid;
+ u8 iscsi_mac[ETH_ALEN];
int num_irq;
struct cnic_irq irq_arr[MAX_CNIC_VEC];
@@ -301,7 +315,7 @@ struct cnic_ulp_ops {
void (*cm_abort_complete)(struct cnic_sock *);
void (*cm_remote_close)(struct cnic_sock *);
void (*cm_remote_abort)(struct cnic_sock *);
- void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
+ int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
char *data, u16 data_size);
struct module *owner;
atomic_t ref_count;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 81475cc80e1c..80c2feeefec5 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -59,7 +59,6 @@ static struct sockaddr default_mac = {
/* Information that need to be kept for each board. */
struct net_local {
- struct net_device_stats stats;
struct mii_if_info mii_if;
/* Tx control lock. This protects the transmit buffer ring
@@ -1059,7 +1058,7 @@ e100_tx_timeout(struct net_device *dev)
/* remember we got an error */
- np->stats.tx_errors++;
+ dev->stats.tx_errors++;
/* reset the TX DMA in case it has hung on something */
@@ -1157,7 +1156,7 @@ e100rxtx_interrupt(int irq, void *dev_id)
* allocate a new buffer to put a packet in.
*/
e100_rx(dev);
- np->stats.rx_packets++;
+ dev->stats.rx_packets++;
/* restart/continue on the channel, for safety */
*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
/* clear dma channel 1 eop/descr irq bits */
@@ -1173,8 +1172,8 @@ e100rxtx_interrupt(int irq, void *dev_id)
/* Report any packets that have been sent */
while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
(netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
- np->stats.tx_bytes += myFirstTxDesc->skb->len;
- np->stats.tx_packets++;
+ dev->stats.tx_bytes += myFirstTxDesc->skb->len;
+ dev->stats.tx_packets++;
/* dma is ready with the transmission of the data in tx_skb, so now
we can release the skb memory */
@@ -1197,7 +1196,6 @@ static irqreturn_t
e100nw_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- struct net_local *np = netdev_priv(dev);
unsigned long irqbits = *R_IRQ_MASK0_RD;
/* check for underrun irq */
@@ -1205,13 +1203,13 @@ e100nw_interrupt(int irq, void *dev_id)
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
- np->stats.tx_errors++;
+ dev->stats.tx_errors++;
D(printk("ethernet receiver underrun!\n"));
}
/* check for overrun irq */
if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
- update_rx_stats(&np->stats); /* this will ack the irq */
+ update_rx_stats(&dev->stats); /* this will ack the irq */
D(printk("ethernet receiver overrun!\n"));
}
/* check for excessive collision irq */
@@ -1219,7 +1217,7 @@ e100nw_interrupt(int irq, void *dev_id)
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
- np->stats.tx_errors++;
+ dev->stats.tx_errors++;
D(printk("ethernet excessive collisions!\n"));
}
return IRQ_HANDLED;
@@ -1250,7 +1248,7 @@ e100_rx(struct net_device *dev)
spin_unlock(&np->led_lock);
length = myNextRxDesc->descr.hw_len - 4;
- np->stats.rx_bytes += length;
+ dev->stats.rx_bytes += length;
#ifdef ETHDEBUG
printk("Got a packet of length %d:\n", length);
@@ -1268,7 +1266,7 @@ e100_rx(struct net_device *dev)
/* Small packet, copy data */
skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
if (!skb) {
- np->stats.rx_errors++;
+ dev->stats.rx_errors++;
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
goto update_nextrxdesc;
}
@@ -1294,7 +1292,7 @@ e100_rx(struct net_device *dev)
int align;
struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
if (!new_skb) {
- np->stats.rx_errors++;
+ dev->stats.rx_errors++;
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
goto update_nextrxdesc;
}
@@ -1333,8 +1331,6 @@ e100_rx(struct net_device *dev)
static int
e100_close(struct net_device *dev)
{
- struct net_local *np = netdev_priv(dev);
-
printk(KERN_INFO "Closing %s.\n", dev->name);
netif_stop_queue(dev);
@@ -1366,8 +1362,8 @@ e100_close(struct net_device *dev)
/* Update the statistics here. */
- update_rx_stats(&np->stats);
- update_tx_stats(&np->stats);
+ update_rx_stats(&dev->stats);
+ update_tx_stats(&dev->stats);
/* Stop speed/duplex timers */
del_timer(&speed_timer);
@@ -1545,11 +1541,11 @@ e100_get_stats(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
- update_rx_stats(&lp->stats);
- update_tx_stats(&lp->stats);
+ update_rx_stats(&dev->stats);
+ update_tx_stats(&dev->stats);
spin_unlock_irqrestore(&lp->lock, flags);
- return &lp->stats;
+ return &dev->stats;
}
/*
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index d325e01a53e0..537a4b2e2020 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -95,6 +95,9 @@
Dmitry Pervushin : dpervushin@ru.mvista.com
: PNX010X platform support
+ Domenico Andreoli : cavokz@gmail.com
+ : QQ2440 platform support
+
*/
/* Always include 'config.h' first in case the user wants to turn on
@@ -176,6 +179,10 @@ static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
#elif defined(CONFIG_ARCH_IXDP2X01)
static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
+#elif defined(CONFIG_MACH_QQ2440)
+#include <mach/qq2440.h>
+static unsigned int netcard_portlist[] __used __initdata = { QQ2440_CS8900_VIRT_BASE + 0x300, 0 };
+static unsigned int cs8900_irq_map[] = { QQ2440_CS8900_IRQ, 0, 0, 0 };
#elif defined(CONFIG_MACH_MX31ADS)
#include <mach/board-mx31ads.h>
static unsigned int netcard_portlist[] __used __initdata = {
@@ -521,6 +528,10 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
#endif
lp->force = g_cs89x0_media__force;
#endif
+
+#if defined(CONFIG_MACH_QQ2440)
+ lp->force |= FORCE_RJ45 | FORCE_FULL;
+#endif
}
/* Grab the region so we can find another board if autoIRQ fails. */
@@ -943,10 +954,10 @@ skip_this_frame:
static void __init reset_chip(struct net_device *dev)
{
#if !defined(CONFIG_MACH_MX31ADS)
-#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01)
+#if !defined(CS89x0_NONISA_IRQ)
struct net_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
-#endif
+#endif /* CS89x0_NONISA_IRQ */
int reset_start_time;
writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
@@ -954,7 +965,7 @@ static void __init reset_chip(struct net_device *dev)
/* wait 30 ms */
msleep(30);
-#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01)
+#if !defined(CS89x0_NONISA_IRQ)
if (lp->chip_type != CS8900) {
/* Hardware problem requires PNP registers to be reconfigured after a reset */
writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT);
@@ -965,7 +976,7 @@ static void __init reset_chip(struct net_device *dev)
outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT);
outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1);
}
-#endif /* IXDP2x01 */
+#endif /* CS89x0_NONISA_IRQ */
/* Wait until the chip is reset */
reset_start_time = jiffies;
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
index 35cd36729155..2028da95afa1 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -292,7 +292,7 @@ unknown:
*/
static int ael2005_setup_sr_edc(struct cphy *phy)
{
- static struct reg_val regs[] = {
+ static const struct reg_val regs[] = {
{ MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 },
{ MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a },
{ MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 },
@@ -324,11 +324,11 @@ static int ael2005_setup_sr_edc(struct cphy *phy)
static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype)
{
- static struct reg_val regs[] = {
+ static const struct reg_val regs[] = {
{ MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 },
{ 0, 0, 0, 0 }
};
- static struct reg_val preemphasis[] = {
+ static const struct reg_val preemphasis[] = {
{ MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 },
{ MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 },
{ 0, 0, 0, 0 }
@@ -393,7 +393,7 @@ static int ael2005_intr_clear(struct cphy *phy)
static int ael2005_reset(struct cphy *phy, int wait)
{
- static struct reg_val regs0[] = {
+ static const struct reg_val regs0[] = {
{ MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 },
{ MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 },
{ MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 },
@@ -403,7 +403,7 @@ static int ael2005_reset(struct cphy *phy, int wait)
{ MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 },
{ 0, 0, 0, 0 }
};
- static struct reg_val regs1[] = {
+ static const struct reg_val regs1[] = {
{ MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 },
{ MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 },
{ 0, 0, 0, 0 }
@@ -522,7 +522,7 @@ int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
*/
static int ael2020_setup_sr_edc(struct cphy *phy)
{
- static struct reg_val regs[] = {
+ static const struct reg_val regs[] = {
/* set CDR offset to 10 */
{ MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a },
@@ -551,20 +551,20 @@ static int ael2020_setup_sr_edc(struct cphy *phy)
static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype)
{
/* set uC to 40MHz */
- static struct reg_val uCclock40MHz[] = {
+ static const struct reg_val uCclock40MHz[] = {
{ MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 },
{ MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 },
{ 0, 0, 0, 0 }
};
/* activate uC clock */
- static struct reg_val uCclockActivate[] = {
+ static const struct reg_val uCclockActivate[] = {
{ MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 },
{ 0, 0, 0, 0 }
};
/* set PC to start of SRAM and activate uC */
- static struct reg_val uCactivate[] = {
+ static const struct reg_val uCactivate[] = {
{ MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 },
{ MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 },
{ 0, 0, 0, 0 }
@@ -624,7 +624,7 @@ static int ael2020_get_module_type(struct cphy *phy, int delay_ms)
*/
static int ael2020_intr_enable(struct cphy *phy)
{
- struct reg_val regs[] = {
+ static const struct reg_val regs[] = {
/* output Module's Loss Of Signal (LOS) to LED */
{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT,
0xffff, 0x4 },
@@ -664,7 +664,7 @@ static int ael2020_intr_enable(struct cphy *phy)
*/
static int ael2020_intr_disable(struct cphy *phy)
{
- struct reg_val regs[] = {
+ static const struct reg_val regs[] = {
/* reset "link status" LED to "off" */
{ MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) },
@@ -701,7 +701,7 @@ static int ael2020_intr_clear(struct cphy *phy)
return err ? err : t3_phy_lasi_intr_clear(phy);
}
-static struct reg_val ael2020_reset_regs[] = {
+static const struct reg_val ael2020_reset_regs[] = {
/* Erratum #2: CDRLOL asserted, causing PMA link down status */
{ MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 },
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 046d846c652d..4d538a4e9d55 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1359,6 +1359,7 @@ out:
static int offload_close(struct t3cdev *tdev)
{
struct adapter *adapter = tdev2adap(tdev);
+ struct t3c_data *td = T3C_DATA(tdev);
if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
return 0;
@@ -1369,7 +1370,7 @@ static int offload_close(struct t3cdev *tdev)
sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
/* Flush work scheduled while releasing TIDs */
- flush_scheduled_work();
+ flush_work_sync(&td->tid_release_task);
tdev->lldev = NULL;
cxgb3_set_dummy_ops(tdev);
@@ -3006,12 +3007,11 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct adapter *adapter = pci_get_drvdata(pdev);
- int ret;
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- ret = t3_adapter_error(adapter, 0, 0);
+ t3_adapter_error(adapter, 0, 0);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index bcf07532953d..862804f32b6e 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -186,9 +186,10 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
dev = NULL;
if (grp)
dev = vlan_group_get_device(grp, vlan);
- } else
+ } else if (netif_is_bond_slave(dev)) {
while (dev->master)
dev = dev->master;
+ }
return dev;
}
}
@@ -967,8 +968,6 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
cxgb_neigh_update((struct neighbour *)ctx);
break;
}
- case (NETEVENT_PMTU_UPDATE):
- break;
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
cxgb_redirect(nr->old, nr->new);
@@ -1164,12 +1163,10 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
*/
void *cxgb_alloc_mem(unsigned long size)
{
- void *p = kmalloc(size, GFP_KERNEL);
+ void *p = kzalloc(size, GFP_KERNEL);
if (!p)
- p = vmalloc(size);
- if (p)
- memset(p, 0, size);
+ p = vzalloc(size);
return p;
}
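cxgb_alloc_mem() (and t4_alloc_mem() later in this series) now uses the zeroing allocators directly: try a physically contiguous kzalloc() first and fall back to vzalloc() for large or fragmented requests. The matching free path, not shown in this hunk, conventionally distinguishes the two backings; a hedged sketch of what that looks like:

	void cxgb_free_mem(void *addr)
	{
		if (is_vmalloc_addr(addr))	/* came from vzalloc() */
			vfree(addr);
		else
			kfree(addr);
	}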
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
index a8766fb2f9ab..e13b7fe9d082 100644
--- a/drivers/net/cxgb3/mc5.c
+++ b/drivers/net/cxgb3/mc5.c
@@ -318,7 +318,7 @@ static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
/*
* Initialization that requires the OS and protocol layers to already
- * be intialized goes here.
+ * be initialized goes here.
*/
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
unsigned int nroutes)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 3a6adf0b3e9d..d55db6b38e7b 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -607,7 +607,7 @@ struct t3_vpd {
*
* Read a 32-bit word from a location in VPD EEPROM using the card's PCI
* VPD ROM capability. A zero is written to the flag bit when the
- * addres is written to the control register. The hardware device will
+ * address is written to the control register. The hardware device will
* set the flag to 1 when 4 bytes have been read into the data register.
*/
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
@@ -1562,7 +1562,7 @@ static void tp_intr_handler(struct adapter *adapter)
{0}
};
- static struct intr_info tp_intr_info_t3c[] = {
+ static const struct intr_info tp_intr_info_t3c[] = {
{0x1fffffff, "TP parity error", -1, 1},
{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 3d4253d311eb..01d49eaa44d2 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -482,11 +482,9 @@ struct adapter {
void __iomem *regs;
struct pci_dev *pdev;
struct device *pdev_dev;
- unsigned long registered_device_map;
unsigned int fn;
unsigned int flags;
- const char *name;
int msg_enable;
struct adapter_params params;
@@ -497,7 +495,7 @@ struct adapter {
struct {
unsigned short vec;
- char desc[14];
+ char desc[IFNAMSIZ + 10];
} msix_info[MAX_INGQ + 1];
struct sge sge;
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index f50bc98310f8..5352c8a23f4d 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -522,39 +522,33 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie)
*/
static void name_msix_vecs(struct adapter *adap)
{
- int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;
+ int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
/* non-data interrupts */
- snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
- adap->msix_info[0].desc[n] = 0;
+ snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
/* FW events */
- snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
- adap->msix_info[1].desc[n] = 0;
+ snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
+ adap->port[0]->name);
/* Ethernet queues */
for_each_port(adap, j) {
struct net_device *d = adap->port[j];
const struct port_info *pi = netdev_priv(d);
- for (i = 0; i < pi->nqsets; i++, msi_idx++) {
+ for (i = 0; i < pi->nqsets; i++, msi_idx++)
snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
d->name, i);
- adap->msix_info[msi_idx].desc[n] = 0;
- }
}
/* offload queues */
- for_each_ofldrxq(&adap->sge, i) {
- snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
- adap->name, i);
- adap->msix_info[msi_idx++].desc[n] = 0;
- }
- for_each_rdmarxq(&adap->sge, i) {
- snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
- adap->name, i);
- adap->msix_info[msi_idx++].desc[n] = 0;
- }
+ for_each_ofldrxq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
+ adap->port[0]->name, i);
+
+ for_each_rdmarxq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
+ adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
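The simplification above relies on snprintf() guaranteeing NUL termination within the size it is given, so passing the full sizeof(desc) removes the need for the manual desc[n] = 0 stores. A tiny illustration (buffer contents and values are made up):

	char desc[IFNAMSIZ + 10];

	/* Writes at most sizeof(desc) - 1 characters and always terminates,
	 * even if the formatted name would overflow the buffer. */
	snprintf(desc, sizeof(desc), "%s-Rx%d", "eth3", 2);	/* "eth3-Rx2" */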
@@ -868,12 +862,10 @@ out: release_firmware(fw);
*/
void *t4_alloc_mem(size_t size)
{
- void *p = kmalloc(size, GFP_KERNEL);
+ void *p = kzalloc(size, GFP_KERNEL);
if (!p)
- p = vmalloc(size);
- if (p)
- memset(p, 0, size);
+ p = vzalloc(size);
return p;
}
@@ -1377,7 +1369,12 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
} else if (type == FW_PORT_TYPE_KR)
v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
else if (type == FW_PORT_TYPE_BP_AP)
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC;
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+ else if (type == FW_PORT_TYPE_BP4_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+ SUPPORTED_10000baseKX4_Full;
else if (type == FW_PORT_TYPE_FIBER_XFI ||
type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
v |= SUPPORTED_FIBRE;
@@ -2474,7 +2471,6 @@ static int netevent_cb(struct notifier_block *nb, unsigned long event,
case NETEVENT_NEIGH_UPDATE:
check_neigh_update(data);
break;
- case NETEVENT_PMTU_UPDATE:
case NETEVENT_REDIRECT:
default:
break;
@@ -2668,7 +2664,7 @@ static int cxgb_up(struct adapter *adap)
} else {
err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
(adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
- adap->name, adap);
+ adap->port[0]->name, adap);
if (err)
goto irq_err;
}
@@ -2713,16 +2709,14 @@ static int cxgb_open(struct net_device *dev)
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
+ netif_carrier_off(dev);
+
if (!(adapter->flags & FULL_INIT_DONE)) {
err = cxgb_up(adapter);
if (err < 0)
return err;
}
- netif_set_real_num_tx_queues(dev, pi->nqsets);
- err = netif_set_real_num_rx_queues(dev, pi->nqsets);
- if (err)
- return err;
err = link_start(dev);
if (!err)
netif_tx_start_all_queues(dev);
@@ -3491,49 +3485,53 @@ static int __devinit init_rss(struct adapter *adap)
return 0;
}
-static void __devinit print_port_info(struct adapter *adap)
+static void __devinit print_port_info(const struct net_device *dev)
{
static const char *base[] = {
"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
- "KX", "KR", "KR SFP+", "KR FEC"
+ "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
};
- int i;
char buf[80];
+ char *bufp = buf;
const char *spd = "";
+ const struct port_info *pi = netdev_priv(dev);
+ const struct adapter *adap = pi->adapter;
if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
spd = " 2.5 GT/s";
else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
spd = " 5 GT/s";
- for_each_port(adap, i) {
- struct net_device *dev = adap->port[i];
- const struct port_info *pi = netdev_priv(dev);
- char *bufp = buf;
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
+ bufp += sprintf(bufp, "100/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+ bufp += sprintf(bufp, "1000/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+ bufp += sprintf(bufp, "10G/");
+ if (bufp != buf)
+ --bufp;
+ sprintf(bufp, "BASE-%s", base[pi->port_type]);
- if (!test_bit(i, &adap->registered_device_map))
- continue;
+ netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
+ adap->params.vpd.id, adap->params.rev, buf,
+ is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
+ (adap->flags & USING_MSIX) ? " MSI-X" :
+ (adap->flags & USING_MSI) ? " MSI" : "");
+ netdev_info(dev, "S/N: %s, E/C: %s\n",
+ adap->params.vpd.sn, adap->params.vpd.ec);
+}
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
- bufp += sprintf(bufp, "100/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
- bufp += sprintf(bufp, "1000/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
- bufp += sprintf(bufp, "10G/");
- if (bufp != buf)
- --bufp;
- sprintf(bufp, "BASE-%s", base[pi->port_type]);
+static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
+{
+ u16 v;
+ int pos;
- netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
- adap->params.vpd.id, adap->params.rev,
- buf, is_offload(adap) ? "R" : "",
- adap->params.pci.width, spd,
- (adap->flags & USING_MSIX) ? " MSI-X" :
- (adap->flags & USING_MSI) ? " MSI" : "");
- if (adap->name == dev->name)
- netdev_info(dev, "S/N: %s, E/C: %s\n",
- adap->params.vpd.sn, adap->params.vpd.ec);
+ pos = pci_pcie_cap(dev);
+ if (pos > 0) {
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
+ v |= PCI_EXP_DEVCTL_RELAX_EN;
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
}
}
@@ -3611,6 +3609,7 @@ static int __devinit init_one(struct pci_dev *pdev,
}
pci_enable_pcie_error_reporting(pdev);
+ enable_pcie_relaxed_ordering(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
@@ -3630,7 +3629,6 @@ static int __devinit init_one(struct pci_dev *pdev,
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
adapter->fn = func;
- adapter->name = pci_name(pdev);
adapter->msg_enable = dflt_msg_enable;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
@@ -3664,7 +3662,6 @@ static int __devinit init_one(struct pci_dev *pdev,
pi->xact_addr_filt = -1;
pi->rx_offload = RX_CSO;
pi->port_id = i;
- netif_carrier_off(netdev);
netdev->irq = pdev->irq;
netdev->features |= NETIF_F_SG | TSO_FLAGS;
@@ -3721,27 +3718,24 @@ static int __devinit init_one(struct pci_dev *pdev,
* register at least one net device.
*/
for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
+ netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
+
err = register_netdev(adapter->port[i]);
if (err)
- dev_warn(&pdev->dev,
- "cannot register net device %s, skipping\n",
- adapter->port[i]->name);
- else {
- /*
- * Change the name we use for messages to the name of
- * the first successfully registered interface.
- */
- if (!adapter->registered_device_map)
- adapter->name = adapter->port[i]->name;
-
- __set_bit(i, &adapter->registered_device_map);
- adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
- }
+ break;
+ adapter->chan_map[pi->tx_chan] = i;
+ print_port_info(adapter->port[i]);
}
- if (!adapter->registered_device_map) {
+ if (i == 0) {
dev_err(&pdev->dev, "could not register any net devices\n");
goto out_free_dev;
}
+ if (err) {
+ dev_warn(&pdev->dev, "only %d net devices registered\n", i);
+ err = 0;
+ }
if (cxgb4_debugfs_root) {
adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
@@ -3752,8 +3746,6 @@ static int __devinit init_one(struct pci_dev *pdev,
if (is_offload(adapter))
attach_ulds(adapter);
- print_port_info(adapter);
-
sriov:
#ifdef CONFIG_PCI_IOV
if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
@@ -3792,7 +3784,7 @@ static void __devexit remove_one(struct pci_dev *pdev)
detach_ulds(adapter);
for_each_port(adapter, i)
- if (test_bit(i, &adapter->registered_device_map))
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
unregister_netdev(adapter->port[i]);
if (adapter->debugfs_root)
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 17022258ed68..311471b439a8 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -579,6 +579,7 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
* @phys: the physical address of the allocated ring
* @metadata: address of the array holding the SW state for the ring
* @stat_size: extra space in HW ring for status information
+ * @node: preferred node for memory allocations
*
* Allocates resources for an SGE descriptor ring, such as Tx queues,
* free buffer lists, or response queues. Each SGE ring requires
@@ -590,7 +591,7 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
*/
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
size_t sw_size, dma_addr_t *phys, void *metadata,
- size_t stat_size)
+ size_t stat_size, int node)
{
size_t len = nelem * elem_size + stat_size;
void *s = NULL;
@@ -599,7 +600,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
if (!p)
return NULL;
if (sw_size) {
- s = kcalloc(nelem, sw_size, GFP_KERNEL);
+ s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
if (!s) {
dma_free_coherent(dev, len, p, *phys);
@@ -1982,7 +1983,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->size = roundup(iq->size, 16);
iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
- &iq->phys_addr, NULL, 0);
+ &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
if (!iq->desc)
return -ENOMEM;
@@ -2008,12 +2009,14 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->size = roundup(fl->size, 8);
fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
sizeof(struct rx_sw_desc), &fl->addr,
- &fl->sdesc, STAT_LEN);
+ &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
if (!fl->desc)
goto fl_nomem;
flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
+ FW_IQ_CMD_FL0FETCHRO(1) |
+ FW_IQ_CMD_FL0DATARO(1) |
FW_IQ_CMD_FL0PADEN);
c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
FW_IQ_CMD_FL0FBMAX(3));
@@ -2093,7 +2096,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+ &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+ netdev_queue_numa_node_read(netdevq));
if (!txq->q.desc)
return -ENOMEM;
@@ -2106,6 +2110,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_ETH_CMD_FETCHRO(1) |
FW_EQ_ETH_CMD_IQID(iqid));
c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
FW_EQ_ETH_CMD_FBMAX(3) |
@@ -2144,7 +2149,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
sizeof(struct tx_desc), 0, &txq->q.phys_addr,
- NULL, 0);
+ NULL, 0, NUMA_NO_NODE);
if (!txq->q.desc)
return -ENOMEM;
@@ -2158,6 +2163,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
c.physeqid_pkd = htonl(0);
c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_CTRL_CMD_FETCHRO |
FW_EQ_CTRL_CMD_IQID(iqid));
c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
FW_EQ_CTRL_CMD_FBMAX(3) |
@@ -2194,7 +2200,8 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+ &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+ NUMA_NO_NODE);
if (!txq->q.desc)
return -ENOMEM;
@@ -2207,6 +2214,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
+ FW_EQ_OFLD_CMD_FETCHRO(1) |
FW_EQ_OFLD_CMD_IQID(iqid));
c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
FW_EQ_OFLD_CMD_FBMAX(3) |
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index bb813d94aea8..b9fd8a6f2cc4 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -183,7 +183,7 @@ static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
void *rpl, bool sleep_ok)
{
- static int delay[] = {
+ static const int delay[] = {
1, 1, 3, 5, 10, 10, 20, 50, 100, 200
};
@@ -330,18 +330,6 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
return 0;
}
-/*
- * Partial EEPROM Vital Product Data structure. Includes only the ID and
- * VPD-R header.
- */
-struct t4_vpd_hdr {
- u8 id_tag;
- u8 id_len[2];
- u8 id_data[ID_LEN];
- u8 vpdr_tag;
- u8 vpdr_len[2];
-};
-
#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0
#define VPD_LEN 512
@@ -370,25 +358,38 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
int i, ret;
- int ec, sn, v2;
+ int ec, sn;
u8 vpd[VPD_LEN], csum;
- unsigned int vpdr_len;
- const struct t4_vpd_hdr *v;
+ unsigned int vpdr_len, kw_offset, id_len;
ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
if (ret < 0)
return ret;
- v = (const struct t4_vpd_hdr *)vpd;
- vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
- if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
+ if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
+ dev_err(adapter->pdev_dev, "missing VPD ID string\n");
+ return -EINVAL;
+ }
+
+ id_len = pci_vpd_lrdt_size(vpd);
+ if (id_len > ID_LEN)
+ id_len = ID_LEN;
+
+ i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+ if (i < 0) {
+ dev_err(adapter->pdev_dev, "missing VPD-R section\n");
+ return -EINVAL;
+ }
+
+ vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
+ kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
+ if (vpdr_len + kw_offset > VPD_LEN) {
dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
return -EINVAL;
}
#define FIND_VPD_KW(var, name) do { \
- var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
- vpdr_len, name); \
+ var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
if (var < 0) { \
dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
return -EINVAL; \
@@ -408,11 +409,9 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
FIND_VPD_KW(ec, "EC");
FIND_VPD_KW(sn, "SN");
- FIND_VPD_KW(v2, "V2");
#undef FIND_VPD_KW
- p->cclk = simple_strtoul(vpd + v2, NULL, 10);
- memcpy(p->id, v->id_data, ID_LEN);
+ memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
strim(p->id);
memcpy(p->ec, vpd + ec, EC_LEN);
strim(p->ec);
@@ -919,7 +918,7 @@ static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
*/
static void pcie_intr_handler(struct adapter *adapter)
{
- static struct intr_info sysbus_intr_info[] = {
+ static const struct intr_info sysbus_intr_info[] = {
{ RNPP, "RXNP array parity error", -1, 1 },
{ RPCP, "RXPC array parity error", -1, 1 },
{ RCIP, "RXCIF array parity error", -1, 1 },
@@ -927,7 +926,7 @@ static void pcie_intr_handler(struct adapter *adapter)
{ RFTP, "RXFT array parity error", -1, 1 },
{ 0 }
};
- static struct intr_info pcie_port_intr_info[] = {
+ static const struct intr_info pcie_port_intr_info[] = {
{ TPCP, "TXPC array parity error", -1, 1 },
{ TNPP, "TXNP array parity error", -1, 1 },
{ TFTP, "TXFT array parity error", -1, 1 },
@@ -939,7 +938,7 @@ static void pcie_intr_handler(struct adapter *adapter)
{ TDUE, "Tx uncorrectable data error", -1, 1 },
{ 0 }
};
- static struct intr_info pcie_intr_info[] = {
+ static const struct intr_info pcie_intr_info[] = {
{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
{ MSIDATAPERR, "MSI data parity error", -1, 1 },
@@ -991,7 +990,7 @@ static void pcie_intr_handler(struct adapter *adapter)
*/
static void tp_intr_handler(struct adapter *adapter)
{
- static struct intr_info tp_intr_info[] = {
+ static const struct intr_info tp_intr_info[] = {
{ 0x3fffffff, "TP parity error", -1, 1 },
{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
{ 0 }
@@ -1008,7 +1007,7 @@ static void sge_intr_handler(struct adapter *adapter)
{
u64 v;
- static struct intr_info sge_intr_info[] = {
+ static const struct intr_info sge_intr_info[] = {
{ ERR_CPL_EXCEED_IQE_SIZE,
"SGE received CPL exceeding IQE size", -1, 1 },
{ ERR_INVALID_CIDX_INC,
@@ -1053,7 +1052,7 @@ static void sge_intr_handler(struct adapter *adapter)
*/
static void cim_intr_handler(struct adapter *adapter)
{
- static struct intr_info cim_intr_info[] = {
+ static const struct intr_info cim_intr_info[] = {
{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
@@ -1063,7 +1062,7 @@ static void cim_intr_handler(struct adapter *adapter)
{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
{ 0 }
};
- static struct intr_info cim_upintr_info[] = {
+ static const struct intr_info cim_upintr_info[] = {
{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
{ ILLWRINT, "CIM illegal write", -1, 1 },
@@ -1110,7 +1109,7 @@ static void cim_intr_handler(struct adapter *adapter)
*/
static void ulprx_intr_handler(struct adapter *adapter)
{
- static struct intr_info ulprx_intr_info[] = {
+ static const struct intr_info ulprx_intr_info[] = {
{ 0x1800000, "ULPRX context error", -1, 1 },
{ 0x7fffff, "ULPRX parity error", -1, 1 },
{ 0 }
@@ -1125,7 +1124,7 @@ static void ulprx_intr_handler(struct adapter *adapter)
*/
static void ulptx_intr_handler(struct adapter *adapter)
{
- static struct intr_info ulptx_intr_info[] = {
+ static const struct intr_info ulptx_intr_info[] = {
{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
0 },
{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
@@ -1147,7 +1146,7 @@ static void ulptx_intr_handler(struct adapter *adapter)
*/
static void pmtx_intr_handler(struct adapter *adapter)
{
- static struct intr_info pmtx_intr_info[] = {
+ static const struct intr_info pmtx_intr_info[] = {
{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
@@ -1169,7 +1168,7 @@ static void pmtx_intr_handler(struct adapter *adapter)
*/
static void pmrx_intr_handler(struct adapter *adapter)
{
- static struct intr_info pmrx_intr_info[] = {
+ static const struct intr_info pmrx_intr_info[] = {
{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
@@ -1188,7 +1187,7 @@ static void pmrx_intr_handler(struct adapter *adapter)
*/
static void cplsw_intr_handler(struct adapter *adapter)
{
- static struct intr_info cplsw_intr_info[] = {
+ static const struct intr_info cplsw_intr_info[] = {
{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
@@ -1207,7 +1206,7 @@ static void cplsw_intr_handler(struct adapter *adapter)
*/
static void le_intr_handler(struct adapter *adap)
{
- static struct intr_info le_intr_info[] = {
+ static const struct intr_info le_intr_info[] = {
{ LIPMISS, "LE LIP miss", -1, 0 },
{ LIP0, "LE 0 LIP error", -1, 0 },
{ PARITYERR, "LE parity error", -1, 1 },
@@ -1225,11 +1224,11 @@ static void le_intr_handler(struct adapter *adap)
*/
static void mps_intr_handler(struct adapter *adapter)
{
- static struct intr_info mps_rx_intr_info[] = {
+ static const struct intr_info mps_rx_intr_info[] = {
{ 0xffffff, "MPS Rx parity error", -1, 1 },
{ 0 }
};
- static struct intr_info mps_tx_intr_info[] = {
+ static const struct intr_info mps_tx_intr_info[] = {
{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
@@ -1239,25 +1238,25 @@ static void mps_intr_handler(struct adapter *adapter)
{ FRMERR, "MPS Tx framing error", -1, 1 },
{ 0 }
};
- static struct intr_info mps_trc_intr_info[] = {
+ static const struct intr_info mps_trc_intr_info[] = {
{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
{ 0 }
};
- static struct intr_info mps_stat_sram_intr_info[] = {
+ static const struct intr_info mps_stat_sram_intr_info[] = {
{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
{ 0 }
};
- static struct intr_info mps_stat_tx_intr_info[] = {
+ static const struct intr_info mps_stat_tx_intr_info[] = {
{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
{ 0 }
};
- static struct intr_info mps_stat_rx_intr_info[] = {
+ static const struct intr_info mps_stat_rx_intr_info[] = {
{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
{ 0 }
};
- static struct intr_info mps_cls_intr_info[] = {
+ static const struct intr_info mps_cls_intr_info[] = {
{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
@@ -1356,7 +1355,7 @@ static void ma_intr_handler(struct adapter *adap)
*/
static void smb_intr_handler(struct adapter *adap)
{
- static struct intr_info smb_intr_info[] = {
+ static const struct intr_info smb_intr_info[] = {
{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
@@ -1372,7 +1371,7 @@ static void smb_intr_handler(struct adapter *adap)
*/
static void ncsi_intr_handler(struct adapter *adap)
{
- static struct intr_info ncsi_intr_info[] = {
+ static const struct intr_info ncsi_intr_info[] = {
{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
@@ -1410,7 +1409,7 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
*/
static void pl_intr_handler(struct adapter *adap)
{
- static struct intr_info pl_intr_info[] = {
+ static const struct intr_info pl_intr_info[] = {
{ FATALPERR, "T4 fatal parity error", -1, 1 },
{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
{ 0 }
@@ -2408,7 +2407,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
if (index < NEXACT_MAC)
ret++;
else if (hash)
- *hash |= (1 << hash_mac_addr(addr[i]));
+ *hash |= (1ULL << hash_mac_addr(addr[i]));
}
return ret;
}
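The 1 -> 1ULL change above matters because hash_mac_addr() can return a bit index of 32 or more: shifting a plain int by that much is undefined, and even where it appears to work the result is truncated to 32 bits before being OR-ed into the 64-bit hash. Reduced to its essence (values are hypothetical):

	u64 hash = 0;
	unsigned int idx = 40;		/* any index >= 32 shows the problem */

	/* hash |= 1 << idx;	   undefined: int shifted by >= its width */
	hash |= 1ULL << idx;		/* sets bit 40 as intended: 0x0000010000000000 */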
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index a550d0c706f3..eb71b8250b91 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -123,6 +123,7 @@ enum {
ULP_MODE_NONE = 0,
ULP_MODE_ISCSI = 2,
ULP_MODE_RDMA = 4,
+ ULP_MODE_TCPDDP = 5,
ULP_MODE_FCOE = 6,
};
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 940584a8a640..edcfd7ec7802 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -1239,6 +1239,7 @@ enum fw_port_type {
FW_PORT_TYPE_KR,
FW_PORT_TYPE_SFP,
FW_PORT_TYPE_BP_AP,
+ FW_PORT_TYPE_BP4_AP,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
};
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
index 8ea01962e045..4766b4116b41 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -60,7 +60,7 @@ enum {
* MSI-X interrupt index usage.
*/
MSIX_FW = 0, /* MSI-X index for firmware Q */
- MSIX_NIQFLINT = 1, /* MSI-X index base for Ingress Qs */
+ MSIX_IQFLINT = 1, /* MSI-X index base for Ingress Qs */
MSIX_EXTRAS = 1,
MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index d887a76cd39d..6aad64df4dcb 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -280,9 +280,7 @@ static void name_msix_vecs(struct adapter *adapter)
const struct port_info *pi = netdev_priv(dev);
int qs, msi;
- for (qs = 0, msi = MSIX_NIQFLINT;
- qs < pi->nqsets;
- qs++, msi++) {
+ for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
snprintf(adapter->msix_info[msi].desc, namelen,
"%s-%d", dev->name, qs);
adapter->msix_info[msi].desc[namelen] = 0;
@@ -309,7 +307,7 @@ static int request_msix_queue_irqs(struct adapter *adapter)
/*
* Ethernet queues.
*/
- msi = MSIX_NIQFLINT;
+ msi = MSIX_IQFLINT;
for_each_ethrxq(s, rxq) {
err = request_irq(adapter->msix_info[msi].vec,
t4vf_sge_intr_msix, 0,
@@ -337,7 +335,7 @@ static void free_msix_queue_irqs(struct adapter *adapter)
int rxq, msi;
free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
- msi = MSIX_NIQFLINT;
+ msi = MSIX_IQFLINT;
for_each_ethrxq(s, rxq)
free_irq(adapter->msix_info[msi++].vec,
&s->ethrxq[rxq].rspq);
@@ -527,7 +525,7 @@ static int setup_sge_queues(struct adapter *adapter)
* brought up at which point lots of things get nailed down
* permanently ...
*/
- msix = MSIX_NIQFLINT;
+ msix = MSIX_IQFLINT;
for_each_port(adapter, pidx) {
struct net_device *dev = adapter->port[pidx];
struct port_info *pi = netdev_priv(dev);
@@ -751,13 +749,19 @@ static int cxgb4vf_open(struct net_device *dev)
netif_set_real_num_tx_queues(dev, pi->nqsets);
err = netif_set_real_num_rx_queues(dev, pi->nqsets);
if (err)
- return err;
- set_bit(pi->port_id, &adapter->open_device_map);
+ goto err_unwind;
err = link_start(dev);
if (err)
- return err;
+ goto err_unwind;
+
netif_tx_start_all_queues(dev);
+ set_bit(pi->port_id, &adapter->open_device_map);
return 0;
+
+err_unwind:
+ if (adapter->open_device_map == 0)
+ adapter_down(adapter);
+ return err;
}
/*
@@ -766,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev)
*/
static int cxgb4vf_stop(struct net_device *dev)
{
- int ret;
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- ret = t4vf_enable_vi(adapter, pi->viid, false, false);
+ t4vf_enable_vi(adapter, pi->viid, false, false);
pi->link_cfg.link_ok = 0;
clear_bit(pi->port_id, &adapter->open_device_map);
@@ -1365,6 +1368,8 @@ struct queue_port_stats {
u64 rx_csum;
u64 vlan_ex;
u64 vlan_ins;
+ u64 lro_pkts;
+ u64 lro_merged;
};
/*
@@ -1402,6 +1407,8 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
"RxCsumGood ",
"VLANextractions ",
"VLANinsertions ",
+ "GROPackets ",
+ "GROMerged ",
};
/*
@@ -1451,6 +1458,8 @@ static void collect_sge_port_stats(const struct adapter *adapter,
stats->rx_csum += rxq->stats.rx_cso;
stats->vlan_ex += rxq->stats.vlan_ex;
stats->vlan_ins += txq->vlan_ins;
+ stats->lro_pkts += rxq->stats.lro_pkts;
+ stats->lro_merged += rxq->stats.lro_merged;
}
}
@@ -1547,14 +1556,19 @@ static void cxgb4vf_get_wol(struct net_device *dev,
}
/*
+ * TCP Segmentation Offload flags which we support.
+ */
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+/*
* Set TCP Segmentation Offloading feature capabilities.
*/
static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
{
if (tso)
- dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+ dev->features |= TSO_FLAGS;
else
- dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ dev->features &= ~TSO_FLAGS;
return 0;
}
@@ -2026,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
{
int i;
- BUG_ON(adapter->debugfs_root == NULL);
+ BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
/*
* Debugfs support is best effort.
@@ -2045,9 +2059,9 @@ static int __devinit setup_debugfs(struct adapter *adapter)
* Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
* it to our caller to tear down the directory (debugfs_root).
*/
-static void __devexit cleanup_debugfs(struct adapter *adapter)
+static void cleanup_debugfs(struct adapter *adapter)
{
- BUG_ON(adapter->debugfs_root == NULL);
+ BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
/*
* Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2063,7 +2077,7 @@ static void __devexit cleanup_debugfs(struct adapter *adapter)
* adapter parameters we're going to be using and initialize basic adapter
* hardware support.
*/
-static int adap_init0(struct adapter *adapter)
+static int __devinit adap_init0(struct adapter *adapter)
{
struct vf_resources *vfres = &adapter->params.vfres;
struct sge_params *sge_params = &adapter->params.sge;
@@ -2269,6 +2283,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
int q10g, n10g, qidx, pidx, qs;
+ size_t iqe_size;
/*
* We should not be called till we know how many Queue Sets we can
@@ -2313,6 +2328,13 @@ static void __devinit cfg_queues(struct adapter *adapter)
s->ethqsets = qidx;
/*
+ * The Ingress Queue Entry Size for our various Response Queues needs
+ * to be big enough to accommodate the largest message we can receive
+ * from the chip/firmware; which is 64 bytes ...
+ */
+ iqe_size = 64;
+
+ /*
* Set up default Queue Set parameters ... Start off with the
* shortest interrupt holdoff timer.
*/
@@ -2320,7 +2342,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
struct sge_eth_rxq *rxq = &s->ethrxq[qs];
struct sge_eth_txq *txq = &s->ethtxq[qs];
- init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES);
+ init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
rxq->fl.size = 72;
txq->q.size = 1024;
}
@@ -2329,8 +2351,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
* The firmware event queue is used for link state changes and
* notifications of TX DMA completions.
*/
- init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512,
- L1_CACHE_BYTES);
+ init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
/*
* The forwarded interrupt queue is used when we're in MSI interrupt
@@ -2346,7 +2367,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
* any time ...
*/
init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
- L1_CACHE_BYTES);
+ iqe_size);
}
/*
@@ -2468,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
struct net_device *netdev;
/*
- * Vet our module parameters.
- */
- if (msi != MSI_MSIX && msi != MSI_MSI) {
- dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
- " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
- MSI_MSI);
- err = -EINVAL;
- goto err_out;
- }
-
- /*
* Print our driver banner the first time we're called to initialize a
* device.
*/
@@ -2487,7 +2497,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
version_printed = 1;
}
-
/*
* Initialize generic PCI device state.
*/
@@ -2624,7 +2633,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
netif_carrier_off(netdev);
netdev->irq = pdev->irq;
- netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ netdev->features = (NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
NETIF_F_GRO);
@@ -2691,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
/*
* Set up our debugfs entries.
*/
- if (cxgb4vf_debugfs_root) {
+ if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
adapter->debugfs_root =
debugfs_create_dir(pci_name(pdev),
cxgb4vf_debugfs_root);
- if (adapter->debugfs_root == NULL)
+ if (IS_ERR_OR_NULL(adapter->debugfs_root))
dev_warn(&pdev->dev, "could not create debugfs"
" directory");
else
@@ -2750,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
*/
err_free_debugfs:
- if (adapter->debugfs_root) {
+ if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
cleanup_debugfs(adapter);
debugfs_remove_recursive(adapter->debugfs_root);
}
@@ -2782,7 +2791,6 @@ err_release_regions:
err_disable_device:
pci_disable_device(pdev);
-err_out:
return err;
}
@@ -2820,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
/*
* Tear down our debugfs entries.
*/
- if (adapter->debugfs_root) {
+ if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
cleanup_debugfs(adapter);
debugfs_remove_recursive(adapter->debugfs_root);
}
@@ -2854,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
}
/*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+ struct adapter *adapter;
+ int pidx;
+
+ adapter = pci_get_drvdata(pdev);
+ if (!adapter)
+ return;
+
+ /*
+ * Disable all Virtual Interfaces. This will shut down the
+ * delivery of all ingress packets into the chip for these
+ * Virtual Interfaces.
+ */
+ for_each_port(adapter, pidx) {
+ struct net_device *netdev;
+ struct port_info *pi;
+
+ if (!test_bit(pidx, &adapter->registered_device_map))
+ continue;
+
+ netdev = adapter->port[pidx];
+ if (!netdev)
+ continue;
+
+ pi = netdev_priv(netdev);
+ t4vf_enable_vi(adapter, pi->viid, false, false);
+ }
+
+ /*
+ * Free up all Queues which will prevent further DMA and
+ * Interrupts allowing various internal pathways to drain.
+ */
+ t4vf_free_sge_resources(adapter);
+}
+
+/*
* PCI Device registration data structures.
*/
#define CH_DEVICE(devid, idx) \
@@ -2886,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = {
.id_table = cxgb4vf_pci_tbl,
.probe = cxgb4vf_pci_probe,
.remove = __devexit_p(cxgb4vf_pci_remove),
+ .shutdown = __devexit_p(cxgb4vf_pci_shutdown),
};
/*
@@ -2895,14 +2944,25 @@ static int __init cxgb4vf_module_init(void)
{
int ret;
+ /*
+ * Vet our module parameters.
+ */
+ if (msi != MSI_MSIX && msi != MSI_MSI) {
+ printk(KERN_WARNING KBUILD_MODNAME
+ ": bad module parameter msi=%d; must be %d"
+ " (MSI-X or MSI) or %d (MSI)\n",
+ msi, MSI_MSIX, MSI_MSI);
+ return -EINVAL;
+ }
+
/* Debugfs support is optional, just warn if this fails */
cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!cxgb4vf_debugfs_root)
+ if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
printk(KERN_WARNING KBUILD_MODNAME ": could not create"
" debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4vf_driver);
- if (ret < 0)
+ if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
debugfs_remove(cxgb4vf_debugfs_root);
return ret;
}
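The debugfs checks in this driver move from plain NULL tests to IS_ERR_OR_NULL() because debugfs_create_dir() can return NULL (allocation failure) or an ERR_PTR (for instance when debugfs is not compiled in), and only a valid dentry may be passed to later debugfs calls. A hedged sketch of the resulting pattern (the directory names are made up):

	struct dentry *root = debugfs_create_dir("mydrv", NULL);

	if (IS_ERR_OR_NULL(root))
		printk(KERN_WARNING "mydrv: debugfs unavailable, continuing\n");
	else
		debugfs_create_dir("queues", root);	/* safe to nest under root */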
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index ecf0770bf0ff..e0b3d1bc2fdf 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -1568,6 +1568,9 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
} else
skb_checksum_none_assert(skb);
+ /*
+ * Deliver the packet to the stack.
+ */
if (unlikely(pkt->vlan_ex)) {
struct vlan_group *grp = pi->vlan_grp;
@@ -2143,7 +2146,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
/*
* Calculate the size of the hardware free list ring plus
- * status page (which the SGE will place at the end of the
+ * Status Page (which the SGE will place after the end of the
* free list ring) in Egress Queue Units.
*/
flsz = (fl->size / FL_PER_EQ_UNIT +
@@ -2240,8 +2243,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
struct port_info *pi = netdev_priv(dev);
/*
- * Calculate the size of the hardware TX Queue (including the
- * status age on the end) in units of TX Descriptors.
+ * Calculate the size of the hardware TX Queue (including the Status
+ * Page on the end of the TX Queue) in units of TX Descriptors.
*/
nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 19520afe1a12..192db226ec7f 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -116,7 +116,7 @@ static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
void *rpl, bool sleep_ok)
{
- static int delay[] = {
+ static const int delay[] = {
1, 1, 3, 5, 10, 10, 20, 50, 100
};
@@ -147,9 +147,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
/*
* Write the command array into the Mailbox Data register array and
* transfer ownership of the mailbox to the firmware.
+ *
+ * For the VFs, the Mailbox Data "registers" are actually backed by
+ * T4's "MA" interface rather than PL Registers (as is the case for
+ * the PFs). Because these are in different coherency domains, the
+ * write to the VF's PL-register-backed Mailbox Control can race in
+ * front of the writes to the MA-backed VF Mailbox Data "registers".
+ * So we need to do a read-back on at least one byte of the VF Mailbox
+ * Data registers before doing the write to the VF Mailbox Control
+ * register.
*/
for (i = 0, p = cmd; i < size; i += 8)
t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+ t4_read_reg(adapter, mbox_data); /* flush write */
+
t4_write_reg(adapter, mbox_ctl,
MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
t4_read_reg(adapter, mbox_ctl); /* flush write */
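
The comment above boils down to a posted-write ordering rule: read back from the region you just wrote before ringing a doorbell that lives in a different coherency domain. A generic sketch of that idiom, with hypothetical MMIO pointers standing in for the cxgb4vf mailbox layout:

#include <linux/io.h>

/* "data" and "doorbell" are illustrative iomem pointers, not the real
 * cxgb4vf register offsets. */
static void post_command(void __iomem *data, void __iomem *doorbell,
			 const u32 *cmd, int words)
{
	int i;

	for (i = 0; i < words; i++)
		writel(cmd[i], data + i * 4);
	readl(data);		/* flush: data writes must land before the doorbell */

	writel(1, doorbell);	/* hand ownership to the firmware */
	readl(doorbell);	/* flush the posted doorbell write as well */
}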
@@ -160,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
delay_idx = 0;
ms = delay[0];
- for (i = 0; i < 500; i += ms) {
+ for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
if (sleep_ok) {
ms = delay[delay_idx];
if (delay_idx < ARRAY_SIZE(delay) - 1)
@@ -1300,7 +1311,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
*/
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
- struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl;
+ const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
switch (opcode) {
@@ -1308,7 +1319,8 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
/*
* Link/module state change message.
*/
- const struct fw_port_cmd *port_cmd = (void *)rpl;
+ const struct fw_port_cmd *port_cmd =
+ (const struct fw_port_cmd *)rpl;
u32 word;
int action, port_id, link_ok, speed, fc, pidx;
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2a628d17d178..082d6ea69920 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
int ret;
/* free and bail if we are shutting down */
- if (unlikely(!netif_running(ndev))) {
+ if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
dev_kfree_skb_any(skb);
return;
}
@@ -1730,7 +1730,7 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
emac_read(EMAC_TXCARRIERSENSE);
emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
- ndev->stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
+ ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
return &ndev->stats;
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 91b3846ffc8a..8b0084d17c8c 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1094,7 +1094,7 @@ static int depca_rx(struct net_device *dev)
}
}
/* Change buffer ownership for this last frame, back to the adapter */
- for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) {
+ for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
}
writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
@@ -1103,7 +1103,7 @@ static int depca_rx(struct net_device *dev)
/*
** Update entry information
*/
- lp->rx_new = (++lp->rx_new) & lp->rxRingMask;
+ lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
}
return 0;
@@ -1148,7 +1148,7 @@ static int depca_tx(struct net_device *dev)
}
/* Update all the pointers */
- lp->tx_old = (++lp->tx_old) & lp->txRingMask;
+ lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
}
return 0;
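
The depca hunks above replace expressions of the form lp->rx_old = (++lp->rx_old) & mask, which modify the same object twice without an intervening sequence point (undefined behaviour in C, and a compiler warning with newer GCC), with the well-defined (index + 1) & mask. A standalone illustration of the safe ring-advance idiom, assuming a power-of-two ring size:

#include <stdio.h>

#define RING_SIZE	8			/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

int main(void)
{
	unsigned int idx = 0;
	int i;

	for (i = 0; i < 10; i++) {
		printf("slot %u\n", idx);
		idx = (idx + 1) & RING_MASK;	/* single modification, wraps to 0 */
	}
	return 0;
}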
@@ -1513,7 +1513,7 @@ static enum depca_type __init depca_shmem_probe (ulong *mem_start)
return adapter;
}
-static int __init depca_isa_probe (struct platform_device *device)
+static int __devinit depca_isa_probe (struct platform_device *device)
{
struct net_device *dev;
struct depca_private *lp;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index e1a8216ff692..c05db6046050 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev)
/* Free all the skbuffs in the queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
- np->rx_ring[i].status = 0;
- np->rx_ring[i].fraginfo = 0;
skb = np->rx_skbuff[i];
if (skb) {
pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev)
dev_kfree_skb (skb);
np->rx_skbuff[i] = NULL;
}
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].fraginfo = 0;
}
for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i];
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 9f6aeefa06bf..317708113601 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev)
/* Checksum mode */
dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
- /* GPIO0 on pre-activate PHY */
- iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
- iow(db, DM9000_GPR, 0); /* Enable PHY */
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev)
unsigned long flags;
/* Save previous register address */
- reg_save = readb(db->io_addr);
spin_lock_irqsave(&db->lock, flags);
+ reg_save = readb(db->io_addr);
netif_stop_queue(dev);
dm9000_reset(db);
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev)
if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
return -EAGAIN;
+ /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
+ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
+ mdelay(1); /* delay needed by DM9000B */
+
/* Initialize DM9000 board */
dm9000_reset(db);
dm9000_init_dm9000(dev);
@@ -1592,10 +1593,15 @@ dm9000_probe(struct platform_device *pdev)
ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
}
- if (!is_valid_ether_addr(ndev->dev_addr))
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
"set using ifconfig\n", ndev->name);
+ random_ether_addr(ndev->dev_addr);
+ mac_src = "random";
+ }
+
+
platform_set_drvdata(pdev, ndev);
ret = register_netdev(ndev);
@@ -1675,7 +1681,7 @@ dm9000_drv_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
- dm9000_release_board(pdev, (board_info_t *) netdev_priv(ndev));
+ dm9000_release_board(pdev, netdev_priv(ndev));
free_netdev(ndev); /* free device structure */
dev_dbg(&pdev->dev, "released and freed device\n");
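
The probe change above falls back to a random MAC address instead of leaving the interface with an invalid one. A short sketch of that fallback, assuming the is_valid_ether_addr()/random_ether_addr() helpers of this kernel generation and an already-allocated net_device:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_fixup_mac(struct net_device *ndev)
{
	if (is_valid_ether_addr(ndev->dev_addr))
		return;

	pr_warn("%s: invalid MAC address, using a random one\n", ndev->name);
	random_ether_addr(ndev->dev_addr);	/* locally administered, unicast */
}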
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 9d8a20b72fa9..8318ea06cb6d 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp)
for (i = 0; i < PHY_MAX_ADDR; i++)
bp->mii_bus->irq[i] = PHY_POLL;
- platform_set_drvdata(bp->dev, bp->mii_bus);
-
if (mdiobus_register(bp->mii_bus)) {
err = -ENXIO;
goto err_out_free_mdio_irq;
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
bp = netdev_priv(dev);
bp->dev = dev;
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
spin_lock_init(&bp->lock);
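
The dnet fix keeps the platform drvdata pointer consistent: probe stores the net_device once, so later callbacks can recover it with platform_get_drvdata() instead of finding a mii_bus there. A stripped-down sketch of that pairing, with placeholder names:

static int __devinit example_probe(struct platform_device *pdev)
{
	struct net_device *dev = alloc_etherdev(0);

	if (!dev)
		return -ENOMEM;
	platform_set_drvdata(pdev, dev);	/* one owner, set once in probe */
	/* ... map resources, register_netdev(dev), etc. ... */
	return 0;
}

static int __devexit example_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}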
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index c7e242b69a18..7501d977d992 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -124,16 +124,22 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
case M88E1000_I_PHY_ID:
case M88E1011_I_PHY_ID:
case M88E1111_I_PHY_ID:
+ case M88E1118_E_PHY_ID:
hw->phy_type = e1000_phy_m88;
break;
case IGP01E1000_I_PHY_ID:
if (hw->mac_type == e1000_82541 ||
hw->mac_type == e1000_82541_rev_2 ||
hw->mac_type == e1000_82547 ||
- hw->mac_type == e1000_82547_rev_2) {
+ hw->mac_type == e1000_82547_rev_2)
hw->phy_type = e1000_phy_igp;
- break;
- }
+ break;
+ case RTL8211B_PHY_ID:
+ hw->phy_type = e1000_phy_8211;
+ break;
+ case RTL8201N_PHY_ID:
+ hw->phy_type = e1000_phy_8201;
+ break;
default:
/* Should never have loaded on this device */
hw->phy_type = e1000_phy_undefined;
@@ -318,6 +324,9 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82547GI:
hw->mac_type = e1000_82547_rev_2;
break;
+ case E1000_DEV_ID_INTEL_CE4100_GBE:
+ hw->mac_type = e1000_ce4100;
+ break;
default:
/* Should never have loaded on this device */
return -E1000_ERR_MAC_TYPE;
@@ -372,6 +381,9 @@ void e1000_set_media_type(struct e1000_hw *hw)
case e1000_82542_rev2_1:
hw->media_type = e1000_media_type_fiber;
break;
+ case e1000_ce4100:
+ hw->media_type = e1000_media_type_copper;
+ break;
default:
status = er32(STATUS);
if (status & E1000_STATUS_TBIMODE) {
@@ -460,6 +472,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
/* Reset is performed on a shadow of the control register */
ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
break;
+ case e1000_ce4100:
default:
ew32(CTRL, (ctrl | E1000_CTRL_RST));
break;
@@ -952,6 +965,67 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
}
/**
+ * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Commits changes to PHY configuration by calling e1000_phy_reset().
+ */
+static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ /* SW reset the PHY so all changes take effect */
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ e_dbg("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+static s32 gbe_dhg_phy_setup(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 ctrl_aux;
+
+ switch (hw->phy_type) {
+ case e1000_phy_8211:
+ ret_val = e1000_copper_link_rtl_setup(hw);
+ if (ret_val) {
+ e_dbg("e1000_copper_link_rtl_setup failed!\n");
+ return ret_val;
+ }
+ break;
+ case e1000_phy_8201:
+ /* Set RMII mode */
+ ctrl_aux = er32(CTL_AUX);
+ ctrl_aux |= E1000_CTL_AUX_RMII;
+ ew32(CTL_AUX, ctrl_aux);
+ E1000_WRITE_FLUSH();
+
+ /* Disable the J/K bits required for receive */
+ ctrl_aux = er32(CTL_AUX);
+ ctrl_aux |= 0x4;
+ ctrl_aux &= ~0x2;
+ ew32(CTL_AUX, ctrl_aux);
+ E1000_WRITE_FLUSH();
+ ret_val = e1000_copper_link_rtl_setup(hw);
+
+ if (ret_val) {
+ e_dbg("e1000_copper_link_rtl_setup failed!\n");
+ return ret_val;
+ }
+ break;
+ default:
+ e_dbg("Error Resetting the PHY\n");
+ return E1000_ERR_PHY_TYPE;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
* e1000_copper_link_preconfig - early configuration for copper
* @hw: Struct containing variables accessed by shared code
*
@@ -1286,6 +1360,10 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (hw->autoneg_advertised == 0)
hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ /* IFE/RTL8201N PHY only supports 10/100 */
+ if (hw->phy_type == e1000_phy_8201)
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
e_dbg("Reconfiguring auto-neg advertisement params\n");
ret_val = e1000_phy_setup_autoneg(hw);
if (ret_val) {
@@ -1341,7 +1419,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
s32 ret_val;
e_dbg("e1000_copper_link_postconfig");
- if (hw->mac_type >= e1000_82544) {
+ if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
e1000_config_collision_dist(hw);
} else {
ret_val = e1000_config_mac_to_phy(hw);
@@ -1395,6 +1473,12 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
ret_val = e1000_copper_link_mgp_setup(hw);
if (ret_val)
return ret_val;
+ } else {
+ ret_val = gbe_dhg_phy_setup(hw);
+ if (ret_val) {
+ e_dbg("gbe_dhg_phy_setup failed!\n");
+ return ret_val;
+ }
}
if (hw->autoneg) {
@@ -1461,10 +1545,11 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
return ret_val;
/* Read the MII 1000Base-T Control Register (Address 9). */
- ret_val =
- e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
+ else if (hw->phy_type == e1000_phy_8201)
+ mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
/* Need to parse both autoneg_advertised and fc and set up
* the appropriate PHY registers. First we will parse for
@@ -1577,9 +1662,14 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
- ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
- if (ret_val)
- return ret_val;
+ if (hw->phy_type == e1000_phy_8201) {
+ mii_1000t_ctrl_reg = 0;
+ } else {
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+ }
return E1000_SUCCESS;
}
@@ -1860,7 +1950,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
/* 82544 or newer MAC, Auto Speed Detection takes care of
* MAC speed/duplex configuration.*/
- if (hw->mac_type >= e1000_82544)
+ if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
return E1000_SUCCESS;
/* Read the Device Control Register and set the bits to Force Speed
@@ -1870,27 +1960,49 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
- /* Set up duplex in the Device Control and Transmit Control
- * registers depending on negotiated values.
- */
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
+ switch (hw->phy_type) {
+ case e1000_phy_8201:
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
- if (phy_data & M88E1000_PSSR_DPLX)
- ctrl |= E1000_CTRL_FD;
- else
- ctrl &= ~E1000_CTRL_FD;
+ if (phy_data & RTL_PHY_CTRL_FD)
+ ctrl |= E1000_CTRL_FD;
+ else
+ ctrl &= ~E1000_CTRL_FD;
- e1000_config_collision_dist(hw);
+ if (phy_data & RTL_PHY_CTRL_SPD_100)
+ ctrl |= E1000_CTRL_SPD_100;
+ else
+ ctrl |= E1000_CTRL_SPD_10;
- /* Set up speed in the Device Control register depending on
- * negotiated values.
- */
- if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
- ctrl |= E1000_CTRL_SPD_1000;
- else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
- ctrl |= E1000_CTRL_SPD_100;
+ e1000_config_collision_dist(hw);
+ break;
+ default:
+ /* Set up duplex in the Device Control and Transmit Control
+ * registers depending on negotiated values.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & M88E1000_PSSR_DPLX)
+ ctrl |= E1000_CTRL_FD;
+ else
+ ctrl &= ~E1000_CTRL_FD;
+
+ e1000_config_collision_dist(hw);
+
+ /* Set up speed in the Device Control register depending on
+ * negotiated values.
+ */
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+ ctrl |= E1000_CTRL_SPD_1000;
+ else if ((phy_data & M88E1000_PSSR_SPEED) ==
+ M88E1000_PSSR_100MBS)
+ ctrl |= E1000_CTRL_SPD_100;
+ }
/* Write the configured values back to the Device Control Reg. */
ew32(CTRL, ctrl);
@@ -2401,7 +2513,8 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
* speed/duplex on the MAC to the current PHY speed/duplex
* settings.
*/
- if (hw->mac_type >= e1000_82544)
+ if ((hw->mac_type >= e1000_82544) &&
+ (hw->mac_type != e1000_ce4100))
e1000_config_collision_dist(hw);
else {
ret_val = e1000_config_mac_to_phy(hw);
@@ -2738,7 +2851,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
{
u32 i;
u32 mdic = 0;
- const u32 phy_addr = 1;
+ const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
e_dbg("e1000_read_phy_reg_ex");
@@ -2752,28 +2865,61 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
- mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
- (phy_addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_READ));
+ if (hw->mac_type == e1000_ce4100) {
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (INTEL_CE_GBE_MDIC_OP_READ) |
+ (INTEL_CE_GBE_MDIC_GO));
- ew32(MDIC, mdic);
+ writel(mdic, E1000_MDIO_CMD);
- /* Poll the ready bit to see if the MDI read completed */
- for (i = 0; i < 64; i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Read did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Error\n");
- return -E1000_ERR_PHY;
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = readl(E1000_MDIO_CMD);
+ if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+ break;
+ }
+
+ if (mdic & INTEL_CE_GBE_MDIC_GO) {
+ e_dbg("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+
+ mdic = readl(E1000_MDIO_STS);
+ if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) {
+ e_dbg("MDI Read Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (u16) mdic;
+ } else {
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ e_dbg("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (u16) mdic;
}
- *phy_data = (u16) mdic;
} else {
/* We must first send a preamble through the MDIO pin to signal the
* beginning of an MII instruction. This is done by sending 32
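
The CE4100 branch above drives MDIO through a command/status register pair instead of MDIC: write the command with the GO bit set, poll until GO clears, then check the error bit and take the low 16 bits as data. A condensed sketch of that sequence, reusing the bit names this patch defines but treating the 64 x 50us poll budget as illustrative:

static int ce4100_mdio_read_sketch(u32 reg_addr, u32 phy_addr, u16 *val)
{
	u32 cmd = (reg_addr << E1000_MDIC_REG_SHIFT) |
		  (phy_addr << E1000_MDIC_PHY_SHIFT) |
		  INTEL_CE_GBE_MDIC_OP_READ | INTEL_CE_GBE_MDIC_GO;
	int i;

	writel(cmd, E1000_MDIO_CMD);
	for (i = 0; i < 64; i++) {
		udelay(50);
		if (!(readl(E1000_MDIO_CMD) & INTEL_CE_GBE_MDIC_GO))
			break;				/* cycle finished */
	}
	if (readl(E1000_MDIO_CMD) & INTEL_CE_GBE_MDIC_GO)
		return -E1000_ERR_PHY;			/* timed out */
	if (readl(E1000_MDIO_STS) & INTEL_CE_GBE_MDIC_READ_ERROR)
		return -E1000_ERR_PHY;
	*val = (u16)readl(E1000_MDIO_STS);
	return 0;
}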
@@ -2840,7 +2986,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
{
u32 i;
u32 mdic = 0;
- const u32 phy_addr = 1;
+ const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
e_dbg("e1000_write_phy_reg_ex");
@@ -2850,27 +2996,54 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
}
if (hw->mac_type > e1000_82543) {
- /* Set up Op-code, Phy Address, register address, and data intended
- * for the PHY register in the MDI Control register. The MAC will take
- * care of interfacing with the PHY to send the desired data.
+ /* Set up Op-code, Phy Address, register address, and data
+ * intended for the PHY register in the MDI Control register.
+ * The MAC will take care of interfacing with the PHY to send
+ * the desired data.
*/
- mdic = (((u32) phy_data) |
- (reg_addr << E1000_MDIC_REG_SHIFT) |
- (phy_addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_WRITE));
+ if (hw->mac_type == e1000_ce4100) {
+ mdic = (((u32) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (INTEL_CE_GBE_MDIC_OP_WRITE) |
+ (INTEL_CE_GBE_MDIC_GO));
- ew32(MDIC, mdic);
+ writel(mdic, E1000_MDIO_CMD);
- /* Poll the ready bit to see if the MDI read completed */
- for (i = 0; i < 641; i++) {
- udelay(5);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY)
- break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Write did not complete\n");
- return -E1000_ERR_PHY;
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 640; i++) {
+ udelay(5);
+ mdic = readl(E1000_MDIO_CMD);
+ if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+ break;
+ }
+ if (mdic & INTEL_CE_GBE_MDIC_GO) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ } else {
+ mdic = (((u32) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read
+ * completed
+ */
+ for (i = 0; i < 641; i++) {
+ udelay(5);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ e_dbg("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
}
} else {
/* We'll need to use the SW defined pins to shift the write command
@@ -3048,6 +3221,12 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
if (hw->phy_id == M88E1011_I_PHY_ID)
match = true;
break;
+ case e1000_ce4100:
+ if ((hw->phy_id == RTL8211B_PHY_ID) ||
+ (hw->phy_id == RTL8201N_PHY_ID) ||
+ (hw->phy_id == M88E1118_E_PHY_ID))
+ match = true;
+ break;
case e1000_82541:
case e1000_82541_rev_2:
case e1000_82547:
@@ -3291,6 +3470,9 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
if (hw->phy_type == e1000_phy_igp)
return e1000_phy_igp_get_info(hw, phy_info);
+ else if ((hw->phy_type == e1000_phy_8211) ||
+ (hw->phy_type == e1000_phy_8201))
+ return E1000_SUCCESS;
else
return e1000_phy_m88_get_info(hw, phy_info);
}
@@ -3742,6 +3924,12 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e_dbg("e1000_read_eeprom");
+ if (hw->mac_type == e1000_ce4100) {
+ GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
+ data);
+ return E1000_SUCCESS;
+ }
+
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
@@ -3904,6 +4092,12 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e_dbg("e1000_write_eeprom");
+ if (hw->mac_type == e1000_ce4100) {
+ GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
+ data);
+ return E1000_SUCCESS;
+ }
+
/* If eeprom is not yet detected, do so now */
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
@@ -4892,11 +5086,11 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
} else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
u16 cur_agc_value;
u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
- u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
- { IGP01E1000_PHY_AGC_A,
- IGP01E1000_PHY_AGC_B,
- IGP01E1000_PHY_AGC_C,
- IGP01E1000_PHY_AGC_D
+ static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+ IGP01E1000_PHY_AGC_A,
+ IGP01E1000_PHY_AGC_B,
+ IGP01E1000_PHY_AGC_C,
+ IGP01E1000_PHY_AGC_D
};
/* Read the AGC registers for all channels */
for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
@@ -5071,11 +5265,11 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
{
s32 ret_val;
u16 phy_data, phy_saved_data, speed, duplex, i;
- u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
- { IGP01E1000_PHY_AGC_PARAM_A,
- IGP01E1000_PHY_AGC_PARAM_B,
- IGP01E1000_PHY_AGC_PARAM_C,
- IGP01E1000_PHY_AGC_PARAM_D
+ static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+ IGP01E1000_PHY_AGC_PARAM_A,
+ IGP01E1000_PHY_AGC_PARAM_B,
+ IGP01E1000_PHY_AGC_PARAM_C,
+ IGP01E1000_PHY_AGC_PARAM_D
};
u16 min_length, max_length;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index ecd9f6c6bcd5..c70b23d52284 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -41,7 +41,7 @@ struct e1000_hw;
struct e1000_hw_stats;
/* Enumerated types specific to the e1000 hardware */
-/* Media Access Controlers */
+/* Media Access Controllers */
typedef enum {
e1000_undefined = 0,
e1000_82542_rev2_0,
@@ -52,6 +52,7 @@ typedef enum {
e1000_82545,
e1000_82545_rev_3,
e1000_82546,
+ e1000_ce4100,
e1000_82546_rev_3,
e1000_82541,
e1000_82541_rev_2,
@@ -209,9 +210,11 @@ typedef enum {
} e1000_1000t_rx_status;
typedef enum {
- e1000_phy_m88 = 0,
- e1000_phy_igp,
- e1000_phy_undefined = 0xFF
+ e1000_phy_m88 = 0,
+ e1000_phy_igp,
+ e1000_phy_8211,
+ e1000_phy_8201,
+ e1000_phy_undefined = 0xFF
} e1000_phy_type;
typedef enum {
@@ -442,6 +445,7 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
#define E1000_DEV_ID_82547EI 0x1019
#define E1000_DEV_ID_82547EI_MOBILE 0x101A
#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
#define NODE_ADDRESS_SIZE 6
#define ETH_LENGTH_OF_ADDRESS 6
@@ -808,6 +812,16 @@ struct e1000_ffvt_entry {
#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
#define E1000_FLA 0x0001C /* Flash Access - RW */
#define E1000_MDIC 0x00020 /* MDI Control - RW */
+
+extern void __iomem *ce4100_gbe_mdio_base_virt;
+#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt)
+#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
+#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
+#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
+#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC)
+#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20)
+#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24)
+
#define E1000_SCTL 0x00024 /* SerDes Control - RW */
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
@@ -820,6 +834,34 @@ struct e1000_ffvt_entry {
#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+
+/* Auxiliary Control Register. This register is CE4100 specific,
+ * RMII/RGMII function is switched by this register - RW
+ * Following are bits definitions of the Auxiliary Control Register
+ */
+#define E1000_CTL_AUX 0x000E0
+#define E1000_CTL_AUX_END_SEL_SHIFT 10
+#define E1000_CTL_AUX_ENDIANESS_SHIFT 8
+#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0
+
+/* descriptor and packet transfer use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use CTL_AUX.ENDIANESS, packet use default */
+#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use default, packet use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* all use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT)
+
+#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+
+/* LW little endian, Byte big endian */
+#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+
#define E1000_RCTL 0x00100 /* RX Control - RW */
#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
@@ -1011,6 +1053,7 @@ struct e1000_ffvt_entry {
* in more current versions of the 8254x. Despite the difference in location,
* the registers function in the same manner.
*/
+#define E1000_82542_CTL_AUX E1000_CTL_AUX
#define E1000_82542_CTRL E1000_CTRL
#define E1000_82542_CTRL_DUP E1000_CTRL_DUP
#define E1000_82542_STATUS E1000_STATUS
@@ -1571,6 +1614,11 @@ struct e1000_hw {
#define E1000_MDIC_INT_EN 0x20000000
#define E1000_MDIC_ERROR 0x40000000
+#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000
+#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000
+#define INTEL_CE_GBE_MDIC_GO 0x80000000
+#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000
+
#define E1000_KUMCTRLSTA_MASK 0x0000FFFF
#define E1000_KUMCTRLSTA_OFFSET 0x001F0000
#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16
@@ -2869,8 +2917,14 @@ struct e1000_host_command_info {
#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
#define M88E1011_I_REV_4 0x04
#define M88E1111_I_PHY_ID 0x01410CC0
+#define M88E1118_E_PHY_ID 0x01410E40
#define L1LXT971A_PHY_ID 0x001378E0
+#define RTL8211B_PHY_ID 0x001CC910
+#define RTL8201N_PHY_ID 0x8200
+#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */
+#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */
+
/* Bits...
* 15-5: page
* 4-0: register offset
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4d62f7bfa036..bfab14092d2c 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -28,6 +28,12 @@
#include "e1000.h"
#include <net/ip6_checksum.h>
+#include <linux/io.h>
+
+/* Intel Media SOC GbE MDIO physical base address */
+static unsigned long ce4100_gbe_mdio_base_phy;
+/* Intel Media SOC GbE MDIO virtual base address */
+void __iomem *ce4100_gbe_mdio_base_virt;
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -79,6 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
INTEL_E1000_ETHERNET_DEVICE(0x108A),
INTEL_E1000_ETHERNET_DEVICE(0x1099),
INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+ INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
/* required last entry */
{0,}
};
@@ -459,6 +466,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
case e1000_82545:
case e1000_82545_rev_3:
case e1000_82546:
+ case e1000_ce4100:
case e1000_82546_rev_3:
case e1000_82541:
case e1000_82541_rev_2:
@@ -573,6 +581,7 @@ void e1000_reset(struct e1000_adapter *adapter)
case e1000_82545:
case e1000_82545_rev_3:
case e1000_82546:
+ case e1000_ce4100:
case e1000_82546_rev_3:
pba = E1000_PBA_48K;
break;
@@ -894,6 +903,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
static int global_quad_port_a = 0; /* global ksp3 port a indication */
int i, err, pci_using_dac;
u16 eeprom_data = 0;
+ u16 tmp = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
int bars, need_ioport;
@@ -971,11 +981,13 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
*/
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
pci_using_dac = 1;
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
} else {
- pr_err("No usable DMA config, aborting\n");
- goto err_dma;
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ pr_err("No usable DMA config, aborting\n");
+ goto err_dma;
+ }
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}
netdev->netdev_ops = &e1000_netdev_ops;
@@ -994,6 +1006,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
goto err_sw_init;
err = -EIO;
+ if (hw->mac_type == e1000_ce4100) {
+ ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
+ ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
+ pci_resource_len(pdev, BAR_1));
+
+ if (!ce4100_gbe_mdio_base_virt)
+ goto err_mdio_ioremap;
+ }
if (hw->mac_type >= e1000_82543) {
netdev->features = NETIF_F_SG |
@@ -1133,6 +1153,20 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->wol = adapter->eeprom_wol;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+ /* Auto detect PHY address */
+ if (hw->mac_type == e1000_ce4100) {
+ for (i = 0; i < 32; i++) {
+ hw->phy_addr = i;
+ e1000_read_phy_reg(hw, PHY_ID2, &tmp);
+ if (tmp == 0 || tmp == 0xFF) {
+ if (i == 31)
+ goto err_eeprom;
+ continue;
+ } else
+ break;
+ }
+ }
+
/* reset the hardware with the new settings */
e1000_reset(adapter);
@@ -1169,6 +1203,8 @@ err_eeprom:
kfree(adapter->rx_ring);
err_dma:
err_sw_init:
+err_mdio_ioremap:
+ iounmap(ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
free_netdev(netdev);
@@ -1407,6 +1443,7 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
/* First rev 82545 and 82546 need to not allow any memory
* write location to cross 64k boundary due to errata 23 */
if (hw->mac_type == e1000_82545 ||
+ hw->mac_type == e1000_ce4100 ||
hw->mac_type == e1000_82546) {
return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
}
@@ -1429,13 +1466,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
int size;
size = sizeof(struct e1000_buffer) * txdr->count;
- txdr->buffer_info = vmalloc(size);
+ txdr->buffer_info = vzalloc(size);
if (!txdr->buffer_info) {
e_err(probe, "Unable to allocate memory for the Tx descriptor "
"ring\n");
return -ENOMEM;
}
- memset(txdr->buffer_info, 0, size);
/* round up to nearest 4K */
@@ -1625,13 +1661,12 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
int size, desc_len;
size = sizeof(struct e1000_buffer) * rxdr->count;
- rxdr->buffer_info = vmalloc(size);
+ rxdr->buffer_info = vzalloc(size);
if (!rxdr->buffer_info) {
e_err(probe, "Unable to allocate memory for the Rx descriptor "
"ring\n");
return -ENOMEM;
}
- memset(rxdr->buffer_info, 0, size);
desc_len = sizeof(struct e1000_rx_desc);
@@ -2198,7 +2233,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
* addresses take precedence to avoid disabling unicast filtering
* when possible.
*
- * RAR 0 is used for the station MAC adddress
+ * RAR 0 is used for the station MAC address
* if there are not 14 addresses, go ahead and clear the filters
*/
i = 1;
@@ -2726,7 +2761,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
break;
}
- css = skb_transport_offset(skb);
+ css = skb_checksum_start_offset(skb);
i = tx_ring->next_to_use;
buffer_info = &tx_ring->buffer_info[i];
@@ -3443,9 +3478,17 @@ static irqreturn_t e1000_intr(int irq, void *data)
struct e1000_hw *hw = &adapter->hw;
u32 icr = er32(ICR);
- if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
+ if (unlikely((!icr)))
return IRQ_NONE; /* Not our interrupt */
+ /*
+ * we might have caused the interrupt, but the above
+ * read cleared it, and just in case the driver is
+ * down there is nothing to do so return handled
+ */
+ if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
+ return IRQ_HANDLED;
+
if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
hw->get_link_status = 1;
/* guard against interrupt when we're going down */
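
The interrupt-handler change above separates two cases that used to share one early return: a zero cause register means the (potentially shared) line was raised by another device, while a non-zero cause read while the adapter is going down has already been cleared by the read itself and only needs to be acknowledged. A generic sketch of that split, with hypothetical helper names:

static irqreturn_t example_intr(int irq, void *data)
{
	struct example_adapter *adapter = data;
	u32 icr = read_and_clear_cause(adapter);	/* read-to-clear register */

	if (!icr)
		return IRQ_NONE;	/* shared line, not our device */

	/*
	 * The read above already cleared the cause.  If the driver is on
	 * its way down there is nothing left to do, but the interrupt was
	 * ours, so report it handled rather than looking spurious.
	 */
	if (test_bit(__EXAMPLE_DOWN, &adapter->flags))
		return IRQ_HANDLED;

	schedule_rx_tx_work(adapter);
	return IRQ_HANDLED;
}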
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index edd1c75aa895..33e7c45a4fe4 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -34,12 +34,22 @@
#ifndef _E1000_OSDEP_H_
#define _E1000_OSDEP_H_
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
#include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
+
+#define CONFIG_RAM_BASE 0x60000
+#define GBE_CONFIG_OFFSET 0x0
+
+#define GBE_CONFIG_RAM_BASE \
+ ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
+
+#define GBE_CONFIG_BASE_VIRT \
+ ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
+
+#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
+ (iowrite16_rep(base + offset, data, count))
+
+#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \
+ (ioread16_rep(base + (offset << 1), data, count))
#define er32(reg) \
(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 10d8d98bb797..1301eba8b57a 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -352,12 +352,13 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
}
{ /* Flow Control */
- struct e1000_opt_list fc_list[] =
- {{ E1000_FC_NONE, "Flow Control Disabled" },
- { E1000_FC_RX_PAUSE,"Flow Control Receive Only" },
- { E1000_FC_TX_PAUSE,"Flow Control Transmit Only" },
- { E1000_FC_FULL, "Flow Control Enabled" },
- { E1000_FC_DEFAULT, "Flow Control Hardware Default" }};
+ static const struct e1000_opt_list fc_list[] = {
+ { E1000_FC_NONE, "Flow Control Disabled" },
+ { E1000_FC_RX_PAUSE, "Flow Control Receive Only" },
+ { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" },
+ { E1000_FC_FULL, "Flow Control Enabled" },
+ { E1000_FC_DEFAULT, "Flow Control Hardware Default" }
+ };
opt = (struct e1000_option) {
.type = list_option,
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 7236f1a53ba0..89a69035e538 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -52,6 +52,7 @@
(ID_LED_DEF1_DEF2))
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
#define E1000_BASE1000T_STATUS 10
#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
#define E1000_RECEIVE_ERROR_COUNTER 21
@@ -74,6 +75,11 @@ static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
static s32 e1000_led_on_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
/**
* e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -107,6 +113,10 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
phy->type = e1000_phy_bm;
+ phy->ops.acquire = e1000_get_hw_semaphore_82574;
+ phy->ops.release = e1000_put_hw_semaphore_82574;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
break;
default:
return -E1000_ERR_PHY;
@@ -115,29 +125,36 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
/* This can only be done after all function pointers are setup. */
ret_val = e1000_get_phy_id_82571(hw);
+ if (ret_val) {
+ e_dbg("Error getting PHY ID\n");
+ return ret_val;
+ }
/* Verify phy id */
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
if (phy->id != IGP01E1000_I_PHY_ID)
- return -E1000_ERR_PHY;
+ ret_val = -E1000_ERR_PHY;
break;
case e1000_82573:
if (phy->id != M88E1111_I_PHY_ID)
- return -E1000_ERR_PHY;
+ ret_val = -E1000_ERR_PHY;
break;
case e1000_82574:
case e1000_82583:
if (phy->id != BME1000_E_PHY_ID_R2)
- return -E1000_ERR_PHY;
+ ret_val = -E1000_ERR_PHY;
break;
default:
- return -E1000_ERR_PHY;
+ ret_val = -E1000_ERR_PHY;
break;
}
- return 0;
+ if (ret_val)
+ e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+ return ret_val;
}
/**
@@ -200,6 +217,17 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
break;
}
+ /* Function Pointers */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ nvm->ops.acquire = e1000_get_hw_semaphore_82574;
+ nvm->ops.release = e1000_put_hw_semaphore_82574;
+ break;
+ default:
+ break;
+ }
+
return 0;
}
@@ -300,7 +328,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
/*
* Ensure that the inter-port SWSM.SMBI lock bit is clear before
- * first NVM or PHY acess. This should be done for single-port
+ * first NVM or PHY access. This should be done for single-port
* devices, and for one port only on dual-port devices so that
* for those devices we can still use the SMBI lock to synchronize
* inter-port accesses to the PHY & NVM.
@@ -542,6 +570,146 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
ew32(SWSM, swsm);
}
+/**
+ * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore during reset.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+ s32 ret_val = 0;
+ s32 i = 0;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ do {
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+ break;
+
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+ msleep(2);
+ i++;
+ } while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+ if (i == MDIO_OWNERSHIP_TIMEOUT) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_82573(hw);
+ e_dbg("Driver can't access the PHY\n");
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used during reset.
+ *
+ **/
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
+{
+ u32 extcnf_ctrl;
+
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
+ * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ mutex_lock(&swflag_mutex);
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ if (ret_val)
+ mutex_unlock(&swflag_mutex);
+ return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82574 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ *
+ **/
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
+{
+ e1000_put_hw_semaphore_82573(hw);
+ mutex_unlock(&swflag_mutex);
+}
+
+/**
+ * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag.
+ * LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+ u16 data = er32(POEMB);
+
+ if (active)
+ data |= E1000_PHY_CTRL_D0A_LPLU;
+ else
+ data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+ ew32(POEMB, data);
+ return 0;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * when active is true, else clear lplu for D3. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+ u16 data = er32(POEMB);
+
+ if (!active) {
+ data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+ } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= E1000_PHY_CTRL_NOND0A_LPLU;
+ }
+
+ ew32(POEMB, data);
+ return 0;
+}
/**
* e1000_acquire_nvm_82571 - Request for access to the EEPROM
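
The new 82574 acquire/release pair layers a driver-local mutex over the hardware MDIO ownership bit, so concurrent PHY and NVM users serialize before touching EXTCNF_CTRL and the ownership bit is always dropped before the mutex. A stripped-down sketch of that layering; hw_take_ownership()/hw_drop_ownership() stand in for the 82573 semaphore helpers:

static DEFINE_MUTEX(example_swflag_mutex);

static s32 example_acquire(struct e1000_hw *hw)
{
	s32 ret;

	mutex_lock(&example_swflag_mutex);	/* serialize software users */
	ret = hw_take_ownership(hw);		/* spin on the ownership bit */
	if (ret)
		mutex_unlock(&example_swflag_mutex);	/* never hold it on failure */
	return ret;
}

static void example_release(struct e1000_hw *hw)
{
	hw_drop_ownership(hw);
	mutex_unlock(&example_swflag_mutex);
}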
@@ -562,8 +730,6 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82573:
- case e1000_82574:
- case e1000_82583:
break;
default:
ret_val = e1000e_acquire_nvm(hw);
@@ -853,9 +1019,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
**/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
- u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
+ u32 ctrl, ctrl_ext;
s32 ret_val;
- u16 i = 0;
/*
* Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -880,33 +1045,33 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_82573:
+ ret_val = e1000_get_hw_semaphore_82573(hw);
+ break;
case e1000_82574:
case e1000_82583:
- extcnf_ctrl = er32(EXTCNF_CTRL);
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
- do {
- ew32(EXTCNF_CTRL, extcnf_ctrl);
- extcnf_ctrl = er32(EXTCNF_CTRL);
-
- if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
- break;
-
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
- msleep(2);
- i++;
- } while (i < MDIO_OWNERSHIP_TIMEOUT);
+ ret_val = e1000_get_hw_semaphore_82574(hw);
break;
default:
break;
}
+ if (ret_val)
+ e_dbg("Cannot acquire MDIO ownership\n");
ctrl = er32(CTRL);
e_dbg("Issuing a global reset to MAC\n");
ew32(CTRL, ctrl | E1000_CTRL_RST);
+ /* Must release MDIO ownership and mutex after MAC reset. */
+ switch (hw->mac.type) {
+ case e1000_82574:
+ case e1000_82583:
+ e1000_put_hw_semaphore_82574(hw);
+ break;
+ default:
+ break;
+ }
+
if (hw->nvm.type == e1000_nvm_flash_hw) {
udelay(10);
ctrl_ext = er32(CTRL_EXT);
@@ -938,7 +1103,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
ew32(IMC, 0xffffffff);
- icr = er32(ICR);
+ er32(ICR);
if (hw->mac.type == e1000_82571) {
/* Install any alternate MAC address into RAR0 */
@@ -1145,7 +1310,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
* apply workaround for hardware errata documented in errata
* docs Fixes issue where some error prone or unreliable PCIe
* completions are occurring, particularly with ASPM enabled.
- * Without fix, issue can cause tx timeouts.
+ * Without fix, issue can cause Tx timeouts.
*/
reg = er32(GCR2);
reg |= 1;
@@ -1402,6 +1567,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
u32 rxcw;
u32 ctrl;
u32 status;
+ u32 txcw;
+ u32 i;
s32 ret_val = 0;
ctrl = er32(CTRL);
@@ -1422,8 +1589,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
e_dbg("AN_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = true;
}
- break;
+ break;
case e1000_serdes_link_forced_up:
/*
@@ -1431,8 +1600,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
+ * If the partner code word is null, stop forcing
+ * and restart auto negotiation.
*/
- if (rxcw & E1000_RXCW_C) {
+ if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
/* Enable autoneg, and unforce link up */
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -1440,6 +1611,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
e_dbg("FORCED_UP -> AN_PROG\n");
+ } else {
+ mac->serdes_has_link = true;
}
break;
@@ -1495,6 +1668,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
+ mac->serdes_has_link = false;
e_dbg("DOWN -> AN_PROG\n");
break;
}
@@ -1505,16 +1679,32 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e_dbg("ANYSTATE -> DOWN\n");
} else {
/*
- * We have sync, and can tolerate one invalid (IV)
- * codeword before declaring link down, so reread
- * to look again.
+ * Check several times, if Sync and Config
+ * both are consistently 1 then simply ignore
+ * the Invalid bit and restart Autoneg
*/
- udelay(10);
- rxcw = er32(RXCW);
- if (rxcw & E1000_RXCW_IV) {
- mac->serdes_link_state = e1000_serdes_link_down;
+ for (i = 0; i < AN_RETRY_COUNT; i++) {
+ udelay(10);
+ rxcw = er32(RXCW);
+ if ((rxcw & E1000_RXCW_IV) &&
+ !((rxcw & E1000_RXCW_SYNCH) &&
+ (rxcw & E1000_RXCW_C))) {
+ mac->serdes_has_link = false;
+ mac->serdes_link_state =
+ e1000_serdes_link_down;
+ e_dbg("ANYSTATE -> DOWN\n");
+ break;
+ }
+ }
+
+ if (i == AN_RETRY_COUNT) {
+ txcw = er32(TXCW);
+ txcw |= E1000_TXCW_ANE;
+ ew32(TXCW, txcw);
+ mac->serdes_link_state =
+ e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
- e_dbg("ANYSTATE -> DOWN\n");
+ e_dbg("ANYSTATE -> AN_PROG\n");
}
}
}
@@ -1897,7 +2087,7 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_CHECK_PHY_HANG,
- .pba = 36,
+ .pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
@@ -1914,7 +2104,7 @@ struct e1000_info e1000_82583_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .pba = 36,
+ .pba = 32,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile
index 360c91369f35..28519acacd2d 100644
--- a/drivers/net/e1000e/Makefile
+++ b/drivers/net/e1000e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2008 Intel Corporation.
+# Copyright(c) 1999 - 2011 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index d3f7a9c3f973..c516a7440bec 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -86,6 +86,7 @@
#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_LSECCK 0x00001000
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
/* Receive Descriptor bit definitions */
@@ -488,6 +489,9 @@
#define E1000_BLK_PHY_RESET 12
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_INVALID_ARGUMENT 16
+#define E1000_ERR_NO_SPACE 17
+#define E1000_ERR_NVM_PBA_SECTION 18
/* Loop limit on how long we wait for auto-negotiation to complete */
#define FIBER_LINK_UP_LIMIT 50
@@ -516,6 +520,7 @@
#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
/* Receive Configuration Word */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
#define E1000_RXCW_C 0x20000000 /* Receive config */
#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
@@ -649,13 +654,16 @@
/* Mask bits for fields in Word 0x03 of the EEPROM */
#define NVM_COMPAT_LOM 0x0800
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH 11
+
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
/* PBA (printed board assembly) number words */
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
-
+#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_WORD_SIZE_BASE_SHIFT 6
/* NVM Commands - SPI */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index fdc67fead4ea..00bf595ebd67 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
+#include <linux/crc32.h>
#include "hw.h"
@@ -363,6 +364,7 @@ struct e1000_adapter {
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
+ spinlock_t stats64_lock;
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
@@ -482,6 +484,7 @@ extern const char e1000e_driver_version[];
extern void e1000e_check_options(struct e1000_adapter *adapter);
extern void e1000e_set_ethtool_ops(struct net_device *netdev);
+extern void e1000e_led_blink_task(struct work_struct *work);
extern int e1000e_up(struct e1000_adapter *adapter);
extern void e1000e_down(struct e1000_adapter *adapter);
@@ -492,9 +495,13 @@ extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
-extern void e1000e_update_stats(struct e1000_adapter *adapter);
+extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64
+ *stats);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
extern unsigned int copybreak;
@@ -513,7 +520,8 @@ extern struct e1000_info e1000_pch_info;
extern struct e1000_info e1000_pch2_info;
extern struct e1000_info e1000_es2_info;
-extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
+extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
extern s32 e1000e_commit_phy(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 24f8ac9cf703..2fefa820302b 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -100,8 +100,8 @@
* with a lower bound at "index" and the upper bound at
* "index + 5".
*/
-static const u16 e1000_gg82563_cable_length_table[] =
- { 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+static const u16 e1000_gg82563_cable_length_table[] = {
+ 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
#define GG82563_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_gg82563_cable_length_table)
@@ -426,8 +426,8 @@ static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (e1000e_get_hw_semaphore(hw) != 0);
- /* Empty */
+ while (e1000e_get_hw_semaphore(hw) != 0)
+ ; /* Empty */
swfw_sync = er32(SW_FW_SYNC);
swfw_sync &= ~mask;
@@ -784,7 +784,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
**/
static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
{
- u32 ctrl, icr;
+ u32 ctrl;
s32 ret_val;
/*
@@ -818,7 +818,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
ew32(IMC, 0xffffffff);
- icr = er32(ICR);
+ er32(ICR);
ret_val = e1000_check_alt_mac_addr_generic(hw);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 8984d165a39b..07f09e96e453 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -45,63 +45,67 @@ struct e1000_stats {
int stat_offset;
};
-#define E1000_STAT(m) E1000_STATS, \
- sizeof(((struct e1000_adapter *)0)->m), \
- offsetof(struct e1000_adapter, m)
-#define E1000_NETDEV_STAT(m) NETDEV_STATS, \
- sizeof(((struct net_device *)0)->m), \
- offsetof(struct net_device, m)
+#define E1000_STAT(str, m) { \
+ .stat_string = str, \
+ .type = E1000_STATS, \
+ .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
+ .stat_offset = offsetof(struct e1000_adapter, m) }
+#define E1000_NETDEV_STAT(str, m) { \
+ .stat_string = str, \
+ .type = NETDEV_STATS, \
+ .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m) }
static const struct e1000_stats e1000_gstrings_stats[] = {
- { "rx_packets", E1000_STAT(stats.gprc) },
- { "tx_packets", E1000_STAT(stats.gptc) },
- { "rx_bytes", E1000_STAT(stats.gorc) },
- { "tx_bytes", E1000_STAT(stats.gotc) },
- { "rx_broadcast", E1000_STAT(stats.bprc) },
- { "tx_broadcast", E1000_STAT(stats.bptc) },
- { "rx_multicast", E1000_STAT(stats.mprc) },
- { "tx_multicast", E1000_STAT(stats.mptc) },
- { "rx_errors", E1000_NETDEV_STAT(stats.rx_errors) },
- { "tx_errors", E1000_NETDEV_STAT(stats.tx_errors) },
- { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
- { "multicast", E1000_STAT(stats.mprc) },
- { "collisions", E1000_STAT(stats.colc) },
- { "rx_length_errors", E1000_NETDEV_STAT(stats.rx_length_errors) },
- { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
- { "rx_crc_errors", E1000_STAT(stats.crcerrs) },
- { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
- { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
- { "rx_missed_errors", E1000_STAT(stats.mpc) },
- { "tx_aborted_errors", E1000_STAT(stats.ecol) },
- { "tx_carrier_errors", E1000_STAT(stats.tncrs) },
- { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
- { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
- { "tx_window_errors", E1000_STAT(stats.latecol) },
- { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
- { "tx_deferred_ok", E1000_STAT(stats.dc) },
- { "tx_single_coll_ok", E1000_STAT(stats.scc) },
- { "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
- { "tx_timeout_count", E1000_STAT(tx_timeout_count) },
- { "tx_restart_queue", E1000_STAT(restart_queue) },
- { "rx_long_length_errors", E1000_STAT(stats.roc) },
- { "rx_short_length_errors", E1000_STAT(stats.ruc) },
- { "rx_align_errors", E1000_STAT(stats.algnerrc) },
- { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
- { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
- { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
- { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
- { "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
- { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
- { "rx_long_byte_count", E1000_STAT(stats.gorc) },
- { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
- { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
- { "rx_header_split", E1000_STAT(rx_hdr_split) },
- { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
- { "tx_smbus", E1000_STAT(stats.mgptc) },
- { "rx_smbus", E1000_STAT(stats.mgprc) },
- { "dropped_smbus", E1000_STAT(stats.mgpdc) },
- { "rx_dma_failed", E1000_STAT(rx_dma_failed) },
- { "tx_dma_failed", E1000_STAT(tx_dma_failed) },
+ E1000_STAT("rx_packets", stats.gprc),
+ E1000_STAT("tx_packets", stats.gptc),
+ E1000_STAT("rx_bytes", stats.gorc),
+ E1000_STAT("tx_bytes", stats.gotc),
+ E1000_STAT("rx_broadcast", stats.bprc),
+ E1000_STAT("tx_broadcast", stats.bptc),
+ E1000_STAT("rx_multicast", stats.mprc),
+ E1000_STAT("tx_multicast", stats.mptc),
+ E1000_NETDEV_STAT("rx_errors", rx_errors),
+ E1000_NETDEV_STAT("tx_errors", tx_errors),
+ E1000_NETDEV_STAT("tx_dropped", tx_dropped),
+ E1000_STAT("multicast", stats.mprc),
+ E1000_STAT("collisions", stats.colc),
+ E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
+ E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
+ E1000_STAT("rx_crc_errors", stats.crcerrs),
+ E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
+ E1000_STAT("rx_no_buffer_count", stats.rnbc),
+ E1000_STAT("rx_missed_errors", stats.mpc),
+ E1000_STAT("tx_aborted_errors", stats.ecol),
+ E1000_STAT("tx_carrier_errors", stats.tncrs),
+ E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
+ E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
+ E1000_STAT("tx_window_errors", stats.latecol),
+ E1000_STAT("tx_abort_late_coll", stats.latecol),
+ E1000_STAT("tx_deferred_ok", stats.dc),
+ E1000_STAT("tx_single_coll_ok", stats.scc),
+ E1000_STAT("tx_multi_coll_ok", stats.mcc),
+ E1000_STAT("tx_timeout_count", tx_timeout_count),
+ E1000_STAT("tx_restart_queue", restart_queue),
+ E1000_STAT("rx_long_length_errors", stats.roc),
+ E1000_STAT("rx_short_length_errors", stats.ruc),
+ E1000_STAT("rx_align_errors", stats.algnerrc),
+ E1000_STAT("tx_tcp_seg_good", stats.tsctc),
+ E1000_STAT("tx_tcp_seg_failed", stats.tsctfc),
+ E1000_STAT("rx_flow_control_xon", stats.xonrxc),
+ E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
+ E1000_STAT("tx_flow_control_xon", stats.xontxc),
+ E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
+ E1000_STAT("rx_long_byte_count", stats.gorc),
+ E1000_STAT("rx_csum_offload_good", hw_csum_good),
+ E1000_STAT("rx_csum_offload_errors", hw_csum_err),
+ E1000_STAT("rx_header_split", rx_hdr_split),
+ E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
+ E1000_STAT("tx_smbus", stats.mgptc),
+ E1000_STAT("rx_smbus", stats.mgprc),
+ E1000_STAT("dropped_smbus", stats.mgpdc),
+ E1000_STAT("rx_dma_failed", rx_dma_failed),
+ E1000_STAT("tx_dma_failed", tx_dma_failed),
};
#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
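
The hunk above turns the two stat macros into designated initializers and, for the NETDEV_STATS entries, points them at struct rtnl_link_stats64 instead of struct net_device. Each table entry then carries a name, a source type, a field width and a byte offset, so the ethtool code can fetch every counter generically. A minimal userspace sketch of that table-of-offsets pattern (the struct and names here are illustrative, not the driver's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint32_t restarts;
};

struct demo_stat_desc {
	char name[32];
	size_t size;
	size_t offset;
};

/* Designated-initializer macro, same shape as the patch's E1000_STAT() */
#define DEMO_STAT(str, m) {				\
	.name   = str,					\
	.size   = sizeof(((struct demo_stats *)0)->m),	\
	.offset = offsetof(struct demo_stats, m) }

static const struct demo_stat_desc demo_descs[] = {
	DEMO_STAT("rx_packets", rx_packets),
	DEMO_STAT("tx_packets", tx_packets),
	DEMO_STAT("restart_queue", restarts),
};

int main(void)
{
	struct demo_stats s = { .rx_packets = 10, .tx_packets = 7, .restarts = 2 };
	size_t i;

	/* Generic readout: pick the value by offset, width by recorded size */
	for (i = 0; i < sizeof(demo_descs) / sizeof(demo_descs[0]); i++) {
		const char *p = (const char *)&s + demo_descs[i].offset;
		uint64_t val = (demo_descs[i].size == sizeof(uint64_t)) ?
			*(const uint64_t *)p : *(const uint32_t *)p;
		printf("%-16s %llu\n", demo_descs[i].name,
		       (unsigned long long)val);
	}
	return 0;
}

Later in this patch, e1000_get_ethtool_stats() walks its table the same way, but reads the NETDEV_STATS entries out of a local struct rtnl_link_stats64 filled by e1000e_get_stats64() rather than dereferencing the net_device.
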
@@ -194,20 +198,6 @@ static int e1000_get_settings(struct net_device *netdev,
return 0;
}
-static u32 e1000_get_link(struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- /*
- * Avoid touching hardware registers when possible, otherwise
- * link negotiation can get messed up when user-level scripts
- * are rapidly polling the driver to see if link is up.
- */
- return netif_running(netdev) ? netif_carrier_ok(netdev) :
- !!(er32(STATUS) & E1000_STATUS_LU);
-}
-
static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
{
struct e1000_mac_info *mac = &adapter->hw.mac;
@@ -443,13 +433,11 @@ static void e1000_get_regs(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
u32 *regs_buff = p;
u16 phy_data;
- u8 revision_id;
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
- pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id);
-
- regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
+ regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
@@ -634,20 +622,24 @@ static void e1000_get_drvinfo(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
char firmware_version[32];
- strncpy(drvinfo->driver, e1000e_driver_name, 32);
- strncpy(drvinfo->version, e1000e_driver_version, 32);
+ strncpy(drvinfo->driver, e1000e_driver_name,
+ sizeof(drvinfo->driver) - 1);
+ strncpy(drvinfo->version, e1000e_driver_version,
+ sizeof(drvinfo->version) - 1);
/*
* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
*/
- sprintf(firmware_version, "%d.%d-%d",
+ snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
(adapter->eeprom_vers & 0xF000) >> 12,
(adapter->eeprom_vers & 0x0FF0) >> 4,
(adapter->eeprom_vers & 0x000F));
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strncpy(drvinfo->fw_version, firmware_version,
+ sizeof(drvinfo->fw_version) - 1);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info) - 1);
drvinfo->regdump_len = e1000_get_regs_len(netdev);
drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
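
In the get_drvinfo hunk the fixed length 32 is replaced by sizeof(dest) - 1 and sprintf by snprintf, so every copy is bounded by the actual destination field; the size - 1 form keeps the final byte as a NUL terminator as long as the structure starts out zeroed. A small userspace illustration of the same bounded-copy idiom (the struct and field names are made up):

#include <stdio.h>
#include <string.h>

struct demo_drvinfo {
	char driver[32];
	char fw_version[32];
};

int main(void)
{
	struct demo_drvinfo info = { 0 };	/* zeroed, so size-1 copies stay NUL-terminated */
	const char *name = "e1000e";
	unsigned int vers = 0x1A25;		/* illustrative EEPROM version word */

	strncpy(info.driver, name, sizeof(info.driver) - 1);

	/* snprintf never writes past the buffer, unlike sprintf */
	snprintf(info.fw_version, sizeof(info.fw_version), "%d.%d-%d",
		 (vers & 0xF000) >> 12, (vers & 0x0FF0) >> 4, vers & 0x000F);

	printf("%s %s\n", info.driver, info.fw_version);
	return 0;
}
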
@@ -690,20 +682,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
rx_old = adapter->rx_ring;
err = -ENOMEM;
- tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
if (!tx_ring)
goto err_alloc_tx;
- /*
- * use a memcpy to save any previously configured
- * items like napi structs from having to be
- * reinitialized
- */
- memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
- rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
if (!rx_ring)
goto err_alloc_rx;
- memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
adapter->tx_ring = tx_ring;
adapter->rx_ring = rx_ring;
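
kmemdup() allocates a buffer and copies the source into it in one call, which is why the explanatory memcpy comment above disappears: the new ring structures still start as byte-for-byte copies of the old ones, so previously configured fields survive the reallocation. A userspace stand-in for the same allocate-and-copy helper (memdup() below is a hypothetical name, not a kernel API):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Userspace stand-in for kmemdup(): allocate len bytes and copy src into them */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

struct demo_ring {
	unsigned int count;
	void *desc;
};

int main(void)
{
	struct demo_ring old = { .count = 256, .desc = NULL };
	struct demo_ring *new_ring = memdup(&old, sizeof(old));

	if (!new_ring)
		return 1;
	new_ring->count = 512;		/* resize the copy, old ring untouched */
	printf("old=%u new=%u\n", old.count, new_ring->count);
	free(new_ring);
	return 0;
}
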
@@ -763,8 +748,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
int reg, int offset, u32 mask, u32 write)
{
u32 pat, val;
- static const u32 test[] =
- {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ static const u32 test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
(test[pat] & write));
@@ -1261,8 +1246,8 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_reg = 0;
- u32 stat_reg = 0;
u16 phy_reg = 0;
+ s32 ret_val = 0;
hw->mac.autoneg = 0;
@@ -1322,7 +1307,13 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
case e1000_phy_82577:
case e1000_phy_82578:
/* Workaround: K1 must be disabled for stable 1Gbps operation */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ e_err("Cannot setup 1Gbps loopback.\n");
+ return ret_val;
+ }
e1000_configure_k1_ich8lan(hw, false);
+ hw->phy.ops.release(hw);
break;
case e1000_phy_82579:
/* Disable PHY energy detect power down */
@@ -1362,8 +1353,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
* Set the ILOS bit on the fiber Nic if half duplex link is
* detected.
*/
- stat_reg = er32(STATUS);
- if ((stat_reg & E1000_STATUS_FD) == 0)
+ if ((er32(STATUS) & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
}
@@ -1676,10 +1666,13 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
} else {
hw->mac.ops.check_for_link(hw);
if (hw->mac.autoneg)
- msleep(4000);
+ /*
+ * On some Phy/switch combinations, link establishment
+ * can take a few seconds more than expected.
+ */
+ msleep(5000);
- if (!(er32(STATUS) &
- E1000_STATUS_LU))
+ if (!(er32(STATUS) & E1000_STATUS_LU))
*data = 1;
}
return *data;
@@ -1707,6 +1700,19 @@ static void e1000_diag_test(struct net_device *netdev,
bool if_running = netif_running(netdev);
set_bit(__E1000_TESTING, &adapter->state);
+
+ if (!if_running) {
+ /* Get control of and reset hardware */
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_get_hw_control(adapter);
+
+ e1000e_power_up_phy(adapter);
+
+ adapter->hw.phy.autoneg_wait_to_complete = 1;
+ e1000e_reset(adapter);
+ adapter->hw.phy.autoneg_wait_to_complete = 0;
+ }
+
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
@@ -1720,8 +1726,6 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
dev_close(netdev);
- else
- e1000e_reset(adapter);
if (e1000_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1735,8 +1739,6 @@ static void e1000_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
e1000e_reset(adapter);
- /* make sure the phy is powered up */
- e1000e_power_up_phy(adapter);
if (e1000_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1758,28 +1760,29 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
dev_open(netdev);
} else {
- if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
- clear_bit(__E1000_TESTING, &adapter->state);
- dev_open(netdev);
- set_bit(__E1000_TESTING, &adapter->state);
- }
+ /* Online tests */
e_info("online testing starting\n");
- /* Online tests */
- if (e1000_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
- /* Online tests aren't run; pass by default */
+ /* register, eeprom, intr and loopback tests not run online */
data[0] = 0;
data[1] = 0;
data[2] = 0;
data[3] = 0;
- if (!if_running && (adapter->flags & FLAG_HAS_AMT))
- dev_close(netdev);
+ if (e1000_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__E1000_TESTING, &adapter->state);
}
+
+ if (!if_running) {
+ e1000e_reset(adapter);
+
+ if (adapter->flags & FLAG_HAS_AMT)
+ e1000e_release_hw_control(adapter);
+ }
+
msleep_interruptible(4 * 1000);
}
@@ -1796,8 +1799,7 @@ static void e1000_get_wol(struct net_device *netdev,
return;
wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC |
- WAKE_PHY | WAKE_ARP;
+ WAKE_BCAST | WAKE_MAGIC | WAKE_PHY;
/* apply any specific unsupported masks here */
if (adapter->flags & FLAG_NO_WAKE_UCAST) {
@@ -1818,19 +1820,16 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts |= WAKE_MAGIC;
if (adapter->wol & E1000_WUFC_LNKC)
wol->wolopts |= WAKE_PHY;
- if (adapter->wol & E1000_WUFC_ARP)
- wol->wolopts |= WAKE_ARP;
}
-static int e1000_set_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wol)
+static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
if (!(adapter->flags & FLAG_HAS_WOL) ||
!device_can_wakeup(&adapter->pdev->dev) ||
(wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
- WAKE_MAGIC | WAKE_PHY | WAKE_ARP)))
+ WAKE_MAGIC | WAKE_PHY)))
return -EOPNOTSUPP;
/* these settings will always override what we currently have */
@@ -1846,8 +1845,6 @@ static int e1000_set_wol(struct net_device *netdev,
adapter->wol |= E1000_WUFC_MAG;
if (wol->wolopts & WAKE_PHY)
adapter->wol |= E1000_WUFC_LNKC;
- if (wol->wolopts & WAKE_ARP)
- adapter->wol |= E1000_WUFC_ARP;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
@@ -1860,7 +1857,7 @@ static int e1000_set_wol(struct net_device *netdev,
/* bit defines for adapter->led_status */
#define E1000_LED_ON 0
-static void e1000e_led_blink_task(struct work_struct *work)
+void e1000e_led_blink_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter, led_blink_task);
@@ -1892,7 +1889,6 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
(hw->mac.type == e1000_pch2lan) ||
(hw->mac.type == e1000_82583) ||
(hw->mac.type == e1000_82574)) {
- INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
if (!adapter->blink_timer.function) {
init_timer(&adapter->blink_timer);
adapter->blink_timer.function =
@@ -1962,8 +1958,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
static int e1000_nway_reset(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (netif_running(netdev))
- e1000e_reinit_locked(adapter);
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (!adapter->hw.mac.autoneg)
+ return -EINVAL;
+
+ e1000e_reinit_locked(adapter);
+
return 0;
}
@@ -1972,20 +1975,24 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct rtnl_link_stats64 net_stats;
int i;
char *p = NULL;
- e1000e_update_stats(adapter);
+ e1000e_get_stats64(netdev, &net_stats);
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
- p = (char *) netdev +
+ p = (char *) &net_stats +
e1000_gstrings_stats[i].stat_offset;
break;
case E1000_STATS:
p = (char *) adapter +
e1000_gstrings_stats[i].stat_offset;
break;
+ default:
+ data[i] = 0;
+ continue;
}
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
@@ -2001,7 +2008,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test));
+ memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
break;
case ETH_SS_STATS:
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
@@ -2024,7 +2031,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_msglevel = e1000_get_msglevel,
.set_msglevel = e1000_set_msglevel,
.nway_reset = e1000_nway_reset,
- .get_link = e1000_get_link,
+ .get_link = ethtool_op_get_link,
.get_eeprom_len = e1000_get_eeprom_len,
.get_eeprom = e1000_get_eeprom,
.set_eeprom = e1000_set_eeprom,
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index ba302a5c2c30..307e1ec22417 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -83,6 +83,7 @@ enum e1e_registers {
E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */
+#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
E1000_PBS = 0x01008, /* Packet Buffer Size */
E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
@@ -101,7 +102,7 @@ enum e1e_registers {
E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
- E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */
+ E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
/* Convenience macros
*
@@ -811,9 +812,8 @@ struct e1000_nvm_operations {
struct e1000_mac_info {
struct e1000_mac_operations ops;
-
- u8 addr[6];
- u8 perm_addr[6];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
enum e1000_mac_type type;
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index e3374d9a2472..ce1dbfdca112 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -140,6 +140,11 @@
#define I82579_LPI_CTRL PHY_REG(772, 20)
#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+/* EMI Registers */
+#define I82579_EMI_ADDR 0x10
+#define I82579_EMI_DATA 0x11
+#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
+
/* Strapping Option Register - RO */
#define E1000_STRAP 0x0000C
#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
@@ -302,9 +307,9 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
* the interconnect to PCIe mode.
*/
fwsm = er32(FWSM);
- if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+ if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) {
ctrl = er32(CTRL);
- ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
ew32(CTRL, ctrl);
udelay(10);
@@ -321,7 +326,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
}
/*
- * Reset the PHY before any acccess to it. Doing so, ensures that
+ * Reset the PHY before any access to it. Doing so, ensures that
* the PHY is in a known good state before we read/write PHY registers.
* The generic reset is sufficient here, because we haven't determined
* the PHY type yet.
@@ -331,19 +336,24 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
goto out;
/* Ungate automatic PHY configuration on non-managed 82579 */
- if ((hw->mac.type == e1000_pch2lan) &&
+ if ((hw->mac.type == e1000_pch2lan) &&
!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
msleep(10);
e1000_gate_hw_phy_config_ich8lan(hw, false);
}
phy->id = e1000_phy_unknown;
- ret_val = e1000e_get_phy_id(hw);
- if (ret_val)
- goto out;
- if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
+ switch (hw->mac.type) {
+ default:
+ ret_val = e1000e_get_phy_id(hw);
+ if (ret_val)
+ goto out;
+ if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
+ break;
+ /* fall-through */
+ case e1000_pch2lan:
/*
- * In case the PHY needs to be in mdio slow mode (eg. 82577),
+ * In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
ret_val = e1000_set_mdio_slow_mode_hv(hw);
@@ -352,6 +362,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
goto out;
+ break;
}
phy->type = e1000e_get_phy_type_from_id(phy->id);
@@ -360,7 +371,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_phy_82579:
phy->ops.check_polarity = e1000_check_polarity_82577;
phy->ops.force_speed_duplex =
- e1000_phy_force_speed_duplex_82577;
+ e1000_phy_force_speed_duplex_82577;
phy->ops.get_cable_length = e1000_get_cable_length_82577;
phy->ops.get_info = e1000_get_phy_info_82577;
phy->ops.commit = e1000e_phy_sw_reset;
@@ -747,7 +758,13 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
if (rc)
return rc;
- if (adapter->hw.phy.type == e1000_phy_ife) {
+ /*
+ * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
+ * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
+ */
+ if ((adapter->hw.phy.type == e1000_phy_ife) ||
+ ((adapter->hw.mac.type >= e1000_pch2lan) &&
+ (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
}
@@ -1389,22 +1406,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
}
}
-static u32 e1000_calc_rx_da_crc(u8 mac[])
-{
- u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
- u32 i, j, mask, crc;
-
- crc = 0xffffffff;
- for (i = 0; i < 6; i++) {
- crc = crc ^ mac[i];
- for (j = 8; j > 0; j--) {
- mask = (crc & 1) * (-1);
- crc = (crc >> 1) ^ (poly & mask);
- }
- }
- return ~crc;
-}
-
/**
* e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
* with 82579 PHY
@@ -1447,8 +1448,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
mac_addr[4] = (addr_high & 0xFF);
mac_addr[5] = ((addr_high >> 8) & 0xFF);
- ew32(PCH_RAICC(i),
- e1000_calc_rx_da_crc(mac_addr));
+ ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
}
/* Write Rx addresses to the PHY */
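
The helper removed above was an open-coded bit-reflected CRC-32 (polynomial 0xEDB88320, initial value 0xFFFFFFFF, result inverted) over the six bytes of a receive address; the hunk replaces the call with ~ether_crc_le(ETH_ALEN, mac_addr), which the patch treats as computing the same value. A standalone userspace version of that CRC, equivalent to the deleted function:

#include <stdint.h>
#include <stdio.h>

/* Bit-reflected CRC-32 (poly 0xEDB88320) over len bytes, inverted at the end,
 * matching the helper the patch removes in favour of ~ether_crc_le(). */
static uint32_t rx_da_crc(const uint8_t *data, int len)
{
	uint32_t crc = 0xFFFFFFFF;
	int i, j;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320 : 0);
	}
	return ~crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1B, 0x21, 0x12, 0x34, 0x56 };

	printf("crc=%08x\n", rx_da_crc(mac, 6));
	return 0;
}
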
@@ -1734,11 +1734,25 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
/* Configure the LCD with the OEM bits in NVM */
ret_val = e1000_oem_bits_config_ich8lan(hw, true);
- /* Ungate automatic PHY configuration on non-managed 82579 */
- if ((hw->mac.type == e1000_pch2lan) &&
- !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
- msleep(10);
- e1000_gate_hw_phy_config_ich8lan(hw, false);
+ if (hw->mac.type == e1000_pch2lan) {
+ /* Ungate automatic PHY configuration on non-managed 82579 */
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ msleep(10);
+ e1000_gate_hw_phy_config_ich8lan(hw, false);
+ }
+
+ /* Set EEE LPI Update Timer to 200usec */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
+ I82579_LPI_UPDATE_TIMER);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
+ 0x1387);
+release:
+ hw->phy.ops.release(hw);
}
out:
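
The block added above programs the 82579 EEE LPI update timer through an EMI address/data pair: with the PHY semaphore held, one locked write selects the EMI register (I82579_LPI_UPDATE_TIMER through I82579_EMI_ADDR) and the next writes the payload through I82579_EMI_DATA. The value 0x1387 is 4999 decimal, and 4999 x 40 ns plus the 40 ns base gives the 200 usec named in the comment. A sketch of the generic select-then-write indirection (phy_write() below is a placeholder, not the driver's MDIO routine):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical address/data indirect register write: select the target
 * register through an "address" register, then write the payload through a
 * "data" register, keeping the bus lock held across both accesses. */
#define EMI_ADDR_REG 0x10
#define EMI_DATA_REG 0x11

static int phy_write(uint32_t reg, uint16_t val)
{
	printf("phy[%02x] <= %04x\n", reg, val);	/* stand-in for an MDIO access */
	return 0;
}

static int emi_write(uint16_t emi_reg, uint16_t val)
{
	int ret;

	/* a real driver would take the PHY semaphore here */
	ret = phy_write(EMI_ADDR_REG, emi_reg);
	if (!ret)
		ret = phy_write(EMI_DATA_REG, val);
	/* ... and release it here, even on failure */
	return ret;
}

int main(void)
{
	/* 0x1387 = 4999; 4999 * 40 ns + 40 ns base = 200,000 ns = 200 usec */
	return emi_write(0x4805, 0x1387);
}
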
@@ -2115,7 +2129,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
union ich8_hws_flash_status hsfsts;
s32 ret_val = -E1000_ERR_NVM;
- s32 i = 0;
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
@@ -2151,6 +2164,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
ret_val = 0;
} else {
+ s32 i = 0;
+
/*
* Otherwise poll for sometime so the current
* cycle has a chance to end before giving up.
@@ -2303,11 +2318,10 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
*/
if (ret_val == 0) {
flash_data = er32flash(ICH_FLASH_FDATA0);
- if (size == 1) {
+ if (size == 1)
*data = (u8)(flash_data & 0x000000FF);
- } else if (size == 2) {
+ else if (size == 2)
*data = (u16)(flash_data & 0x0000FFFF);
- }
break;
} else {
/*
@@ -2972,7 +2986,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u16 reg;
- u32 ctrl, icr, kab;
+ u32 ctrl, kab;
s32 ret_val;
/*
@@ -3062,7 +3076,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ew32(CRC_OFFSET, 0x65656565);
ew32(IMC, 0xffffffff);
- icr = er32(ICR);
+ er32(ICR);
kab = er32(KABGTXD);
kab |= E1000_KABGTXD_BGSQLBIAS;
@@ -3113,7 +3127,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
* Reset the phy after disabling host wakeup to reset the Rx buffer.
*/
if (hw->phy.type == e1000_phy_82578) {
- hw->phy.ops.read_reg(hw, BM_WUC, &i);
+ e1e_rphy(hw, BM_WUC, &i);
ret_val = e1000_phy_hw_reset_ich8lan(hw);
if (ret_val)
return ret_val;
@@ -3271,9 +3285,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
(hw->phy.type == e1000_phy_82577)) {
ew32(FCRTV_PCH, hw->fc.refresh_time);
- ret_val = hw->phy.ops.write_reg(hw,
- PHY_REG(BM_PORT_CTRL_PAGE, 27),
- hw->fc.pause_time);
+ ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
+ hw->fc.pause_time);
if (ret_val)
return ret_val;
}
@@ -3337,8 +3350,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
break;
case e1000_phy_ife:
- ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
- &reg_data);
+ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
if (ret_val)
return ret_val;
@@ -3356,8 +3368,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
reg_data |= IFE_PMC_AUTO_MDIX;
break;
}
- ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
- reg_data);
+ ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
if (ret_val)
return ret_val;
break;
@@ -3591,7 +3602,7 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
ew32(PHY_CTRL, phy_ctrl);
if (hw->mac.type >= e1000_pchlan) {
- e1000_oem_bits_config_ich8lan(hw, true);
+ e1000_oem_bits_config_ich8lan(hw, false);
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return;
@@ -3641,7 +3652,8 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
if (hw->phy.type == e1000_phy_ife)
return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
- (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+ (IFE_PSCL_PROBE_MODE |
+ IFE_PSCL_PROBE_LEDS_OFF));
ew32(LEDCTL, hw->mac.ledctl_mode1);
return 0;
@@ -3655,8 +3667,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
{
- return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
- (u16)hw->mac.ledctl_mode1);
+ return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
}
/**
@@ -3667,8 +3678,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
**/
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
- return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
- (u16)hw->mac.ledctl_default);
+ return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
}
/**
@@ -3699,7 +3709,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
}
}
- return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+ return e1e_wphy(hw, HV_LED_CONFIG, data);
}
/**
@@ -3730,7 +3740,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
}
}
- return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+ return e1e_wphy(hw, HV_LED_CONFIG, data);
}
/**
@@ -3839,20 +3849,20 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82579) ||
(hw->phy.type == e1000_phy_82577)) {
- hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
- hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
+ e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
+ e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
+ e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
+ e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
+ e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
+ e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
+ e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
+ e1e_rphy(hw, HV_DC_UPPER, &phy_data);
+ e1e_rphy(hw, HV_DC_LOWER, &phy_data);
+ e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
+ e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
}
}
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 0fd4eb5ac5fb..96921de5df2e 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -493,9 +493,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000e_config_fc_after_link_up(hw);
- if (ret_val) {
+ if (ret_val)
e_dbg("Error configuring flow control\n");
- }
return ret_val;
}
@@ -534,7 +533,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
mac->autoneg_failed = 1;
return 0;
}
- e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+ e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
/* Disable auto-negotiation in the TXCW register */
ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -557,7 +556,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
*/
- e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -599,7 +598,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
mac->autoneg_failed = 1;
return 0;
}
- e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+ e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
/* Disable auto-negotiation in the TXCW register */
ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -622,7 +621,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
*/
- e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -801,9 +800,9 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
+ * but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames but we
- * do not support receiving pause frames).
+ * do not support receiving pause frames).
* 3: Both Rx and Tx flow control (symmetric) are enabled.
*/
switch (hw->fc.current_mode) {
@@ -1032,9 +1031,9 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause
- * frames but not send pause frames).
+ * frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * frames but we do not receive pause frames).
* 3: Both Rx and Tx flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
@@ -1136,7 +1135,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+ ret_val =
+ e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
if (ret_val)
return ret_val;
@@ -1189,7 +1189,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
} else {
hw->fc.current_mode = e1000_fc_rx_pause;
e_dbg("Flow Control = "
- "RX PAUSE frames only.\r\n");
+ "Rx PAUSE frames only.\r\n");
}
}
/*
@@ -1496,9 +1496,8 @@ s32 e1000e_setup_led_generic(struct e1000_hw *hw)
{
u32 ledctl;
- if (hw->mac.ops.setup_led != e1000e_setup_led_generic) {
+ if (hw->mac.ops.setup_led != e1000e_setup_led_generic)
return -E1000_ERR_CONFIG;
- }
if (hw->phy.media_type == e1000_media_type_fiber) {
ledctl = er32(LEDCTL);
@@ -1979,15 +1978,15 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = er32(EECD);
- u16 timeout = 0;
u8 spi_stat_reg;
if (nvm->type == e1000_nvm_eeprom_spi) {
+ u16 timeout = NVM_MAX_RETRY_SPI;
+
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
ew32(EECD, eecd);
udelay(1);
- timeout = NVM_MAX_RETRY_SPI;
/*
* Read "Status Register" repeatedly until the LSB is cleared.
@@ -2139,6 +2138,119 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
}
/**
+ * e1000_read_pba_string_generic - Read device part number
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ if (pba_num == NULL) {
+ e_dbg("PBA string buffer was null\n");
+ ret_val = E1000_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ /*
+ * if nvm_data is not ptr guard the PBA must be in legacy format which
+ * means pba_ptr is actually our second data word for the PBA number
+ * and we can decode it into an ascii string
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ e_dbg("NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ e_dbg("PBA string buffer too small\n");
+ return E1000_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (nvm_data >> 12) & 0xF;
+ pba_num[1] = (nvm_data >> 8) & 0xF;
+ pba_num[2] = (nvm_data >> 4) & 0xF;
+ pba_num[3] = nvm_data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ goto out;
+ }
+
+ ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ e_dbg("NVM PBA number section invalid length\n");
+ ret_val = E1000_ERR_NVM_PBA_SECTION;
+ goto out;
+ }
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ e_dbg("PBA string buffer too small\n");
+ ret_val = E1000_ERR_NO_SPACE;
+ goto out;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+ pba_num[offset * 2] = (u8)(nvm_data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_read_mac_addr_generic - Read device MAC address
* @hw: pointer to the HW structure
*
@@ -2579,25 +2691,3 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
out:
return ret_val;
}
-
-s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
-{
- s32 ret_val;
- u16 nvm_data;
-
- ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- *pba_num = (u32)(nvm_data << 16);
-
- ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- *pba_num |= nvm_data;
-
- return 0;
-}
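
e1000_read_pba_string_generic() added above supersedes the removed e1000e_read_pba_num(): instead of packing the part number into a u32, it returns it as a string. When the first NVM word is not the pointer-guard value, the number is stored in the legacy layout and the two 16-bit words are expanded nibble by nibble into a ten-character string (hex digits with a dash after the sixth); otherwise pba_ptr addresses an NVM section that already holds the string, two characters per word. A userspace sketch of just the legacy decode, mirroring the hunk above:

#include <stdint.h>
#include <stdio.h>

/* Legacy PBA decode as in the hunk above: two 16-bit NVM words become a
 * ten-character string ("XXXXXX-0XX"), dash after the sixth digit. */
static void decode_legacy_pba(uint16_t word0, uint16_t word1, char pba[11])
{
	int i;

	pba[0] = (word0 >> 12) & 0xF;
	pba[1] = (word0 >> 8) & 0xF;
	pba[2] = (word0 >> 4) & 0xF;
	pba[3] = word0 & 0xF;
	pba[4] = (word1 >> 12) & 0xF;
	pba[5] = (word1 >> 8) & 0xF;
	pba[6] = '-';
	pba[7] = 0;
	pba[8] = (word1 >> 4) & 0xF;
	pba[9] = word1 & 0xF;
	pba[10] = '\0';

	/* turn raw nibbles (and the literal 0 at index 7) into ASCII hex */
	for (i = 0; i < 10; i++) {
		if (pba[i] < 0xA)
			pba[i] += '0';
		else if (pba[i] < 0x10)
			pba[i] += 'A' - 0xA;
	}
}

int main(void)
{
	char pba[11];

	decode_legacy_pba(0x1234, 0x5678, pba);
	printf("%s\n", pba);	/* prints 123456-078 */
	return 0;
}
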
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c4ca1629f532..a39d4a4d871c 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -54,7 +54,7 @@
#define DRV_EXTRAVERSION "-k2"
-#define DRV_VERSION "1.2.7" DRV_EXTRAVERSION
+#define DRV_VERSION "1.3.10" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -77,17 +77,17 @@ struct e1000_reg_info {
char *name;
};
-#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
-#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
-#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
-#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
-#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
-#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
-#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
static const struct e1000_reg_info e1000_reg_info_tbl[] = {
@@ -99,7 +99,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
/* Interrupt Registers */
{E1000_ICR, "ICR"},
- /* RX Registers */
+ /* Rx Registers */
{E1000_RCTL, "RCTL"},
{E1000_RDLEN, "RDLEN"},
{E1000_RDH, "RDH"},
@@ -115,7 +115,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{E1000_RDFTS, "RDFTS"},
{E1000_RDFPC, "RDFPC"},
- /* TX Registers */
+ /* Tx Registers */
{E1000_TCTL, "TCTL"},
{E1000_TDBAL, "TDBAL"},
{E1000_TDBAH, "TDBAH"},
@@ -160,7 +160,7 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
break;
default:
printk(KERN_INFO "%-15s %08x\n",
- reginfo->name, __er32(hw, reginfo->ofs));
+ reginfo->name, __er32(hw, reginfo->ofs));
return;
}
@@ -171,9 +171,8 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
printk(KERN_CONT "\n");
}
-
/*
- * e1000e_dump - Print registers, tx-ring and rx-ring
+ * e1000e_dump - Print registers, Tx-ring and Rx-ring
*/
static void e1000e_dump(struct e1000_adapter *adapter)
{
@@ -182,12 +181,20 @@ static void e1000e_dump(struct e1000_adapter *adapter)
struct e1000_reg_info *reginfo;
struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_tx_desc *tx_desc;
- struct my_u0 { u64 a; u64 b; } *u0;
+ struct my_u0 {
+ u64 a;
+ u64 b;
+ } *u0;
struct e1000_buffer *buffer_info;
struct e1000_ring *rx_ring = adapter->rx_ring;
union e1000_rx_desc_packet_split *rx_desc_ps;
struct e1000_rx_desc *rx_desc;
- struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+ struct my_u1 {
+ u64 a;
+ u64 b;
+ u64 c;
+ u64 d;
+ } *u1;
u32 staterr;
int i = 0;
@@ -198,12 +205,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
printk(KERN_INFO "Device Name state "
- "trans_start last_rx\n");
+ "trans_start last_rx\n");
printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name,
- netdev->state,
- netdev->trans_start,
- netdev->last_rx);
+ netdev->name, netdev->state, netdev->trans_start,
+ netdev->last_rx);
}
/* Print Registers */
@@ -214,26 +219,26 @@ static void e1000e_dump(struct e1000_adapter *adapter)
e1000_regdump(hw, reginfo);
}
- /* Print TX Ring Summary */
+ /* Print Tx Ring Summary */
if (!netdev || !netif_running(netdev))
goto exit;
- dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
- " leng ntw timestamp\n");
+ " leng ntw timestamp\n");
buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
- 0, tx_ring->next_to_use, tx_ring->next_to_clean,
- (unsigned long long)buffer_info->dma,
- buffer_info->length,
- buffer_info->next_to_watch,
- (unsigned long long)buffer_info->time_stamp);
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp);
- /* Print TX Rings */
+ /* Print Tx Ring */
if (!netif_msg_tx_done(adapter))
goto rx_ring_summary;
- dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+ dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
*
@@ -263,22 +268,22 @@ static void e1000e_dump(struct e1000_adapter *adapter)
* 63 48 47 40 39 36 35 32 31 24 23 20 19 0
*/
printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Legacy format\n");
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Legacy format\n");
printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Context format\n");
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Context format\n");
printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Data format\n");
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Data format\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
- "%04X %3X %016llX %p",
- (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
- ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+ "%04X %3X %016llX %p",
+ (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
(unsigned long long)le64_to_cpu(u0->a),
(unsigned long long)le64_to_cpu(u0->b),
(unsigned long long)buffer_info->dma,
@@ -296,22 +301,22 @@ static void e1000e_dump(struct e1000_adapter *adapter)
if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
- 16, 1, phys_to_virt(buffer_info->dma),
- buffer_info->length, true);
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
}
- /* Print RX Rings Summary */
+ /* Print Rx Ring Summary */
rx_ring_summary:
- dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
printk(KERN_INFO "Queue [NTU] [NTC]\n");
printk(KERN_INFO " %5d %5X %5X\n", 0,
- rx_ring->next_to_use, rx_ring->next_to_clean);
+ rx_ring->next_to_use, rx_ring->next_to_clean);
- /* Print RX Rings */
+ /* Print Rx Ring */
if (!netif_msg_rx_status(adapter))
goto exit;
- dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+ dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
switch (adapter->rx_ps_pages) {
case 1:
case 2:
@@ -329,7 +334,7 @@ rx_ring_summary:
* +-----------------------------------------------------+
*/
printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
- "[buffer 1 63:0 ] "
+ "[buffer 1 63:0 ] "
"[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
"[bi->skb] <-- Ext Pkt Split format\n");
/* [Extended] Receive Descriptor (Write-Back) Format
@@ -344,7 +349,7 @@ rx_ring_summary:
* 63 48 47 32 31 20 19 0
*/
printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
- "[vl l0 ee es] "
+ "[vl l0 ee es] "
"[ l3 l2 l1 hs] [reserved ] ---------------- "
"[bi->skb] <-- Ext Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
@@ -352,26 +357,26 @@ rx_ring_summary:
rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
u1 = (struct my_u1 *)rx_desc_ps;
staterr =
- le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+ le32_to_cpu(rx_desc_ps->wb.middle.status_error);
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX %016llX %016llX "
- "---------------- %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- buffer_info->skb);
+ "%016llX %016llX %016llX "
+ "---------------- %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ buffer_info->skb);
} else {
printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %016llX %016llX %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ "%016llX %016llX %016llX %016llX %p", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb);
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
@@ -400,18 +405,18 @@ rx_ring_summary:
* 63 48 47 40 39 32 31 16 15 0
*/
printk(KERN_INFO "Rl[desc] [address 63:0 ] "
- "[vl er S cks ln] [bi->dma ] [bi->skb] "
- "<-- Legacy format\n");
+ "[vl er S cks ln] [bi->dma ] [bi->skb] "
+ "<-- Legacy format\n");
for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
rx_desc = E1000_RX_DESC(*rx_ring, i);
buffer_info = &rx_ring->buffer_info[i];
u0 = (struct my_u0 *)rx_desc;
printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
- "%016llX %p", i,
- (unsigned long long)le64_to_cpu(u0->a),
- (unsigned long long)le64_to_cpu(u0->b),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ "%016llX %p", i,
+ (unsigned long long)le64_to_cpu(u0->a),
+ (unsigned long long)le64_to_cpu(u0->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb);
if (i == rx_ring->next_to_use)
printk(KERN_CONT " NTU\n");
else if (i == rx_ring->next_to_clean)
@@ -421,9 +426,10 @@ rx_ring_summary:
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1, phys_to_virt(buffer_info->dma),
- adapter->rx_buffer_len, true);
+ DUMP_PREFIX_ADDRESS,
+ 16, 1,
+ phys_to_virt(buffer_info->dma),
+ adapter->rx_buffer_len, true);
}
}
@@ -450,8 +456,7 @@ static int e1000_desc_unused(struct e1000_ring *ring)
* @skb: pointer to sk_buff to be indicated to stack
**/
static void e1000_receive_skb(struct e1000_adapter *adapter,
- struct net_device *netdev,
- struct sk_buff *skb,
+ struct net_device *netdev, struct sk_buff *skb,
u8 status, __le16 vlan)
{
skb->protocol = eth_type_trans(skb, netdev);
@@ -464,7 +469,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
}
/**
- * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * e1000_rx_checksum - Receive Checksum Offload
* @adapter: board private structure
* @status_err: receive descriptor status and error fields
* @csum: receive descriptor csum field
@@ -548,7 +553,7 @@ map_skb:
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- dev_err(&pdev->dev, "RX DMA map failed\n");
+ dev_err(&pdev->dev, "Rx DMA map failed\n");
adapter->rx_dma_failed++;
break;
}
@@ -601,7 +606,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
ps_page = &buffer_info->ps_pages[j];
if (j >= adapter->rx_ps_pages) {
/* all unused desc entries get hw null ptr */
- rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
+ rx_desc->read.buffer_addr[j + 1] =
+ ~cpu_to_le64(0);
continue;
}
if (!ps_page->page) {
@@ -617,7 +623,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
if (dma_mapping_error(&pdev->dev,
ps_page->dma)) {
dev_err(&adapter->pdev->dev,
- "RX DMA page map failed\n");
+ "Rx DMA page map failed\n");
adapter->rx_dma_failed++;
goto no_buffers;
}
@@ -627,8 +633,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
* didn't change because each write-back
* erases this info.
*/
- rx_desc->read.buffer_addr[j+1] =
- cpu_to_le64(ps_page->dma);
+ rx_desc->read.buffer_addr[j + 1] =
+ cpu_to_le64(ps_page->dma);
}
skb = netdev_alloc_skb_ip_align(netdev,
@@ -644,7 +650,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
adapter->rx_ps_bsize0,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- dev_err(&pdev->dev, "RX DMA map failed\n");
+ dev_err(&pdev->dev, "Rx DMA map failed\n");
adapter->rx_dma_failed++;
/* cleanup skb */
dev_kfree_skb_any(skb);
@@ -662,7 +668,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
* such as IA-64).
*/
wmb();
- writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+ writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
}
i++;
@@ -894,8 +900,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
- netdev->stats.rx_bytes += total_rx_bytes;
- netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -931,6 +935,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
u16 phy_status, phy_1000t_status, phy_ext_status;
u16 pci_status;
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
e1e_rphy(hw, PHY_STATUS, &phy_status);
e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1051,8 +1058,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
}
adapter->total_tx_bytes += total_tx_bytes;
adapter->total_tx_packets += total_tx_packets;
- netdev->stats.tx_bytes += total_tx_bytes;
- netdev->stats.tx_packets += total_tx_packets;
return count < tx_ring->count;
}
@@ -1106,11 +1111,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
dma_unmap_single(&pdev->dev, buffer_info->dma,
- adapter->rx_ps_bsize0,
- DMA_FROM_DEVICE);
+ adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
buffer_info->dma = 0;
- /* see !EOP comment in other rx routine */
+ /* see !EOP comment in other Rx routine */
if (!(staterr & E1000_RXD_STAT_EOP))
adapter->flags2 |= FLAG2_IS_DISCARDING;
@@ -1240,8 +1244,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
- netdev->stats.rx_bytes += total_rx_bytes;
- netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -1320,12 +1322,12 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* an error means any chain goes out the window
* too */
if (rx_ring->rx_skb_top)
- dev_kfree_skb(rx_ring->rx_skb_top);
+ dev_kfree_skb_irq(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
goto next_desc;
}
-#define rxtop rx_ring->rx_skb_top
+#define rxtop (rx_ring->rx_skb_top)
if (!(status & E1000_RXD_STAT_EOP)) {
/* this descriptor is only the beginning (or middle) */
if (!rxtop) {
@@ -1393,7 +1395,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* eth type trans needs skb->data to point to something */
if (!pskb_may_pull(skb, ETH_HLEN)) {
e_err("pskb_may_pull failed.\n");
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
goto next_desc;
}
@@ -1421,8 +1423,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
- netdev->stats.rx_bytes += total_rx_bytes;
- netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -1501,6 +1501,9 @@ static void e1000e_downshift_workaround(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter, downshift_task);
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}
@@ -1806,9 +1809,8 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
err = pci_enable_msix(adapter->pdev,
adapter->msix_entries,
adapter->num_vectors);
- if (err == 0) {
+ if (err == 0)
return;
- }
}
/* MSI-X failed, so fall through and try MSI */
e_err("Failed to initialize MSI-X interrupts. "
@@ -1847,7 +1849,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
int err = 0, vector = 0;
if (strlen(netdev->name) < (IFNAMSIZ - 5))
- sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
+ snprintf(adapter->rx_ring->name,
+ sizeof(adapter->rx_ring->name) - 1,
+ "%s-rx-0", netdev->name);
else
memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
@@ -1860,7 +1864,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
vector++;
if (strlen(netdev->name) < (IFNAMSIZ - 5))
- sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
+ snprintf(adapter->tx_ring->name,
+ sizeof(adapter->tx_ring->name) - 1,
+ "%s-tx-0", netdev->name);
else
memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
@@ -1981,15 +1987,15 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
}
/**
- * e1000_get_hw_control - get control of the h/w from f/w
+ * e1000e_get_hw_control - get control of the h/w from f/w
* @adapter: address of board private structure
*
- * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that
* the driver is loaded. For AMT version (only with 82573)
* of the f/w this means that the network i/f is open.
**/
-static void e1000_get_hw_control(struct e1000_adapter *adapter)
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_ext;
@@ -2006,16 +2012,16 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
}
/**
- * e1000_release_hw_control - release control of the h/w to f/w
+ * e1000e_release_hw_control - release control of the h/w to f/w
* @adapter: address of board private structure
*
- * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that the
* driver is no longer loaded. For AMT version (only with 82573) i
* of the f/w this means that the network i/f is closed.
*
**/
-static void e1000_release_hw_control(struct e1000_adapter *adapter)
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_ext;
@@ -2059,10 +2065,9 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
int err = -ENOMEM, size;
size = sizeof(struct e1000_buffer) * tx_ring->count;
- tx_ring->buffer_info = vmalloc(size);
+ tx_ring->buffer_info = vzalloc(size);
if (!tx_ring->buffer_info)
goto err;
- memset(tx_ring->buffer_info, 0, size);
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
@@ -2095,10 +2100,9 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
int i, size, desc_len, err = -ENOMEM;
size = sizeof(struct e1000_buffer) * rx_ring->count;
- rx_ring->buffer_info = vmalloc(size);
+ rx_ring->buffer_info = vzalloc(size);
if (!rx_ring->buffer_info)
goto err;
- memset(rx_ring->buffer_info, 0, size);
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
@@ -2132,7 +2136,7 @@ err_pages:
}
err:
vfree(rx_ring->buffer_info);
- e_err("Unable to allocate memory for the transmit descriptor ring\n");
+ e_err("Unable to allocate memory for the receive descriptor ring\n");
return err;
}
@@ -2200,9 +2204,8 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
e1000_clean_rx_ring(adapter);
- for (i = 0; i < rx_ring->count; i++) {
+ for (i = 0; i < rx_ring->count; i++)
kfree(rx_ring->buffer_info[i].ps_pages);
- }
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
@@ -2242,20 +2245,18 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
/* handle TSO and jumbo frames */
if (bytes/packets > 8000)
retval = bulk_latency;
- else if ((packets < 5) && (bytes > 512)) {
+ else if ((packets < 5) && (bytes > 512))
retval = low_latency;
- }
break;
case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) {
/* this if handles the TSO accounting */
- if (bytes/packets > 8000) {
+ if (bytes/packets > 8000)
retval = bulk_latency;
- } else if ((packets < 10) || ((bytes/packets) > 1200)) {
+ else if ((packets < 10) || ((bytes/packets) > 1200))
retval = bulk_latency;
- } else if ((packets > 35)) {
+ else if ((packets > 35))
retval = lowest_latency;
- }
} else if (bytes/packets > 2000) {
retval = bulk_latency;
} else if (packets <= 2 && bytes < 512) {
@@ -2264,9 +2265,8 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
break;
case bulk_latency: /* 250 usec aka 4000 ints/s */
if (bytes > 25000) {
- if (packets > 35) {
+ if (packets > 35)
retval = low_latency;
- }
} else if (bytes < 6000) {
retval = low_latency;
}
@@ -2452,7 +2452,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
(vid == adapter->mng_vlan_id)) {
/* release control to f/w */
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
return;
}
@@ -2617,7 +2617,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
}
/**
- * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * e1000_configure_tx - Configure Transmit Unit after Reset
* @adapter: board private structure
*
* Configure the Tx unit of the MAC after a reset.
@@ -2670,7 +2670,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
* hthresh = 1 ==> prefetch when one or more available
* pthresh = 0x1f ==> prefetch if internal cache 31 or less
* BEWARE: this seems to work but should be considered first if
- * there are tx hangs or other tx related bugs
+ * there are Tx hangs or other Tx related bugs
*/
txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
ew32(TXDCTL(0), txdctl);
@@ -2730,7 +2730,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 rctl, rfctl;
- u32 psrctl = 0;
u32 pages = 0;
/* Workaround Si errata on 82579 - configure jumbo frame flow */
@@ -2741,6 +2740,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
else
ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+ if (ret_val)
+ e_dbg("failed to enable jumbo frame workaround mode\n");
}
/* Program MC offset vector base */
@@ -2826,6 +2828,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
adapter->rx_ps_pages = 0;
if (adapter->rx_ps_pages) {
+ u32 psrctl = 0;
+
/* Configure extra packet-split registers */
rfctl = er32(RFCTL);
rfctl |= E1000_RFCTL_EXTEN;
@@ -2881,7 +2885,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (adapter->rx_ps_pages) {
/* this is a 32 byte descriptor */
rdlen = rx_ring->count *
- sizeof(union e1000_rx_desc_packet_split);
+ sizeof(union e1000_rx_desc_packet_split);
adapter->clean_rx = e1000_clean_rx_irq_ps;
adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
@@ -2904,7 +2908,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/*
* set the writeback threshold (only takes effect if the RDTR
* is set). set GRAN=1 and write back up to 0x4 worth, and
- * enable prefetching of 0x20 rx descriptors
+ * enable prefetching of 0x20 Rx descriptors
* granularity = 01
* wthresh = 04,
* hthresh = 04,
@@ -2985,12 +2989,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* excessive C-state transition latencies result in
* dropped transactions.
*/
- pm_qos_update_request(
- &adapter->netdev->pm_qos_req, 55);
+ pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
} else {
- pm_qos_update_request(
- &adapter->netdev->pm_qos_req,
- PM_QOS_DEFAULT_VALUE);
+ pm_qos_update_request(&adapter->netdev->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
}
}
@@ -3029,7 +3031,6 @@ static void e1000_set_multi(struct net_device *netdev)
struct netdev_hw_addr *ha;
u8 *mta_list;
u32 rctl;
- int i;
/* Check for Promiscuous and All Multicast modes */
@@ -3052,12 +3053,13 @@ static void e1000_set_multi(struct net_device *netdev)
ew32(RCTL, rctl);
if (!netdev_mc_empty(netdev)) {
+ int i = 0;
+
mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return;
/* prepare a packed array of only addresses. */
- i = 0;
netdev_for_each_mc_addr(ha, netdev)
memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
@@ -3156,7 +3158,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
/* lower 16 bits have the Rx packet buffer allocation size in KB */
pba &= 0xffff;
/*
- * the Tx fifo also stores 16 bytes of information about the tx
+ * the Tx fifo also stores 16 bytes of information about the Tx
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (adapter->max_frame_size +
@@ -3179,7 +3181,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
pba -= min_tx_space - tx_space;
/*
- * if short on Rx space, Rx wins and must trump tx
+ * if short on Rx space, Rx wins and must trump Tx
* adjustment or use Early Receive if available
*/
if ((pba < min_rx_space) &&
@@ -3191,7 +3193,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
ew32(PBA, pba);
}
-
/*
* flow control settings
*
@@ -3279,7 +3280,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
* that the network interface is in control
*/
if (adapter->flags & FLAG_HAS_AMT)
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
ew32(WUC, 0);
@@ -3292,6 +3293,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
ew32(VET, ETH_P_8021Q);
e1000e_reset_adaptive(hw);
+
+ if (!netif_running(adapter->netdev) &&
+ !test_bit(__E1000_TESTING, &adapter->state)) {
+ e1000_power_down_phy(adapter);
+ return;
+ }
+
e1000_get_phy_info(hw);
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
@@ -3333,6 +3341,23 @@ int e1000e_up(struct e1000_adapter *adapter)
return 0;
}
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!(adapter->flags2 & FLAG2_DMA_BURST))
+ return;
+
+ /* flush pending descriptor writebacks to memory */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+ /* execute the writes immediately */
+ e1e_flush();
+}
+
+static void e1000e_update_stats(struct e1000_adapter *adapter);
+
void e1000e_down(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3367,11 +3392,19 @@ void e1000e_down(struct e1000_adapter *adapter)
del_timer_sync(&adapter->phy_info_timer);
netif_carrier_off(netdev);
+
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
+
adapter->link_speed = 0;
adapter->link_duplex = 0;
if (!pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
+
+ e1000e_flush_descriptors(adapter);
+
e1000_clean_tx_ring(adapter);
e1000_clean_rx_ring(adapter);
@@ -3408,6 +3441,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ spin_lock_init(&adapter->stats64_lock);
+
e1000e_set_interrupt_capability(adapter);
if (e1000_alloc_queues(adapter))
@@ -3577,7 +3612,7 @@ static int e1000_open(struct net_device *netdev)
* interface is now open and reset the part to a known state.
*/
if (adapter->flags & FLAG_HAS_AMT) {
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
e1000e_reset(adapter);
}
@@ -3641,7 +3676,7 @@ static int e1000_open(struct net_device *netdev)
return 0;
err_req_irq:
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
e1000_power_down_phy(adapter);
e1000e_free_rx_resources(adapter);
err_setup_rx:
@@ -3696,8 +3731,9 @@ static int e1000_close(struct net_device *netdev)
* If AMT is enabled, let the firmware know that the network
* interface is now closed
*/
- if (adapter->flags & FLAG_HAS_AMT)
- e1000_release_hw_control(adapter);
+ if ((adapter->flags & FLAG_HAS_AMT) &&
+ !test_bit(__E1000_TESTING, &adapter->state))
+ e1000e_release_hw_control(adapter);
if ((adapter->flags & FLAG_HAS_ERT) ||
(adapter->hw.mac.type == e1000_pch2lan))
@@ -3759,6 +3795,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter, update_phy_task);
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
e1000_get_phy_info(&adapter->hw);
}
@@ -3769,6 +3809,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
static void e1000_update_phy_info(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
schedule_work(&adapter->update_phy_task);
}
@@ -3880,7 +3924,7 @@ release:
* e1000e_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
-void e1000e_update_stats(struct e1000_adapter *adapter)
+static void e1000e_update_stats(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
@@ -3992,10 +4036,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_phy_regs *phy = &adapter->phy_regs;
- int ret_val;
if ((er32(STATUS) & E1000_STATUS_LU) &&
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
+ int ret_val;
+
ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@@ -4036,11 +4081,11 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
adapter->netdev->name,
adapter->link_speed,
(adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex",
+ "Full Duplex" : "Half Duplex",
((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
- "RX/TX" :
- ((ctrl & E1000_CTRL_RFCE) ? "RX" :
- ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
+ "Rx/Tx" :
+ ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
}
static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -4141,7 +4186,9 @@ static void e1000_watchdog_task(struct work_struct *work)
struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_hw *hw = &adapter->hw;
u32 link, tctl;
- int tx_pending = 0;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
@@ -4279,7 +4326,9 @@ static void e1000_watchdog_task(struct work_struct *work)
}
link_up:
+ spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
adapter->tpt_old = adapter->stats.tpt;
@@ -4293,21 +4342,17 @@ link_up:
e1000e_update_adaptive(&adapter->hw);
- if (!netif_carrier_ok(netdev)) {
- tx_pending = (e1000_desc_unused(tx_ring) + 1 <
- tx_ring->count);
- if (tx_pending) {
- /*
- * We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context).
- */
- adapter->tx_timeout_count++;
- schedule_work(&adapter->reset_task);
- /* return immediately since reset is imminent */
- return;
- }
+ if (!netif_carrier_ok(netdev) &&
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
+ /*
+ * We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
}
/* Simple mode for Interrupt Throttle Rate (ITR) */
@@ -4332,19 +4377,12 @@ link_up:
else
ew32(ICS, E1000_ICS_RXDMT0);
+ /* flush pending descriptors to memory before detecting Tx hang */
+ e1000e_flush_descriptors(adapter);
+
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = 1;
- /* flush partial descriptors to memory before detecting tx hang */
- if (adapter->flags2 & FLAG2_DMA_BURST) {
- ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
- ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
- /*
- * no need to flush the writes because the timeout code does
- * an er32 first thing
- */
- }
-
/*
* With 82571 controllers, LAA may be overwritten due to controller
* reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4378,13 +4416,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
u32 cmd_length = 0;
u16 ipcse = 0, tucse, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
- int err;
if (!skb_is_gso(skb))
return 0;
if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+
if (err)
return err;
}
@@ -4475,7 +4513,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
break;
}
- css = skb_transport_offset(skb);
+ css = skb_checksum_start_offset(skb);
i = tx_ring->next_to_use;
buffer_info = &tx_ring->buffer_info[i];
@@ -4526,7 +4564,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->next_to_watch = i;
buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, DMA_TO_DEVICE);
+ size, DMA_TO_DEVICE);
buffer_info->mapped_as_page = false;
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
@@ -4573,7 +4611,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
}
- segs = skb_shinfo(skb)->gso_segs ?: 1;
+ segs = skb_shinfo(skb)->gso_segs ? : 1;
/* multiply data chunks by size of headers */
bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
@@ -4585,17 +4623,17 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
return count;
dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
+ dev_err(&pdev->dev, "Tx DMA map failed\n");
buffer_info->dma = 0;
if (count)
count--;
while (count--) {
- if (i==0)
+ if (i == 0)
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
- e1000_put_txbuf(adapter, buffer_info);;
+ e1000_put_txbuf(adapter, buffer_info);
}
return 0;
@@ -4631,7 +4669,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
i = tx_ring->next_to_use;
- while (count--) {
+ do {
buffer_info = &tx_ring->buffer_info[i];
tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4642,7 +4680,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
i++;
if (i == tx_ring->count)
i = 0;
- }
+ } while (--count > 0);
tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
@@ -4882,6 +4920,10 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
+ /* don't run the task if already down */
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
(adapter->flags & FLAG_RX_RESTART_NOW))) {
e1000e_dump(adapter);
@@ -4891,16 +4933,55 @@ static void e1000_reset_task(struct work_struct *work)
}
/**
- * e1000_get_stats - Get System Network Statistics
+ * e1000_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
*
* Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
**/
-static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
- /* only return the current stats */
- return &netdev->stats;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ /* Fill out the OS statistics structure */
+ stats->rx_bytes = adapter->stats.gorc;
+ stats->rx_packets = adapter->stats.gprc;
+ stats->tx_bytes = adapter->stats.gotc;
+ stats->tx_packets = adapter->stats.gptc;
+ stats->multicast = adapter->stats.mprc;
+ stats->collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /*
+ * RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC
+ */
+ stats->rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ stats->rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ stats->rx_crc_errors = adapter->stats.crcerrs;
+ stats->rx_frame_errors = adapter->stats.algnerrc;
+ stats->rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ stats->tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ stats->tx_aborted_errors = adapter->stats.ecol;
+ stats->tx_window_errors = adapter->stats.latecol;
+ stats->tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ spin_unlock(&adapter->stats64_lock);
+ return stats;
}
/**
@@ -5216,7 +5297,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
pci_disable_device(pdev);
@@ -5301,7 +5382,7 @@ void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
__e1000e_disable_aspm(pdev, state);
}
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
static bool e1000e_pm_ready(struct e1000_adapter *adapter)
{
return !!adapter->tx_ring->buffer_info;
@@ -5373,7 +5454,7 @@ static int __e1000_resume(struct pci_dev *pdev)
* under the control of the driver.
*/
if (!(adapter->flags & FLAG_HAS_AMT))
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
return 0;
}
@@ -5452,7 +5533,7 @@ static int e1000_runtime_resume(struct device *dev)
return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_RUNTIME */
-#endif /* CONFIG_PM_OPS */
+#endif /* CONFIG_PM */
static void e1000_shutdown(struct pci_dev *pdev)
{
@@ -5465,6 +5546,37 @@ static void e1000_shutdown(struct pci_dev *pdev)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
+
+static irqreturn_t e1000_intr_msix(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->msix_entries) {
+ int vector, msix_irq;
+
+ vector = 0;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_intr_msix_rx(msix_irq, netdev);
+ enable_irq(msix_irq);
+
+ vector++;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_intr_msix_tx(msix_irq, netdev);
+ enable_irq(msix_irq);
+
+ vector++;
+ msix_irq = adapter->msix_entries[vector].vector;
+ disable_irq(msix_irq);
+ e1000_msix_other(msix_irq, netdev);
+ enable_irq(msix_irq);
+ }
+
+ return IRQ_HANDLED;
+}
+
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
@@ -5474,10 +5586,21 @@ static void e1000_netpoll(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- disable_irq(adapter->pdev->irq);
- e1000_intr(adapter->pdev->irq, netdev);
-
- enable_irq(adapter->pdev->irq);
+ switch (adapter->int_mode) {
+ case E1000E_INT_MODE_MSIX:
+ e1000_intr_msix(adapter->pdev->irq, netdev);
+ break;
+ case E1000E_INT_MODE_MSI:
+ disable_irq(adapter->pdev->irq);
+ e1000_intr_msi(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+ break;
+ default: /* E1000E_INT_MODE_LEGACY */
+ disable_irq(adapter->pdev->irq);
+ e1000_intr(adapter->pdev->irq, netdev);
+ enable_irq(adapter->pdev->irq);
+ break;
+ }
}
#endif
@@ -5579,7 +5702,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
* under the control of the driver.
*/
if (!(adapter->flags & FLAG_HAS_AMT))
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
}
@@ -5587,7 +5710,8 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- u32 pba_num;
+ u32 ret_val;
+ u8 pba_str[E1000_PBANUM_LENGTH];
/* print bus type/speed/width info */
e_info("(PCI Express:2.5GB/s:%s) %pM\n",
@@ -5598,9 +5722,12 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
netdev->dev_addr);
e_info("Intel(R) PRO/%s Network Connection\n",
(hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
- e1000e_read_pba_num(hw, &pba_num);
- e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
- hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
+ ret_val = e1000_read_pba_string_generic(hw, pba_str,
+ E1000_PBANUM_LENGTH);
+ if (ret_val)
+ strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
+ e_info("MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, pba_str);
}
static void e1000_eeprom_checks(struct e1000_adapter *adapter)
@@ -5624,7 +5751,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_open = e1000_open,
.ndo_stop = e1000_close,
.ndo_start_xmit = e1000_xmit_frame,
- .ndo_get_stats = e1000_get_stats,
+ .ndo_get_stats64 = e1000e_get_stats64,
.ndo_set_multicast_list = e1000_set_multi,
.ndo_set_mac_address = e1000_set_mac,
.ndo_change_mtu = e1000_change_mtu,
@@ -5864,6 +5991,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
+ INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
/* Initialize link parameters. User can change them with ethtool */
adapter->hw.mac.autoneg = 1;
@@ -5884,7 +6012,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* APME bit in EEPROM is mapped to WUC.APME */
eeprom_data = er32(WUC);
eeprom_apme_mask = E1000_WUC_APME;
- if (eeprom_data & E1000_WUC_PHY_WAKE)
+ if ((hw->mac.type > e1000_ich10lan) &&
+ (eeprom_data & E1000_WUC_PHY_WAKE))
adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
@@ -5924,9 +6053,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
* under the control of the driver.
*/
if (!(adapter->flags & FLAG_HAS_AMT))
- e1000_get_hw_control(adapter);
+ e1000e_get_hw_control(adapter);
- strcpy(netdev->name, "eth%d");
+ strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
err = register_netdev(netdev);
if (err)
goto err_register;
@@ -5943,12 +6072,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
err_register:
if (!(adapter->flags & FLAG_HAS_AMT))
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
err_eeprom:
if (!e1000_check_reset_block(&adapter->hw))
e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
-
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
err_sw_init:
@@ -5984,8 +6112,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
bool down = test_bit(__E1000_DOWN, &adapter->state);
/*
- * flush_scheduled work may reschedule our watchdog task, so
- * explicitly disable watchdog tasks from being rescheduled
+ * The timers may be rescheduled, so explicitly disable them
+ * from being rescheduled.
*/
if (!down)
set_bit(__E1000_DOWN, &adapter->state);
@@ -5996,8 +6124,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->downshift_task);
cancel_work_sync(&adapter->update_phy_task);
+ cancel_work_sync(&adapter->led_blink_task);
cancel_work_sync(&adapter->print_hang_task);
- flush_scheduled_work();
if (!(netdev->flags & IFF_UP))
e1000_power_down_phy(adapter);
@@ -6014,7 +6142,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
- e1000_release_hw_control(adapter);
+ e1000e_release_hw_control(adapter);
e1000e_reset_interrupt_capability(adapter);
kfree(adapter->tx_ring);
@@ -6113,7 +6241,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
@@ -6127,7 +6255,7 @@ static struct pci_driver e1000_driver = {
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
.driver.pm = &e1000_pm_ops,
#endif
.shutdown = e1000_shutdown,
@@ -6145,7 +6273,7 @@ static int __init e1000_init_module(void)
int ret;
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 3d36911f77f3..4dd9b63273f6 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -62,10 +62,9 @@ MODULE_PARM_DESC(copybreak,
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
-
/*
* Transmit Interrupt Delay in units of 1.024 microseconds
- * Tx interrupt delay needs to typically be set to something non zero
+ * Tx interrupt delay needs to typically be set to something non-zero
*
* Valid Range: 0-65535
*/
@@ -112,6 +111,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define DEFAULT_ITR 3
#define MAX_ITR 100000
#define MIN_ITR 100
+
/* IntMode (Interrupt Mode)
*
* Valid Range: 0 - 2
@@ -421,7 +421,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
static const struct e1000_option opt = {
.type = enable_option,
.name = "CRC Stripping",
- .err = "defaulting to enabled",
+ .err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 3d3dc0c82355..6ae31fcfb629 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -42,20 +42,20 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
u16 *data, bool read);
/* Cable length tables */
-static const u16 e1000_m88_cable_length_table[] =
- { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+static const u16 e1000_m88_cable_length_table[] = {
+ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_m88_cable_length_table)
-static const u16 e1000_igp_2_cable_length_table[] =
- { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
- 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
- 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
- 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
- 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
- 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
- 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
- 124};
+static const u16 e1000_igp_2_cable_length_table[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
+ 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
+ 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
+ 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
+ 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
+ 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
+ 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
+ 124};
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_igp_2_cable_length_table)
@@ -226,6 +226,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
}
*data = (u16) mdic;
+ /*
+ * Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ udelay(100);
+
return 0;
}
@@ -279,6 +286,13 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
return -E1000_ERR_PHY;
}
+ /*
+ * Allow some time after each MDIC transaction to avoid
+ * reading duplicate data in the next MDIC transaction.
+ */
+ if (hw->mac.type == e1000_pch2lan)
+ udelay(100);
+
return 0;
}
@@ -623,12 +637,11 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
**/
s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
{
- struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data;
- /* Enable CRS on TX. This must be set for half-duplex operation. */
- ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
if (ret_val)
goto out;
@@ -637,7 +650,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
/* Enable downshift */
phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
- ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
+ ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
out:
return ret_val;
@@ -760,16 +773,14 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
}
if (phy->type == e1000_phy_82578) {
- ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
- &phy_data);
+ ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
return ret_val;
/* 82578 PHY - set the downshift count to 1x. */
phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
- ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
- phy_data);
+ ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
if (ret_val)
return ret_val;
}
@@ -1043,9 +1054,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
- if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL)
ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
- }
return ret_val;
}
@@ -1306,9 +1316,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
* We didn't get link.
* Reset the DSP and cross our fingers.
*/
- ret_val = e1e_wphy(hw,
- M88E1000_PHY_PAGE_SELECT,
- 0x001d);
+ ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+ 0x001d);
if (ret_val)
return ret_val;
ret_val = e1000e_phy_reset_dsp(hw);
@@ -1840,11 +1849,12 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
u16 phy_data, i, agc_value = 0;
u16 cur_agc_index, max_agc_index = 0;
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
- u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
- {IGP02E1000_PHY_AGC_A,
- IGP02E1000_PHY_AGC_B,
- IGP02E1000_PHY_AGC_C,
- IGP02E1000_PHY_AGC_D};
+ static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
+ };
/* Read the AGC registers for all channels */
for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
@@ -2399,9 +2409,7 @@ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
{
s32 ret_val;
- u32 page_select = 0;
u32 page = offset >> IGP_PAGE_SHIFT;
- u32 page_shift = 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
@@ -2417,6 +2425,8 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
/*
* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for
@@ -2458,9 +2468,7 @@ out:
s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
{
s32 ret_val;
- u32 page_select = 0;
u32 page = offset >> IGP_PAGE_SHIFT;
- u32 page_shift = 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
@@ -2476,6 +2484,8 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
/*
* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for
@@ -2976,7 +2986,7 @@ s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
}
/**
- * e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page
+ * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
* @page: page to be accessed
**/
static u32 e1000_get_phy_addr_for_hv_page(u32 page)
@@ -3057,12 +3067,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
goto out;
/* Do not apply workaround if in PHY loopback bit 14 set */
- hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
+ e1e_rphy(hw, PHY_CONTROL, &data);
if (data & PHY_CONTROL_LB)
goto out;
/* check if link is up and at 1Gbps */
- ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
+ ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
if (ret_val)
goto out;
@@ -3078,14 +3088,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
mdelay(200);
/* flush the packets in the fifo buffer */
- ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
- HV_MUX_DATA_CTRL_GEN_TO_MAC |
- HV_MUX_DATA_CTRL_FORCE_SPEED);
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
+ HV_MUX_DATA_CTRL_FORCE_SPEED);
if (ret_val)
goto out;
- ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
- HV_MUX_DATA_CTRL_GEN_TO_MAC);
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
out:
return ret_val;
@@ -3105,7 +3113,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
s32 ret_val;
u16 data;
- ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+ ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
if (!ret_val)
phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
@@ -3128,13 +3136,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
u16 phy_data;
bool link;
- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
if (ret_val)
goto out;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
if (ret_val)
goto out;
@@ -3198,7 +3206,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
if (ret_val)
goto out;
- ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+ ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
if (ret_val)
goto out;
@@ -3210,7 +3218,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
if (ret_val)
goto out;
- ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+ ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
if (ret_val)
goto out;
@@ -3244,7 +3252,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data, length;
- ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+ ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
if (ret_val)
goto out;
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index 06e72fbef862..94ec973b2bdc 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -216,7 +216,7 @@ static int __init e21_probe1(struct net_device *dev, int ioaddr)
printk(" %02X", station_addr[i]);
if (dev->irq < 2) {
- int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4};
+ static const int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4};
for (i = 0; i < ARRAY_SIZE(irqlist); i++)
if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) {
dev->irq = irqlist[i];
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 7c826319ee5a..eb35951a2442 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -302,7 +302,7 @@ struct eepro_local {
#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */
#define ee_id_eepro10p1 0x31
-#define TX_TIMEOUT 40
+#define TX_TIMEOUT ((4*HZ)/10)
/* Index to functions, as function prototypes. */
@@ -891,12 +891,13 @@ err:
there is a non-reboot way to recover if something goes wrong.
*/
-static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
-static char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
+static const char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
+static const char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
static int eepro_grab_irq(struct net_device *dev)
{
- int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 };
- int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr;
+ static const int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 };
+ const int *irqp = irqlist;
+ int temp_reg, ioaddr = dev->base_addr;
eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
@@ -1760,7 +1761,7 @@ module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(mem, int, NULL, 0);
module_param(autodetect, int, 0);
-MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)");
+MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)");
MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 12c37d264108..48ee51bb9e50 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1103,7 +1103,7 @@ static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr)
dev->dev_addr[i] = ((unsigned char *)hw_addr)[5-i];
{
- static char irqmap[]={0, 9, 3, 4, 5, 10, 11, 0};
+ static const char irqmap[] = { 0, 9, 3, 4, 5, 10, 11, 0 };
unsigned short setupval = eexp_hw_readeeprom(ioaddr,0);
/* Use the IRQ from EEPROM if none was given */
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 8e745e74828d..6c7257bd73fc 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0106"
+#define DRV_VERSION "EHEA_0107"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
@@ -130,19 +130,6 @@
/* utility functions */
-#define ehea_info(fmt, args...) \
- printk(KERN_INFO DRV_NAME ": " fmt "\n", ## args)
-
-#define ehea_error(fmt, args...) \
- printk(KERN_ERR DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args)
-
-#ifdef DEBUG
-#define ehea_debug(fmt, args...) \
- printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
-#else
-#define ehea_debug(fmt, args...) do {} while (0)
-#endif
-
void ehea_dump(void *adr, int len, char *msg);
#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))
@@ -515,6 +502,4 @@ void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
-extern struct work_struct ehea_rereg_mr_task;
-
#endif /* __EHEA_H__ */
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 75b099ce49c9..3e2e734fecb7 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -26,6 +26,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ehea.h"
#include "ehea_phyp.h"
@@ -118,10 +120,10 @@ doit:
ret = ehea_set_portspeed(port, sp);
if (!ret)
- ehea_info("%s: Port speed successfully set: %dMbps "
- "%s Duplex",
- port->netdev->name, port->port_speed,
- port->full_duplex == 1 ? "Full" : "Half");
+ netdev_info(dev,
+ "Port speed successfully set: %dMbps %s Duplex\n",
+ port->port_speed,
+ port->full_duplex == 1 ? "Full" : "Half");
out:
return ret;
}
@@ -134,10 +136,10 @@ static int ehea_nway_reset(struct net_device *dev)
ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);
if (!ret)
- ehea_info("%s: Port speed successfully set: %dMbps "
- "%s Duplex",
- port->netdev->name, port->port_speed,
- port->full_duplex == 1 ? "Full" : "Half");
+ netdev_info(port->netdev,
+ "Port speed successfully set: %dMbps %s Duplex\n",
+ port->port_speed,
+ port->full_duplex == 1 ? "Full" : "Half");
return ret;
}
@@ -261,6 +263,20 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
}
+static int ehea_set_flags(struct net_device *dev, u32 data)
+{
+ /* Avoid changing the VLAN flags */
+ if ((data & (ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN)) !=
+ (ethtool_op_get_flags(dev) & (ETH_FLAG_RXVLAN |
+ ETH_FLAG_TXVLAN))){
+ return -EINVAL;
+ }
+
+ return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO
+ | ETH_FLAG_TXVLAN
+ | ETH_FLAG_RXVLAN);
+}
+
const struct ethtool_ops ehea_ethtool_ops = {
.get_settings = ehea_get_settings,
.get_drvinfo = ehea_get_drvinfo,
@@ -273,6 +289,8 @@ const struct ethtool_ops ehea_ethtool_ops = {
.get_ethtool_stats = ehea_get_ethtool_stats,
.get_rx_csum = ehea_get_rx_csum,
.set_settings = ehea_set_settings,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = ehea_set_flags,
.nway_reset = ehea_nway_reset, /* Restart autonegotiation */
};
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 3d0af08483a1..f75d3144b8a5 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -26,6 +26,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -101,7 +103,6 @@ MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
-struct work_struct ehea_rereg_mr_task;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;
@@ -136,8 +137,8 @@ void ehea_dump(void *adr, int len, char *msg)
int x;
unsigned char *deb = adr;
for (x = 0; x < len; x += 16) {
- printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
- deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
+ pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
+ msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
deb += 16;
}
}
@@ -337,7 +338,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
cb2 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb2) {
- ehea_error("no mem for cb2");
+ netdev_err(dev, "no mem for cb2\n");
goto out;
}
@@ -345,7 +346,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
port->logical_port_id,
H_PORT_CB2, H_PORT_CB2_ALL, cb2);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_port failed");
+ netdev_err(dev, "query_ehea_port failed\n");
goto out_herr;
}
@@ -400,7 +401,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
skb_arr_rq1[index] = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
if (!skb_arr_rq1[index]) {
- ehea_info("Unable to allocate enough skb in the array\n");
+ netdev_info(dev, "Unable to allocate enough skb in the array\n");
pr->rq1_skba.os_skbs = fill_wqes - i;
break;
}
@@ -424,19 +425,19 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
int i;
if (nr_rq1a > pr->rq1_skba.len) {
- ehea_error("NR_RQ1A bigger than skb array len\n");
+ netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
return;
}
for (i = 0; i < nr_rq1a; i++) {
skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
if (!skb_arr_rq1[i]) {
- ehea_info("No enough memory to allocate skb array\n");
+ netdev_info(dev, "Not enough memory to allocate skb array\n");
break;
}
}
/* Ring doorbell */
- ehea_update_rq1a(pr->qp, i);
+ ehea_update_rq1a(pr->qp, i - 1);
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -469,8 +470,9 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
if (!skb) {
q_skba->os_skbs = fill_wqes - i;
if (q_skba->os_skbs == q_skba->len - 2) {
- ehea_info("%s: rq%i ran dry - no mem for skb",
- pr->port->netdev->name, rq_nr);
+ netdev_info(pr->port->netdev,
+ "rq%i ran dry - no mem for skb\n",
+ rq_nr);
ret = -ENOMEM;
}
break;
@@ -635,8 +637,8 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
if (netif_msg_rx_err(pr->port)) {
- ehea_error("Critical receive error for QP %d. "
- "Resetting port.", pr->qp->init_attr.qp_nr);
+ pr_err("Critical receive error for QP %d. Resetting port.\n",
+ pr->qp->init_attr.qp_nr);
ehea_dump(cqe, sizeof(*cqe), "CQE");
}
ehea_schedule_port_reset(pr->port);
@@ -683,7 +685,7 @@ static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
pr->port->vgrp);
- if (use_lro) {
+ if (skb->dev->features & NETIF_F_LRO) {
if (vlan_extracted)
lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
pr->port->vgrp,
@@ -738,13 +740,13 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb_arr_rq1_len,
wqe_index);
if (unlikely(!skb)) {
- if (netif_msg_rx_err(port))
- ehea_error("LL rq1: skb=NULL");
+ netif_info(port, rx_err, dev,
+ "LL rq1: skb=NULL\n");
skb = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
if (!skb) {
- ehea_info("Not enough memory to allocate skb\n");
+ netdev_err(dev, "Not enough memory to allocate skb\n");
break;
}
}
@@ -756,8 +758,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb = get_skb_by_index(skb_arr_rq2,
skb_arr_rq2_len, cqe);
if (unlikely(!skb)) {
- if (netif_msg_rx_err(port))
- ehea_error("rq2: skb=NULL");
+ netif_err(port, rx_err, dev,
+ "rq2: skb=NULL\n");
break;
}
ehea_fill_skb(dev, skb, cqe);
@@ -767,8 +769,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb = get_skb_by_index(skb_arr_rq3,
skb_arr_rq3_len, cqe);
if (unlikely(!skb)) {
- if (netif_msg_rx_err(port))
- ehea_error("rq3: skb=NULL");
+ netif_err(port, rx_err, dev,
+ "rq3: skb=NULL\n");
break;
}
ehea_fill_skb(dev, skb, cqe);
@@ -787,7 +789,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
}
cqe = ehea_poll_rq1(qp, &wqe_index);
}
- if (use_lro)
+ if (dev->features & NETIF_F_LRO)
lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
@@ -840,7 +842,7 @@ static void check_sqs(struct ehea_port *port)
msecs_to_jiffies(100));
if (!ret) {
- ehea_error("HW/SW queues out of sync");
+ pr_err("HW/SW queues out of sync\n");
ehea_schedule_port_reset(pr->port);
return;
}
@@ -873,14 +875,14 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
}
if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
- ehea_error("Bad send completion status=0x%04X",
- cqe->status);
+ pr_err("Bad send completion status=0x%04X\n",
+ cqe->status);
if (netif_msg_tx_err(pr->port))
ehea_dump(cqe, sizeof(*cqe), "Send CQE");
if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
- ehea_error("Resetting port");
+ pr_err("Resetting port\n");
ehea_schedule_port_reset(pr->port);
break;
}
@@ -998,8 +1000,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
while (eqe) {
qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
- ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
- eqe->entry, qp_token);
+ pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
+ eqe->entry, qp_token);
qp = port->port_res[qp_token].qp;
@@ -1017,7 +1019,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
}
if (reset_port) {
- ehea_error("Resetting port");
+ pr_err("Resetting port\n");
ehea_schedule_port_reset(port);
}
@@ -1045,7 +1047,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
/* may be called via ehea_neq_tasklet() */
cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
if (!cb0) {
- ehea_error("no mem for cb0");
+ pr_err("no mem for cb0\n");
ret = -ENOMEM;
goto out;
}
@@ -1137,7 +1139,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
cb4 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb4) {
- ehea_error("no mem for cb4");
+ pr_err("no mem for cb4\n");
ret = -ENOMEM;
goto out;
}
@@ -1188,16 +1190,16 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
break;
}
} else {
- ehea_error("Failed sensing port speed");
+ pr_err("Failed sensing port speed\n");
ret = -EIO;
}
} else {
if (hret == H_AUTHORITY) {
- ehea_info("Hypervisor denied setting port speed");
+ pr_info("Hypervisor denied setting port speed\n");
ret = -EPERM;
} else {
ret = -EIO;
- ehea_error("Failed setting port speed");
+ pr_err("Failed setting port speed\n");
}
}
if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
@@ -1214,80 +1216,78 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
u8 ec;
u8 portnum;
struct ehea_port *port;
+ struct net_device *dev;
ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
port = ehea_get_port(adapter, portnum);
+ dev = port->netdev;
switch (ec) {
case EHEA_EC_PORTSTATE_CHG: /* port state change */
if (!port) {
- ehea_error("unknown portnum %x", portnum);
+ netdev_err(dev, "unknown portnum %x\n", portnum);
break;
}
if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
- if (!netif_carrier_ok(port->netdev)) {
+ if (!netif_carrier_ok(dev)) {
ret = ehea_sense_port_attr(port);
if (ret) {
- ehea_error("failed resensing port "
- "attributes");
+ netdev_err(dev, "failed resensing port attributes\n");
break;
}
- if (netif_msg_link(port))
- ehea_info("%s: Logical port up: %dMbps "
- "%s Duplex",
- port->netdev->name,
- port->port_speed,
- port->full_duplex ==
- 1 ? "Full" : "Half");
+ netif_info(port, link, dev,
+ "Logical port up: %dMbps %s Duplex\n",
+ port->port_speed,
+ port->full_duplex == 1 ?
+ "Full" : "Half");
- netif_carrier_on(port->netdev);
- netif_wake_queue(port->netdev);
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
}
} else
- if (netif_carrier_ok(port->netdev)) {
- if (netif_msg_link(port))
- ehea_info("%s: Logical port down",
- port->netdev->name);
- netif_carrier_off(port->netdev);
- netif_stop_queue(port->netdev);
+ if (netif_carrier_ok(dev)) {
+ netif_info(port, link, dev,
+ "Logical port down\n");
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
}
if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
port->phy_link = EHEA_PHY_LINK_UP;
- if (netif_msg_link(port))
- ehea_info("%s: Physical port up",
- port->netdev->name);
+ netif_info(port, link, dev,
+ "Physical port up\n");
if (prop_carrier_state)
- netif_carrier_on(port->netdev);
+ netif_carrier_on(dev);
} else {
port->phy_link = EHEA_PHY_LINK_DOWN;
- if (netif_msg_link(port))
- ehea_info("%s: Physical port down",
- port->netdev->name);
+ netif_info(port, link, dev,
+ "Physical port down\n");
if (prop_carrier_state)
- netif_carrier_off(port->netdev);
+ netif_carrier_off(dev);
}
if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
- ehea_info("External switch port is primary port");
+ netdev_info(dev,
+ "External switch port is primary port\n");
else
- ehea_info("External switch port is backup port");
+ netdev_info(dev,
+ "External switch port is backup port\n");
break;
case EHEA_EC_ADAPTER_MALFUNC:
- ehea_error("Adapter malfunction");
+ netdev_err(dev, "Adapter malfunction\n");
break;
case EHEA_EC_PORT_MALFUNC:
- ehea_info("Port malfunction: Device: %s", port->netdev->name);
- netif_carrier_off(port->netdev);
- netif_stop_queue(port->netdev);
+ netdev_info(dev, "Port malfunction\n");
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
break;
default:
- ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
+ netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
break;
}
}
@@ -1299,13 +1299,13 @@ static void ehea_neq_tasklet(unsigned long data)
u64 event_mask;
eqe = ehea_poll_eq(adapter->neq);
- ehea_debug("eqe=%p", eqe);
+ pr_debug("eqe=%p\n", eqe);
while (eqe) {
- ehea_debug("*eqe=%lx", eqe->entry);
+ pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
ehea_parse_eqe(adapter, eqe->entry);
eqe = ehea_poll_eq(adapter->neq);
- ehea_debug("next eqe=%p", eqe);
+ pr_debug("next eqe=%p\n", eqe);
}
event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
@@ -1329,9 +1329,7 @@ static int ehea_fill_port_res(struct ehea_port_res *pr)
int ret;
struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
- ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
- - init_attr->act_nr_rwqes_rq2
- - init_attr->act_nr_rwqes_rq3 - 1);
+ ehea_init_fill_rq1(pr, pr->rq1_skba.len);
ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
@@ -1354,14 +1352,14 @@ static int ehea_reg_interrupts(struct net_device *dev)
ehea_qp_aff_irq_handler,
IRQF_DISABLED, port->int_aff_name, port);
if (ret) {
- ehea_error("failed registering irq for qp_aff_irq_handler:"
- "ist=%X", port->qp_eq->attr.ist1);
+ netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
+ port->qp_eq->attr.ist1);
goto out_free_qpeq;
}
- if (netif_msg_ifup(port))
- ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
- "registered", port->qp_eq->attr.ist1);
+ netif_info(port, ifup, dev,
+ "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
+ port->qp_eq->attr.ist1);
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
@@ -1373,14 +1371,13 @@ static int ehea_reg_interrupts(struct net_device *dev)
IRQF_DISABLED, pr->int_send_name,
pr);
if (ret) {
- ehea_error("failed registering irq for ehea_queue "
- "port_res_nr:%d, ist=%X", i,
- pr->eq->attr.ist1);
+ netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
+ i, pr->eq->attr.ist1);
goto out_free_req;
}
- if (netif_msg_ifup(port))
- ehea_info("irq_handle 0x%X for function ehea_queue_int "
- "%d registered", pr->eq->attr.ist1, i);
+ netif_info(port, ifup, dev,
+ "irq_handle 0x%X for function ehea_queue_int %d registered\n",
+ pr->eq->attr.ist1, i);
}
out:
return ret;
@@ -1411,16 +1408,16 @@ static void ehea_free_interrupts(struct net_device *dev)
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
pr = &port->port_res[i];
ibmebus_free_irq(pr->eq->attr.ist1, pr);
- if (netif_msg_intr(port))
- ehea_info("free send irq for res %d with handle 0x%X",
- i, pr->eq->attr.ist1);
+ netif_info(port, intr, dev,
+ "free send irq for res %d with handle 0x%X\n",
+ i, pr->eq->attr.ist1);
}
/* associated events */
ibmebus_free_irq(port->qp_eq->attr.ist1, port);
- if (netif_msg_intr(port))
- ehea_info("associated event interrupt for handle 0x%X freed",
- port->qp_eq->attr.ist1);
+ netif_info(port, intr, dev,
+ "associated event interrupt for handle 0x%X freed\n",
+ port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
@@ -1489,7 +1486,7 @@ int ehea_gen_smrs(struct ehea_port_res *pr)
out_free:
ehea_rem_mr(&pr->send_mr);
out:
- ehea_error("Generating SMRS failed\n");
+ pr_err("Generating SMRS failed\n");
return -EIO;
}
@@ -1506,12 +1503,10 @@ static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
int arr_size = sizeof(void *) * max_q_entries;
- q_skba->arr = vmalloc(arr_size);
+ q_skba->arr = vzalloc(arr_size);
if (!q_skba->arr)
return -ENOMEM;
- memset(q_skba->arr, 0, arr_size);
-
q_skba->len = max_q_entries;
q_skba->index = 0;
q_skba->os_skbs = 0;
@@ -1546,7 +1541,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
if (!pr->eq) {
- ehea_error("create_eq failed (eq)");
+ pr_err("create_eq failed (eq)\n");
goto out_free;
}
@@ -1554,7 +1549,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
pr->eq->fw_handle,
port->logical_port_id);
if (!pr->recv_cq) {
- ehea_error("create_cq failed (cq_recv)");
+ pr_err("create_cq failed (cq_recv)\n");
goto out_free;
}
@@ -1562,19 +1557,19 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
pr->eq->fw_handle,
port->logical_port_id);
if (!pr->send_cq) {
- ehea_error("create_cq failed (cq_send)");
+ pr_err("create_cq failed (cq_send)\n");
goto out_free;
}
if (netif_msg_ifup(port))
- ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
- pr->send_cq->attr.act_nr_of_cqes,
- pr->recv_cq->attr.act_nr_of_cqes);
+ pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
+ pr->send_cq->attr.act_nr_of_cqes,
+ pr->recv_cq->attr.act_nr_of_cqes);
init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
if (!init_attr) {
ret = -ENOMEM;
- ehea_error("no mem for ehea_qp_init_attr");
+ pr_err("no mem for ehea_qp_init_attr\n");
goto out_free;
}
@@ -1599,18 +1594,18 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
if (!pr->qp) {
- ehea_error("create_qp failed");
+ pr_err("create_qp failed\n");
ret = -EIO;
goto out_free;
}
if (netif_msg_ifup(port))
- ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
- "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
- init_attr->act_nr_send_wqes,
- init_attr->act_nr_rwqes_rq1,
- init_attr->act_nr_rwqes_rq2,
- init_attr->act_nr_rwqes_rq3);
+ pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
+ init_attr->qp_nr,
+ init_attr->act_nr_send_wqes,
+ init_attr->act_nr_rwqes_rq1,
+ init_attr->act_nr_rwqes_rq2,
+ init_attr->act_nr_rwqes_rq3);
pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
@@ -1761,7 +1756,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
swqe->descriptors++;
}
} else
- ehea_error("cannot handle fragmented headers");
+ pr_err("cannot handle fragmented headers\n");
}
static void write_swqe2_nonTSO(struct sk_buff *skb,
@@ -1857,8 +1852,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
port->logical_port_id,
reg_type, port->mac_addr, 0, hcallid);
if (hret != H_SUCCESS) {
- ehea_error("%sregistering bc address failed (tagged)",
- hcallid == H_REG_BCMC ? "" : "de");
+ pr_err("%sregistering bc address failed (tagged)\n",
+ hcallid == H_REG_BCMC ? "" : "de");
ret = -EIO;
goto out_herr;
}
@@ -1869,8 +1864,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
port->logical_port_id,
reg_type, port->mac_addr, 0, hcallid);
if (hret != H_SUCCESS) {
- ehea_error("%sregistering bc address failed (vlan)",
- hcallid == H_REG_BCMC ? "" : "de");
+ pr_err("%sregistering bc address failed (vlan)\n",
+ hcallid == H_REG_BCMC ? "" : "de");
ret = -EIO;
}
out_herr:
@@ -1892,7 +1887,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
cb0 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb0) {
- ehea_error("no mem for cb0");
+ pr_err("no mem for cb0\n");
ret = -ENOMEM;
goto out;
}
@@ -1940,11 +1935,11 @@ out:
static void ehea_promiscuous_error(u64 hret, int enable)
{
if (hret == H_AUTHORITY)
- ehea_info("Hypervisor denied %sabling promiscuous mode",
- enable == 1 ? "en" : "dis");
+ pr_info("Hypervisor denied %sabling promiscuous mode\n",
+ enable == 1 ? "en" : "dis");
else
- ehea_error("failed %sabling promiscuous mode",
- enable == 1 ? "en" : "dis");
+ pr_err("failed %sabling promiscuous mode\n",
+ enable == 1 ? "en" : "dis");
}
static void ehea_promiscuous(struct net_device *dev, int enable)
@@ -1958,7 +1953,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
if (!cb7) {
- ehea_error("no mem for cb7");
+ pr_err("no mem for cb7\n");
goto out;
}
@@ -2018,7 +2013,7 @@ static int ehea_drop_multicast_list(struct net_device *dev)
hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
H_DEREG_BCMC);
if (hret) {
- ehea_error("failed deregistering mcast MAC");
+ pr_err("failed deregistering mcast MAC\n");
ret = -EIO;
}
@@ -2041,7 +2036,8 @@ static void ehea_allmulti(struct net_device *dev, int enable)
if (!hret)
port->allmulti = 1;
else
- ehea_error("failed enabling IFF_ALLMULTI");
+ netdev_err(dev,
+ "failed enabling IFF_ALLMULTI\n");
}
} else
if (!enable) {
@@ -2050,7 +2046,8 @@ static void ehea_allmulti(struct net_device *dev, int enable)
if (!hret)
port->allmulti = 0;
else
- ehea_error("failed disabling IFF_ALLMULTI");
+ netdev_err(dev,
+ "failed disabling IFF_ALLMULTI\n");
}
}
@@ -2061,7 +2058,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
if (!ehea_mcl_entry) {
- ehea_error("no mem for mcl_entry");
+ pr_err("no mem for mcl_entry\n");
return;
}
@@ -2074,7 +2071,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
if (!hret)
list_add(&ehea_mcl_entry->list, &port->mc_list->list);
else {
- ehea_error("failed registering mcast MAC");
+ pr_err("failed registering mcast MAC\n");
kfree(ehea_mcl_entry);
}
}
@@ -2107,9 +2104,8 @@ static void ehea_set_multicast_list(struct net_device *dev)
}
if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
- ehea_info("Mcast registration limit reached (0x%llx). "
- "Use ALLMULTI!",
- port->adapter->max_mc_mac);
+ pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
+ port->adapter->max_mc_mac);
goto out;
}
@@ -2315,10 +2311,10 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
pr->swqe_id_counter += 1;
- if (netif_msg_tx_queued(port)) {
- ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
+ netif_info(port, tx_queued, dev,
+ "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
+ if (netif_msg_tx_queued(port))
ehea_dump(swqe, 512, "swqe");
- }
if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
netif_stop_queue(dev);
@@ -2354,14 +2350,14 @@ static void ehea_vlan_rx_register(struct net_device *dev,
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
- ehea_error("no mem for cb1");
+ pr_err("no mem for cb1\n");
goto out;
}
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS)
- ehea_error("modify_ehea_port failed");
+ pr_err("modify_ehea_port failed\n");
free_page((unsigned long)cb1);
out:
@@ -2378,14 +2374,14 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
- ehea_error("no mem for cb1");
+ pr_err("no mem for cb1\n");
goto out;
}
hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_port failed");
+ pr_err("query_ehea_port failed\n");
goto out;
}
@@ -2395,7 +2391,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS)
- ehea_error("modify_ehea_port failed");
+ pr_err("modify_ehea_port failed\n");
out:
free_page((unsigned long)cb1);
return;
@@ -2413,14 +2409,14 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
- ehea_error("no mem for cb1");
+ pr_err("no mem for cb1\n");
goto out;
}
hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_port failed");
+ pr_err("query_ehea_port failed\n");
goto out;
}
@@ -2430,7 +2426,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS)
- ehea_error("modify_ehea_port failed");
+ pr_err("modify_ehea_port failed\n");
out:
free_page((unsigned long)cb1);
}
@@ -2452,7 +2448,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (1)");
+ pr_err("query_ehea_qp failed (1)\n");
goto out;
}
@@ -2461,14 +2457,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
&dummy64, &dummy64, &dummy16, &dummy16);
if (hret != H_SUCCESS) {
- ehea_error("modify_ehea_qp failed (1)");
+ pr_err("modify_ehea_qp failed (1)\n");
goto out;
}
hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (2)");
+ pr_err("query_ehea_qp failed (2)\n");
goto out;
}
@@ -2477,14 +2473,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
&dummy64, &dummy64, &dummy16, &dummy16);
if (hret != H_SUCCESS) {
- ehea_error("modify_ehea_qp failed (2)");
+ pr_err("modify_ehea_qp failed (2)\n");
goto out;
}
hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (3)");
+ pr_err("query_ehea_qp failed (3)\n");
goto out;
}
@@ -2493,14 +2489,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
&dummy64, &dummy64, &dummy16, &dummy16);
if (hret != H_SUCCESS) {
- ehea_error("modify_ehea_qp failed (3)");
+ pr_err("modify_ehea_qp failed (3)\n");
goto out;
}
hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (4)");
+ pr_err("query_ehea_qp failed (4)\n");
goto out;
}
@@ -2521,7 +2517,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
EHEA_MAX_ENTRIES_EQ, 1);
if (!port->qp_eq) {
ret = -EINVAL;
- ehea_error("ehea_create_eq failed (qp_eq)");
+ pr_err("ehea_create_eq failed (qp_eq)\n");
goto out_kill_eq;
}
@@ -2602,27 +2598,27 @@ static int ehea_up(struct net_device *dev)
ret = ehea_port_res_setup(port, port->num_def_qps,
port->num_add_tx_qps);
if (ret) {
- ehea_error("port_res_failed");
+ netdev_err(dev, "port_res_failed\n");
goto out;
}
/* Set default QP for this port */
ret = ehea_configure_port(port);
if (ret) {
- ehea_error("ehea_configure_port failed. ret:%d", ret);
+ netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
goto out_clean_pr;
}
ret = ehea_reg_interrupts(dev);
if (ret) {
- ehea_error("reg_interrupts failed. ret:%d", ret);
+ netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
goto out_clean_pr;
}
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
if (ret) {
- ehea_error("activate_qp failed");
+ netdev_err(dev, "activate_qp failed\n");
goto out_free_irqs;
}
}
@@ -2630,7 +2626,7 @@ static int ehea_up(struct net_device *dev)
for (i = 0; i < port->num_def_qps; i++) {
ret = ehea_fill_port_res(&port->port_res[i]);
if (ret) {
- ehea_error("out_free_irqs");
+ netdev_err(dev, "out_free_irqs\n");
goto out_free_irqs;
}
}
@@ -2653,7 +2649,7 @@ out_clean_pr:
ehea_clean_all_portres(port);
out:
if (ret)
- ehea_info("Failed starting %s. ret=%i", dev->name, ret);
+ netdev_info(dev, "Failed starting. ret=%i\n", ret);
ehea_update_bcmc_registrations();
ehea_update_firmware_handles();
@@ -2684,8 +2680,7 @@ static int ehea_open(struct net_device *dev)
mutex_lock(&port->port_lock);
- if (netif_msg_ifup(port))
- ehea_info("enabling port %s", dev->name);
+ netif_info(port, ifup, dev, "enabling port\n");
ret = ehea_up(dev);
if (!ret) {
@@ -2720,8 +2715,7 @@ static int ehea_down(struct net_device *dev)
ret = ehea_clean_all_portres(port);
if (ret)
- ehea_info("Failed freeing resources for %s. ret=%i",
- dev->name, ret);
+ netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
ehea_update_firmware_handles();
@@ -2733,8 +2727,7 @@ static int ehea_stop(struct net_device *dev)
int ret;
struct ehea_port *port = netdev_priv(dev);
- if (netif_msg_ifdown(port))
- ehea_info("disabling port %s", dev->name);
+ netif_info(port, ifdown, dev, "disabling port\n");
set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
cancel_work_sync(&port->reset_task);
@@ -2775,7 +2768,7 @@ static void ehea_flush_sq(struct ehea_port *port)
msecs_to_jiffies(100));
if (!ret) {
- ehea_error("WARNING: sq not flushed completely");
+ pr_err("WARNING: sq not flushed completely\n");
break;
}
}
@@ -2811,7 +2804,7 @@ int ehea_stop_qps(struct net_device *dev)
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (1)");
+ pr_err("query_ehea_qp failed (1)\n");
goto out;
}
@@ -2823,7 +2816,7 @@ int ehea_stop_qps(struct net_device *dev)
1), cb0, &dummy64,
&dummy64, &dummy16, &dummy16);
if (hret != H_SUCCESS) {
- ehea_error("modify_ehea_qp failed (1)");
+ pr_err("modify_ehea_qp failed (1)\n");
goto out;
}
@@ -2831,14 +2824,14 @@ int ehea_stop_qps(struct net_device *dev)
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (2)");
+ pr_err("query_ehea_qp failed (2)\n");
goto out;
}
/* deregister shared memory regions */
dret = ehea_rem_smrs(pr);
if (dret) {
- ehea_error("unreg shared memory region failed");
+ pr_err("unreg shared memory region failed\n");
goto out;
}
}
@@ -2907,7 +2900,7 @@ int ehea_restart_qps(struct net_device *dev)
ret = ehea_gen_smrs(pr);
if (ret) {
- ehea_error("creation of shared memory regions failed");
+ netdev_err(dev, "creation of shared memory regions failed\n");
goto out;
}
@@ -2918,7 +2911,7 @@ int ehea_restart_qps(struct net_device *dev)
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (1)");
+ netdev_err(dev, "query_ehea_qp failed (1)\n");
goto out;
}
@@ -2930,7 +2923,7 @@ int ehea_restart_qps(struct net_device *dev)
1), cb0, &dummy64,
&dummy64, &dummy16, &dummy16);
if (hret != H_SUCCESS) {
- ehea_error("modify_ehea_qp failed (1)");
+ netdev_err(dev, "modify_ehea_qp failed (1)\n");
goto out;
}
@@ -2938,7 +2931,7 @@ int ehea_restart_qps(struct net_device *dev)
EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
cb0);
if (hret != H_SUCCESS) {
- ehea_error("query_ehea_qp failed (2)");
+ netdev_err(dev, "query_ehea_qp failed (2)\n");
goto out;
}
@@ -2975,8 +2968,7 @@ static void ehea_reset_port(struct work_struct *work)
ehea_set_multicast_list(dev);
- if (netif_msg_timer(port))
- ehea_info("Device %s resetted successfully", dev->name);
+ netif_info(port, timer, dev, "reset successful\n");
port_napi_enable(port);
@@ -2986,12 +2978,12 @@ out:
mutex_unlock(&dlpar_mem_lock);
}
-static void ehea_rereg_mrs(struct work_struct *work)
+static void ehea_rereg_mrs(void)
{
int ret, i;
struct ehea_adapter *adapter;
- ehea_info("LPAR memory changed - re-initializing driver");
+ pr_info("LPAR memory changed - re-initializing driver\n");
list_for_each_entry(adapter, &adapter_list, list)
if (adapter->active_ports) {
@@ -3023,8 +3015,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
/* Unregister old memory region */
ret = ehea_rem_mr(&adapter->mr);
if (ret) {
- ehea_error("unregister MR failed - driver"
- " inoperable!");
+ pr_err("unregister MR failed - driver inoperable!\n");
goto out;
}
}
@@ -3036,8 +3027,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
/* Register new memory region */
ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
if (ret) {
- ehea_error("register MR failed - driver"
- " inoperable!");
+ pr_err("register MR failed - driver inoperable!\n");
goto out;
}
@@ -3060,7 +3050,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
}
}
}
- ehea_info("re-initializing driver complete");
+ pr_info("re-initializing driver complete\n");
out:
return;
}
@@ -3113,7 +3103,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
/* (Try to) enable *jumbo frames */
cb4 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb4) {
- ehea_error("no mem for cb4");
+ pr_err("no mem for cb4\n");
ret = -ENOMEM;
goto out;
} else {
@@ -3175,13 +3165,13 @@ static struct device *ehea_register_port(struct ehea_port *port,
ret = of_device_register(&port->ofdev);
if (ret) {
- ehea_error("failed to register device. ret=%d", ret);
+ pr_err("failed to register device. ret=%d\n", ret);
goto out;
}
ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
if (ret) {
- ehea_error("failed to register attributes, ret=%d", ret);
+ pr_err("failed to register attributes, ret=%d\n", ret);
goto out_unreg_of_dev;
}
@@ -3231,7 +3221,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
dev = alloc_etherdev(sizeof(struct ehea_port));
if (!dev) {
- ehea_error("no mem for net_device");
+ pr_err("no mem for net_device\n");
ret = -ENOMEM;
goto out_err;
}
@@ -3278,11 +3268,14 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
| NETIF_F_LLTX;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
+ if (use_lro)
+ dev->features |= NETIF_F_LRO;
+
INIT_WORK(&port->reset_task, ehea_reset_port);
ret = register_netdev(dev);
if (ret) {
- ehea_error("register_netdev failed. ret=%d", ret);
+ pr_err("register_netdev failed. ret=%d\n", ret);
goto out_unreg_port;
}
@@ -3290,11 +3283,10 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
ret = ehea_get_jumboframe_status(port, &jumbo);
if (ret)
- ehea_error("failed determining jumbo frame status for %s",
- port->netdev->name);
+ netdev_err(dev, "failed determining jumbo frame status\n");
- ehea_info("%s: Jumbo frames are %sabled", dev->name,
- jumbo == 1 ? "en" : "dis");
+ netdev_info(dev, "Jumbo frames are %sabled\n",
+ jumbo == 1 ? "en" : "dis");
adapter->active_ports++;
@@ -3310,14 +3302,16 @@ out_free_ethdev:
free_netdev(dev);
out_err:
- ehea_error("setting up logical port with id=%d failed, ret=%d",
- logical_port_id, ret);
+ pr_err("setting up logical port with id=%d failed, ret=%d\n",
+ logical_port_id, ret);
return NULL;
}
static void ehea_shutdown_single_port(struct ehea_port *port)
{
struct ehea_adapter *adapter = port->adapter;
+
+ cancel_work_sync(&port->reset_task);
unregister_netdev(port->netdev);
ehea_unregister_port(port);
kfree(port->mc_list);
@@ -3339,13 +3333,13 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
NULL);
if (!dn_log_port_id) {
- ehea_error("bad device node: eth_dn name=%s",
- eth_dn->full_name);
+ pr_err("bad device node: eth_dn name=%s\n",
+ eth_dn->full_name);
continue;
}
if (ehea_add_adapter_mr(adapter)) {
- ehea_error("creating MR failed");
+ pr_err("creating MR failed\n");
of_node_put(eth_dn);
return -EIO;
}
@@ -3354,9 +3348,8 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
*dn_log_port_id,
eth_dn);
if (adapter->port[i])
- ehea_info("%s -> logical port id #%d",
- adapter->port[i]->netdev->name,
- *dn_log_port_id);
+ netdev_info(adapter->port[i]->netdev,
+ "logical port id #%d\n", *dn_log_port_id);
else
ehea_remove_adapter_mr(adapter);
@@ -3401,21 +3394,20 @@ static ssize_t ehea_probe_port(struct device *dev,
port = ehea_get_port(adapter, logical_port_id);
if (port) {
- ehea_info("adding port with logical port id=%d failed. port "
- "already configured as %s.", logical_port_id,
- port->netdev->name);
+ netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
+ logical_port_id);
return -EINVAL;
}
eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
if (!eth_dn) {
- ehea_info("no logical port with id %d found", logical_port_id);
+ pr_info("no logical port with id %d found\n", logical_port_id);
return -EINVAL;
}
if (ehea_add_adapter_mr(adapter)) {
- ehea_error("creating MR failed");
+ pr_err("creating MR failed\n");
return -EIO;
}
@@ -3430,8 +3422,8 @@ static ssize_t ehea_probe_port(struct device *dev,
break;
}
- ehea_info("added %s (logical port id=%d)", port->netdev->name,
- logical_port_id);
+ netdev_info(port->netdev, "added: (logical port id=%d)\n",
+ logical_port_id);
} else {
ehea_remove_adapter_mr(adapter);
return -EIO;
@@ -3454,8 +3446,8 @@ static ssize_t ehea_remove_port(struct device *dev,
port = ehea_get_port(adapter, logical_port_id);
if (port) {
- ehea_info("removed %s (logical port id=%d)", port->netdev->name,
- logical_port_id);
+ netdev_info(port->netdev, "removed: (logical port id=%d)\n",
+ logical_port_id);
ehea_shutdown_single_port(port);
@@ -3465,8 +3457,8 @@ static ssize_t ehea_remove_port(struct device *dev,
break;
}
} else {
- ehea_error("removing port with logical port id=%d failed. port "
- "not configured.", logical_port_id);
+ pr_err("removing port with logical port id=%d failed. port not configured.\n",
+ logical_port_id);
return -EINVAL;
}
@@ -3503,7 +3495,7 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
int ret;
if (!dev || !dev->dev.of_node) {
- ehea_error("Invalid ibmebus device probed");
+ pr_err("Invalid ibmebus device probed\n");
return -EINVAL;
}
@@ -3607,8 +3599,6 @@ static int __devexit ehea_remove(struct platform_device *dev)
ehea_remove_device_sysfs(dev);
- flush_scheduled_work();
-
ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
tasklet_kill(&adapter->neq_tasklet);
@@ -3651,21 +3641,21 @@ static int ehea_mem_notifier(struct notifier_block *nb,
switch (action) {
case MEM_CANCEL_OFFLINE:
- ehea_info("memory offlining canceled");
+ pr_info("memory offlining canceled");
/* Readd canceled memory block */
case MEM_ONLINE:
- ehea_info("memory is going online");
+ pr_info("memory is going online");
set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
goto out_unlock;
- ehea_rereg_mrs(NULL);
+ ehea_rereg_mrs();
break;
case MEM_GOING_OFFLINE:
- ehea_info("memory is going offline");
+ pr_info("memory is going offline");
set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
goto out_unlock;
- ehea_rereg_mrs(NULL);
+ ehea_rereg_mrs();
break;
default:
break;
@@ -3687,7 +3677,7 @@ static int ehea_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
if (action == SYS_RESTART) {
- ehea_info("Reboot: freeing all eHEA resources");
+ pr_info("Reboot: freeing all eHEA resources\n");
ibmebus_unregister_driver(&ehea_driver);
}
return NOTIFY_DONE;
@@ -3703,22 +3693,22 @@ static int check_module_parm(void)
if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
(rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
- ehea_info("Bad parameter: rq1_entries");
+ pr_info("Bad parameter: rq1_entries\n");
ret = -EINVAL;
}
if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
(rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
- ehea_info("Bad parameter: rq2_entries");
+ pr_info("Bad parameter: rq2_entries\n");
ret = -EINVAL;
}
if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
(rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
- ehea_info("Bad parameter: rq3_entries");
+ pr_info("Bad parameter: rq3_entries\n");
ret = -EINVAL;
}
if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
(sq_entries > EHEA_MAX_ENTRIES_SQ)) {
- ehea_info("Bad parameter: sq_entries");
+ pr_info("Bad parameter: sq_entries\n");
ret = -EINVAL;
}
@@ -3738,11 +3728,8 @@ int __init ehea_module_init(void)
{
int ret;
- printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
- DRV_VERSION);
-
+ pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
- INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
@@ -3759,27 +3746,27 @@ int __init ehea_module_init(void)
ret = register_reboot_notifier(&ehea_reboot_nb);
if (ret)
- ehea_info("failed registering reboot notifier");
+ pr_info("failed registering reboot notifier\n");
ret = register_memory_notifier(&ehea_mem_nb);
if (ret)
- ehea_info("failed registering memory remove notifier");
+ pr_info("failed registering memory remove notifier\n");
ret = crash_shutdown_register(ehea_crash_handler);
if (ret)
- ehea_info("failed registering crash handler");
+ pr_info("failed registering crash handler\n");
ret = ibmebus_register_driver(&ehea_driver);
if (ret) {
- ehea_error("failed registering eHEA device driver on ebus");
+ pr_err("failed registering eHEA device driver on ebus\n");
goto out2;
}
ret = driver_create_file(&ehea_driver.driver,
&driver_attr_capabilities);
if (ret) {
- ehea_error("failed to register capabilities attribute, ret=%d",
- ret);
+ pr_err("failed to register capabilities attribute, ret=%d\n",
+ ret);
goto out3;
}
@@ -3799,13 +3786,12 @@ static void __exit ehea_module_exit(void)
{
int ret;
- flush_scheduled_work();
driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
unregister_reboot_notifier(&ehea_reboot_nb);
ret = crash_shutdown_unregister(ehea_crash_handler);
if (ret)
- ehea_info("failed unregistering crash handler");
+ pr_info("failed unregistering crash handler\n");
unregister_memory_notifier(&ehea_mem_nb);
kfree(ehea_fw_handles.arr);
kfree(ehea_bcmc_regs.arr);
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 8fe9dcaa7538..0506967b9044 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -26,6 +26,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ehea_phyp.h"
@@ -67,12 +69,11 @@ static long ehea_plpar_hcall_norets(unsigned long opcode,
}
if (ret < H_SUCCESS)
- ehea_error("opcode=%lx ret=%lx"
- " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
- " arg5=%lx arg6=%lx arg7=%lx ",
- opcode, ret,
- arg1, arg2, arg3, arg4, arg5,
- arg6, arg7);
+ pr_err("opcode=%lx ret=%lx"
+ " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+ " arg5=%lx arg6=%lx arg7=%lx\n",
+ opcode, ret,
+ arg1, arg2, arg3, arg4, arg5, arg6, arg7);
return ret;
}
@@ -114,19 +115,18 @@ static long ehea_plpar_hcall9(unsigned long opcode,
&& (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
|| (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
&& (arg3 == H_PORT_CB7_DUCQPN)))))
- ehea_error("opcode=%lx ret=%lx"
- " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
- " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
- " arg9=%lx"
- " out1=%lx out2=%lx out3=%lx out4=%lx"
- " out5=%lx out6=%lx out7=%lx out8=%lx"
- " out9=%lx",
- opcode, ret,
- arg1, arg2, arg3, arg4, arg5,
- arg6, arg7, arg8, arg9,
- outs[0], outs[1], outs[2], outs[3],
- outs[4], outs[5], outs[6], outs[7],
- outs[8]);
+ pr_err("opcode=%lx ret=%lx"
+ " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+ " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
+ " arg9=%lx"
+ " out1=%lx out2=%lx out3=%lx out4=%lx"
+ " out5=%lx out6=%lx out7=%lx out8=%lx"
+ " out9=%lx\n",
+ opcode, ret,
+ arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9,
+ outs[0], outs[1], outs[2], outs[3], outs[4],
+ outs[5], outs[6], outs[7], outs[8]);
return ret;
}
@@ -515,7 +515,7 @@ u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
const u64 log_pageaddr, const u64 count)
{
if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
- ehea_error("not on pageboundary");
+ pr_err("not on pageboundary\n");
return H_PARAMETER;
}
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 89128b6373e3..cd44bb8017d9 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -26,6 +26,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
@@ -45,7 +47,7 @@ static void *hw_qpageit_get_inc(struct hw_queue *queue)
queue->current_q_offset -= queue->pagesize;
retvalue = NULL;
} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
- ehea_error("not on pageboundary");
+ pr_err("not on pageboundary\n");
retvalue = NULL;
}
return retvalue;
@@ -58,15 +60,15 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
int i, k;
if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
- ehea_error("pagesize conflict! kernel pagesize=%d, "
- "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize);
+ pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
+ (int)PAGE_SIZE, (int)pagesize);
return -EINVAL;
}
queue->queue_length = nr_of_pages * pagesize;
queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
if (!queue->queue_pages) {
- ehea_error("no mem for queue_pages");
+ pr_err("no mem for queue_pages\n");
return -ENOMEM;
}
@@ -130,7 +132,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
if (!cq) {
- ehea_error("no mem for cq");
+ pr_err("no mem for cq\n");
goto out_nomem;
}
@@ -147,7 +149,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
&cq->fw_handle, &cq->epas);
if (hret != H_SUCCESS) {
- ehea_error("alloc_resource_cq failed");
+ pr_err("alloc_resource_cq failed\n");
goto out_freemem;
}
@@ -159,7 +161,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
for (counter = 0; counter < cq->attr.nr_pages; counter++) {
vpage = hw_qpageit_get_inc(&cq->hw_queue);
if (!vpage) {
- ehea_error("hw_qpageit_get_inc failed");
+ pr_err("hw_qpageit_get_inc failed\n");
goto out_kill_hwq;
}
@@ -168,9 +170,8 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
0, EHEA_CQ_REGISTER_ORIG,
cq->fw_handle, rpage, 1);
if (hret < H_SUCCESS) {
- ehea_error("register_rpage_cq failed ehea_cq=%p "
- "hret=%llx counter=%i act_pages=%i",
- cq, hret, counter, cq->attr.nr_pages);
+ pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
+ cq, hret, counter, cq->attr.nr_pages);
goto out_kill_hwq;
}
@@ -178,14 +179,14 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
vpage = hw_qpageit_get_inc(&cq->hw_queue);
if ((hret != H_SUCCESS) || (vpage)) {
- ehea_error("registration of pages not "
- "complete hret=%llx\n", hret);
+ pr_err("registration of pages not complete hret=%llx\n",
+ hret);
goto out_kill_hwq;
}
} else {
if (hret != H_PAGE_REGISTERED) {
- ehea_error("CQ: registration of page failed "
- "hret=%llx\n", hret);
+ pr_err("CQ: registration of page failed hret=%llx\n",
+ hret);
goto out_kill_hwq;
}
}
@@ -241,7 +242,7 @@ int ehea_destroy_cq(struct ehea_cq *cq)
}
if (hret != H_SUCCESS) {
- ehea_error("destroy CQ failed");
+ pr_err("destroy CQ failed\n");
return -EIO;
}
@@ -259,7 +260,7 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
if (!eq) {
- ehea_error("no mem for eq");
+ pr_err("no mem for eq\n");
return NULL;
}
@@ -272,21 +273,21 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
hret = ehea_h_alloc_resource_eq(adapter->handle,
&eq->attr, &eq->fw_handle);
if (hret != H_SUCCESS) {
- ehea_error("alloc_resource_eq failed");
+ pr_err("alloc_resource_eq failed\n");
goto out_freemem;
}
ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
EHEA_PAGESIZE, sizeof(struct ehea_eqe));
if (ret) {
- ehea_error("can't allocate eq pages");
+ pr_err("can't allocate eq pages\n");
goto out_freeres;
}
for (i = 0; i < eq->attr.nr_pages; i++) {
vpage = hw_qpageit_get_inc(&eq->hw_queue);
if (!vpage) {
- ehea_error("hw_qpageit_get_inc failed");
+ pr_err("hw_qpageit_get_inc failed\n");
hret = H_RESOURCE;
goto out_kill_hwq;
}
@@ -370,7 +371,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
}
if (hret != H_SUCCESS) {
- ehea_error("destroy EQ failed");
+ pr_err("destroy EQ failed\n");
return -EIO;
}
@@ -395,7 +396,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
for (cnt = 0; cnt < nr_pages; cnt++) {
vpage = hw_qpageit_get_inc(hw_queue);
if (!vpage) {
- ehea_error("hw_qpageit_get_inc failed");
+ pr_err("hw_qpageit_get_inc failed\n");
goto out_kill_hwq;
}
rpage = virt_to_abs(vpage);
@@ -403,7 +404,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
0, h_call_q_selector,
qp->fw_handle, rpage, 1);
if (hret < H_SUCCESS) {
- ehea_error("register_rpage_qp failed");
+ pr_err("register_rpage_qp failed\n");
goto out_kill_hwq;
}
}
@@ -432,7 +433,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
- ehea_error("no mem for qp");
+ pr_err("no mem for qp\n");
return NULL;
}
@@ -441,7 +442,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
&qp->fw_handle, &qp->epas);
if (hret != H_SUCCESS) {
- ehea_error("ehea_h_alloc_resource_qp failed");
+ pr_err("ehea_h_alloc_resource_qp failed\n");
goto out_freemem;
}
@@ -455,7 +456,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
init_attr->act_wqe_size_enc_sq, adapter,
0);
if (ret) {
- ehea_error("can't register for sq ret=%x", ret);
+ pr_err("can't register for sq ret=%x\n", ret);
goto out_freeres;
}
@@ -465,7 +466,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
init_attr->act_wqe_size_enc_rq1,
adapter, 1);
if (ret) {
- ehea_error("can't register for rq1 ret=%x", ret);
+ pr_err("can't register for rq1 ret=%x\n", ret);
goto out_kill_hwsq;
}
@@ -476,7 +477,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
init_attr->act_wqe_size_enc_rq2,
adapter, 2);
if (ret) {
- ehea_error("can't register for rq2 ret=%x", ret);
+ pr_err("can't register for rq2 ret=%x\n", ret);
goto out_kill_hwr1q;
}
}
@@ -488,7 +489,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
init_attr->act_wqe_size_enc_rq3,
adapter, 3);
if (ret) {
- ehea_error("can't register for rq3 ret=%x", ret);
+ pr_err("can't register for rq3 ret=%x\n", ret);
goto out_kill_hwr2q;
}
}
@@ -553,7 +554,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
}
if (hret != H_SUCCESS) {
- ehea_error("destroy QP failed");
+ pr_err("destroy QP failed\n");
return -EIO;
}
@@ -842,7 +843,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
(hret != H_PAGE_REGISTERED)) {
ehea_h_free_resource(adapter->handle, mr->handle,
FORCE_FREE);
- ehea_error("register_rpage_mr failed");
+ pr_err("register_rpage_mr failed\n");
return hret;
}
}
@@ -896,7 +897,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
pt = (void *)get_zeroed_page(GFP_KERNEL);
if (!pt) {
- ehea_error("no mem");
+ pr_err("no mem\n");
ret = -ENOMEM;
goto out;
}
@@ -906,14 +907,14 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
&mr->handle, &mr->lkey);
if (hret != H_SUCCESS) {
- ehea_error("alloc_resource_mr failed");
+ pr_err("alloc_resource_mr failed\n");
ret = -EIO;
goto out;
}
if (!ehea_bmap) {
ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
- ehea_error("no busmap available");
+ pr_err("no busmap available\n");
ret = -EIO;
goto out;
}
@@ -929,7 +930,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
if (hret != H_SUCCESS) {
ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
- ehea_error("registering mr failed");
+ pr_err("registering mr failed\n");
ret = -EIO;
goto out;
}
@@ -952,7 +953,7 @@ int ehea_rem_mr(struct ehea_mr *mr)
hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
FORCE_FREE);
if (hret != H_SUCCESS) {
- ehea_error("destroy MR failed");
+ pr_err("destroy MR failed\n");
return -EIO;
}
@@ -987,14 +988,14 @@ void print_error_data(u64 *data)
length = EHEA_PAGESIZE;
if (type == EHEA_AER_RESTYPE_QP)
- ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
- "port=%llX", resource, data[6], data[12], data[22]);
+ pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
+ resource, data[6], data[12], data[22]);
else if (type == EHEA_AER_RESTYPE_CQ)
- ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
- data[6]);
+ pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
+ resource, data[6]);
else if (type == EHEA_AER_RESTYPE_EQ)
- ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
- data[6]);
+ pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
+ resource, data[6]);
ehea_dump(data, length, "error data");
}
@@ -1008,7 +1009,7 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
rblock = (void *)get_zeroed_page(GFP_KERNEL);
if (!rblock) {
- ehea_error("Cannot allocate rblock memory.");
+ pr_err("Cannot allocate rblock memory\n");
goto out;
}
@@ -1020,9 +1021,9 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
*aerr = rblock[12];
print_error_data(rblock);
} else if (ret == H_R_STATE) {
- ehea_error("No error data available: %llX.", res_handle);
+ pr_err("No error data available: %llX\n", res_handle);
} else
- ehea_error("Error data could not be fetched: %llX", res_handle);
+ pr_err("Error data could not be fetched: %llX\n", res_handle);
free_page((unsigned long)rblock);
out:
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 112c5aa9af7f..907b05a1c659 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -812,7 +812,7 @@ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
if (netif_msg_hw(priv))
printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
endptr + 1);
- enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv);
+ enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
}
static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index e7b6c31880ba..2e573be16c13 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
- enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o
+ enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index c91d364c5527..3a3c3c8a3a9b 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,13 +32,13 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "1.4.1.6"
-#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
+#define DRV_VERSION "2.1.1.12"
+#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
-#define ENIC_WQ_MAX 8
-#define ENIC_RQ_MAX 8
+#define ENIC_WQ_MAX 1
+#define ENIC_RQ_MAX 1
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
@@ -49,7 +49,7 @@ struct enic_msix_entry {
void *devid;
};
-#define ENIC_SET_APPLIED (1 << 0)
+#define ENIC_PORT_REQUEST_APPLIED (1 << 0)
#define ENIC_SET_REQUEST (1 << 1)
#define ENIC_SET_NAME (1 << 2)
#define ENIC_SET_INSTANCE (1 << 3)
@@ -61,6 +61,8 @@ struct enic_port_profile {
char name[PORT_PROFILE_MAX];
u8 instance_uuid[PORT_UUID_MAX];
u8 host_uuid[PORT_UUID_MAX];
+ u8 vf_mac[ETH_ALEN];
+ u8 mac_addr[ETH_ALEN];
};
/* Per-instance private data structure */
@@ -78,8 +80,10 @@ struct enic {
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+ u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int flags;
unsigned int mc_count;
+ unsigned int uc_count;
int csum_rx_enabled;
u32 port_mtu;
u32 rx_coalesce_usecs;
@@ -97,7 +101,6 @@ struct enic {
/* receive queue cache line section */
____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
unsigned int rq_count;
- int (*rq_alloc_buf)(struct vnic_rq *rq);
u64 rq_truncated_pkts;
u64 rq_bad_fcs;
struct napi_struct napi[ENIC_RQ_MAX];
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
new file mode 100644
index 000000000000..37ad3a1c82ee
--- /dev/null
+++ b/drivers/net/enic/enic_dev.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "vnic_dev.h"
+#include "vnic_vic.h"
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_fw_info(enic->vdev, fw_info);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_stats_dump(enic->vdev, vstats);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_add_station_addr(struct enic *enic)
+{
+ int err;
+
+ if (!is_valid_ether_addr(enic->netdev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_del_station_addr(struct enic *enic)
+{
+ int err;
+
+ if (!is_valid_ether_addr(enic->netdev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+ int broadcast, int promisc, int allmulti)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_packet_filter(enic->vdev, directed,
+ multicast, broadcast, promisc, allmulti);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_add_addr(struct enic *enic, u8 *addr)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_add_addr(enic->vdev, addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_del_addr(struct enic *enic, u8 *addr)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_del_addr(enic->vdev, addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_notify_unset(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_notify_unset(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_hang_notify(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_hang_notify(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
+ IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_enable(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_enable_wait(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_disable(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_disable(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_vnic_dev_deinit(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_deinit(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_init_prov(enic->vdev,
+ (u8 *)vp, vic_provinfo_size(vp));
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_init_done(struct enic *enic, int *done, int *error)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_init_done(enic->vdev, done, error);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ spin_lock(&enic->devcmd_lock);
+ enic_add_vlan(enic, vid);
+ spin_unlock(&enic->devcmd_lock);
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ spin_lock(&enic->devcmd_lock);
+ enic_del_vlan(enic, vid);
+ spin_unlock(&enic->devcmd_lock);
+}
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
new file mode 100644
index 000000000000..495f57fcb887
--- /dev/null
+++ b/drivers/net/enic/enic_dev.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_DEV_H_
+#define _ENIC_DEV_H_
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats);
+int enic_dev_add_station_addr(struct enic *enic);
+int enic_dev_del_station_addr(struct enic *enic);
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+ int broadcast, int promisc, int allmulti);
+int enic_dev_add_addr(struct enic *enic, u8 *addr);
+int enic_dev_del_addr(struct enic *enic, u8 *addr);
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_dev_notify_unset(struct enic *enic);
+int enic_dev_hang_notify(struct enic *enic);
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
+int enic_dev_enable(struct enic *enic);
+int enic_dev_disable(struct enic *enic);
+int enic_vnic_dev_deinit(struct enic *enic);
+int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp);
+int enic_dev_init_done(struct enic *enic, int *done, int *error);
+
+#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a466ef91dd43..8b9cad5e9712 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -44,6 +44,7 @@
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
+#include "enic_dev.h"
#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
@@ -190,18 +191,6 @@ static int enic_get_settings(struct net_device *netdev,
return 0;
}
-static int enic_dev_fw_info(struct enic *enic,
- struct vnic_devcmd_fw_info **fw_info)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_fw_info(enic->vdev, fw_info);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static void enic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
@@ -246,17 +235,6 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
}
}
-static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_stats_dump(enic->vdev, vstats);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static void enic_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
@@ -702,7 +680,7 @@ static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
{
unsigned int head_len = skb_headlen(skb);
unsigned int len_left = skb->len - head_len;
- unsigned int hdr_len = skb_transport_offset(skb);
+ unsigned int hdr_len = skb_checksum_start_offset(skb);
unsigned int csum_offset = hdr_len + skb->csum_offset;
int eop = (len_left == 0);
@@ -896,9 +874,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
return net_stats;
}
-static void enic_reset_multicast_list(struct enic *enic)
+static void enic_reset_addr_lists(struct enic *enic)
{
enic->mc_count = 0;
+ enic->uc_count = 0;
enic->flags = 0;
}
@@ -919,32 +898,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
return 0;
}
-static int enic_dev_add_station_addr(struct enic *enic)
-{
- int err = 0;
-
- if (is_valid_ether_addr(enic->netdev->dev_addr)) {
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
- spin_unlock(&enic->devcmd_lock);
- }
-
- return err;
-}
-
-static int enic_dev_del_station_addr(struct enic *enic)
-{
- int err = 0;
-
- if (is_valid_ether_addr(enic->netdev->dev_addr)) {
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
- spin_unlock(&enic->devcmd_lock);
- }
-
- return err;
-}
-
static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
struct enic *enic = netdev_priv(netdev);
@@ -989,64 +942,19 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
return enic_dev_add_station_addr(enic);
}
-static int enic_dev_packet_filter(struct enic *enic, int directed,
- int multicast, int broadcast, int promisc, int allmulti)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_packet_filter(enic->vdev, directed,
- multicast, broadcast, promisc, allmulti);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_add_addr(enic->vdev, addr);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr)
+static void enic_update_multicast_addr_list(struct enic *enic)
{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_del_addr(enic->vdev, addr);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-/* netif_tx_lock held, BHs disabled */
-static void enic_set_multicast_list(struct net_device *netdev)
-{
- struct enic *enic = netdev_priv(netdev);
+ struct net_device *netdev = enic->netdev;
struct netdev_hw_addr *ha;
- int directed = 1;
- int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
- int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
- int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
unsigned int mc_count = netdev_mc_count(netdev);
- int allmulti = (netdev->flags & IFF_ALLMULTI) ||
- mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
- unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int i, j;
- if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
+ if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
+ netdev_warn(netdev, "Registering only %d out of %d "
+ "multicast addresses\n",
+ ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
-
- if (enic->flags != flags) {
- enic->flags = flags;
- enic_dev_packet_filter(enic, directed,
- multicast, broadcast, promisc, allmulti);
}
/* Is there an easier way? Trying to minimize to
@@ -1068,7 +976,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
mc_addr[j]) == 0)
break;
if (j == mc_count)
- enic_dev_del_multicast_addr(enic, enic->mc_addr[i]);
+ enic_dev_del_addr(enic, enic->mc_addr[i]);
}
for (i = 0; i < mc_count; i++) {
@@ -1077,7 +985,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
enic->mc_addr[j]) == 0)
break;
if (j == enic->mc_count)
- enic_dev_add_multicast_addr(enic, mc_addr[i]);
+ enic_dev_add_addr(enic, mc_addr[i]);
}
/* Save the list to compare against next time
@@ -1089,32 +997,95 @@ static void enic_set_multicast_list(struct net_device *netdev)
enic->mc_count = mc_count;
}
-/* rtnl lock is held */
-static void enic_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *vlan_group)
+static void enic_update_unicast_addr_list(struct enic *enic)
{
- struct enic *enic = netdev_priv(netdev);
- enic->vlan_group = vlan_group;
+ struct net_device *netdev = enic->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int uc_count = netdev_uc_count(netdev);
+ u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
+ unsigned int i, j;
+
+ if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
+ netdev_warn(netdev, "Registering only %d out of %d "
+ "unicast addresses\n",
+ ENIC_UNICAST_PERFECT_FILTERS, uc_count);
+ uc_count = ENIC_UNICAST_PERFECT_FILTERS;
+ }
+
+ /* Is there an easier way? Trying to minimize the
+ * calls to add/del unicast addrs. We keep the
+ * addrs from the last call in enic->uc_addr and
+ * look for changes to add/del.
+ */
+
+ i = 0;
+ netdev_for_each_uc_addr(ha, netdev) {
+ if (i == uc_count)
+ break;
+ memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
+ }
+
+ for (i = 0; i < enic->uc_count; i++) {
+ for (j = 0; j < uc_count; j++)
+ if (compare_ether_addr(enic->uc_addr[i],
+ uc_addr[j]) == 0)
+ break;
+ if (j == uc_count)
+ enic_dev_del_addr(enic, enic->uc_addr[i]);
+ }
+
+ for (i = 0; i < uc_count; i++) {
+ for (j = 0; j < enic->uc_count; j++)
+ if (compare_ether_addr(uc_addr[i],
+ enic->uc_addr[j]) == 0)
+ break;
+ if (j == enic->uc_count)
+ enic_dev_add_addr(enic, uc_addr[i]);
+ }
+
+ /* Save the list to compare against next time
+ */
+
+ for (i = 0; i < uc_count; i++)
+ memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);
+
+ enic->uc_count = uc_count;
}
-/* rtnl lock is held */
-static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+/* netif_tx_lock held, BHs disabled */
+static void enic_set_rx_mode(struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
+ int directed = 1;
+ int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
+ int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
+ int promisc = (netdev->flags & IFF_PROMISC) ||
+ netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
+ int allmulti = (netdev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
+ unsigned int flags = netdev->flags |
+ (allmulti ? IFF_ALLMULTI : 0) |
+ (promisc ? IFF_PROMISC : 0);
- spin_lock(&enic->devcmd_lock);
- enic_add_vlan(enic, vid);
- spin_unlock(&enic->devcmd_lock);
+ if (enic->flags != flags) {
+ enic->flags = flags;
+ enic_dev_packet_filter(enic, directed,
+ multicast, broadcast, promisc, allmulti);
+ }
+
+ if (!promisc) {
+ enic_update_unicast_addr_list(enic);
+ if (!allmulti)
+ enic_update_multicast_addr_list(enic);
+ }
}
/* rtnl lock is held */
-static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static void enic_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *vlan_group)
{
struct enic *enic = netdev_priv(netdev);
-
- spin_lock(&enic->devcmd_lock);
- enic_del_vlan(enic, vid);
- spin_unlock(&enic->devcmd_lock);
+ enic->vlan_group = vlan_group;
}
/* netif_tx_lock held, BHs disabled */
@@ -1124,51 +1095,39 @@ static void enic_tx_timeout(struct net_device *netdev)
schedule_work(&enic->reset);
}
-static int enic_vnic_dev_deinit(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_deinit(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_init_prov(enic->vdev,
- (u8 *)vp, vic_provinfo_size(vp));
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_init_done(struct enic *enic, int *done, int *error)
+static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
- int err;
+ struct enic *enic = netdev_priv(netdev);
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_init_done(enic->vdev, done, error);
- spin_unlock(&enic->devcmd_lock);
+ if (vf != PORT_SELF_VF)
+ return -EOPNOTSUPP;
- return err;
+ /* Ignore the vf argument for now. We can assume the request
+ * is coming on a vf.
+ */
+ if (is_valid_ether_addr(mac)) {
+ memcpy(enic->pp.vf_mac, mac, ETH_ALEN);
+ return 0;
+ } else
+ return -EINVAL;
}
static int enic_set_port_profile(struct enic *enic, u8 *mac)
{
struct vic_provinfo *vp;
u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
+ u16 os_type = VIC_GENERIC_PROV_OS_TYPE_LINUX;
char uuid_str[38];
+ char client_mac_str[18];
+ u8 *client_mac;
int err;
err = enic_vnic_dev_deinit(enic);
if (err)
return err;
+ enic_reset_addr_lists(enic);
+
switch (enic->pp.request) {
case PORT_REQUEST_ASSOCIATE:
@@ -1180,32 +1139,47 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
return -EADDRNOTAVAIL;
vp = vic_provinfo_alloc(GFP_KERNEL, oui,
- VIC_PROVINFO_LINUX_TYPE);
+ VIC_PROVINFO_GENERIC_TYPE);
if (!vp)
return -ENOMEM;
vic_provinfo_add_tlv(vp,
- VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
+ VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
strlen(enic->pp.name) + 1, enic->pp.name);
+ if (!is_zero_ether_addr(enic->pp.mac_addr))
+ client_mac = enic->pp.mac_addr;
+ else
+ client_mac = mac;
+
vic_provinfo_add_tlv(vp,
- VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
- ETH_ALEN, mac);
+ VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
+ ETH_ALEN, client_mac);
+
+ sprintf(client_mac_str, "%pM", client_mac);
+ vic_provinfo_add_tlv(vp,
+ VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
+ sizeof(client_mac_str), client_mac_str);
if (enic->pp.set & ENIC_SET_INSTANCE) {
sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
vic_provinfo_add_tlv(vp,
- VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
+ VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
sizeof(uuid_str), uuid_str);
}
if (enic->pp.set & ENIC_SET_HOST) {
sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
vic_provinfo_add_tlv(vp,
- VIC_LINUX_PROV_TLV_HOST_UUID_STR,
+ VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
sizeof(uuid_str), uuid_str);
}
+ os_type = htons(os_type);
+ vic_provinfo_add_tlv(vp,
+ VIC_GENERIC_PROV_TLV_OS_TYPE,
+ sizeof(os_type), &os_type);
+
err = enic_dev_init_prov(enic, vp);
vic_provinfo_free(vp);
if (err)
@@ -1219,7 +1193,11 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
return -EINVAL;
}
- enic->pp.set |= ENIC_SET_APPLIED;
+ /* Set flag to indicate that the port assoc/disassoc
+ * request has been sent out to fw
+ */
+ enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
+
return 0;
}
@@ -1227,29 +1205,31 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
struct nlattr *port[])
{
struct enic *enic = netdev_priv(netdev);
+ struct enic_port_profile new_pp;
+ int err = 0;
- memset(&enic->pp, 0, sizeof(enic->pp));
+ memset(&new_pp, 0, sizeof(new_pp));
if (port[IFLA_PORT_REQUEST]) {
- enic->pp.set |= ENIC_SET_REQUEST;
- enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+ new_pp.set |= ENIC_SET_REQUEST;
+ new_pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
}
if (port[IFLA_PORT_PROFILE]) {
- enic->pp.set |= ENIC_SET_NAME;
- memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
+ new_pp.set |= ENIC_SET_NAME;
+ memcpy(new_pp.name, nla_data(port[IFLA_PORT_PROFILE]),
PORT_PROFILE_MAX);
}
if (port[IFLA_PORT_INSTANCE_UUID]) {
- enic->pp.set |= ENIC_SET_INSTANCE;
- memcpy(enic->pp.instance_uuid,
+ new_pp.set |= ENIC_SET_INSTANCE;
+ memcpy(new_pp.instance_uuid,
nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
}
if (port[IFLA_PORT_HOST_UUID]) {
- enic->pp.set |= ENIC_SET_HOST;
- memcpy(enic->pp.host_uuid,
+ new_pp.set |= ENIC_SET_HOST;
+ memcpy(new_pp.host_uuid,
nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
}
@@ -1257,21 +1237,33 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
if (vf != PORT_SELF_VF)
return -EOPNOTSUPP;
- if (!(enic->pp.set & ENIC_SET_REQUEST))
+ if (!(new_pp.set & ENIC_SET_REQUEST))
return -EOPNOTSUPP;
- if (enic->pp.request == PORT_REQUEST_ASSOCIATE) {
-
- /* If the interface mac addr hasn't been assigned,
- * assign a random mac addr before setting port-
- * profile.
- */
+ if (new_pp.request == PORT_REQUEST_ASSOCIATE) {
+ /* Special case handling */
+ if (!is_zero_ether_addr(enic->pp.vf_mac))
+ memcpy(new_pp.mac_addr, enic->pp.vf_mac, ETH_ALEN);
if (is_zero_ether_addr(netdev->dev_addr))
random_ether_addr(netdev->dev_addr);
}
- return enic_set_port_profile(enic, netdev->dev_addr);
+ memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
+
+ err = enic_set_port_profile(enic, netdev->dev_addr);
+ if (err)
+ goto set_port_profile_cleanup;
+
+set_port_profile_cleanup:
+ memset(enic->pp.vf_mac, 0, ETH_ALEN);
+
+ if (err || enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
+ memset(netdev->dev_addr, 0, ETH_ALEN);
+ memset(enic->pp.mac_addr, 0, ETH_ALEN);
+ }
+
+ return err;
}
static int enic_get_vf_port(struct net_device *netdev, int vf,
@@ -1281,7 +1273,7 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
int err, error, done;
u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
- if (!(enic->pp.set & ENIC_SET_APPLIED))
+ if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
return -ENODATA;
err = enic_dev_init_done(enic, &done, &error);
@@ -1359,62 +1351,6 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
return 0;
}
-static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
-{
- struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
-
- if (vnic_rq_posting_soon(rq)) {
-
- /* SW workaround for A0 HW erratum: if we're just about
- * to write posted_index, insert a dummy desc
- * of type resvd
- */
-
- rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
- vnic_rq_post(rq, 0, 0, 0, 0);
- } else {
- return enic_rq_alloc_buf(rq);
- }
-
- return 0;
-}
-
-static int enic_dev_hw_version(struct enic *enic,
- enum vnic_dev_hw_version *hw_ver)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_hw_version(enic->vdev, hw_ver);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_set_rq_alloc_buf(struct enic *enic)
-{
- enum vnic_dev_hw_version hw_ver;
- int err;
-
- err = enic_dev_hw_version(enic, &hw_ver);
- if (err)
- return err;
-
- switch (hw_ver) {
- case VNIC_DEV_HW_VER_A1:
- enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
- break;
- case VNIC_DEV_HW_VER_A2:
- case VNIC_DEV_HW_VER_UNKNOWN:
- enic->rq_alloc_buf = enic_rq_alloc_buf;
- break;
- default:
- return -ENODEV;
- }
-
- return 0;
-}
-
static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
int skipped, void *opaque)
@@ -1551,7 +1487,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again.
@@ -1601,7 +1537,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[rq], enic->rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling mode
* so we can try to fill the ring again.
@@ -1771,39 +1707,6 @@ static int enic_dev_notify_set(struct enic *enic)
return err;
}
-static int enic_dev_notify_unset(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_notify_unset(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_enable(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_enable_wait(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_disable(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_disable(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static void enic_notify_timer_start(struct enic *enic)
{
switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1837,7 +1740,7 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
- vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
+ vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
/* Need at least one buffer on ring to get going */
if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
netdev_err(netdev, "Unable to alloc receive buffers\n");
@@ -1851,8 +1754,11 @@ static int enic_open(struct net_device *netdev)
for (i = 0; i < enic->rq_count; i++)
vnic_rq_enable(&enic->rq[i]);
- enic_dev_add_station_addr(enic);
- enic_set_multicast_list(netdev);
+ if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
+ enic_dev_add_addr(enic, enic->pp.mac_addr);
+ else
+ enic_dev_add_station_addr(enic);
+ enic_set_rx_mode(netdev);
netif_wake_queue(netdev);
@@ -1899,7 +1805,10 @@ static int enic_stop(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- enic_dev_del_station_addr(enic);
+ if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
+ enic_dev_del_addr(enic, enic->pp.mac_addr);
+ else
+ enic_dev_del_station_addr(enic);
for (i = 0; i < enic->wq_count; i++) {
err = vnic_wq_disable(&enic->wq[i]);
@@ -1962,7 +1871,8 @@ static void enic_poll_controller(struct net_device *netdev)
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < enic->rq_count; i++) {
intr = enic_msix_rq_intr(enic, i);
- enic_isr_msix_rq(enic->msix_entry[intr].vector, enic);
+ enic_isr_msix_rq(enic->msix_entry[intr].vector,
+ &enic->napi[i]);
}
intr = enic_msix_wq_intr(enic, i);
enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
@@ -2042,7 +1952,7 @@ static int enic_dev_hang_reset(struct enic *enic)
static int enic_set_rsskey(struct enic *enic)
{
- u64 rss_key_buf_pa;
+ dma_addr_t rss_key_buf_pa;
union vnic_rss_key *rss_key_buf_va = NULL;
union vnic_rss_key rss_key = {
.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
@@ -2073,7 +1983,7 @@ static int enic_set_rsskey(struct enic *enic)
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
- u64 rss_cpu_buf_pa;
+ dma_addr_t rss_cpu_buf_pa;
union vnic_rss_cpu *rss_cpu_buf_va = NULL;
unsigned int i;
int err;
@@ -2148,29 +2058,6 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
rss_hash_bits, rss_base_cpu, rss_enable);
}
-static int enic_dev_hang_notify(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_hang_notify(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
- IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static void enic_reset(struct work_struct *work)
{
struct enic *enic = container_of(work, struct enic, reset);
@@ -2183,7 +2070,7 @@ static void enic_reset(struct work_struct *work)
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
- enic_reset_multicast_list(enic);
+ enic_reset_addr_lists(enic);
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
@@ -2195,7 +2082,7 @@ static void enic_reset(struct work_struct *work)
static int enic_set_intr_mode(struct enic *enic)
{
unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
- unsigned int m = 1;
+ unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
unsigned int i;
/* Set interrupt mode (INTx, MSI, MSI-X) depending
@@ -2328,7 +2215,8 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
.ndo_start_xmit = enic_hard_start_xmit,
.ndo_get_stats = enic_get_stats,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = enic_set_multicast_list,
+ .ndo_set_rx_mode = enic_set_rx_mode,
+ .ndo_set_multicast_list = enic_set_rx_mode,
.ndo_set_mac_address = enic_set_mac_address_dynamic,
.ndo_change_mtu = enic_change_mtu,
.ndo_vlan_rx_register = enic_vlan_rx_register,
@@ -2337,6 +2225,7 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
.ndo_tx_timeout = enic_tx_timeout,
.ndo_set_vf_port = enic_set_vf_port,
.ndo_get_vf_port = enic_get_vf_port,
+ .ndo_set_vf_mac = enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = enic_poll_controller,
#endif
@@ -2349,7 +2238,8 @@ static const struct net_device_ops enic_netdev_ops = {
.ndo_get_stats = enic_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = enic_set_mac_address,
- .ndo_set_multicast_list = enic_set_multicast_list,
+ .ndo_set_rx_mode = enic_set_rx_mode,
+ .ndo_set_multicast_list = enic_set_rx_mode,
.ndo_change_mtu = enic_change_mtu,
.ndo_vlan_rx_register = enic_vlan_rx_register,
.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
@@ -2414,25 +2304,12 @@ static int enic_dev_init(struct enic *enic)
enic_init_vnic_resources(enic);
- err = enic_set_rq_alloc_buf(enic);
- if (err) {
- dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
- goto err_out_free_vnic_resources;
- }
-
err = enic_set_rss_nic_cfg(enic);
if (err) {
dev_err(dev, "Failed to config nic, aborting\n");
goto err_out_free_vnic_resources;
}
- err = enic_dev_set_ig_vlan_rewrite_mode(enic);
- if (err) {
- dev_err(dev,
- "Failed to set ingress vlan rewrite mode, aborting.\n");
- goto err_out_free_vnic_resources;
- }
-
switch (vnic_dev_get_intr_mode(enic->vdev)) {
default:
netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
@@ -2571,6 +2448,22 @@ static int __devinit enic_probe(struct pci_dev *pdev,
goto err_out_vnic_unregister;
}
+ /* Setup devcmd lock
+ */
+
+ spin_lock_init(&enic->devcmd_lock);
+
+ /*
+ * Set ingress vlan rewrite mode before vnic initialization
+ */
+
+ err = enic_dev_set_ig_vlan_rewrite_mode(enic);
+ if (err) {
+ dev_err(dev,
+ "Failed to set ingress vlan rewrite mode, aborting.\n");
+ goto err_out_dev_close;
+ }
+
/* Issue device init to initialize the vnic-to-switch link.
* We'll start with carrier off and wait for link UP
* notification later to turn on carrier. We don't need
@@ -2594,11 +2487,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
}
}
- /* Setup devcmd lock
- */
-
- spin_lock_init(&enic->devcmd_lock);
-
err = enic_dev_init(enic);
if (err) {
dev_err(dev, "Device initialization failed, aborting\n");
@@ -2693,7 +2581,7 @@ static void __devexit enic_remove(struct pci_dev *pdev)
if (netdev) {
struct enic *enic = netdev_priv(netdev);
- flush_scheduled_work();
+ cancel_work_sync(&enic->reset);
unregister_netdev(netdev);
enic_dev_deinit(enic);
vnic_dev_close(enic->vdev);
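
For reference, the provisioning-info changes scattered across the hunks above can be read as one unit. The following is a minimal sketch of the new generic-format assembly, using only the vic_provinfo_alloc()/vic_provinfo_add_tlv() calls and VIC_GENERIC_PROV_* constants visible in this patch; the helper name and its argument list are hypothetical.

	static struct vic_provinfo *enic_provinfo_sketch(u8 *client_mac, char *pp_name)
	{
		u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
		char client_mac_str[sizeof("00:11:22:33:44:55")];
		u16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
		struct vic_provinfo *vp;

		vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_GENERIC_TYPE);
		if (!vp)
			return NULL;

		/* Profile name and client MAC, as in the old Linux-type format */
		vic_provinfo_add_tlv(vp, VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
			strlen(pp_name) + 1, pp_name);
		vic_provinfo_add_tlv(vp, VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
			ETH_ALEN, client_mac);

		/* New with the generic format: the MAC rendered as a string for
		 * the cluster-port-UUID slot, and the OS type in network order.
		 */
		sprintf(client_mac_str, "%pM", client_mac);
		vic_provinfo_add_tlv(vp, VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
			sizeof(client_mac_str), client_mac_str);
		vic_provinfo_add_tlv(vp, VIC_GENERIC_PROV_TLV_OS_TYPE,
			sizeof(os_type), &os_type);

		return vp;
	}
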
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 9a103d9ef9e2..25be2734c3fe 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -34,6 +34,7 @@
#define ENIC_MAX_MTU 9000
#define ENIC_MULTICAST_PERFECT_FILTERS 32
+#define ENIC_UNICAST_PERFECT_FILTERS 32
#define ENIC_NON_TSO_MAX_DESC 16
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index fb35d8b17668..c089b362a36f 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -408,10 +408,17 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
if (!vdev->fw_info)
return -ENOMEM;
+ memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info));
+
a0 = vdev->fw_info_pa;
+ a1 = sizeof(struct vnic_devcmd_fw_info);
/* only get fw_info once and cache it */
err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
+ if (err == ERR_ECMDUNKNOWN) {
+ err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
+ &a0, &a1, wait);
+ }
}
*fw_info = vdev->fw_info;
@@ -419,25 +426,6 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
return err;
}
-int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
-{
- struct vnic_devcmd_fw_info *fw_info;
- int err;
-
- err = vnic_dev_fw_info(vdev, &fw_info);
- if (err)
- return err;
-
- if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
- *hw_ver = VNIC_DEV_HW_VER_A1;
- else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
- *hw_ver = VNIC_DEV_HW_VER_A2;
- else
- *hw_ver = VNIC_DEV_HW_VER_UNKNOWN;
-
- return 0;
-}
-
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value)
{
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index 05f9a24cd459..e837546213a8 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -44,12 +44,6 @@ static inline void writeq(u64 val, void __iomem *reg)
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-enum vnic_dev_hw_version {
- VNIC_DEV_HW_VER_UNKNOWN,
- VNIC_DEV_HW_VER_A1,
- VNIC_DEV_HW_VER_A2,
-};
-
enum vnic_dev_intr_mode {
VNIC_DEV_INTR_MODE_UNKNOWN,
VNIC_DEV_INTR_MODE_INTX,
@@ -93,8 +87,6 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
-int vnic_dev_hw_version(struct vnic_dev *vdev,
- enum vnic_dev_hw_version *hw_ver);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value);
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index 9abb3d51dea1..d833a071bac5 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -80,8 +80,34 @@
enum vnic_devcmd_cmd {
CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
- /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
- CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+ /*
+ * mcpu fw info in mem:
+ * in:
+ * (u64)a0=paddr to struct vnic_devcmd_fw_info
+ * action:
+ * Fills in struct vnic_devcmd_fw_info (128 bytes)
+ * note:
+ * An old definition of CMD_MCPU_FW_INFO
+ */
+ CMD_MCPU_FW_INFO_OLD = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+
+ /*
+ * mcpu fw info in mem:
+ * in:
+ * (u64)a0=paddr to struct vnic_devcmd_fw_info
+ * (u16)a1=size of the structure
+ * out:
+ * (u16)a1=0 for in:a1 = 0,
+ * data size actually written for other values.
+ * action:
+ * Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0,
+ * first in:a1 bytes for 0 < in:a1 <= 132,
+ * 132 bytes for other values of in:a1.
+ * note:
+ * CMD_MCPU_FW_INFO and CMD_MCPU_FW_INFO_OLD have the same enum 1
+ * for source compatibility.
+ */
+ CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1),
/* dev-specific block member:
* in: (u16)a0=offset,(u8)a1=size
@@ -291,11 +317,19 @@ enum vnic_devcmd_error {
ERR_EMAXRES = 10,
};
+/*
+ * note: hw_version and asic_rev refer to the same thing,
+ * but have different formats. hw_version is
+ * a 32-byte string (e.g. "A2") and asic_rev is
+ * a 16-bit integer (e.g. 0xA2).
+ */
struct vnic_devcmd_fw_info {
char fw_version[32];
char fw_build[32];
char hw_version[32];
char hw_serial_number[32];
+ u16 asic_type;
+ u16 asic_rev;
};
struct vnic_devcmd_notify {
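
A minimal sketch of how a caller drives the reworked command, following the a0/a1 contract documented above and the ERR_ECMDUNKNOWN fallback added to vnic_dev_fw_info() earlier in this patch; the wrapper name is hypothetical and the types come from vnic_dev.h/vnic_devcmd.h.

	static int fetch_fw_info_sketch(struct vnic_dev *vdev,
		dma_addr_t fw_info_pa, int wait)
	{
		u64 a0 = fw_info_pa;				/* in: buffer paddr */
		u64 a1 = sizeof(struct vnic_devcmd_fw_info);	/* in: size, out: bytes written */
		int err;

		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
		if (err == ERR_ECMDUNKNOWN)
			/* older firmware only understands the 128-byte variant */
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD, &a0, &a1, wait);

		return err;
	}
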
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 37f08de2454a..2056586f4d4b 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -141,11 +141,6 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
}
}
-static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
-{
- return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
-}
-
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
rq->ring.desc_avail += count;
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
index 7e46e5e8600f..f700f5d9e81d 100644
--- a/drivers/net/enic/vnic_vic.h
+++ b/drivers/net/enic/vnic_vic.h
@@ -24,14 +24,29 @@
/* Note: String field lengths include null char */
#define VIC_PROVINFO_CISCO_OUI { 0x00, 0x00, 0x0c }
-#define VIC_PROVINFO_LINUX_TYPE 0x2
-
-enum vic_linux_prov_tlv_type {
- VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR = 0,
- VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR = 1, /* u8[6] */
- VIC_LINUX_PROV_TLV_CLIENT_NAME_STR = 2,
- VIC_LINUX_PROV_TLV_HOST_UUID_STR = 8,
- VIC_LINUX_PROV_TLV_CLIENT_UUID_STR = 9,
+#define VIC_PROVINFO_GENERIC_TYPE 0x4
+
+enum vic_generic_prov_tlv_type {
+ VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR = 0,
+ VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR = 1,
+ VIC_GENERIC_PROV_TLV_CLIENT_NAME_STR = 2,
+ VIC_GENERIC_PROV_TLV_CLUSTER_PORT_NAME_STR = 3,
+ VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR = 4,
+ VIC_GENERIC_PROV_TLV_CLUSTER_UUID_STR = 5,
+ VIC_GENERIC_PROV_TLV_CLUSTER_NAME_STR = 7,
+ VIC_GENERIC_PROV_TLV_HOST_UUID_STR = 8,
+ VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR = 9,
+ VIC_GENERIC_PROV_TLV_INCARNATION_NUMBER = 10,
+ VIC_GENERIC_PROV_TLV_OS_TYPE = 11,
+ VIC_GENERIC_PROV_TLV_OS_VENDOR = 12,
+ VIC_GENERIC_PROV_TLV_CLIENT_TYPE = 15,
+};
+
+enum vic_generic_prov_os_type {
+ VIC_GENERIC_PROV_OS_TYPE_UNKNOWN = 0,
+ VIC_GENERIC_PROV_OS_TYPE_ESX = 1,
+ VIC_GENERIC_PROV_OS_TYPE_LINUX = 2,
+ VIC_GENERIC_PROV_OS_TYPE_WINDOWS = 3,
};
struct vic_provinfo {
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index aa56963ad558..c353bf3113cc 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -935,7 +935,7 @@ static void epic_init_ring(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz + 2);
ep->rx_skbuff[i] = skb;
if (skb == NULL)
break;
@@ -1233,7 +1233,7 @@ static int epic_rx(struct net_device *dev, int budget)
entry = ep->dirty_rx % RX_RING_SIZE;
if (ep->rx_skbuff[entry] == NULL) {
struct sk_buff *skb;
- skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
+ skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz + 2);
if (skb == NULL)
break;
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
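
The two-byte growth above pairs with the skb_reserve(skb, 2) that follows it: the reserve consumes two bytes of headroom so that, after the 14-byte Ethernet header, the IP header starts on a 16-byte boundary, while the full rx_buf_sz stays available to the device. A standalone sketch of the pattern (not driver code):

	#include <linux/skbuff.h>

	static struct sk_buff *rx_skb_alloc_sketch(unsigned int rx_buf_sz)
	{
		struct sk_buff *skb = dev_alloc_skb(rx_buf_sz + 2);

		if (skb)
			skb_reserve(skb, 2);	/* 14-byte Ethernet header + 2 puts
						 * the IP header on a 16-byte boundary;
						 * the NIC still sees rx_buf_sz bytes. */
		return skb;
	}
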
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 0cb1cf9cf4b0..a59cf961a436 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -111,6 +111,8 @@
* Sorry, I had to rewrite most of this for 2.5.x -DaveM
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -162,7 +164,7 @@ static void eql_timer(unsigned long param)
}
static const char version[] __initconst =
- "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n";
+ "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";
static const struct net_device_ops eql_netdev_ops = {
.ndo_open = eql_open,
@@ -204,8 +206,8 @@ static int eql_open(struct net_device *dev)
equalizer_t *eql = netdev_priv(dev);
/* XXX We should force this off automatically for the user. */
- printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on "
- "your slave devices.\n", dev->name);
+ netdev_info(dev,
+ "remember to turn off Van-Jacobson compression on your slave devices\n");
BUG_ON(!list_empty(&eql->queue.all_slaves));
@@ -591,7 +593,7 @@ static int __init eql_init_module(void)
{
int err;
- printk(version);
+ pr_info("%s\n", version);
dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
if (!dev_eql)
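
The eql.c logging conversion leans on the kernel's pr_fmt() convention: defining the macro before the printk headers are pulled in makes every pr_info()/pr_err() in the file carry the module-name prefix automatically, which is also why the version string above drops its trailing newline. A small illustration of the effect, assuming KBUILD_MODNAME expands to "eql" for this file:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must come before the printk headers */
	#include <linux/printk.h>

		pr_info("%s\n", version);
		/* -> "eql: Equalizer2002: Simon Janes (simon@ncm.com) and
		 *      David S. Miller (davem@redhat.com)"
		 */
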
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index c5a2fe099a8d..db0290f05bdf 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <net/ethoc.h>
static int buffer_size = 0x8000; /* 32 KBytes */
@@ -184,7 +185,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @netdev: pointer to network device structure
* @napi: NAPI structure
* @msg_enable: device state flags
- * @rx_lock: receive lock
* @lock: device lock
* @phy: attached PHY
* @mdio: MDIO bus for PHY access
@@ -209,7 +209,6 @@ struct ethoc {
struct napi_struct napi;
u32 msg_enable;
- spinlock_t rx_lock;
spinlock_t lock;
struct phy_device *phy;
@@ -413,10 +412,21 @@ static int ethoc_rx(struct net_device *dev, int limit)
unsigned int entry;
struct ethoc_bd bd;
- entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
+ entry = priv->num_tx + priv->cur_rx;
ethoc_read_bd(priv, entry, &bd);
- if (bd.stat & RX_BD_EMPTY)
- break;
+ if (bd.stat & RX_BD_EMPTY) {
+ ethoc_ack_irq(priv, INT_MASK_RX);
+ /* If packet (interrupt) came in between checking
+ * RX_BD_EMPTY and clearing the interrupt source, then we
+ * risk missing the packet as the RX interrupt won't
+ * trigger right away when we reenable it; hence, check
+ * RX_BD_EMPTY here again to make sure there isn't such a
+ * packet waiting for us...
+ */
+ ethoc_read_bd(priv, entry, &bd);
+ if (bd.stat & RX_BD_EMPTY)
+ break;
+ }
if (ethoc_update_rx_stats(priv, &bd) == 0) {
int size = bd.stat >> 16;
@@ -446,13 +456,14 @@ static int ethoc_rx(struct net_device *dev, int limit)
bd.stat &= ~RX_BD_STATS;
bd.stat |= RX_BD_EMPTY;
ethoc_write_bd(priv, entry, &bd);
- priv->cur_rx++;
+ if (++priv->cur_rx == priv->num_rx)
+ priv->cur_rx = 0;
}
return count;
}
-static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
+static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
{
struct net_device *netdev = dev->netdev;
@@ -482,32 +493,44 @@ static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
netdev->stats.collisions += (bd->stat >> 4) & 0xf;
netdev->stats.tx_bytes += bd->stat >> 16;
netdev->stats.tx_packets++;
- return 0;
}
-static void ethoc_tx(struct net_device *dev)
+static int ethoc_tx(struct net_device *dev, int limit)
{
struct ethoc *priv = netdev_priv(dev);
+ int count;
+ struct ethoc_bd bd;
- spin_lock(&priv->lock);
+ for (count = 0; count < limit; ++count) {
+ unsigned int entry;
- while (priv->dty_tx != priv->cur_tx) {
- unsigned int entry = priv->dty_tx % priv->num_tx;
- struct ethoc_bd bd;
+ entry = priv->dty_tx & (priv->num_tx-1);
ethoc_read_bd(priv, entry, &bd);
- if (bd.stat & TX_BD_READY)
- break;
- entry = (++priv->dty_tx) % priv->num_tx;
- (void)ethoc_update_tx_stats(priv, &bd);
+ if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
+ ethoc_ack_irq(priv, INT_MASK_TX);
+ /* If interrupt came in between reading in the BD
+ * and clearing the interrupt source, then we risk
+ * missing the event as the TX interrupt won't trigger
+ * right away when we reenable it; hence, check
+ * TX_BD_READY here again to make sure there isn't such an
+ * event pending...
+ */
+ ethoc_read_bd(priv, entry, &bd);
+ if (bd.stat & TX_BD_READY ||
+ (priv->dty_tx == priv->cur_tx))
+ break;
+ }
+
+ ethoc_update_tx_stats(priv, &bd);
+ priv->dty_tx++;
}
if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
netif_wake_queue(dev);
- ethoc_ack_irq(priv, INT_MASK_TX);
- spin_unlock(&priv->lock);
+ return count;
}
static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
@@ -515,32 +538,38 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct ethoc *priv = netdev_priv(dev);
u32 pending;
-
- ethoc_disable_irq(priv, INT_MASK_ALL);
+ u32 mask;
+
+ /* Figure out what triggered the interrupt...
+ * The tricky bit here is that the interrupt source bits get
+ * set in INT_SOURCE for an event regardless of whether that
+ * event is masked or not. Thus, in order to figure out what
+ * triggered the interrupt, we need to remove the sources
+ * for all events that are currently masked. This behaviour
+ * is not particularly well documented but reasonable...
+ */
+ mask = ethoc_read(priv, INT_MASK);
pending = ethoc_read(priv, INT_SOURCE);
+ pending &= mask;
+
if (unlikely(pending == 0)) {
- ethoc_enable_irq(priv, INT_MASK_ALL);
return IRQ_NONE;
}
ethoc_ack_irq(priv, pending);
+ /* We always handle the dropped packet interrupt */
if (pending & INT_MASK_BUSY) {
dev_err(&dev->dev, "packet dropped\n");
dev->stats.rx_dropped++;
}
- if (pending & INT_MASK_RX) {
- if (napi_schedule_prep(&priv->napi))
- __napi_schedule(&priv->napi);
- } else {
- ethoc_enable_irq(priv, INT_MASK_RX);
+ /* Handle receive/transmit event by switching to polling */
+ if (pending & (INT_MASK_TX | INT_MASK_RX)) {
+ ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ napi_schedule(&priv->napi);
}
- if (pending & INT_MASK_TX)
- ethoc_tx(dev);
-
- ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
return IRQ_HANDLED;
}
@@ -566,26 +595,29 @@ static int ethoc_get_mac_address(struct net_device *dev, void *addr)
static int ethoc_poll(struct napi_struct *napi, int budget)
{
struct ethoc *priv = container_of(napi, struct ethoc, napi);
- int work_done = 0;
+ int rx_work_done = 0;
+ int tx_work_done = 0;
- work_done = ethoc_rx(priv->netdev, budget);
- if (work_done < budget) {
- ethoc_enable_irq(priv, INT_MASK_RX);
+ rx_work_done = ethoc_rx(priv->netdev, budget);
+ tx_work_done = ethoc_tx(priv->netdev, budget);
+
+ if (rx_work_done < budget && tx_work_done < budget) {
napi_complete(napi);
+ ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
}
- return work_done;
+ return rx_work_done;
}
static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
{
- unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
struct ethoc *priv = bus->priv;
+ int i;
ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
- while (time_before(jiffies, timeout)) {
+ for (i = 0; i < 5; i++) {
u32 status = ethoc_read(priv, MIISTATUS);
if (!(status & MIISTATUS_BUSY)) {
u32 data = ethoc_read(priv, MIIRX_DATA);
@@ -593,8 +625,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
ethoc_write(priv, MIICOMMAND, 0);
return data;
}
-
- schedule();
+ usleep_range(100, 200);
}
return -EBUSY;
@@ -602,22 +633,21 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
- unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
struct ethoc *priv = bus->priv;
+ int i;
ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
ethoc_write(priv, MIITX_DATA, val);
ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
- while (time_before(jiffies, timeout)) {
+ for (i = 0; i < 5; i++) {
u32 stat = ethoc_read(priv, MIISTATUS);
if (!(stat & MIISTATUS_BUSY)) {
/* reset MII command register */
ethoc_write(priv, MIICOMMAND, 0);
return 0;
}
-
- schedule();
+ usleep_range(100, 200);
}
return -EBUSY;
@@ -971,9 +1001,17 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
/* calculate the number of TX/RX buffers, maximum 128 supported */
num_bd = min_t(unsigned int,
128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
- priv->num_tx = max(2, num_bd / 4);
+ if (num_bd < 4) {
+ ret = -ENODEV;
+ goto error;
+ }
+ /* num_tx must be a power of two */
+ priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
priv->num_rx = num_bd - priv->num_tx;
+ dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
+ priv->num_tx, priv->num_rx);
+
priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
if (!priv->vma) {
ret = -ENOMEM;
@@ -982,10 +1020,23 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
/* Allow the platform setup code to pass in a MAC address. */
if (pdev->dev.platform_data) {
- struct ethoc_platform_data *pdata =
- (struct ethoc_platform_data *)pdev->dev.platform_data;
+ struct ethoc_platform_data *pdata = pdev->dev.platform_data;
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
+ } else {
+ priv->phy_id = -1;
+
+#ifdef CONFIG_OF
+ {
+ const uint8_t* mac;
+
+ mac = of_get_property(pdev->dev.of_node,
+ "local-mac-address",
+ NULL);
+ if (mac)
+ memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
+ }
+#endif
}
/* Check that the given MAC address is valid. If it isn't, read the
@@ -1046,7 +1097,6 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
/* setup NAPI */
netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
- spin_lock_init(&priv->rx_lock);
spin_lock_init(&priv->lock);
ret = register_netdev(netdev);
@@ -1113,6 +1163,12 @@ static int ethoc_resume(struct platform_device *pdev)
# define ethoc_resume NULL
#endif
+static struct of_device_id ethoc_match[] = {
+ { .compatible = "opencores,ethoc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ethoc_match);
+
static struct platform_driver ethoc_driver = {
.probe = ethoc_probe,
.remove = __devexit_p(ethoc_remove),
@@ -1120,6 +1176,8 @@ static struct platform_driver ethoc_driver = {
.resume = ethoc_resume,
.driver = {
.name = "ethoc",
+ .owner = THIS_MODULE,
+ .of_match_table = ethoc_match,
},
};
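
One constraint introduced by the ethoc rework above is that the TX ring length must be a power of two, because ethoc_tx() now wraps the ring index with a mask, entry = priv->dty_tx & (priv->num_tx - 1), instead of a modulo. A worked example of the probe-time split, with num_bd = 100 chosen purely for illustration:

	/* num_bd = 100 (hypothetical result of the mem_end/mem_start math):
	 *   num_tx = rounddown_pow_of_two(100 >> 1) = 32   (power of two)
	 *   num_rx = 100 - 32                        = 68
	 * so the TX index wraps as
	 *   entry = priv->dty_tx & (32 - 1);	// same as priv->dty_tx % 32
	 * which is only equivalent to a modulo because num_tx is a power of two.
	 */
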
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index cce32d43175f..885d8baff7d5 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -17,6 +17,8 @@
*
* Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
* Copyright (c) 2004-2006 Macq Electronique SA.
+ *
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
*/
#include <linux/module.h>
@@ -45,29 +47,42 @@
#include <asm/cacheflush.h>
-#ifndef CONFIG_ARCH_MXC
+#ifndef CONFIG_ARM
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif
#include "fec.h"
-#ifdef CONFIG_ARCH_MXC
-#include <mach/hardware.h>
+#if defined(CONFIG_ARM)
#define FEC_ALIGNMENT 0xf
#else
#define FEC_ALIGNMENT 0x3
#endif
-/*
- * Define the fixed address of the FEC hardware.
- */
-#if defined(CONFIG_M5272)
+#define DRIVER_NAME "fec"
+
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC (1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME (1 << 1)
-static unsigned char fec_mac_default[] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+static struct platform_device_id fec_devtype[] = {
+ {
+ .name = DRIVER_NAME,
+ .driver_data = 0,
+ }, {
+ .name = "imx28-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+ },
+ { }
};
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
/*
* Some hardware gets its MAC address out of local flash memory.
* If this is non-zero then assume it is the address to get the MAC from.
@@ -133,7 +148,7 @@ static unsigned char fec_mac_default[] = {
* account when setting it.
*/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE 0
@@ -168,7 +183,7 @@ struct fec_enet_private {
struct bufdesc *rx_bd_base;
struct bufdesc *tx_bd_base;
/* The next free ring entry */
- struct bufdesc *cur_rx, *cur_tx;
+ struct bufdesc *cur_rx, *cur_tx;
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
@@ -176,29 +191,21 @@ struct fec_enet_private {
/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
spinlock_t hw_lock;
- struct platform_device *pdev;
+ struct platform_device *pdev;
int opened;
/* Phylib and MDIO interface */
- struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
- int mii_timeout;
- uint phy_speed;
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ int mii_timeout;
+ uint phy_speed;
phy_interface_t phy_interface;
- int index;
int link;
int full_duplex;
struct completion mdio_done;
};
-static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
-static void fec_enet_tx(struct net_device *dev);
-static void fec_enet_rx(struct net_device *dev);
-static int fec_enet_close(struct net_device *dev);
-static void fec_restart(struct net_device *dev, int duplex);
-static void fec_stop(struct net_device *dev);
-
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST (1 << 30)
#define FEC_MMFR_OP_READ (2 << 28)
@@ -213,10 +220,23 @@ static void fec_stop(struct net_device *dev);
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
+static void *swap_buffer(void *bufaddr, int len)
+{
+ int i;
+ unsigned int *buf = bufaddr;
+
+ for (i = 0; i < (len + 3) / 4; i++, buf++)
+ *buf = cpu_to_be32(*buf);
+
+ return bufaddr;
+}
+
static netdev_tx_t
-fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
struct bufdesc *bdp;
void *bufaddr;
unsigned short status;
@@ -235,9 +255,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (status & BD_ENET_TX_READY) {
/* Ooops. All transmit buffers are full. Bail out.
- * This should not happen, since dev->tbusy should be set.
+ * This should not happen, since ndev->tbusy should be set.
*/
- printk("%s: tx queue full!.\n", dev->name);
+ printk("%s: tx queue full!.\n", ndev->name);
spin_unlock_irqrestore(&fep->hw_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -257,20 +277,28 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
unsigned int index;
index = bdp - fep->tx_bd_base;
- memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
+ memcpy(fep->tx_bounce[index], skb->data, skb->len);
bufaddr = fep->tx_bounce[index];
}
+ /*
+ * Some designs made an incorrect assumption about the endian mode of
+ * the system they run on. As a result, the driver has to
+ * swap every frame going to and coming from the controller.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, skb->len);
+
/* Save skb pointer */
fep->tx_skbuff[fep->skb_cur] = skb;
- dev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_bytes += skb->len;
fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
/* Push the data cache so the CPM does not get stale memory
* data.
*/
- bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
/* Send it on its way. Tell FEC it's ready, interrupt when done,
@@ -291,7 +319,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (bdp == fep->dirty_tx) {
fep->tx_full = 1;
- netif_stop_queue(dev);
+ netif_stop_queue(ndev);
}
fep->cur_tx = bdp;
@@ -301,62 +329,170 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+/* This function is called to start or restart the FEC during a link
+ * change. This only happens when switching between half and full
+ * duplex.
+ */
static void
-fec_timeout(struct net_device *dev)
+fec_restart(struct net_device *ndev, int duplex)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int i;
+ u32 temp_mac[2];
+ u32 rcntl = OPT_FRAME_SIZE | 0x04;
- dev->stats.tx_errors++;
+ /* Whack a reset. We should wait for this. */
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
- fec_restart(dev, fep->full_duplex);
- netif_wake_queue(dev);
-}
+ /*
+ * enet-mac reset will reset mac address registers too,
+ * so need to reconfigure it.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+ writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+ writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+ }
-static irqreturn_t
-fec_enet_interrupt(int irq, void * dev_id)
-{
- struct net_device *dev = dev_id;
- struct fec_enet_private *fep = netdev_priv(dev);
- uint int_events;
- irqreturn_t ret = IRQ_NONE;
+ /* Clear any outstanding interrupt. */
+ writel(0xffc00000, fep->hwp + FEC_IEVENT);
- do {
- int_events = readl(fep->hwp + FEC_IEVENT);
- writel(int_events, fep->hwp + FEC_IEVENT);
+ /* Reset all multicast. */
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+#ifndef CONFIG_M5272
+ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
- if (int_events & FEC_ENET_RXF) {
- ret = IRQ_HANDLED;
- fec_enet_rx(dev);
- }
+ /* Set maximum receive buffer size. */
+ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
- /* Transmit OK, or non-fatal error. Update the buffer
- * descriptors. FEC handles all errors, we just discover
- * them as part of the transmit process.
- */
- if (int_events & FEC_ENET_TXF) {
- ret = IRQ_HANDLED;
- fec_enet_tx(dev);
+ /* Set receive and transmit descriptor base. */
+ writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
+ writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
+ fep->hwp + FEC_X_DES_START);
+
+ fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->cur_rx = fep->rx_bd_base;
+
+ /* Reset SKB transmit buffers. */
+ fep->skb_cur = fep->skb_dirty = 0;
+ for (i = 0; i <= TX_RING_MOD_MASK; i++) {
+ if (fep->tx_skbuff[i]) {
+ dev_kfree_skb_any(fep->tx_skbuff[i]);
+ fep->tx_skbuff[i] = NULL;
}
+ }
- if (int_events & FEC_ENET_MII) {
- ret = IRQ_HANDLED;
- complete(&fep->mdio_done);
+ /* Enable MII mode */
+ if (duplex) {
+ /* FD enable */
+ writel(0x04, fep->hwp + FEC_X_CNTRL);
+ } else {
+ /* No Rcv on Xmit */
+ rcntl |= 0x02;
+ writel(0x0, fep->hwp + FEC_X_CNTRL);
+ }
+
+ fep->full_duplex = duplex;
+
+ /* Set MII speed */
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+ /*
+ * The phy interface and speed need to get configured
+ * differently on enet-mac.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ /* Enable flow control and length check */
+ rcntl |= 0x40000000 | 0x00000020;
+
+ /* MII or RMII */
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+ rcntl |= (1 << 8);
+ else
+ rcntl &= ~(1 << 8);
+
+ /* 10M or 100M */
+ if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
+ rcntl &= ~(1 << 9);
+ else
+ rcntl |= (1 << 9);
+
+ } else {
+#ifdef FEC_MIIGSK_ENR
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+ /* disable the gasket and wait */
+ writel(0, fep->hwp + FEC_MIIGSK_ENR);
+ while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+ udelay(1);
+
+ /*
+ * configure the gasket:
+ * RMII, 50 MHz, no loopback, no echo
+ */
+ writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+
+ /* re-enable the gasket */
+ writel(2, fep->hwp + FEC_MIIGSK_ENR);
}
- } while (int_events);
+#endif
+ }
+ writel(rcntl, fep->hwp + FEC_R_CNTRL);
- return ret;
+ /* And last, enable the transmit and receive processing */
+ writel(2, fep->hwp + FEC_ECNTRL);
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+
+ /* Enable interrupts we wish to service */
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
+static void
+fec_stop(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ /* We cannot expect a graceful transmit stop without link !!! */
+ if (fep->link) {
+ writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
+ udelay(10);
+ if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
+ printk("fec_stop : Graceful transmit stop did not complete !\n");
+ }
+
+ /* Whack a reset. We should wait for this. */
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+}
+
+
+static void
+fec_timeout(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ ndev->stats.tx_errors++;
+
+ fec_restart(ndev, fep->full_duplex);
+ netif_wake_queue(ndev);
+}
static void
-fec_enet_tx(struct net_device *dev)
+fec_enet_tx(struct net_device *ndev)
{
struct fec_enet_private *fep;
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
- fep = netdev_priv(dev);
+ fep = netdev_priv(ndev);
spin_lock(&fep->hw_lock);
bdp = fep->dirty_tx;
@@ -364,7 +500,8 @@ fec_enet_tx(struct net_device *dev)
if (bdp == fep->cur_tx && fep->tx_full == 0)
break;
- dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
bdp->cbd_bufaddr = 0;
skb = fep->tx_skbuff[fep->skb_dirty];
@@ -372,19 +509,19 @@ fec_enet_tx(struct net_device *dev)
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
BD_ENET_TX_RL | BD_ENET_TX_UN |
BD_ENET_TX_CSL)) {
- dev->stats.tx_errors++;
+ ndev->stats.tx_errors++;
if (status & BD_ENET_TX_HB) /* No heartbeat */
- dev->stats.tx_heartbeat_errors++;
+ ndev->stats.tx_heartbeat_errors++;
if (status & BD_ENET_TX_LC) /* Late collision */
- dev->stats.tx_window_errors++;
+ ndev->stats.tx_window_errors++;
if (status & BD_ENET_TX_RL) /* Retrans limit */
- dev->stats.tx_aborted_errors++;
+ ndev->stats.tx_aborted_errors++;
if (status & BD_ENET_TX_UN) /* Underrun */
- dev->stats.tx_fifo_errors++;
+ ndev->stats.tx_fifo_errors++;
if (status & BD_ENET_TX_CSL) /* Carrier lost */
- dev->stats.tx_carrier_errors++;
+ ndev->stats.tx_carrier_errors++;
} else {
- dev->stats.tx_packets++;
+ ndev->stats.tx_packets++;
}
if (status & BD_ENET_TX_READY)
@@ -394,7 +531,7 @@ fec_enet_tx(struct net_device *dev)
* but we eventually sent the packet OK.
*/
if (status & BD_ENET_TX_DEF)
- dev->stats.collisions++;
+ ndev->stats.collisions++;
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
@@ -411,8 +548,8 @@ fec_enet_tx(struct net_device *dev)
*/
if (fep->tx_full) {
fep->tx_full = 0;
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
}
}
fep->dirty_tx = bdp;
@@ -426,9 +563,11 @@ fec_enet_tx(struct net_device *dev)
* effectively tossing the packet.
*/
static void
-fec_enet_rx(struct net_device *dev)
+fec_enet_rx(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
@@ -460,17 +599,17 @@ fec_enet_rx(struct net_device *dev)
/* Check for errors. */
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
BD_ENET_RX_CR | BD_ENET_RX_OV)) {
- dev->stats.rx_errors++;
+ ndev->stats.rx_errors++;
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
/* Frame too long or too short. */
- dev->stats.rx_length_errors++;
+ ndev->stats.rx_length_errors++;
}
if (status & BD_ENET_RX_NO) /* Frame alignment */
- dev->stats.rx_frame_errors++;
+ ndev->stats.rx_frame_errors++;
if (status & BD_ENET_RX_CR) /* CRC Error */
- dev->stats.rx_crc_errors++;
+ ndev->stats.rx_crc_errors++;
if (status & BD_ENET_RX_OV) /* FIFO overrun */
- dev->stats.rx_fifo_errors++;
+ ndev->stats.rx_fifo_errors++;
}
/* Report late collisions as a frame error.
@@ -478,19 +617,22 @@ fec_enet_rx(struct net_device *dev)
* have in the buffer. So, just drop this frame on the floor.
*/
if (status & BD_ENET_RX_CL) {
- dev->stats.rx_errors++;
- dev->stats.rx_frame_errors++;
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_frame_errors++;
goto rx_processing_done;
}
/* Process the incoming frame. */
- dev->stats.rx_packets++;
+ ndev->stats.rx_packets++;
pkt_len = bdp->cbd_datlen;
- dev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_bytes += pkt_len;
data = (__u8*)__va(bdp->cbd_bufaddr);
- dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
- DMA_FROM_DEVICE);
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
+
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(data, pkt_len);
/* This does 16 byte alignment, exactly what we need.
* The packet length includes FCS, but we don't want to
@@ -501,18 +643,18 @@ fec_enet_rx(struct net_device *dev)
if (unlikely(!skb)) {
printk("%s: Memory squeeze, dropping packet.\n",
- dev->name);
- dev->stats.rx_dropped++;
+ ndev->name);
+ ndev->stats.rx_dropped++;
} else {
skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, pkt_len - 4); /* Make room */
skb_copy_to_linear_data(skb, data, pkt_len - 4);
- skb->protocol = eth_type_trans(skb, dev);
+ skb->protocol = eth_type_trans(skb, ndev);
netif_rx(skb);
}
- bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
- DMA_FROM_DEVICE);
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
+ FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
/* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
@@ -537,47 +679,97 @@ rx_processing_done:
spin_unlock(&fep->hw_lock);
}
+static irqreturn_t
+fec_enet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ uint int_events;
+ irqreturn_t ret = IRQ_NONE;
+
+ do {
+ int_events = readl(fep->hwp + FEC_IEVENT);
+ writel(int_events, fep->hwp + FEC_IEVENT);
+
+ if (int_events & FEC_ENET_RXF) {
+ ret = IRQ_HANDLED;
+ fec_enet_rx(ndev);
+ }
+
+ /* Transmit OK, or non-fatal error. Update the buffer
+ * descriptors. FEC handles all errors, we just discover
+ * them as part of the transmit process.
+ */
+ if (int_events & FEC_ENET_TXF) {
+ ret = IRQ_HANDLED;
+ fec_enet_tx(ndev);
+ }
+
+ if (int_events & FEC_ENET_MII) {
+ ret = IRQ_HANDLED;
+ complete(&fep->mdio_done);
+ }
+ } while (int_events);
+
+ return ret;
+}
+
+
+
/* ------------------------------------------------------------------------- */
-#ifdef CONFIG_M5272
-static void __inline__ fec_get_mac(struct net_device *dev)
+static void __inline__ fec_get_mac(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
unsigned char *iap, tmpaddr[ETH_ALEN];
- if (FEC_FLASHMAC) {
- /*
- * Get MAC address from FLASH.
- * If it is all 1's or 0's, use the default.
- */
- iap = (unsigned char *)FEC_FLASHMAC;
- if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
- (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
- iap = fec_mac_default;
- if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
- (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
- iap = fec_mac_default;
- } else {
- *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
- *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+ /*
+ * try to get mac address in following order:
+ *
+ * 1) module parameter via kernel command line in form
+ * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
+ */
+ iap = macaddr;
+
+ /*
+ * 2) from flash or fuse (via platform data)
+ */
+ if (!is_valid_ether_addr(iap)) {
+#ifdef CONFIG_M5272
+ if (FEC_FLASHMAC)
+ iap = (unsigned char *)FEC_FLASHMAC;
+#else
+ if (pdata)
+ memcpy(iap, pdata->mac, ETH_ALEN);
+#endif
+ }
+
+ /*
+ * 3) FEC mac registers set by bootloader
+ */
+ if (!is_valid_ether_addr(iap)) {
+ *((unsigned long *) &tmpaddr[0]) =
+ be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
+ *((unsigned short *) &tmpaddr[4]) =
+ be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
iap = &tmpaddr[0];
}
- memcpy(dev->dev_addr, iap, ETH_ALEN);
+ memcpy(ndev->dev_addr, iap, ETH_ALEN);
- /* Adjust MAC if using default MAC address */
- if (iap == fec_mac_default)
- dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
+ /* Adjust MAC if using macaddr */
+ if (iap == macaddr)
+ ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
}
-#endif
/* ------------------------------------------------------------------------- */
/*
* Phy section
*/
-static void fec_enet_adjust_link(struct net_device *dev)
+static void fec_enet_adjust_link(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phy_dev = fep->phy_dev;
unsigned long flags;
@@ -594,7 +786,7 @@ static void fec_enet_adjust_link(struct net_device *dev)
/* Duplex link change */
if (phy_dev->link) {
if (fep->full_duplex != phy_dev->duplex) {
- fec_restart(dev, phy_dev->duplex);
+ fec_restart(ndev, phy_dev->duplex);
status_change = 1;
}
}
@@ -603,9 +795,9 @@ static void fec_enet_adjust_link(struct net_device *dev)
if (phy_dev->link != fep->link) {
fep->link = phy_dev->link;
if (phy_dev->link)
- fec_restart(dev, phy_dev->duplex);
+ fec_restart(ndev, phy_dev->duplex);
else
- fec_stop(dev);
+ fec_stop(ndev);
status_change = 1;
}
@@ -651,8 +843,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
fep->mii_timeout = 0;
init_completion(&fep->mdio_done);
- /* start a read op */
- writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+ /* start a write op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
FEC_MMFR_TA | FEC_MMFR_DATA(value),
fep->hwp + FEC_MII_DATA);
@@ -674,13 +866,14 @@ static int fec_enet_mdio_reset(struct mii_bus *bus)
return 0;
}
-static int fec_enet_mii_probe(struct net_device *dev)
+static int fec_enet_mii_probe(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phy_dev = NULL;
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
int phy_id;
+ int dev_id = fep->pdev->id;
fep->phy_dev = NULL;
@@ -692,22 +885,24 @@ static int fec_enet_mii_probe(struct net_device *dev)
continue;
if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
continue;
+ if (dev_id--)
+ continue;
strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
break;
}
if (phy_id >= PHY_MAX_ADDR) {
printk(KERN_INFO "%s: no PHY, assuming direct connection "
- "to switch\n", dev->name);
+ "to switch\n", ndev->name);
strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
phy_id = 0;
}
snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
- phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
+ phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
- printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
+ printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
return PTR_ERR(phy_dev);
}
@@ -720,7 +915,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
fep->full_duplex = 0;
printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+ "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
fep->phy_dev->irq);
@@ -729,10 +924,35 @@ static int fec_enet_mii_probe(struct net_device *dev)
static int fec_enet_mii_init(struct platform_device *pdev)
{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct fec_enet_private *fep = netdev_priv(dev);
+ static struct mii_bus *fec0_mii_bus;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
int err = -ENXIO, i;
+ /*
+ * The dual fec interfaces are not equivalent with enet-mac.
+ * Here are the differences:
+ *
+ * - fec0 supports MII & RMII modes while fec1 only supports RMII
+ * - fec0 acts as the 1588 time master while fec1 is slave
+ * - external phys can only be configured by fec0
+ *
+ * That is to say, fec1 cannot work independently. It only works
+ * when fec0 is working. The reason behind this design is that the
+ * second interface is added primarily for Switch mode.
+ *
+ * Because of the last point above, both phys are attached on fec0
+ * mdio interface in board design, and need to be configured by
+ * fec0 mii_bus.
+ */
+ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
+ /* fec1 uses fec0 mii_bus */
+ fep->mii_bus = fec0_mii_bus;
+ return 0;
+ }
+
fep->mii_timeout = 0;
/*
@@ -764,11 +984,13 @@ static int fec_enet_mii_init(struct platform_device *pdev)
for (i = 0; i < PHY_MAX_ADDR; i++)
fep->mii_bus->irq[i] = PHY_POLL;
- platform_set_drvdata(dev, fep->mii_bus);
-
if (mdiobus_register(fep->mii_bus))
goto err_out_free_mdio_irq;
+ /* save fec0 mii_bus */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ fec0_mii_bus = fep->mii_bus;
+
return 0;
err_out_free_mdio_irq:
@@ -788,10 +1010,10 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
mdiobus_free(fep->mii_bus);
}
-static int fec_enet_get_settings(struct net_device *dev,
+static int fec_enet_get_settings(struct net_device *ndev,
struct ethtool_cmd *cmd)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phydev = fep->phy_dev;
if (!phydev)
@@ -800,10 +1022,10 @@ static int fec_enet_get_settings(struct net_device *dev,
return phy_ethtool_gset(phydev, cmd);
}
-static int fec_enet_set_settings(struct net_device *dev,
+static int fec_enet_set_settings(struct net_device *ndev,
struct ethtool_cmd *cmd)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phydev = fep->phy_dev;
if (!phydev)
@@ -812,14 +1034,14 @@ static int fec_enet_set_settings(struct net_device *dev,
return phy_ethtool_sset(phydev, cmd);
}
-static void fec_enet_get_drvinfo(struct net_device *dev,
+static void fec_enet_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
strcpy(info->driver, fep->pdev->dev.driver->name);
strcpy(info->version, "Revision: 1.0");
- strcpy(info->bus_info, dev_name(&dev->dev));
+ strcpy(info->bus_info, dev_name(&ndev->dev));
}
static struct ethtool_ops fec_enet_ethtool_ops = {
@@ -829,12 +1051,12 @@ static struct ethtool_ops fec_enet_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phydev = fep->phy_dev;
- if (!netif_running(dev))
+ if (!netif_running(ndev))
return -EINVAL;
if (!phydev)
@@ -843,9 +1065,9 @@ static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(phydev, rq, cmd);
}
-static void fec_enet_free_buffers(struct net_device *dev)
+static void fec_enet_free_buffers(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
int i;
struct sk_buff *skb;
struct bufdesc *bdp;
@@ -855,7 +1077,7 @@ static void fec_enet_free_buffers(struct net_device *dev)
skb = fep->rx_skbuff[i];
if (bdp->cbd_bufaddr)
- dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
if (skb)
dev_kfree_skb(skb);
@@ -867,9 +1089,9 @@ static void fec_enet_free_buffers(struct net_device *dev)
kfree(fep->tx_bounce[i]);
}
-static int fec_enet_alloc_buffers(struct net_device *dev)
+static int fec_enet_alloc_buffers(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
int i;
struct sk_buff *skb;
struct bufdesc *bdp;
@@ -878,12 +1100,12 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
if (!skb) {
- fec_enet_free_buffers(dev);
+ fec_enet_free_buffers(ndev);
return -ENOMEM;
}
fep->rx_skbuff[i] = skb;
- bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
bdp->cbd_sc = BD_ENET_RX_EMPTY;
bdp++;
@@ -910,45 +1132,47 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
}
static int
-fec_enet_open(struct net_device *dev)
+fec_enet_open(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
- ret = fec_enet_alloc_buffers(dev);
+ ret = fec_enet_alloc_buffers(ndev);
if (ret)
return ret;
/* Probe and connect to PHY when open the interface */
- ret = fec_enet_mii_probe(dev);
+ ret = fec_enet_mii_probe(ndev);
if (ret) {
- fec_enet_free_buffers(dev);
+ fec_enet_free_buffers(ndev);
return ret;
}
phy_start(fep->phy_dev);
- netif_start_queue(dev);
+ netif_start_queue(ndev);
fep->opened = 1;
return 0;
}
static int
-fec_enet_close(struct net_device *dev)
+fec_enet_close(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
/* Don't know what to do yet. */
fep->opened = 0;
- netif_stop_queue(dev);
- fec_stop(dev);
+ netif_stop_queue(ndev);
+ fec_stop(ndev);
- if (fep->phy_dev)
+ if (fep->phy_dev) {
+ phy_stop(fep->phy_dev);
phy_disconnect(fep->phy_dev);
+ }
- fec_enet_free_buffers(dev);
+ fec_enet_free_buffers(ndev);
return 0;
}
@@ -966,14 +1190,14 @@ fec_enet_close(struct net_device *dev)
#define HASH_BITS 6 /* #bits in hash */
#define CRC32_POLY 0xEDB88320
-static void set_multicast_list(struct net_device *dev)
+static void set_multicast_list(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct netdev_hw_addr *ha;
unsigned int i, bit, data, crc, tmp;
unsigned char hash;
- if (dev->flags & IFF_PROMISC) {
+ if (ndev->flags & IFF_PROMISC) {
tmp = readl(fep->hwp + FEC_R_CNTRL);
tmp |= 0x8;
writel(tmp, fep->hwp + FEC_R_CNTRL);
@@ -984,7 +1208,7 @@ static void set_multicast_list(struct net_device *dev)
tmp &= ~0x8;
writel(tmp, fep->hwp + FEC_R_CNTRL);
- if (dev->flags & IFF_ALLMULTI) {
+ if (ndev->flags & IFF_ALLMULTI) {
/* Catch all multicast addresses, so set the
* filter to all 1's
*/
@@ -999,7 +1223,7 @@ static void set_multicast_list(struct net_device *dev)
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- netdev_for_each_mc_addr(ha, dev) {
+ netdev_for_each_mc_addr(ha, ndev) {
/* Only support group multicast for now */
if (!(ha->addr[0] & 1))
continue;
@@ -1007,7 +1231,7 @@ static void set_multicast_list(struct net_device *dev)
/* calculate crc32 value of mac address */
crc = 0xffffffff;
- for (i = 0; i < dev->addr_len; i++) {
+ for (i = 0; i < ndev->addr_len; i++) {
data = ha->addr[i];
for (bit = 0; bit < 8; bit++, data >>= 1) {
crc = (crc >> 1) ^
@@ -1034,20 +1258,20 @@ static void set_multicast_list(struct net_device *dev)
/* Set a MAC change in hardware. */
static int
-fec_set_mac_address(struct net_device *dev, void *p)
+fec_set_mac_address(struct net_device *ndev, void *p)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
- writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
- (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
+ writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+ (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
fep->hwp + FEC_ADDR_LOW);
- writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
+ writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
fep->hwp + FEC_ADDR_HIGH);
return 0;
}
@@ -1061,17 +1285,16 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
- .ndo_do_ioctl = fec_enet_ioctl,
+ .ndo_do_ioctl = fec_enet_ioctl,
};
/*
* XXX: We need to clean up on failure exits here.
*
- * index is only used in legacy code
*/
-static int fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct bufdesc *cbd_base;
struct bufdesc *bdp;
int i;
@@ -1086,35 +1309,19 @@ static int fec_enet_init(struct net_device *dev, int index)
spin_lock_init(&fep->hw_lock);
- fep->index = index;
- fep->hwp = (void __iomem *)dev->base_addr;
- fep->netdev = dev;
+ fep->netdev = ndev;
- /* Set the Ethernet address */
-#ifdef CONFIG_M5272
- fec_get_mac(dev);
-#else
- {
- unsigned long l;
- l = readl(fep->hwp + FEC_ADDR_LOW);
- dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
- dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
- dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
- dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
- l = readl(fep->hwp + FEC_ADDR_HIGH);
- dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
- dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
- }
-#endif
+ /* Get the Ethernet address */
+ fec_get_mac(ndev);
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
fep->tx_bd_base = cbd_base + RX_RING_SIZE;
/* The FEC Ethernet specific entries in the device structure */
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->netdev_ops = &fec_netdev_ops;
- dev->ethtool_ops = &fec_enet_ethtool_ops;
+ ndev->watchdog_timeo = TX_TIMEOUT;
+ ndev->netdev_ops = &fec_netdev_ops;
+ ndev->ethtool_ops = &fec_enet_ethtool_ops;
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
@@ -1143,114 +1350,11 @@ static int fec_enet_init(struct net_device *dev, int index)
bdp--;
bdp->cbd_sc |= BD_SC_WRAP;
- fec_restart(dev, 0);
+ fec_restart(ndev, 0);
return 0;
}
-/* This function is called to start or restart the FEC during a link
- * change. This only happens when switching between half and full
- * duplex.
- */
-static void
-fec_restart(struct net_device *dev, int duplex)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- int i;
-
- /* Whack a reset. We should wait for this. */
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
-
- /* Clear any outstanding interrupt. */
- writel(0xffc00000, fep->hwp + FEC_IEVENT);
-
- /* Reset all multicast. */
- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-#ifndef CONFIG_M5272
- writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
- writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
-#endif
-
- /* Set maximum receive buffer size. */
- writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
-
- /* Set receive and transmit descriptor base. */
- writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
- writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
- fep->hwp + FEC_X_DES_START);
-
- fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
- fep->cur_rx = fep->rx_bd_base;
-
- /* Reset SKB transmit buffers. */
- fep->skb_cur = fep->skb_dirty = 0;
- for (i = 0; i <= TX_RING_MOD_MASK; i++) {
- if (fep->tx_skbuff[i]) {
- dev_kfree_skb_any(fep->tx_skbuff[i]);
- fep->tx_skbuff[i] = NULL;
- }
- }
-
- /* Enable MII mode */
- if (duplex) {
- /* MII enable / FD enable */
- writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
- writel(0x04, fep->hwp + FEC_X_CNTRL);
- } else {
- /* MII enable / No Rcv on Xmit */
- writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
- writel(0x0, fep->hwp + FEC_X_CNTRL);
- }
- fep->full_duplex = duplex;
-
- /* Set MII speed */
- writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-
-#ifdef FEC_MIIGSK_ENR
- if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
- /* disable the gasket and wait */
- writel(0, fep->hwp + FEC_MIIGSK_ENR);
- while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
- udelay(1);
-
- /* configure the gasket: RMII, 50 MHz, no loopback, no echo */
- writel(1, fep->hwp + FEC_MIIGSK_CFGR);
-
- /* re-enable the gasket */
- writel(2, fep->hwp + FEC_MIIGSK_ENR);
- }
-#endif
-
- /* And last, enable the transmit and receive processing */
- writel(2, fep->hwp + FEC_ECNTRL);
- writel(0, fep->hwp + FEC_R_DES_ACTIVE);
-
- /* Enable interrupts we wish to service */
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
-static void
-fec_stop(struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
-
- /* We cannot expect a graceful transmit stop without link !!! */
- if (fep->link) {
- writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
- udelay(10);
- if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
- printk("fec_stop : Graceful transmit stop did not complete !\n");
- }
-
- /* Whack a reset. We should wait for this. */
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
- writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
static int __devinit
fec_probe(struct platform_device *pdev)
{
@@ -1270,19 +1374,20 @@ fec_probe(struct platform_device *pdev)
/* Init network device */
ndev = alloc_etherdev(sizeof(struct fec_enet_private));
- if (!ndev)
- return -ENOMEM;
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto failed_alloc_etherdev;
+ }
SET_NETDEV_DEV(ndev, &pdev->dev);
/* setup board info structure */
fep = netdev_priv(ndev);
- memset(fep, 0, sizeof(*fep));
- ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
+ fep->hwp = ioremap(r->start, resource_size(r));
fep->pdev = pdev;
- if (!ndev->base_addr) {
+ if (!fep->hwp) {
ret = -ENOMEM;
goto failed_ioremap;
}
@@ -1300,10 +1405,9 @@ fec_probe(struct platform_device *pdev)
break;
ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
if (ret) {
- while (i >= 0) {
+ while (--i >= 0) {
irq = platform_get_irq(pdev, i);
free_irq(irq, ndev);
- i--;
}
goto failed_irq;
}
@@ -1316,7 +1420,7 @@ fec_probe(struct platform_device *pdev)
}
clk_enable(fep->clk);
- ret = fec_enet_init(ndev, 0);
+ ret = fec_enet_init(ndev);
if (ret)
goto failed_init;
@@ -1346,9 +1450,11 @@ failed_clk:
free_irq(irq, ndev);
}
failed_irq:
- iounmap((void __iomem *)ndev->base_addr);
+ iounmap(fep->hwp);
failed_ioremap:
free_netdev(ndev);
+failed_alloc_etherdev:
+ release_mem_region(r->start, resource_size(r));
return ret;
}
@@ -1358,16 +1464,22 @@ fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
-
- platform_set_drvdata(pdev, NULL);
+ struct resource *r;
fec_stop(ndev);
fec_enet_mii_remove(fep);
clk_disable(fep->clk);
clk_put(fep->clk);
- iounmap((void __iomem *)ndev->base_addr);
+ iounmap(fep->hwp);
unregister_netdev(ndev);
free_netdev(ndev);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ BUG_ON(!r);
+ release_mem_region(r->start, resource_size(r));
+
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
@@ -1376,14 +1488,14 @@ static int
fec_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct fec_enet_private *fep;
+ struct fec_enet_private *fep = netdev_priv(ndev);
- if (ndev) {
- fep = netdev_priv(ndev);
- if (netif_running(ndev))
- fec_enet_close(ndev);
- clk_disable(fep->clk);
+ if (netif_running(ndev)) {
+ fec_stop(ndev);
+ netif_device_detach(ndev);
}
+ clk_disable(fep->clk);
+
return 0;
}
@@ -1391,14 +1503,14 @@ static int
fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct fec_enet_private *fep;
+ struct fec_enet_private *fep = netdev_priv(ndev);
- if (ndev) {
- fep = netdev_priv(ndev);
- clk_enable(fep->clk);
- if (netif_running(ndev))
- fec_enet_open(ndev);
+ clk_enable(fep->clk);
+ if (netif_running(ndev)) {
+ fec_restart(ndev, fep->full_duplex);
+ netif_device_attach(ndev);
}
+
return 0;
}
@@ -1414,12 +1526,13 @@ static const struct dev_pm_ops fec_pm_ops = {
static struct platform_driver fec_driver = {
.driver = {
- .name = "fec",
+ .name = DRIVER_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &fec_pm_ops,
#endif
},
+ .id_table = fec_devtype,
.probe = fec_probe,
.remove = __devexit_p(fec_drv_remove),
};
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 2c48b25668d5..ace318df4c8d 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -14,7 +14,8 @@
/****************************************************************************/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+ defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -78,7 +79,7 @@
/*
* Define the buffer descriptor structure.
*/
-#ifdef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
struct bufdesc {
unsigned short cbd_datlen; /* Data length */
unsigned short cbd_sc; /* Control and status info */
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index e9f5d030bc26..9f81b1ac130e 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -366,9 +366,8 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
- unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
while (bcom_buffer_done(priv->tx_dmatsk)) {
struct sk_buff *skb;
struct bcom_fec_bd *bd;
@@ -379,7 +378,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
dev_kfree_skb_irq(skb);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
netif_wake_queue(dev);
@@ -395,9 +394,8 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
struct bcom_fec_bd *bd;
u32 status, physaddr;
int length;
- unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
while (bcom_buffer_done(priv->rx_dmatsk)) {
@@ -429,7 +427,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
/* Process the received skb - Drop the spin lock while
* calling into the network stack */
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
DMA_FROM_DEVICE);
@@ -438,10 +436,10 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
rskb->protocol = eth_type_trans(rskb, dev);
netif_rx(rskb);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
@@ -452,7 +450,6 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
u32 ievent;
- unsigned long flags;
ievent = in_be32(&fec->ievent);
@@ -470,9 +467,9 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&priv->lock);
mpc52xx_fec_reset(dev);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
@@ -843,8 +840,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
/* OF Driver */
/* ======================================================================== */
-static int __devinit
-mpc52xx_fec_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit mpc52xx_fec_probe(struct platform_device *op)
{
int rv;
struct net_device *ndev;
@@ -1052,7 +1048,7 @@ static struct of_device_id mpc52xx_fec_match[] = {
MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
-static struct of_platform_driver mpc52xx_fec_driver = {
+static struct platform_driver mpc52xx_fec_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -1076,21 +1072,21 @@ mpc52xx_fec_init(void)
{
#ifdef CONFIG_FEC_MPC52xx_MDIO
int ret;
- ret = of_register_platform_driver(&mpc52xx_fec_mdio_driver);
+ ret = platform_driver_register(&mpc52xx_fec_mdio_driver);
if (ret) {
printk(KERN_ERR DRIVER_NAME ": failed to register mdio driver\n");
return ret;
}
#endif
- return of_register_platform_driver(&mpc52xx_fec_driver);
+ return platform_driver_register(&mpc52xx_fec_driver);
}
static void __exit
mpc52xx_fec_exit(void)
{
- of_unregister_platform_driver(&mpc52xx_fec_driver);
+ platform_driver_unregister(&mpc52xx_fec_driver);
#ifdef CONFIG_FEC_MPC52xx_MDIO
- of_unregister_platform_driver(&mpc52xx_fec_mdio_driver);
+ platform_driver_unregister(&mpc52xx_fec_mdio_driver);
#endif
}
diff --git a/drivers/net/fec_mpc52xx.h b/drivers/net/fec_mpc52xx.h
index a227a525bdbb..41d2dffde55b 100644
--- a/drivers/net/fec_mpc52xx.h
+++ b/drivers/net/fec_mpc52xx.h
@@ -289,6 +289,6 @@ struct mpc52xx_fec {
#define FEC_XMIT_FSM_ENABLE_CRC 0x01000000
-extern struct of_platform_driver mpc52xx_fec_mdio_driver;
+extern struct platform_driver mpc52xx_fec_mdio_driver;
#endif /* __DRIVERS_NET_MPC52XX_FEC_H__ */
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 0b4cb6f15984..360a578c2bb7 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -61,8 +61,7 @@ static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
data | FEC_MII_WRITE_FRAME);
}
-static int mpc52xx_fec_mdio_probe(struct platform_device *of,
- const struct of_device_id *match)
+static int mpc52xx_fec_mdio_probe(struct platform_device *of)
{
struct device *dev = &of->dev;
struct device_node *np = of->dev.of_node;
@@ -145,7 +144,7 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
};
MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
-struct of_platform_driver mpc52xx_fec_mdio_driver = {
+struct platform_driver mpc52xx_fec_mdio_driver = {
.driver = {
.name = "mpc5200b-fec-phy",
.owner = THIS_MODULE,
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0fa1776563a3..7b92897ca66b 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -39,6 +39,9 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define FORCEDETH_VERSION "0.64"
#define DRV_NAME "forcedeth"
@@ -60,18 +63,12 @@
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
#include <asm/system.h>
-#if 0
-#define dprintk printk
-#else
-#define dprintk(x...) do { } while (0)
-#endif
-
#define TX_WORK_PER_LOOP 64
#define RX_WORK_PER_LOOP 64
@@ -186,9 +183,9 @@ enum {
NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
-#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
+#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
#define NVREG_SLOTTIME_HALF 0x0000ff00
-#define NVREG_SLOTTIME_DEFAULT 0x00007f00
+#define NVREG_SLOTTIME_DEFAULT 0x00007f00
#define NVREG_SLOTTIME_MASK 0x000000ff
NvRegTxDeferral = 0xA0,
@@ -297,7 +294,7 @@ enum {
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
NvRegMgmtUnitGetVersion = 0x204,
-#define NVREG_MGMTUNITGETVERSION 0x01
+#define NVREG_MGMTUNITGETVERSION 0x01
NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION 0x08
NvRegPowerCap = 0x268,
@@ -368,8 +365,8 @@ struct ring_desc_ex {
};
union ring_type {
- struct ring_desc* orig;
- struct ring_desc_ex* ex;
+ struct ring_desc *orig;
+ struct ring_desc_ex *ex;
};
#define FLAG_MASK_V1 0xffff0000
@@ -444,10 +441,10 @@ union ring_type {
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
/* Miscelaneous hardware related defines: */
-#define NV_PCI_REGSZ_VER1 0x270
-#define NV_PCI_REGSZ_VER2 0x2d4
-#define NV_PCI_REGSZ_VER3 0x604
-#define NV_PCI_REGSZ_MAX 0x604
+#define NV_PCI_REGSZ_VER1 0x270
+#define NV_PCI_REGSZ_VER2 0x2d4
+#define NV_PCI_REGSZ_VER3 0x604
+#define NV_PCI_REGSZ_MAX 0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
@@ -717,7 +714,7 @@ static const struct register_test nv_registers_test[] = {
{ NvRegMulticastAddrA, 0xffffffff },
{ NvRegTxWatermark, 0x0ff },
{ NvRegWakeUpFlags, 0x07777 },
- { 0,0 }
+ { 0, 0 }
};
struct nv_skb_map {
@@ -911,7 +908,7 @@ static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
* Power down phy when interface is down (persists through reboot;
* older Linux and other OSes may not power it up again)
*/
-static int phy_power_down = 0;
+static int phy_power_down;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
@@ -948,7 +945,7 @@ static bool nv_optimized(struct fe_priv *np)
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
- int delay, int delaymax, const char *msg)
+ int delay, int delaymax)
{
u8 __iomem *base = get_hwbase(dev);
@@ -956,11 +953,8 @@ static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
do {
udelay(delay);
delaymax -= delay;
- if (delaymax < 0) {
- if (msg)
- printk("%s", msg);
+ if (delaymax < 0)
return 1;
- }
} while ((readl(base + offset) & mask) != target);
return 0;
}
@@ -984,12 +978,10 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
u8 __iomem *base = get_hwbase(dev);
if (!nv_optimized(np)) {
- if (rxtx_flags & NV_SETUP_RX_RING) {
+ if (rxtx_flags & NV_SETUP_RX_RING)
writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
- }
- if (rxtx_flags & NV_SETUP_TX_RING) {
+ if (rxtx_flags & NV_SETUP_TX_RING)
writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
- }
} else {
if (rxtx_flags & NV_SETUP_RX_RING) {
writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
@@ -1015,10 +1007,8 @@ static void free_rings(struct net_device *dev)
pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
np->rx_ring.ex, np->ring_addr);
}
- if (np->rx_skb)
- kfree(np->rx_skb);
- if (np->tx_skb)
- kfree(np->tx_skb);
+ kfree(np->rx_skb);
+ kfree(np->tx_skb);
}
static int using_multi_irqs(struct net_device *dev)
@@ -1145,23 +1135,15 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
writel(reg, base + NvRegMIIControl);
if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
- NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
- dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
- dev->name, miireg, addr);
+ NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
retval = -1;
} else if (value != MII_READ) {
/* it was a write operation - fewer failures are detectable */
- dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
- dev->name, value, miireg, addr);
retval = 0;
} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
- dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
- dev->name, miireg, addr);
retval = -1;
} else {
retval = readl(base + NvRegMIIData);
- dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
- dev->name, miireg, addr, retval);
}
return retval;
@@ -1174,16 +1156,15 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
unsigned int tries = 0;
miicontrol = BMCR_RESET | bmcr_setup;
- if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
return -1;
- }
/* wait for 500ms */
msleep(500);
/* must wait till reset is deasserted */
while (miicontrol & BMCR_RESET) {
- msleep(10);
+ usleep_range(10000, 20000);
miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
/* FIXME: 100 tries seem excessive */
if (tries++ > 100)
@@ -1192,106 +1173,239 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
return 0;
}
+static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
+{
+ static const struct {
+ int reg;
+ int init;
+ } ri[] = {
+ { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
+ { PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
+ { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
+ { PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
+ { PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
+ { PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
+ { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ri); i++) {
+ if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
+ return PHY_ERROR;
+ }
+
+ return 0;
+}
+
+static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
+{
+ u32 reg;
+ u8 __iomem *base = get_hwbase(dev);
+ u32 powerstate = readl(base + NvRegPowerState2);
+
+ /* need to perform hw phy reset */
+ powerstate |= NVREG_POWERSTATE2_PHY_RESET;
+ writel(powerstate, base + NvRegPowerState2);
+ msleep(25);
+
+ powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
+ writel(powerstate, base + NvRegPowerState2);
+ msleep(25);
+
+ reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
+ reg |= PHY_REALTEK_INIT9;
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
+ return PHY_ERROR;
+ reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
+ if (!(reg & PHY_REALTEK_INIT11)) {
+ reg |= PHY_REALTEK_INIT11;
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
+ return PHY_ERROR;
+
+ return 0;
+}
+
+static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
+{
+ u32 phy_reserved;
+
+ if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG6, MII_READ);
+ phy_reserved |= PHY_REALTEK_INIT7;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG6, phy_reserved))
+ return PHY_ERROR;
+ }
+
+ return 0;
+}
+
+static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
+{
+ u32 phy_reserved;
+
+ if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG2, MII_READ);
+ phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
+ phy_reserved |= PHY_REALTEK_INIT3;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG2, phy_reserved))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
+ return PHY_ERROR;
+ }
+
+ return 0;
+}
+
+static int init_cicada(struct net_device *dev, struct fe_priv *np,
+ u32 phyinterface)
+{
+ u32 phy_reserved;
+
+ if (phyinterface & PHY_RGMII) {
+ phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
+ phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
+ phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
+ if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
+ phy_reserved |= PHY_CICADA_INIT5;
+ if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
+ phy_reserved |= PHY_CICADA_INIT6;
+ if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
+ return PHY_ERROR;
+
+ return 0;
+}
+
+static int init_vitesse(struct net_device *dev, struct fe_priv *np)
+{
+ u32 phy_reserved;
+
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG4, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG3, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+ phy_reserved |= PHY_VITESSE_INIT3;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG4, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+ phy_reserved |= PHY_VITESSE_INIT3;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG3, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG4, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
+ return PHY_ERROR;
+ phy_reserved = mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG3, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
+ phy_reserved |= PHY_VITESSE_INIT8;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
+ return PHY_ERROR;
+ if (mii_rw(dev, np->phyaddr,
+ PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
+ return PHY_ERROR;
+
+ return 0;
+}
+
static int phy_init(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
- u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
+ u32 phyinterface;
+ u32 mii_status, mii_control, mii_control_1000, reg;
/* phy errata for E3016 phy */
if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
reg &= ~PHY_MARVELL_E3016_INITMASK;
if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
- printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
+ netdev_info(dev, "%s: phy write to errata reg failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
}
if (np->phy_oui == PHY_OUI_REALTEK) {
if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
np->phy_rev == PHY_REV_REALTEK_8211B) {
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ if (init_realtek_8211b(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ } else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
+ np->phy_rev == PHY_REV_REALTEK_8211C) {
+ if (init_realtek_8211c(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
+ if (init_realtek_8201(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
}
- if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
- np->phy_rev == PHY_REV_REALTEK_8211C) {
- u32 powerstate = readl(base + NvRegPowerState2);
-
- /* need to perform hw phy reset */
- powerstate |= NVREG_POWERSTATE2_PHY_RESET;
- writel(powerstate, base + NvRegPowerState2);
- msleep(25);
-
- powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
- writel(powerstate, base + NvRegPowerState2);
- msleep(25);
-
- reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
- reg |= PHY_REALTEK_INIT9;
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
- if (!(reg & PHY_REALTEK_INIT11)) {
- reg |= PHY_REALTEK_INIT11;
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- if (np->phy_model == PHY_MODEL_REALTEK_8201) {
- if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
- phy_reserved |= PHY_REALTEK_INIT7;
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- }
}
/* set advertise register */
reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
- reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
+ reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL |
+ ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
- printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
+ netdev_info(dev, "%s: phy write to advertise failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
@@ -1302,7 +1416,8 @@ static int phy_init(struct net_device *dev)
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
if (mii_status & PHY_GIGABIT) {
np->gigabit = PHY_GIGABIT;
- mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+ mii_control_1000 = mii_rw(dev, np->phyaddr,
+ MII_CTRL1000, MII_READ);
mii_control_1000 &= ~ADVERTISE_1000HALF;
if (phyinterface & PHY_RGMII)
mii_control_1000 |= ADVERTISE_1000FULL;
@@ -1310,11 +1425,11 @@ static int phy_init(struct net_device *dev)
mii_control_1000 &= ~ADVERTISE_1000FULL;
if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
- }
- else
+ } else
np->gigabit = 0;
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1326,7 +1441,8 @@ static int phy_init(struct net_device *dev)
/* start autoneg since we already performed hw reset above */
mii_control |= BMCR_ANRESTART;
if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
- printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
} else {
@@ -1334,165 +1450,42 @@ static int phy_init(struct net_device *dev)
* (certain phys need bmcr to be setup with reset)
*/
if (phy_reset(dev, mii_control)) {
- printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
+ netdev_info(dev, "%s: phy reset failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
}
/* phy vendor specific configuration */
- if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
- phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
- phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
- phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
- if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
- phy_reserved |= PHY_CICADA_INIT5;
- if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- if (np->phy_oui == PHY_OUI_CICADA) {
- phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
- phy_reserved |= PHY_CICADA_INIT6;
- if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- if (np->phy_oui == PHY_OUI_VITESSE) {
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ if (np->phy_oui == PHY_OUI_CICADA) {
+ if (init_cicada(dev, np, phyinterface)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
- phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
- phy_reserved |= PHY_VITESSE_INIT3;
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ } else if (np->phy_oui == PHY_OUI_VITESSE) {
+ if (init_vitesse(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
- phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
- phy_reserved |= PHY_VITESSE_INIT3;
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
- phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
- phy_reserved |= PHY_VITESSE_INIT8;
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- if (np->phy_oui == PHY_OUI_REALTEK) {
+ } else if (np->phy_oui == PHY_OUI_REALTEK) {
if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
np->phy_rev == PHY_REV_REALTEK_8211B) {
/* reset could have cleared these out, set them back */
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ if (init_realtek_8211b(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
+ if (init_realtek_8201(dev, np) ||
+ init_realtek_8201_cross(dev, np)) {
+ netdev_info(dev, "%s: phy init failed\n",
+ pci_name(np->pci_dev));
return PHY_ERROR;
}
}
- if (np->phy_model == PHY_MODEL_REALTEK_8201) {
- if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
- phy_reserved |= PHY_REALTEK_INIT7;
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
- phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
- phy_reserved |= PHY_REALTEK_INIT3;
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
- printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
- return PHY_ERROR;
- }
- }
- }
}
/* some phys clear out pause advertisment on reset, set it back */
@@ -1501,12 +1494,10 @@ static int phy_init(struct net_device *dev)
/* restart auto negotiation, power down phy */
mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
- if (phy_power_down) {
+ if (phy_power_down)
mii_control |= BMCR_PDOWN;
- }
- if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
+ if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
return PHY_ERROR;
- }
return 0;
}
@@ -1517,7 +1508,6 @@ static void nv_start_rx(struct net_device *dev)
u8 __iomem *base = get_hwbase(dev);
u32 rx_ctrl = readl(base + NvRegReceiverControl);
- dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
/* Already running? Stop it. */
if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
rx_ctrl &= ~NVREG_RCVCTL_START;
@@ -1526,12 +1516,10 @@ static void nv_start_rx(struct net_device *dev)
}
writel(np->linkspeed, base + NvRegLinkSpeed);
pci_push(base);
- rx_ctrl |= NVREG_RCVCTL_START;
- if (np->mac_in_use)
+ rx_ctrl |= NVREG_RCVCTL_START;
+ if (np->mac_in_use)
rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
writel(rx_ctrl, base + NvRegReceiverControl);
- dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
- dev->name, np->duplex, np->linkspeed);
pci_push(base);
}
@@ -1541,15 +1529,15 @@ static void nv_stop_rx(struct net_device *dev)
u8 __iomem *base = get_hwbase(dev);
u32 rx_ctrl = readl(base + NvRegReceiverControl);
- dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
if (!np->mac_in_use)
rx_ctrl &= ~NVREG_RCVCTL_START;
else
rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
writel(rx_ctrl, base + NvRegReceiverControl);
- reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
- NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
- KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
+ if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
+ NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
+ netdev_info(dev, "%s: ReceiverStatus remained busy\n",
+ __func__);
udelay(NV_RXSTOP_DELAY2);
if (!np->mac_in_use)
@@ -1562,7 +1550,6 @@ static void nv_start_tx(struct net_device *dev)
u8 __iomem *base = get_hwbase(dev);
u32 tx_ctrl = readl(base + NvRegTransmitterControl);
- dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
tx_ctrl |= NVREG_XMITCTL_START;
if (np->mac_in_use)
tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
@@ -1576,15 +1563,15 @@ static void nv_stop_tx(struct net_device *dev)
u8 __iomem *base = get_hwbase(dev);
u32 tx_ctrl = readl(base + NvRegTransmitterControl);
- dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
if (!np->mac_in_use)
tx_ctrl &= ~NVREG_XMITCTL_START;
else
tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
writel(tx_ctrl, base + NvRegTransmitterControl);
- reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
- NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
- KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
+ if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
+ NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
+ netdev_info(dev, "%s: TransmitterStatus remained busy\n",
+ __func__);
udelay(NV_TXSTOP_DELAY2);
if (!np->mac_in_use)
@@ -1609,7 +1596,6 @@ static void nv_txrx_reset(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
udelay(NV_TXRX_RESET_DELAY);
@@ -1623,8 +1609,6 @@ static void nv_mac_reset(struct net_device *dev)
u8 __iomem *base = get_hwbase(dev);
u32 temp1, temp2, temp3;
- dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
-
writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
@@ -1745,7 +1729,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
static int nv_alloc_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
- struct ring_desc* less_rx;
+ struct ring_desc *less_rx;
less_rx = np->get_rx.orig;
if (less_rx-- == np->first_rx.orig)
@@ -1767,9 +1751,8 @@ static int nv_alloc_rx(struct net_device *dev)
np->put_rx.orig = np->first_rx.orig;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
- } else {
+ } else
return 1;
- }
}
return 0;
}
@@ -1777,7 +1760,7 @@ static int nv_alloc_rx(struct net_device *dev)
static int nv_alloc_rx_optimized(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
- struct ring_desc_ex* less_rx;
+ struct ring_desc_ex *less_rx;
less_rx = np->get_rx.ex;
if (less_rx-- == np->first_rx.ex)
@@ -1800,9 +1783,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
np->put_rx.ex = np->first_rx.ex;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
- } else {
+ } else
return 1;
- }
}
return 0;
}
@@ -2018,24 +2000,24 @@ static void nv_legacybackoff_reseed(struct net_device *dev)
/* Known Good seed sets */
static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
- {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
- {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
- {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
- {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
- {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
- {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
- {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
- {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};
+ {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
+ {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
+ {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
+ {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
+ {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
+ {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
+ {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
+ {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
- {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
- {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
- {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
- {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
- {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
- {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
- {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
- {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
+ {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
+ {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
+ {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+ {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
static void nv_gear_backoff_reseed(struct net_device *dev)
{
@@ -2083,13 +2065,12 @@ static void nv_gear_backoff_reseed(struct net_device *dev)
temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
- writel(temp,base + NvRegBackOffControl);
+ writel(temp, base + NvRegBackOffControl);
- /* Setup seeds for all gear LFSRs. */
+ /* Setup seeds for all gear LFSRs. */
get_random_bytes(&seedset, sizeof(seedset));
seedset = seedset % BACKOFF_SEEDSET_ROWS;
- for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++)
- {
+ for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
temp |= main_seedset[seedset][i-1] & 0x3ff;
temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
@@ -2113,10 +2094,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
- struct ring_desc* put_tx;
- struct ring_desc* start_tx;
- struct ring_desc* prev_tx;
- struct nv_skb_map* prev_tx_ctx;
+ struct ring_desc *put_tx;
+ struct ring_desc *start_tx;
+ struct ring_desc *prev_tx;
+ struct nv_skb_map *prev_tx_ctx;
unsigned long flags;
/* add fragments to entries count */
@@ -2204,18 +2185,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&np->lock, flags);
- dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
- dev->name, entries, tx_flags_extra);
- {
- int j;
- for (j=0; j<64; j++) {
- if ((j%16) == 0)
- dprintk("\n%03x:", j);
- dprintk(" %02x", ((unsigned char*)skb->data)[j]);
- }
- dprintk("\n");
- }
-
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
return NETDEV_TX_OK;
}
@@ -2233,11 +2202,11 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
u32 size = skb_headlen(skb);
u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
u32 empty_slots;
- struct ring_desc_ex* put_tx;
- struct ring_desc_ex* start_tx;
- struct ring_desc_ex* prev_tx;
- struct nv_skb_map* prev_tx_ctx;
- struct nv_skb_map* start_tx_ctx;
+ struct ring_desc_ex *put_tx;
+ struct ring_desc_ex *start_tx;
+ struct ring_desc_ex *prev_tx;
+ struct nv_skb_map *prev_tx_ctx;
+ struct nv_skb_map *start_tx_ctx;
unsigned long flags;
/* add fragments to entries count */
@@ -2355,18 +2324,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
spin_unlock_irqrestore(&np->lock, flags);
- dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
- dev->name, entries, tx_flags_extra);
- {
- int j;
- for (j=0; j<64; j++) {
- if ((j%16) == 0)
- dprintk("\n%03x:", j);
- dprintk(" %02x", ((unsigned char*)skb->data)[j]);
- }
- dprintk("\n");
- }
-
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
return NETDEV_TX_OK;
}
@@ -2399,15 +2356,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
struct fe_priv *np = netdev_priv(dev);
u32 flags;
int tx_work = 0;
- struct ring_desc* orig_get_tx = np->get_tx.orig;
+ struct ring_desc *orig_get_tx = np->get_tx.orig;
while ((np->get_tx.orig != np->put_tx.orig) &&
!((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
(tx_work < limit)) {
- dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
- dev->name, flags);
-
nv_unmap_txskb(np, np->get_tx_ctx);
if (np->desc_ver == DESC_VER_1) {
@@ -2464,15 +2418,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
struct fe_priv *np = netdev_priv(dev);
u32 flags;
int tx_work = 0;
- struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
+ struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
while ((np->get_tx.ex != np->put_tx.ex) &&
!((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
(tx_work < limit)) {
- dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
- dev->name, flags);
-
nv_unmap_txskb(np, np->get_tx_ctx);
if (flags & NV_TX2_LASTPACKET) {
@@ -2491,9 +2442,8 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
np->get_tx_ctx->skb = NULL;
tx_work++;
- if (np->tx_limit) {
+ if (np->tx_limit)
nv_tx_flip_ownership(dev);
- }
}
if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
np->get_tx.ex = np->first_tx.ex;
@@ -2518,57 +2468,56 @@ static void nv_tx_timeout(struct net_device *dev)
u32 status;
union ring_type put_tx;
int saved_tx_limit;
+ int i;
if (np->msi_flags & NV_MSI_X_ENABLED)
status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
else
status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
- printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
-
- {
- int i;
-
- printk(KERN_INFO "%s: Ring at %lx\n",
- dev->name, (unsigned long)np->ring_addr);
- printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
- for (i=0;i<=np->register_size;i+= 32) {
- printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
- i,
- readl(base + i + 0), readl(base + i + 4),
- readl(base + i + 8), readl(base + i + 12),
- readl(base + i + 16), readl(base + i + 20),
- readl(base + i + 24), readl(base + i + 28));
- }
- printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
- for (i=0;i<np->tx_ring_size;i+= 4) {
- if (!nv_optimized(np)) {
- printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
- i,
- le32_to_cpu(np->tx_ring.orig[i].buf),
- le32_to_cpu(np->tx_ring.orig[i].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+1].buf),
- le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+2].buf),
- le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+3].buf),
- le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
- } else {
- printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
- i,
- le32_to_cpu(np->tx_ring.ex[i].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i].buflow),
- le32_to_cpu(np->tx_ring.ex[i].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+1].buflow),
- le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+2].buflow),
- le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+3].buflow),
- le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
- }
+ netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
+
+ netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
+ netdev_info(dev, "Dumping tx registers\n");
+ for (i = 0; i <= np->register_size; i += 32) {
+ netdev_info(dev,
+ "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ i,
+ readl(base + i + 0), readl(base + i + 4),
+ readl(base + i + 8), readl(base + i + 12),
+ readl(base + i + 16), readl(base + i + 20),
+ readl(base + i + 24), readl(base + i + 28));
+ }
+ netdev_info(dev, "Dumping tx ring\n");
+ for (i = 0; i < np->tx_ring_size; i += 4) {
+ if (!nv_optimized(np)) {
+ netdev_info(dev,
+ "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.orig[i].buf),
+ le32_to_cpu(np->tx_ring.orig[i].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+1].buf),
+ le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+2].buf),
+ le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+3].buf),
+ le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
+ } else {
+ netdev_info(dev,
+ "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i].buflow),
+ le32_to_cpu(np->tx_ring.ex[i].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
}
}
@@ -2616,15 +2565,13 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
int protolen; /* length as stored in the proto field */
/* 1) calculate len according to header */
- if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
- protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
+ if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
+ protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
hdrlen = VLAN_HLEN;
} else {
- protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
+ protolen = ntohs(((struct ethhdr *)packet)->h_proto);
hdrlen = ETH_HLEN;
}
- dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
- dev->name, datalen, protolen, hdrlen);
if (protolen > ETH_DATA_LEN)
return datalen; /* Value in proto field not a len, no checks possible */
@@ -2635,26 +2582,18 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
	/* more data on wire than in 802 header, trim off
* additional data.
*/
- dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
- dev->name, protolen);
return protolen;
} else {
/* less data on wire than mentioned in header.
* Discard the packet.
*/
- dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
- dev->name);
return -1;
}
} else {
/* short packet. Accept only if 802 values are also short */
if (protolen > ETH_ZLEN) {
- dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
- dev->name);
return -1;
}
- dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
- dev->name, datalen);
return datalen;
}
}
@@ -2667,13 +2606,10 @@ static int nv_rx_process(struct net_device *dev, int limit)
struct sk_buff *skb;
int len;
- while((np->get_rx.orig != np->put_rx.orig) &&
+ while ((np->get_rx.orig != np->put_rx.orig) &&
!((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
(rx_work < limit)) {
- dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
- dev->name, flags);
-
/*
* the packet is for us - immediately tear down the pci mapping.
* TODO: check if a prefetch of the first cacheline improves
@@ -2685,16 +2621,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
skb = np->get_rx_ctx->skb;
np->get_rx_ctx->skb = NULL;
- {
- int j;
- dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
- for (j=0; j<64; j++) {
- if ((j%16) == 0)
- dprintk("\n%03x:", j);
- dprintk(" %02x", ((unsigned char*)skb->data)[j]);
- }
- dprintk("\n");
- }
/* look at what we actually got: */
if (np->desc_ver == DESC_VER_1) {
if (likely(flags & NV_RX_DESCRIPTORVALID)) {
@@ -2710,9 +2636,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
- if (flags & NV_RX_SUBSTRACT1) {
+ if (flags & NV_RX_SUBSTRACT1)
len--;
- }
}
/* the rest are hard errors */
else {
@@ -2745,9 +2670,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
- if (flags & NV_RX2_SUBSTRACT1) {
+ if (flags & NV_RX2_SUBSTRACT1)
len--;
- }
}
/* the rest are hard errors */
else {
@@ -2771,8 +2695,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
/* got a valid packet - forward it to the network core */
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
- dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
- dev->name, len, skb->protocol);
napi_gro_receive(&np->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
@@ -2797,13 +2719,10 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
struct sk_buff *skb;
int len;
- while((np->get_rx.ex != np->put_rx.ex) &&
+ while ((np->get_rx.ex != np->put_rx.ex) &&
!((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
(rx_work < limit)) {
- dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
- dev->name, flags);
-
/*
* the packet is for us - immediately tear down the pci mapping.
* TODO: check if a prefetch of the first cacheline improves
@@ -2815,16 +2734,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
skb = np->get_rx_ctx->skb;
np->get_rx_ctx->skb = NULL;
- {
- int j;
- dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
- for (j=0; j<64; j++) {
- if ((j%16) == 0)
- dprintk("\n%03x:", j);
- dprintk(" %02x", ((unsigned char*)skb->data)[j]);
- }
- dprintk("\n");
- }
/* look at what we actually got: */
if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
len = flags & LEN_MASK_V2;
@@ -2838,9 +2747,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
- if (flags & NV_RX2_SUBSTRACT1) {
+ if (flags & NV_RX2_SUBSTRACT1)
len--;
- }
}
/* the rest are hard errors */
else {
@@ -2858,9 +2766,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
prefetch(skb->data);
- dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
- dev->name, len, skb->protocol);
-
if (likely(!np->vlangrp)) {
napi_gro_receive(&np->napi, skb);
} else {
@@ -2949,7 +2854,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -2986,7 +2891,7 @@ static void nv_copy_mac_to_hw(struct net_device *dev)
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
struct fe_priv *np = netdev_priv(dev);
- struct sockaddr *macaddr = (struct sockaddr*)addr;
+ struct sockaddr *macaddr = (struct sockaddr *)addr;
if (!is_valid_ether_addr(macaddr->sa_data))
return -EADDRNOTAVAIL;
@@ -3076,8 +2981,6 @@ static void nv_set_multicast(struct net_device *dev)
writel(mask[0], base + NvRegMulticastMaskA);
writel(mask[1], base + NvRegMulticastMaskB);
writel(pff, base + NvRegPacketFilterFlags);
- dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
- dev->name);
nv_start_rx(dev);
spin_unlock_irq(&np->lock);
}
@@ -3152,8 +3055,6 @@ static int nv_update_linkspeed(struct net_device *dev)
mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
if (!(mii_status & BMSR_LSTATUS)) {
- dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
- dev->name);
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
retval = 0;
@@ -3161,8 +3062,6 @@ static int nv_update_linkspeed(struct net_device *dev)
}
if (np->autoneg == 0) {
- dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
- dev->name, np->fixed_mode);
if (np->fixed_mode & LPA_100FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 1;
@@ -3185,14 +3084,11 @@ static int nv_update_linkspeed(struct net_device *dev)
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
retval = 0;
- dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
goto set_speed;
}
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
- dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
- dev->name, adv, lpa);
retval = 1;
if (np->gigabit == PHY_GIGABIT) {
@@ -3201,8 +3097,6 @@ static int nv_update_linkspeed(struct net_device *dev)
if ((control_1000 & ADVERTISE_1000FULL) &&
(status_1000 & LPA_1000FULL)) {
- dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
- dev->name);
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
newdup = 1;
goto set_speed;
@@ -3224,7 +3118,6 @@ static int nv_update_linkspeed(struct net_device *dev)
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
} else {
- dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
}
@@ -3233,9 +3126,6 @@ set_speed:
if (np->duplex == newdup && np->linkspeed == newls)
return retval;
- dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
- dev->name, np->linkspeed, np->duplex, newls, newdup);
-
np->duplex = newdup;
np->linkspeed = newls;
@@ -3302,7 +3192,7 @@ set_speed:
}
writel(txreg, base + NvRegTxWatermark);
- writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
+ writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
base + NvRegMisc1);
pci_push(base);
writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -3312,8 +3202,8 @@ set_speed:
/* setup pause frame */
if (np->duplex != 0) {
if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
- adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
- lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
+ adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
switch (adv_pause) {
case ADVERTISE_PAUSE_CAP:
@@ -3324,22 +3214,17 @@ set_speed:
}
break;
case ADVERTISE_PAUSE_ASYM:
- if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
- {
+ if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
- }
break;
- case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
- if (lpa_pause & LPA_PAUSE_CAP)
- {
+ case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
+ if (lpa_pause & LPA_PAUSE_CAP) {
pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
}
if (lpa_pause == LPA_PAUSE_ASYM)
- {
pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
- }
break;
}
} else {
@@ -3361,14 +3246,14 @@ static void nv_linkchange(struct net_device *dev)
if (nv_update_linkspeed(dev)) {
if (!netif_carrier_ok(dev)) {
netif_carrier_on(dev);
- printk(KERN_INFO "%s: link up.\n", dev->name);
+ netdev_info(dev, "link up\n");
nv_txrx_gate(dev, false);
nv_start_rx(dev);
}
} else {
if (netif_carrier_ok(dev)) {
netif_carrier_off(dev);
- printk(KERN_INFO "%s: link down.\n", dev->name);
+ netdev_info(dev, "link down\n");
nv_txrx_gate(dev, true);
nv_stop_rx(dev);
}
@@ -3382,11 +3267,9 @@ static void nv_link_irq(struct net_device *dev)
miistat = readl(base + NvRegMIIStatus);
writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
- dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
if (miistat & (NVREG_MIISTAT_LINKCHANGE))
nv_linkchange(dev);
- dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
static void nv_msi_workaround(struct fe_priv *np)
@@ -3437,8 +3320,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
-
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
np->events = readl(base + NvRegIrqStatus);
writel(np->events, base + NvRegIrqStatus);
@@ -3446,7 +3327,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
np->events = readl(base + NvRegMSIXIrqStatus);
writel(np->events, base + NvRegMSIXIrqStatus);
}
- dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
if (!(np->events & np->irqmask))
return IRQ_NONE;
@@ -3460,8 +3340,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
__napi_schedule(&np->napi);
}
- dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
-
return IRQ_HANDLED;
}
@@ -3476,8 +3354,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
-
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
np->events = readl(base + NvRegIrqStatus);
writel(np->events, base + NvRegIrqStatus);
@@ -3485,7 +3361,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
np->events = readl(base + NvRegMSIXIrqStatus);
writel(np->events, base + NvRegMSIXIrqStatus);
}
- dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
if (!(np->events & np->irqmask))
return IRQ_NONE;
@@ -3498,7 +3373,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
writel(0, base + NvRegIrqMask);
__napi_schedule(&np->napi);
}
- dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
return IRQ_HANDLED;
}
@@ -3512,12 +3386,9 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
int i;
unsigned long flags;
- dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
-
- for (i=0; ; i++) {
+ for (i = 0;; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
- dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
@@ -3536,12 +3407,12 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
- printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
+ netdev_dbg(dev, "%s: too many iterations (%d)\n",
+ __func__, i);
break;
}
}
- dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
return IRQ_RETVAL(i);
}
@@ -3553,7 +3424,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
u8 __iomem *base = get_hwbase(dev);
unsigned long flags;
int retcode;
- int rx_count, tx_work=0, rx_work=0;
+ int rx_count, tx_work = 0, rx_work = 0;
do {
if (!nv_optimized(np)) {
@@ -3626,12 +3497,9 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
int i;
unsigned long flags;
- dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
-
- for (i=0; ; i++) {
+ for (i = 0;; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
- dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
@@ -3655,11 +3523,11 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
- printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
+ netdev_dbg(dev, "%s: too many iterations (%d)\n",
+ __func__, i);
break;
}
}
- dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
return IRQ_RETVAL(i);
}
@@ -3673,12 +3541,9 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
int i;
unsigned long flags;
- dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
-
- for (i=0; ; i++) {
+ for (i = 0;; i++) {
events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
- dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
if (!(events & np->irqmask))
break;
@@ -3723,12 +3588,12 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
- printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
+ netdev_dbg(dev, "%s: too many iterations (%d)\n",
+ __func__, i);
break;
}
}
- dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
return IRQ_RETVAL(i);
}
@@ -3740,8 +3605,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
u8 __iomem *base = get_hwbase(dev);
u32 events;
- dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
-
if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
@@ -3750,7 +3613,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
}
pci_push(base);
- dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
if (!(events & NVREG_IRQ_TIMER))
return IRQ_RETVAL(0);
@@ -3760,8 +3622,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
np->intr_test = 1;
spin_unlock(&np->lock);
- dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
-
return IRQ_RETVAL(1);
}
@@ -3776,17 +3636,15 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
* the remaining 8 interrupts.
*/
for (i = 0; i < 8; i++) {
- if ((irqmask >> i) & 0x1) {
+ if ((irqmask >> i) & 0x1)
msixmap |= vector << (i << 2);
- }
}
writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
msixmap = 0;
for (i = 0; i < 8; i++) {
- if ((irqmask >> (i + 8)) & 0x1) {
+ if ((irqmask >> (i + 8)) & 0x1)
msixmap |= vector << (i << 2);
- }
}
writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
@@ -3809,17 +3667,19 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
}
if (np->msi_flags & NV_MSI_X_CAPABLE) {
- for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
np->msi_x_entry[i].entry = i;
- }
- if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+ ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
+ if (ret == 0) {
np->msi_flags |= NV_MSI_X_ENABLED;
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
/* Request irq for rx handling */
sprintf(np->name_rx, "%s-rx", dev->name);
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+ netdev_info(dev,
+ "request_irq failed for rx %d\n",
+ ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_err;
@@ -3828,7 +3688,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
sprintf(np->name_tx, "%s-tx", dev->name);
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+ netdev_info(dev,
+ "request_irq failed for tx %d\n",
+ ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_free_rx;
@@ -3837,7 +3699,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
sprintf(np->name_other, "%s-other", dev->name);
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+ netdev_info(dev,
+ "request_irq failed for link %d\n",
+ ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_free_tx;
@@ -3851,7 +3715,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
} else {
/* Request irq for all interrupts */
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ netdev_info(dev,
+ "request_irq failed %d\n",
+ ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_err;
@@ -3864,11 +3730,13 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
}
}
if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
- if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+ ret = pci_enable_msi(np->pci_dev);
+ if (ret == 0) {
np->msi_flags |= NV_MSI_ENABLED;
dev->irq = np->pci_dev->irq;
if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ netdev_info(dev, "request_irq failed %d\n",
+ ret);
pci_disable_msi(np->pci_dev);
np->msi_flags &= ~NV_MSI_ENABLED;
dev->irq = np->pci_dev->irq;
@@ -3903,9 +3771,8 @@ static void nv_free_irq(struct net_device *dev)
int i;
if (np->msi_flags & NV_MSI_X_ENABLED) {
- for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
free_irq(np->msi_x_entry[i].vector, dev);
- }
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
} else {
@@ -3954,7 +3821,7 @@ static void nv_do_nic_poll(unsigned long data)
if (np->recover_error) {
np->recover_error = 0;
- printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name);
+ netdev_info(dev, "MAC in recoverable error state\n");
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
@@ -3975,7 +3842,7 @@ static void nv_do_nic_poll(unsigned long data)
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4082,6 +3949,7 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
writel(flags, base + NvRegWakeUpFlags);
spin_unlock_irq(&np->lock);
}
+ device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
return 0;
}
@@ -4105,7 +3973,7 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
if (netif_carrier_ok(dev)) {
- switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
+ switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
case NVREG_LINKSPEED_10:
ecmd->speed = SPEED_10;
break;
@@ -4250,14 +4118,14 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
if (netif_running(dev))
- printk(KERN_INFO "%s: link down.\n", dev->name);
+ netdev_info(dev, "link down\n");
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
bmcr |= BMCR_ANENABLE;
/* reset the phy in order for settings to stick,
* and cause autoneg to start */
if (phy_reset(dev, bmcr)) {
- printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ netdev_info(dev, "phy reset failed\n");
return -EINVAL;
}
} else {
@@ -4306,7 +4174,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (np->phy_oui == PHY_OUI_MARVELL) {
/* reset the phy in order for forced mode settings to stick */
if (phy_reset(dev, bmcr)) {
- printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ netdev_info(dev, "phy reset failed\n");
return -EINVAL;
}
} else {
@@ -4344,7 +4212,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
regs->version = FORCEDETH_REGS_VER;
spin_lock_irq(&np->lock);
- for (i = 0;i <= np->register_size/sizeof(u32); i++)
+ for (i = 0; i <= np->register_size/sizeof(u32); i++)
rbuf[i] = readl(base + i*sizeof(u32));
spin_unlock_irq(&np->lock);
}
@@ -4368,7 +4236,7 @@ static int nv_nway_reset(struct net_device *dev)
spin_unlock(&np->lock);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
- printk(KERN_INFO "%s: link down.\n", dev->name);
+ netdev_info(dev, "link down\n");
}
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -4376,7 +4244,7 @@ static int nv_nway_reset(struct net_device *dev)
bmcr |= BMCR_ANENABLE;
 		/* reset the phy in order for settings to stick */
if (phy_reset(dev, bmcr)) {
- printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+ netdev_info(dev, "phy reset failed\n");
return -EINVAL;
}
} else {
@@ -4464,10 +4332,9 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
rxtx_ring, ring_addr);
}
- if (rx_skbuff)
- kfree(rx_skbuff);
- if (tx_skbuff)
- kfree(tx_skbuff);
+
+ kfree(rx_skbuff);
+ kfree(tx_skbuff);
goto exit;
}
@@ -4491,14 +4358,14 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
np->tx_ring_size = ring->tx_pending;
if (!nv_optimized(np)) {
- np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
+ np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
} else {
- np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
+ np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
}
- np->rx_skb = (struct nv_skb_map*)rx_skbuff;
- np->tx_skb = (struct nv_skb_map*)tx_skbuff;
+ np->rx_skb = (struct nv_skb_map *)rx_skbuff;
+ np->tx_skb = (struct nv_skb_map *)tx_skbuff;
np->ring_addr = ring_addr;
memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
@@ -4515,7 +4382,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
/* reinit nic view of the queues */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4550,12 +4417,11 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
if ((!np->autoneg && np->duplex == 0) ||
(np->autoneg && !pause->autoneg && np->duplex == 0)) {
- printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
- dev->name);
+ netdev_info(dev, "can not set pause settings when forced link is in half duplex\n");
return -EINVAL;
}
if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
- printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
+ netdev_info(dev, "hardware does not support tx pause frames\n");
return -EINVAL;
}
@@ -4590,7 +4456,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
if (netif_running(dev))
- printk(KERN_INFO "%s: link down.\n", dev->name);
+ netdev_info(dev, "link down\n");
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
@@ -4841,7 +4707,7 @@ static int nv_loopback_test(struct net_device *dev)
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
@@ -4852,8 +4718,7 @@ static int nv_loopback_test(struct net_device *dev)
pkt_len = ETH_DATA_LEN;
tx_skb = dev_alloc_skb(pkt_len);
if (!tx_skb) {
- printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
- " of %s\n", dev->name);
+ netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
ret = 0;
goto out;
}
@@ -4893,29 +4758,22 @@ static int nv_loopback_test(struct net_device *dev)
if (flags & NV_RX_ERROR)
ret = 0;
} else {
- if (flags & NV_RX2_ERROR) {
+ if (flags & NV_RX2_ERROR)
ret = 0;
- }
}
if (ret) {
if (len != pkt_len) {
ret = 0;
- dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
- dev->name, len, pkt_len);
} else {
rx_skb = np->rx_skb[0].skb;
for (i = 0; i < pkt_len; i++) {
if (rx_skb->data[i] != (u8)(i & 0xff)) {
ret = 0;
- dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
- dev->name, i);
break;
}
}
}
- } else {
- dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
}
pci_unmap_single(np->pci_dev, test_dma_addr,
@@ -4958,11 +4816,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
netif_addr_lock(dev);
spin_lock_irq(&np->lock);
nv_disable_hw_interrupts(dev, np->irqmask);
- if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+ if (!(np->msi_flags & NV_MSI_X_ENABLED))
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
- } else {
+ else
writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
- }
/* stop engines */
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
@@ -5003,7 +4860,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
/* reinit nic view of the rx queue */
writel(np->rx_buf_sz, base + NvRegOffloadConfig);
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
pci_push(base);
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -5106,8 +4963,7 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
np->mgmt_sema = 1;
return 1;
- }
- else
+ } else
udelay(50);
}
@@ -5167,8 +5023,6 @@ static int nv_open(struct net_device *dev)
int oom, i;
u32 low;
- dprintk(KERN_DEBUG "nv_open: begin\n");
-
/* power up phy */
mii_rw(dev, np->phyaddr, MII_BMCR,
mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
@@ -5204,7 +5058,7 @@ static int nv_open(struct net_device *dev)
/* give hw rings */
setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
- writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+ writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -5216,9 +5070,11 @@ static int nv_open(struct net_device *dev)
writel(np->vlanctl_bits, base + NvRegVlanControl);
pci_push(base);
writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
- reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
- NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
- KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
+ if (reg_delay(dev, NvRegUnknownSetupReg5,
+ NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
+ NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
+ netdev_info(dev,
+ "%s: SetupReg5, Bit 31 remained off\n", __func__);
writel(0, base + NvRegMIIMask);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
@@ -5251,8 +5107,7 @@ static int nv_open(struct net_device *dev)
writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
else
writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
- }
- else
+ } else
writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
@@ -5263,7 +5118,7 @@ static int nv_open(struct net_device *dev)
writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
i = readl(base + NvRegPowerState);
- if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
+ if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
pci_push(base);
@@ -5276,9 +5131,8 @@ static int nv_open(struct net_device *dev)
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
pci_push(base);
- if (nv_request_irq(dev, 0)) {
+ if (nv_request_irq(dev, 0))
goto out_drain;
- }
/* ask for interrupts */
nv_enable_hw_interrupts(dev, np->irqmask);
@@ -5296,7 +5150,6 @@ static int nv_open(struct net_device *dev)
u32 miistat;
miistat = readl(base + NvRegMIIStatus);
writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
- dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
}
/* set linkspeed to invalid value, thus force nv_update_linkspeed
* to init hw */
@@ -5309,7 +5162,7 @@ static int nv_open(struct net_device *dev)
if (ret) {
netif_carrier_on(dev);
} else {
- printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
+ netdev_info(dev, "no link during initialization\n");
netif_carrier_off(dev);
}
if (oom)
@@ -5352,7 +5205,6 @@ static int nv_close(struct net_device *dev)
base = get_hwbase(dev);
nv_disable_hw_interrupts(dev, np->irqmask);
pci_push(base);
- dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
spin_unlock_irq(&np->lock);
@@ -5421,8 +5273,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
static int printed_version;
if (!printed_version++)
- printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
- " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);
+ pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
+ FORCEDETH_VERSION);
dev = alloc_etherdev(sizeof(struct fe_priv));
err = -ENOMEM;
@@ -5465,10 +5317,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
err = -EINVAL;
addr = 0;
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
- pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
- pci_resource_len(pci_dev, i),
- pci_resource_flags(pci_dev, i));
if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
pci_resource_len(pci_dev, i) >= np->register_size) {
addr = pci_resource_start(pci_dev, i);
@@ -5476,8 +5324,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
}
}
if (i == DEVICE_COUNT_RESOURCE) {
- dev_printk(KERN_INFO, &pci_dev->dev,
- "Couldn't find register window\n");
+ dev_info(&pci_dev->dev, "Couldn't find register window\n");
goto out_relreg;
}
@@ -5493,13 +5340,13 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
if (dma_64bit) {
if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
- dev_printk(KERN_INFO, &pci_dev->dev,
- "64-bit DMA failed, using 32-bit addressing\n");
+ dev_info(&pci_dev->dev,
+ "64-bit DMA failed, using 32-bit addressing\n");
else
dev->features |= NETIF_F_HIGHDMA;
if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
- dev_printk(KERN_INFO, &pci_dev->dev,
- "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
+ dev_info(&pci_dev->dev,
+ "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
}
}
} else if (id->driver_data & DEV_HAS_LARGEDESC) {
@@ -5620,7 +5467,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
- printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
+ dev_dbg(&pci_dev->dev,
+ "%s: set workaround bit for reversed mac addr\n",
+ __func__);
}
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
@@ -5629,28 +5478,21 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
*/
- dev_printk(KERN_ERR, &pci_dev->dev,
- "Invalid Mac address detected: %pM\n",
- dev->dev_addr);
- dev_printk(KERN_ERR, &pci_dev->dev,
- "Please complain to your hardware vendor. Switching to a random MAC.\n");
+ dev_err(&pci_dev->dev,
+ "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
+ dev->dev_addr);
random_ether_addr(dev->dev_addr);
+ dev_err(&pci_dev->dev,
+ "Using random MAC address: %pM\n", dev->dev_addr);
}
- dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
- pci_name(pci_dev), dev->dev_addr);
-
/* set mac address */
nv_copy_mac_to_hw(dev);
- /* Workaround current PCI init glitch: wakeup bits aren't
- * being set from PCI PM capability.
- */
- device_init_wakeup(&pci_dev->dev, 1);
-
/* disable WOL */
writel(0, base + NvRegWakeUpFlags);
np->wolenabled = 0;
+ device_set_wakeup_enable(&pci_dev->dev, false);
if (id->driver_data & DEV_HAS_POWER_CNTRL) {
@@ -5663,16 +5505,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
writel(powerstate, base + NvRegPowerState2);
}
- if (np->desc_ver == DESC_VER_1) {
+ if (np->desc_ver == DESC_VER_1)
np->tx_flags = NV_TX_VALID;
- } else {
+ else
np->tx_flags = NV_TX2_VALID;
- }
np->msi_flags = 0;
- if ((id->driver_data & DEV_HAS_MSI) && msi) {
+ if ((id->driver_data & DEV_HAS_MSI) && msi)
np->msi_flags |= NV_MSI_CAPABLE;
- }
+
if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
/* msix has had reported issues when modifying irqmask
as in the case of napi, therefore, disable for now
@@ -5702,11 +5543,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (id->driver_data & DEV_NEED_TIMERIRQ)
np->irqmask |= NVREG_IRQ_TIMER;
if (id->driver_data & DEV_NEED_LINKTIMER) {
- dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
np->need_linktimer = 1;
np->link_timeout = jiffies + LINK_TIMEOUT;
} else {
- dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
np->need_linktimer = 0;
}
@@ -5735,19 +5574,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
nv_mgmt_acquire_sema(dev) &&
nv_mgmt_get_version(dev)) {
np->mac_in_use = 1;
- if (np->mgmt_version > 0) {
+ if (np->mgmt_version > 0)
np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
- }
- dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
- pci_name(pci_dev), np->mac_in_use);
/* management unit setup the phy already? */
if (np->mac_in_use &&
((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
NVREG_XMITCTL_SYNC_PHY_INIT)) {
/* phy is inited by mgmt unit */
phyinitialized = 1;
- dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
- pci_name(pci_dev));
} else {
/* we need to init the phy */
}
@@ -5773,8 +5607,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->phy_model = id2 & PHYID2_MODEL_MASK;
id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
- dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
- pci_name(pci_dev), id1, id2, phyaddr);
np->phyaddr = phyaddr;
np->phy_oui = id1 | id2;
@@ -5788,8 +5620,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
break;
}
if (i == 33) {
- dev_printk(KERN_INFO, &pci_dev->dev,
- "open: Could not find a valid PHY.\n");
+ dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
goto out_error;
}
@@ -5799,9 +5630,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
} else {
/* see if it is a gigabit phy */
u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
- if (mii_status & PHY_GIGABIT) {
+ if (mii_status & PHY_GIGABIT)
np->gigabit = PHY_GIGABIT;
- }
}
/* set default link speed settings */
@@ -5811,37 +5641,29 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
err = register_netdev(dev);
if (err) {
- dev_printk(KERN_INFO, &pci_dev->dev,
- "unable to register netdev: %d\n", err);
+ dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
goto out_error;
}
- dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
- "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
- dev->name,
- np->phy_oui,
- np->phyaddr,
- dev->dev_addr[0],
- dev->dev_addr[1],
- dev->dev_addr[2],
- dev->dev_addr[3],
- dev->dev_addr[4],
- dev->dev_addr[5]);
-
- dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
- dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
- dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
- "csum " : "",
- dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
- "vlan " : "",
- id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
- id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
- id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
- np->gigabit == PHY_GIGABIT ? "gbit " : "",
- np->need_linktimer ? "lnktim " : "",
- np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
- np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
- np->desc_ver);
+ netif_carrier_off(dev);
+
+ dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
+ dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
+
+ dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
+ dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
+ dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
+ "csum " : "",
+ dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
+ "vlan " : "",
+ id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
+ id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
+ id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
+ np->gigabit == PHY_GIGABIT ? "gbit " : "",
+ np->need_linktimer ? "lnktim " : "",
+ np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
+ np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
+ np->desc_ver);
return 0;
@@ -5922,45 +5744,38 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
pci_set_drvdata(pci_dev, NULL);
}
-#ifdef CONFIG_PM
-static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int nv_suspend(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int i;
if (netif_running(dev)) {
- // Gross.
+ /* Gross. */
nv_close(dev);
}
netif_device_detach(dev);
/* save non-pci configuration space */
- for (i = 0;i <= np->register_size/sizeof(u32); i++)
+ for (i = 0; i <= np->register_size/sizeof(u32); i++)
np->saved_config_space[i] = readl(base + i*sizeof(u32));
- pci_save_state(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int nv_resume(struct pci_dev *pdev)
+static int nv_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int i, rc = 0;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- /* ack any pending wake events, disable PME */
- pci_enable_wake(pdev, PCI_D0, 0);
-
/* restore non-pci configuration space */
- for (i = 0;i <= np->register_size/sizeof(u32); i++)
+ for (i = 0; i <= np->register_size/sizeof(u32); i++)
writel(np->saved_config_space[i], base+i*sizeof(u32));
if (np->driver_data & DEV_NEED_MSI_FIX)
@@ -5977,6 +5792,14 @@ static int nv_resume(struct pci_dev *pdev)
return rc;
}
+static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
+#define NV_PM_OPS (&nv_pm_ops)
+
+#else
+#define NV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
static void nv_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -5990,9 +5813,8 @@ static void nv_shutdown(struct pci_dev *pdev)
* If we really go for poweroff, we must not restore the MAC,
* otherwise the MAC for WOL will be reversed at least on some boards.
*/
- if (system_state != SYSTEM_POWER_OFF) {
+ if (system_state != SYSTEM_POWER_OFF)
nv_restore_mac_addr(pdev);
- }
pci_disable_device(pdev);
/*
@@ -6000,15 +5822,12 @@ static void nv_shutdown(struct pci_dev *pdev)
* only put the device into D3 if we really go for poweroff.
*/
if (system_state == SYSTEM_POWER_OFF) {
- if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
- pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
+ pci_wake_from_d3(pdev, np->wolenabled);
pci_set_power_state(pdev, PCI_D3hot);
}
}
#else
-#define nv_suspend NULL
#define nv_shutdown NULL
-#define nv_resume NULL
#endif /* CONFIG_PM */
static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
@@ -6180,9 +5999,8 @@ static struct pci_driver driver = {
.id_table = pci_tbl,
.probe = nv_probe,
.remove = __devexit_p(nv_remove),
- .suspend = nv_suspend,
- .resume = nv_resume,
.shutdown = nv_shutdown,
+ .driver.pm = NV_PM_OPS,
};
static int __init init_nic(void)
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index d684f187de57..24cb953900dd 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -40,6 +40,7 @@
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
+#include <linux/of_net.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
@@ -997,8 +998,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
#endif
};
-static int __devinit fs_enet_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit fs_enet_probe(struct platform_device *ofdev)
{
struct net_device *ndev;
struct fs_enet_private *fep;
@@ -1007,11 +1007,14 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev,
const u8 *mac_addr;
int privsize, len, ret = -ENODEV;
+ if (!ofdev->dev.of_match)
+ return -EINVAL;
+
fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
if (!fpi)
return -ENOMEM;
- if (!IS_FEC(match)) {
+ if (!IS_FEC(ofdev->dev.of_match)) {
data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
if (!data || len != 4)
goto out_free_fpi;
@@ -1046,7 +1049,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev,
fep->dev = &ofdev->dev;
fep->ndev = ndev;
fep->fpi = fpi;
- fep->ops = match->data;
+ fep->ops = ofdev->dev.of_match->data;
ret = fep->ops->setup_data(ndev);
if (ret)
@@ -1155,7 +1158,7 @@ static struct of_device_id fs_enet_match[] = {
};
MODULE_DEVICE_TABLE(of, fs_enet_match);
-static struct of_platform_driver fs_enet_driver = {
+static struct platform_driver fs_enet_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "fs_enet",
@@ -1167,12 +1170,12 @@ static struct of_platform_driver fs_enet_driver = {
static int __init fs_init(void)
{
- return of_register_platform_driver(&fs_enet_driver);
+ return platform_driver_register(&fs_enet_driver);
}
static void __exit fs_cleanup(void)
{
- of_unregister_platform_driver(&fs_enet_driver);
+ platform_driver_unregister(&fs_enet_driver);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 3cda2b515471..ad2975440719 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -150,8 +150,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
return 0;
}
-static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
{
struct mii_bus *new_bus;
struct bb_info *bitbang;
@@ -223,7 +222,7 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
};
MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
-static struct of_platform_driver fs_enet_bb_mdio_driver = {
+static struct platform_driver fs_enet_bb_mdio_driver = {
.driver = {
.name = "fsl-bb-mdio",
.owner = THIS_MODULE,
@@ -235,12 +234,12 @@ static struct of_platform_driver fs_enet_bb_mdio_driver = {
static int fs_enet_mdio_bb_init(void)
{
- return of_register_platform_driver(&fs_enet_bb_mdio_driver);
+ return platform_driver_register(&fs_enet_bb_mdio_driver);
}
static void fs_enet_mdio_bb_exit(void)
{
- of_unregister_platform_driver(&fs_enet_bb_mdio_driver);
+ platform_driver_unregister(&fs_enet_bb_mdio_driver);
}
module_init(fs_enet_mdio_bb_init);
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index dbb9c48623df..7e840d373ab3 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -101,15 +101,18 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus)
return 0;
}
-static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
{
struct resource res;
struct mii_bus *new_bus;
struct fec_info *fec;
- int (*get_bus_freq)(struct device_node *) = match->data;
+ int (*get_bus_freq)(struct device_node *);
int ret = -ENOMEM, clock, speed;
+ if (!ofdev->dev.of_match)
+ return -EINVAL;
+ get_bus_freq = ofdev->dev.of_match->data;
+
new_bus = mdiobus_alloc();
if (!new_bus)
goto out;
@@ -221,7 +224,7 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
};
MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
-static struct of_platform_driver fs_enet_fec_mdio_driver = {
+static struct platform_driver fs_enet_fec_mdio_driver = {
.driver = {
.name = "fsl-fec-mdio",
.owner = THIS_MODULE,
@@ -233,12 +236,12 @@ static struct of_platform_driver fs_enet_fec_mdio_driver = {
static int fs_enet_mdio_fec_init(void)
{
- return of_register_platform_driver(&fs_enet_fec_mdio_driver);
+ return platform_driver_register(&fs_enet_fec_mdio_driver);
}
static void fs_enet_mdio_fec_exit(void)
{
- of_unregister_platform_driver(&fs_enet_fec_mdio_driver);
+ platform_driver_unregister(&fs_enet_fec_mdio_driver);
}
module_init(fs_enet_mdio_fec_init);
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 8d3a2ccbc953..52f4e8ad48e7 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -265,8 +265,7 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
#endif
-static int fsl_pq_mdio_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int fsl_pq_mdio_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct device_node *tbi;
@@ -471,7 +470,7 @@ static struct of_device_id fsl_pq_mdio_match[] = {
};
MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
-static struct of_platform_driver fsl_pq_mdio_driver = {
+static struct platform_driver fsl_pq_mdio_driver = {
.driver = {
.name = "fsl-pq_mdio",
.owner = THIS_MODULE,
@@ -483,13 +482,13 @@ static struct of_platform_driver fsl_pq_mdio_driver = {
int __init fsl_pq_mdio_init(void)
{
- return of_register_platform_driver(&fsl_pq_mdio_driver);
+ return platform_driver_register(&fsl_pq_mdio_driver);
}
module_init(fsl_pq_mdio_init);
void fsl_pq_mdio_exit(void)
{
- of_unregister_platform_driver(&fsl_pq_mdio_driver);
+ platform_driver_unregister(&fsl_pq_mdio_driver);
}
module_exit(fsl_pq_mdio_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ftmac100.c b/drivers/net/ftmac100.c
new file mode 100644
index 000000000000..1d6f4b8d393a
--- /dev/null
+++ b/drivers/net/ftmac100.c
@@ -0,0 +1,1198 @@
+/*
+ * Faraday FTMAC100 10/100 Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "ftmac100.h"
+
+#define DRV_NAME "ftmac100"
+#define DRV_VERSION "0.2"
+
+#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */
+#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */
+
+#define MAX_PKT_SIZE 1518
+#define RX_BUF_SIZE 2044 /* must be smaller than 0x7ff */
+
+#if MAX_PKT_SIZE > 0x7ff
+#error invalid MAX_PKT_SIZE
+#endif
+
+#if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE
+#error invalid RX_BUF_SIZE
+#endif
+
+/******************************************************************************
+ * private data
+ *****************************************************************************/
+struct ftmac100_descs {
+ struct ftmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
+ struct ftmac100_txdes txdes[TX_QUEUE_ENTRIES];
+};
+
+struct ftmac100 {
+ struct resource *res;
+ void __iomem *base;
+ int irq;
+
+ struct ftmac100_descs *descs;
+ dma_addr_t descs_dma_addr;
+
+ unsigned int rx_pointer;
+ unsigned int tx_clean_pointer;
+ unsigned int tx_pointer;
+ unsigned int tx_pending;
+
+ spinlock_t tx_lock;
+
+ struct net_device *netdev;
+ struct device *dev;
+ struct napi_struct napi;
+
+ struct mii_if_info mii;
+};
+
+static int ftmac100_alloc_rx_page(struct ftmac100 *priv,
+ struct ftmac100_rxdes *rxdes, gfp_t gfp);
+
+/******************************************************************************
+ * internal functions (hardware register access)
+ *****************************************************************************/
+#define INT_MASK_ALL_ENABLED (FTMAC100_INT_RPKT_FINISH | \
+ FTMAC100_INT_NORXBUF | \
+ FTMAC100_INT_XPKT_OK | \
+ FTMAC100_INT_XPKT_LOST | \
+ FTMAC100_INT_RPKT_LOST | \
+ FTMAC100_INT_AHB_ERR | \
+ FTMAC100_INT_PHYSTS_CHG)
+
+#define INT_MASK_ALL_DISABLED 0
+
+static void ftmac100_enable_all_int(struct ftmac100 *priv)
+{
+ iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTMAC100_OFFSET_IMR);
+}
+
+static void ftmac100_disable_all_int(struct ftmac100 *priv)
+{
+ iowrite32(INT_MASK_ALL_DISABLED, priv->base + FTMAC100_OFFSET_IMR);
+}
+
+static void ftmac100_set_rx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
+{
+ iowrite32(addr, priv->base + FTMAC100_OFFSET_RXR_BADR);
+}
+
+static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
+{
+ iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR);
+}
+
+static void ftmac100_txdma_start_polling(struct ftmac100 *priv)
+{
+ iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD);
+}
+
+static int ftmac100_reset(struct ftmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ int i;
+
+ /* NOTE: reset clears all registers */
+ iowrite32(FTMAC100_MACCR_SW_RST, priv->base + FTMAC100_OFFSET_MACCR);
+
+ for (i = 0; i < 5; i++) {
+ unsigned int maccr;
+
+ maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
+ if (!(maccr & FTMAC100_MACCR_SW_RST)) {
+ /*
+ * FTMAC100_MACCR_SW_RST cleared does not indicate
+ * that hardware reset completed (what the f*ck).
+ * We still need to wait for a while.
+ */
+ usleep_range(500, 1000);
+ return 0;
+ }
+
+ usleep_range(1000, 10000);
+ }
+
+ netdev_err(netdev, "software reset failed\n");
+ return -EIO;
+}
+
+static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
+{
+ unsigned int maddr = mac[0] << 8 | mac[1];
+ unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
+
+ iowrite32(maddr, priv->base + FTMAC100_OFFSET_MAC_MADR);
+ iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR);
+}
+
+#define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \
+ FTMAC100_MACCR_RCV_EN | \
+ FTMAC100_MACCR_XDMA_EN | \
+ FTMAC100_MACCR_RDMA_EN | \
+ FTMAC100_MACCR_CRC_APD | \
+ FTMAC100_MACCR_FULLDUP | \
+ FTMAC100_MACCR_RX_RUNT | \
+ FTMAC100_MACCR_RX_BROADPKT)
+
+static int ftmac100_start_hw(struct ftmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+
+ if (ftmac100_reset(priv))
+ return -EIO;
+
+ /* setup ring buffer base registers */
+ ftmac100_set_rx_ring_base(priv,
+ priv->descs_dma_addr +
+ offsetof(struct ftmac100_descs, rxdes));
+ ftmac100_set_tx_ring_base(priv,
+ priv->descs_dma_addr +
+ offsetof(struct ftmac100_descs, txdes));
+
+ iowrite32(FTMAC100_APTC_RXPOLL_CNT(1), priv->base + FTMAC100_OFFSET_APTC);
+
+ ftmac100_set_mac(priv, netdev->dev_addr);
+
+ iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR);
+ return 0;
+}
+
+static void ftmac100_stop_hw(struct ftmac100 *priv)
+{
+ iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR);
+}
+
+/******************************************************************************
+ * internal functions (receive descriptor)
+ *****************************************************************************/
+static bool ftmac100_rxdes_first_segment(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FRS);
+}
+
+static bool ftmac100_rxdes_last_segment(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_LRS);
+}
+
+static bool ftmac100_rxdes_owned_by_dma(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
+}
+
+static void ftmac100_rxdes_set_dma_own(struct ftmac100_rxdes *rxdes)
+{
+ /* clear status bits */
+ rxdes->rxdes0 = cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
+}
+
+static bool ftmac100_rxdes_rx_error(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ERR);
+}
+
+static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR);
+}
+
+static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL);
+}
+
+static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT);
+}
+
+static bool ftmac100_rxdes_odd_nibble(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ODD_NB);
+}
+
+static unsigned int ftmac100_rxdes_frame_length(struct ftmac100_rxdes *rxdes)
+{
+ return le32_to_cpu(rxdes->rxdes0) & FTMAC100_RXDES0_RFL;
+}
+
+static bool ftmac100_rxdes_multicast(struct ftmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_MULTICAST);
+}
+
+static void ftmac100_rxdes_set_buffer_size(struct ftmac100_rxdes *rxdes,
+ unsigned int size)
+{
+ rxdes->rxdes1 &= cpu_to_le32(FTMAC100_RXDES1_EDORR);
+ rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_RXBUF_SIZE(size));
+}
+
+static void ftmac100_rxdes_set_end_of_ring(struct ftmac100_rxdes *rxdes)
+{
+ rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_EDORR);
+}
+
+static void ftmac100_rxdes_set_dma_addr(struct ftmac100_rxdes *rxdes,
+ dma_addr_t addr)
+{
+ rxdes->rxdes2 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftmac100_rxdes_get_dma_addr(struct ftmac100_rxdes *rxdes)
+{
+ return le32_to_cpu(rxdes->rxdes2);
+}
+
+/*
+ * rxdes3 is not used by the hardware. We use it to keep track of the
+ * receive page.
+ * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
+ */
+static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page)
+{
+ rxdes->rxdes3 = (unsigned int)page;
+}
+
+static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes)
+{
+ return (struct page *)rxdes->rxdes3;
+}
+
+/******************************************************************************
+ * internal functions (receive)
+ *****************************************************************************/
+static int ftmac100_next_rx_pointer(int pointer)
+{
+ return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
+}
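The mask-based wrap-around is equivalent to a modulo only because RX_QUEUE_ENTRIES (and TX_QUEUE_ENTRIES on the transmit side below) is a power of two. A sketch of how that assumption could be made explicit, e.g. at the top of ftmac100_alloc_buffers():

        /* sketch only: reject non-power-of-two ring sizes at compile time */
        BUILD_BUG_ON(RX_QUEUE_ENTRIES & (RX_QUEUE_ENTRIES - 1));
        BUILD_BUG_ON(TX_QUEUE_ENTRIES & (TX_QUEUE_ENTRIES - 1));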
+
+static void ftmac100_rx_pointer_advance(struct ftmac100 *priv)
+{
+ priv->rx_pointer = ftmac100_next_rx_pointer(priv->rx_pointer);
+}
+
+static struct ftmac100_rxdes *ftmac100_current_rxdes(struct ftmac100 *priv)
+{
+ return &priv->descs->rxdes[priv->rx_pointer];
+}
+
+static struct ftmac100_rxdes *
+ftmac100_rx_locate_first_segment(struct ftmac100 *priv)
+{
+ struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
+
+ while (!ftmac100_rxdes_owned_by_dma(rxdes)) {
+ if (ftmac100_rxdes_first_segment(rxdes))
+ return rxdes;
+
+ ftmac100_rxdes_set_dma_own(rxdes);
+ ftmac100_rx_pointer_advance(priv);
+ rxdes = ftmac100_current_rxdes(priv);
+ }
+
+ return NULL;
+}
+
+static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
+ struct ftmac100_rxdes *rxdes)
+{
+ struct net_device *netdev = priv->netdev;
+ bool error = false;
+
+ if (unlikely(ftmac100_rxdes_rx_error(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx err\n");
+
+ netdev->stats.rx_errors++;
+ error = true;
+ }
+
+ if (unlikely(ftmac100_rxdes_crc_error(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx crc err\n");
+
+ netdev->stats.rx_crc_errors++;
+ error = true;
+ }
+
+ if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx frame too long\n");
+
+ netdev->stats.rx_length_errors++;
+ error = true;
+ } else if (unlikely(ftmac100_rxdes_runt(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx runt\n");
+
+ netdev->stats.rx_length_errors++;
+ error = true;
+ } else if (unlikely(ftmac100_rxdes_odd_nibble(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx odd nibble\n");
+
+ netdev->stats.rx_length_errors++;
+ error = true;
+ }
+
+ return error;
+}
+
+static void ftmac100_rx_drop_packet(struct ftmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
+ bool done = false;
+
+ if (net_ratelimit())
+ netdev_dbg(netdev, "drop packet %p\n", rxdes);
+
+ do {
+ if (ftmac100_rxdes_last_segment(rxdes))
+ done = true;
+
+ ftmac100_rxdes_set_dma_own(rxdes);
+ ftmac100_rx_pointer_advance(priv);
+ rxdes = ftmac100_current_rxdes(priv);
+ } while (!done && !ftmac100_rxdes_owned_by_dma(rxdes));
+
+ netdev->stats.rx_dropped++;
+}
+
+static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftmac100_rxdes *rxdes;
+ struct sk_buff *skb;
+ struct page *page;
+ dma_addr_t map;
+ int length;
+
+ rxdes = ftmac100_rx_locate_first_segment(priv);
+ if (!rxdes)
+ return false;
+
+ if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) {
+ ftmac100_rx_drop_packet(priv);
+ return true;
+ }
+
+ /*
+ * Multi-segment packets cannot occur here because the receive
+ * buffers are always large enough to hold a full frame.
+ */
+ BUG_ON(!ftmac100_rxdes_last_segment(rxdes));
+
+ /* start processing */
+ skb = netdev_alloc_skb_ip_align(netdev, 128);
+ if (unlikely(!skb)) {
+ if (net_ratelimit())
+ netdev_err(netdev, "rx skb alloc failed\n");
+
+ ftmac100_rx_drop_packet(priv);
+ return true;
+ }
+
+ if (unlikely(ftmac100_rxdes_multicast(rxdes)))
+ netdev->stats.multicast++;
+
+ map = ftmac100_rxdes_get_dma_addr(rxdes);
+ dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ length = ftmac100_rxdes_frame_length(rxdes);
+ page = ftmac100_rxdes_get_page(rxdes);
+ skb_fill_page_desc(skb, 0, page, 0, length);
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ __pskb_pull_tail(skb, min(length, 64));
+
+ ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
+
+ ftmac100_rx_pointer_advance(priv);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += skb->len;
+
+ /* push packet to protocol stack */
+ netif_receive_skb(skb);
+
+ (*processed)++;
+ return true;
+}
+
+/******************************************************************************
+ * internal functions (transmit descriptor)
+ *****************************************************************************/
+static void ftmac100_txdes_reset(struct ftmac100_txdes *txdes)
+{
+ /* clear everything except the end-of-ring bit */
+ txdes->txdes0 = 0;
+ txdes->txdes1 &= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
+ txdes->txdes2 = 0;
+ txdes->txdes3 = 0;
+}
+
+static bool ftmac100_txdes_owned_by_dma(struct ftmac100_txdes *txdes)
+{
+ return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
+}
+
+static void ftmac100_txdes_set_dma_own(struct ftmac100_txdes *txdes)
+{
+ /*
+ * Make sure dma own bit will not be set before any other
+ * descriptor fields.
+ */
+ wmb();
+ txdes->txdes0 |= cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
+}
+
+static bool ftmac100_txdes_excessive_collision(struct ftmac100_txdes *txdes)
+{
+ return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_EXSCOL);
+}
+
+static bool ftmac100_txdes_late_collision(struct ftmac100_txdes *txdes)
+{
+ return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_LATECOL);
+}
+
+static void ftmac100_txdes_set_end_of_ring(struct ftmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
+}
+
+static void ftmac100_txdes_set_first_segment(struct ftmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_FTS);
+}
+
+static void ftmac100_txdes_set_last_segment(struct ftmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_LTS);
+}
+
+static void ftmac100_txdes_set_txint(struct ftmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXIC);
+}
+
+static void ftmac100_txdes_set_buffer_size(struct ftmac100_txdes *txdes,
+ unsigned int len)
+{
+ txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXBUF_SIZE(len));
+}
+
+static void ftmac100_txdes_set_dma_addr(struct ftmac100_txdes *txdes,
+ dma_addr_t addr)
+{
+ txdes->txdes2 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftmac100_txdes_get_dma_addr(struct ftmac100_txdes *txdes)
+{
+ return le32_to_cpu(txdes->txdes2);
+}
+
+/*
+ * txdes3 is not used by the hardware. We use it to keep track of the
+ * socket buffer.
+ * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
+ */
+static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb)
+{
+ txdes->txdes3 = (unsigned int)skb;
+}
+
+static struct sk_buff *ftmac100_txdes_get_skb(struct ftmac100_txdes *txdes)
+{
+ return (struct sk_buff *)txdes->txdes3;
+}
+
+/******************************************************************************
+ * internal functions (transmit)
+ *****************************************************************************/
+static int ftmac100_next_tx_pointer(int pointer)
+{
+ return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
+}
+
+static void ftmac100_tx_pointer_advance(struct ftmac100 *priv)
+{
+ priv->tx_pointer = ftmac100_next_tx_pointer(priv->tx_pointer);
+}
+
+static void ftmac100_tx_clean_pointer_advance(struct ftmac100 *priv)
+{
+ priv->tx_clean_pointer = ftmac100_next_tx_pointer(priv->tx_clean_pointer);
+}
+
+static struct ftmac100_txdes *ftmac100_current_txdes(struct ftmac100 *priv)
+{
+ return &priv->descs->txdes[priv->tx_pointer];
+}
+
+static struct ftmac100_txdes *ftmac100_current_clean_txdes(struct ftmac100 *priv)
+{
+ return &priv->descs->txdes[priv->tx_clean_pointer];
+}
+
+static bool ftmac100_tx_complete_packet(struct ftmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftmac100_txdes *txdes;
+ struct sk_buff *skb;
+ dma_addr_t map;
+
+ if (priv->tx_pending == 0)
+ return false;
+
+ txdes = ftmac100_current_clean_txdes(priv);
+
+ if (ftmac100_txdes_owned_by_dma(txdes))
+ return false;
+
+ skb = ftmac100_txdes_get_skb(txdes);
+ map = ftmac100_txdes_get_dma_addr(txdes);
+
+ if (unlikely(ftmac100_txdes_excessive_collision(txdes) ||
+ ftmac100_txdes_late_collision(txdes))) {
+ /*
+ * packet transmitted to ethernet lost due to late collision
+ * or excessive collision
+ */
+ netdev->stats.tx_aborted_errors++;
+ } else {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+ }
+
+ dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+
+ ftmac100_txdes_reset(txdes);
+
+ ftmac100_tx_clean_pointer_advance(priv);
+
+ spin_lock(&priv->tx_lock);
+ priv->tx_pending--;
+ spin_unlock(&priv->tx_lock);
+ netif_wake_queue(netdev);
+
+ return true;
+}
+
+static void ftmac100_tx_complete(struct ftmac100 *priv)
+{
+ while (ftmac100_tx_complete_packet(priv))
+ ;
+}
+
+static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb,
+ dma_addr_t map)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftmac100_txdes *txdes;
+ unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+
+ txdes = ftmac100_current_txdes(priv);
+ ftmac100_tx_pointer_advance(priv);
+
+ /* setup TX descriptor */
+ ftmac100_txdes_set_skb(txdes, skb);
+ ftmac100_txdes_set_dma_addr(txdes, map);
+
+ ftmac100_txdes_set_first_segment(txdes);
+ ftmac100_txdes_set_last_segment(txdes);
+ ftmac100_txdes_set_txint(txdes);
+ ftmac100_txdes_set_buffer_size(txdes, len);
+
+ spin_lock(&priv->tx_lock);
+ priv->tx_pending++;
+ if (priv->tx_pending == TX_QUEUE_ENTRIES)
+ netif_stop_queue(netdev);
+
+ /* start transmit */
+ ftmac100_txdes_set_dma_own(txdes);
+ spin_unlock(&priv->tx_lock);
+
+ ftmac100_txdma_start_polling(priv);
+ return NETDEV_TX_OK;
+}
+
+/******************************************************************************
+ * internal functions (buffer)
+ *****************************************************************************/
+static int ftmac100_alloc_rx_page(struct ftmac100 *priv,
+ struct ftmac100_rxdes *rxdes, gfp_t gfp)
+{
+ struct net_device *netdev = priv->netdev;
+ struct page *page;
+ dma_addr_t map;
+
+ page = alloc_page(gfp);
+ if (!page) {
+ if (net_ratelimit())
+ netdev_err(netdev, "failed to allocate rx page\n");
+ return -ENOMEM;
+ }
+
+ map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, map))) {
+ if (net_ratelimit())
+ netdev_err(netdev, "failed to map rx page\n");
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ ftmac100_rxdes_set_page(rxdes, page);
+ ftmac100_rxdes_set_dma_addr(rxdes, map);
+ ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE);
+ ftmac100_rxdes_set_dma_own(rxdes);
+ return 0;
+}
+
+static void ftmac100_free_buffers(struct ftmac100 *priv)
+{
+ int i;
+
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+ struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+ struct page *page = ftmac100_rxdes_get_page(rxdes);
+ dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes);
+
+ if (!page)
+ continue;
+
+ dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ __free_page(page);
+ }
+
+ for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+ struct ftmac100_txdes *txdes = &priv->descs->txdes[i];
+ struct sk_buff *skb = ftmac100_txdes_get_skb(txdes);
+ dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes);
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ }
+
+ dma_free_coherent(priv->dev, sizeof(struct ftmac100_descs),
+ priv->descs, priv->descs_dma_addr);
+}
+
+static int ftmac100_alloc_buffers(struct ftmac100 *priv)
+{
+ int i;
+
+ priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs),
+ &priv->descs_dma_addr, GFP_KERNEL);
+ if (!priv->descs)
+ return -ENOMEM;
+
+ memset(priv->descs, 0, sizeof(struct ftmac100_descs));
+
+ /* initialize RX ring */
+ ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
+
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+ struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+
+ if (ftmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL))
+ goto err;
+ }
+
+ /* initialize TX ring */
+ ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
+ return 0;
+
+err:
+ ftmac100_free_buffers(priv);
+ return -ENOMEM;
+}
+
+/******************************************************************************
+ * struct mii_if_info functions
+ *****************************************************************************/
+static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ unsigned int phycr;
+ int i;
+
+ phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
+ FTMAC100_PHYCR_REGAD(reg) |
+ FTMAC100_PHYCR_MIIRD;
+
+ iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
+
+ for (i = 0; i < 10; i++) {
+ phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
+
+ if ((phycr & FTMAC100_PHYCR_MIIRD) == 0)
+ return phycr & FTMAC100_PHYCR_MIIRDATA;
+
+ usleep_range(100, 1000);
+ }
+
+ netdev_err(netdev, "mdio read timed out\n");
+ return 0;
+}
+
+static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
+ int data)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ unsigned int phycr;
+ int i;
+
+ phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
+ FTMAC100_PHYCR_REGAD(reg) |
+ FTMAC100_PHYCR_MIIWR;
+
+ data = FTMAC100_PHYWDATA_MIIWDATA(data);
+
+ iowrite32(data, priv->base + FTMAC100_OFFSET_PHYWDATA);
+ iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
+
+ for (i = 0; i < 10; i++) {
+ phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
+
+ if ((phycr & FTMAC100_PHYCR_MIIWR) == 0)
+ return;
+
+ usleep_range(100, 1000);
+ }
+
+ netdev_err(netdev, "mdio write timed out\n");
+}
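Both MDIO helpers program PHYCR with the PHY address, register number and a read or write strobe, then poll until the hardware clears the strobe. They are only reached through the mii_if_info hooks wired up in ftmac100_probe(); a minimal usage sketch (assuming linux/mii.h for MII_PHYSID1/2) that reads the PHY identifier:

        /* sketch only: read the PHY ID through the registered MDIO hooks */
        static void ftmac100_example_dump_phy_id(struct net_device *netdev)
        {
                struct ftmac100 *priv = netdev_priv(netdev);
                int id1 = priv->mii.mdio_read(netdev, priv->mii.phy_id, MII_PHYSID1);
                int id2 = priv->mii.mdio_read(netdev, priv->mii.phy_id, MII_PHYSID2);

                netdev_info(netdev, "PHY ID %04x:%04x\n", id1, id2);
        }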
+
+/******************************************************************************
+ * struct ethtool_ops functions
+ *****************************************************************************/
+static void ftmac100_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, dev_name(&netdev->dev));
+}
+
+static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ return mii_ethtool_gset(&priv->mii, cmd);
+}
+
+static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ return mii_ethtool_sset(&priv->mii, cmd);
+}
+
+static int ftmac100_nway_reset(struct net_device *netdev)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ return mii_nway_restart(&priv->mii);
+}
+
+static u32 ftmac100_get_link(struct net_device *netdev)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ return mii_link_ok(&priv->mii);
+}
+
+static const struct ethtool_ops ftmac100_ethtool_ops = {
+ .set_settings = ftmac100_set_settings,
+ .get_settings = ftmac100_get_settings,
+ .get_drvinfo = ftmac100_get_drvinfo,
+ .nway_reset = ftmac100_nway_reset,
+ .get_link = ftmac100_get_link,
+};
+
+/******************************************************************************
+ * interrupt handler
+ *****************************************************************************/
+static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct ftmac100 *priv = netdev_priv(netdev);
+
+ if (likely(netif_running(netdev))) {
+ /* Disable interrupts for polling */
+ ftmac100_disable_all_int(priv);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
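The handler never acknowledges ISR itself; it only masks further interrupts and lets the NAPI poll routine below read and handle the status. ftmac100_enable_all_int()/ftmac100_disable_all_int() are defined earlier in the file; assuming they simply write the wanted FTMAC100_INT_* mask or zero to the IMR register, the disable side would look roughly like this sketch:

        /* hypothetical mirror of ftmac100_disable_all_int(): mask every source */
        static void example_mask_all_interrupts(struct ftmac100 *priv)
        {
                iowrite32(0, priv->base + FTMAC100_OFFSET_IMR);
        }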
+
+/******************************************************************************
+ * struct napi_struct functions
+ *****************************************************************************/
+static int ftmac100_poll(struct napi_struct *napi, int budget)
+{
+ struct ftmac100 *priv = container_of(napi, struct ftmac100, napi);
+ struct net_device *netdev = priv->netdev;
+ unsigned int status;
+ bool completed = true;
+ int rx = 0;
+
+ status = ioread32(priv->base + FTMAC100_OFFSET_ISR);
+
+ if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) {
+ /*
+ * FTMAC100_INT_RPKT_FINISH:
+ * RX DMA has received packets into RX buffer successfully
+ *
+ * FTMAC100_INT_NORXBUF:
+ * RX buffer unavailable
+ */
+ bool retry;
+
+ do {
+ retry = ftmac100_rx_packet(priv, &rx);
+ } while (retry && rx < budget);
+
+ if (retry && rx == budget)
+ completed = false;
+ }
+
+ if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) {
+ /*
+ * FTMAC100_INT_XPKT_OK:
+ * packet transmitted to ethernet successfully
+ *
+ * FTMAC100_INT_XPKT_LOST:
+ * packet transmitted to ethernet lost due to late
+ * collision or excessive collision
+ */
+ ftmac100_tx_complete(priv);
+ }
+
+ if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST |
+ FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) {
+ if (net_ratelimit())
+ netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
+ status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "",
+ status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
+ status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
+ status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
+
+ if (status & FTMAC100_INT_NORXBUF) {
+ /* RX buffer unavailable */
+ netdev->stats.rx_over_errors++;
+ }
+
+ if (status & FTMAC100_INT_RPKT_LOST) {
+ /* received packet lost due to RX FIFO full */
+ netdev->stats.rx_fifo_errors++;
+ }
+
+ if (status & FTMAC100_INT_PHYSTS_CHG) {
+ /* PHY link status change */
+ mii_check_link(&priv->mii);
+ }
+ }
+
+ if (completed) {
+ /* stop polling */
+ napi_complete(napi);
+ ftmac100_enable_all_int(priv);
+ }
+
+ return rx;
+}
+
+/******************************************************************************
+ * struct net_device_ops functions
+ *****************************************************************************/
+static int ftmac100_open(struct net_device *netdev)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ int err;
+
+ err = ftmac100_alloc_buffers(priv);
+ if (err) {
+ netdev_err(netdev, "failed to allocate buffers\n");
+ goto err_alloc;
+ }
+
+ err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev);
+ if (err) {
+ netdev_err(netdev, "failed to request irq %d\n", priv->irq);
+ goto err_irq;
+ }
+
+ priv->rx_pointer = 0;
+ priv->tx_clean_pointer = 0;
+ priv->tx_pointer = 0;
+ priv->tx_pending = 0;
+
+ err = ftmac100_start_hw(priv);
+ if (err)
+ goto err_hw;
+
+ napi_enable(&priv->napi);
+ netif_start_queue(netdev);
+
+ ftmac100_enable_all_int(priv);
+
+ return 0;
+
+err_hw:
+ free_irq(priv->irq, netdev);
+err_irq:
+ ftmac100_free_buffers(priv);
+err_alloc:
+ return err;
+}
+
+static int ftmac100_stop(struct net_device *netdev)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+
+ ftmac100_disable_all_int(priv);
+ netif_stop_queue(netdev);
+ napi_disable(&priv->napi);
+ ftmac100_stop_hw(priv);
+ free_irq(priv->irq, netdev);
+ ftmac100_free_buffers(priv);
+
+ return 0;
+}
+
+static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ dma_addr_t map;
+
+ if (unlikely(skb->len > MAX_PKT_SIZE)) {
+ if (net_ratelimit())
+ netdev_dbg(netdev, "tx packet too big\n");
+
+ netdev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, map))) {
+ /* drop packet */
+ if (net_ratelimit())
+ netdev_err(netdev, "map socket buffer failed\n");
+
+ netdev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ return ftmac100_xmit(priv, skb, map);
+}
+
+/* optional */
+static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ return generic_mii_ioctl(&priv->mii, data, cmd, NULL);
+}
+
+static const struct net_device_ops ftmac100_netdev_ops = {
+ .ndo_open = ftmac100_open,
+ .ndo_stop = ftmac100_stop,
+ .ndo_start_xmit = ftmac100_hard_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = ftmac100_do_ioctl,
+};
+
+/******************************************************************************
+ * struct platform_driver functions
+ *****************************************************************************/
+static int ftmac100_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int irq;
+ struct net_device *netdev;
+ struct ftmac100 *priv;
+ int err;
+
+ if (!pdev)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ /* setup net_device */
+ netdev = alloc_etherdev(sizeof(*priv));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
+ netdev->netdev_ops = &ftmac100_netdev_ops;
+
+ platform_set_drvdata(pdev, netdev);
+
+ /* setup private data */
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->dev = &pdev->dev;
+
+ spin_lock_init(&priv->tx_lock);
+
+ /* initialize NAPI */
+ netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
+
+ /* map io memory */
+ priv->res = request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev));
+ if (!priv->res) {
+ dev_err(&pdev->dev, "Could not reserve memory region\n");
+ err = -ENOMEM;
+ goto err_req_mem;
+ }
+
+ priv->base = ioremap(res->start, resource_size(res));
+ if (!priv->base) {
+ dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ priv->irq = irq;
+
+ /* initialize struct mii_if_info */
+ priv->mii.phy_id = 0;
+ priv->mii.phy_id_mask = 0x1f;
+ priv->mii.reg_num_mask = 0x1f;
+ priv->mii.dev = netdev;
+ priv->mii.mdio_read = ftmac100_mdio_read;
+ priv->mii.mdio_write = ftmac100_mdio_write;
+
+ /* register network device */
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto err_register_netdev;
+ }
+
+ netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ random_ether_addr(netdev->dev_addr);
+ netdev_info(netdev, "generated random MAC address %pM\n",
+ netdev->dev_addr);
+ }
+
+ return 0;
+
+err_register_netdev:
+ iounmap(priv->base);
+err_ioremap:
+ release_resource(priv->res);
+err_req_mem:
+ netif_napi_del(&priv->napi);
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_alloc_etherdev:
+ return err;
+}
+
+static int __exit ftmac100_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev;
+ struct ftmac100 *priv;
+
+ netdev = platform_get_drvdata(pdev);
+ priv = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+
+ iounmap(priv->base);
+ release_resource(priv->res);
+
+ netif_napi_del(&priv->napi);
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+ return 0;
+}
+
+static struct platform_driver ftmac100_driver = {
+ .probe = ftmac100_probe,
+ .remove = __exit_p(ftmac100_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+/******************************************************************************
+ * initialization / finalization
+ *****************************************************************************/
+static int __init ftmac100_init(void)
+{
+ pr_info("Loading version " DRV_VERSION " ...\n");
+ return platform_driver_register(&ftmac100_driver);
+}
+
+static void __exit ftmac100_exit(void)
+{
+ platform_driver_unregister(&ftmac100_driver);
+}
+
+module_init(ftmac100_init);
+module_exit(ftmac100_exit);
+
+MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
+MODULE_DESCRIPTION("FTMAC100 driver");
+MODULE_LICENSE("GPL");
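This is a plain platform driver, so board code has to register a matching platform device that carries one memory resource and one IRQ for ftmac100_probe() to claim. A minimal board-file sketch, assuming DRV_NAME is "ftmac100" and using made-up addresses:

        /* board-file sketch only; base address and IRQ number are hypothetical */
        static struct resource ftmac100_resources[] = {
                {
                        .start  = 0x90900000,
                        .end    = 0x90900fff,
                        .flags  = IORESOURCE_MEM,
                }, {
                        .start  = 25,
                        .end    = 25,
                        .flags  = IORESOURCE_IRQ,
                },
        };

        static struct platform_device ftmac100_device = {
                .name           = "ftmac100",   /* must match DRV_NAME */
                .id             = -1,
                .resource       = ftmac100_resources,
                .num_resources  = ARRAY_SIZE(ftmac100_resources),
        };

        /* registered from the machine init code: platform_device_register(&ftmac100_device); */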
diff --git a/drivers/net/ftmac100.h b/drivers/net/ftmac100.h
new file mode 100644
index 000000000000..46a0c47b1ee1
--- /dev/null
+++ b/drivers/net/ftmac100.h
@@ -0,0 +1,180 @@
+/*
+ * Faraday FTMAC100 10/100 Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __FTMAC100_H
+#define __FTMAC100_H
+
+#define FTMAC100_OFFSET_ISR 0x00
+#define FTMAC100_OFFSET_IMR 0x04
+#define FTMAC100_OFFSET_MAC_MADR 0x08
+#define FTMAC100_OFFSET_MAC_LADR 0x0c
+#define FTMAC100_OFFSET_MAHT0 0x10
+#define FTMAC100_OFFSET_MAHT1 0x14
+#define FTMAC100_OFFSET_TXPD 0x18
+#define FTMAC100_OFFSET_RXPD 0x1c
+#define FTMAC100_OFFSET_TXR_BADR 0x20
+#define FTMAC100_OFFSET_RXR_BADR 0x24
+#define FTMAC100_OFFSET_ITC 0x28
+#define FTMAC100_OFFSET_APTC 0x2c
+#define FTMAC100_OFFSET_DBLAC 0x30
+#define FTMAC100_OFFSET_MACCR 0x88
+#define FTMAC100_OFFSET_MACSR 0x8c
+#define FTMAC100_OFFSET_PHYCR 0x90
+#define FTMAC100_OFFSET_PHYWDATA 0x94
+#define FTMAC100_OFFSET_FCR 0x98
+#define FTMAC100_OFFSET_BPR 0x9c
+#define FTMAC100_OFFSET_TS 0xc4
+#define FTMAC100_OFFSET_DMAFIFOS 0xc8
+#define FTMAC100_OFFSET_TM 0xcc
+#define FTMAC100_OFFSET_TX_MCOL_SCOL 0xd4
+#define FTMAC100_OFFSET_RPF_AEP 0xd8
+#define FTMAC100_OFFSET_XM_PG 0xdc
+#define FTMAC100_OFFSET_RUNT_TLCC 0xe0
+#define FTMAC100_OFFSET_CRCER_FTL 0xe4
+#define FTMAC100_OFFSET_RLC_RCC 0xe8
+#define FTMAC100_OFFSET_BROC 0xec
+#define FTMAC100_OFFSET_MULCA 0xf0
+#define FTMAC100_OFFSET_RP 0xf4
+#define FTMAC100_OFFSET_XP 0xf8
+
+/*
+ * Interrupt status register & interrupt mask register
+ */
+#define FTMAC100_INT_RPKT_FINISH (1 << 0)
+#define FTMAC100_INT_NORXBUF (1 << 1)
+#define FTMAC100_INT_XPKT_FINISH (1 << 2)
+#define FTMAC100_INT_NOTXBUF (1 << 3)
+#define FTMAC100_INT_XPKT_OK (1 << 4)
+#define FTMAC100_INT_XPKT_LOST (1 << 5)
+#define FTMAC100_INT_RPKT_SAV (1 << 6)
+#define FTMAC100_INT_RPKT_LOST (1 << 7)
+#define FTMAC100_INT_AHB_ERR (1 << 8)
+#define FTMAC100_INT_PHYSTS_CHG (1 << 9)
+
+/*
+ * Interrupt timer control register
+ */
+#define FTMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0)
+#define FTMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4)
+#define FTMAC100_ITC_RXINT_TIME_SEL (1 << 7)
+#define FTMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8)
+#define FTMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12)
+#define FTMAC100_ITC_TXINT_TIME_SEL (1 << 15)
+
+/*
+ * Automatic polling timer control register
+ */
+#define FTMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0)
+#define FTMAC100_APTC_RXPOLL_TIME_SEL (1 << 4)
+#define FTMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8)
+#define FTMAC100_APTC_TXPOLL_TIME_SEL (1 << 12)
+
+/*
+ * DMA burst length and arbitration control register
+ */
+#define FTMAC100_DBLAC_INCR4_EN (1 << 0)
+#define FTMAC100_DBLAC_INCR8_EN (1 << 1)
+#define FTMAC100_DBLAC_INCR16_EN (1 << 2)
+#define FTMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 3)
+#define FTMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 6)
+#define FTMAC100_DBLAC_RX_THR_EN (1 << 9)
+
+/*
+ * MAC control register
+ */
+#define FTMAC100_MACCR_XDMA_EN (1 << 0)
+#define FTMAC100_MACCR_RDMA_EN (1 << 1)
+#define FTMAC100_MACCR_SW_RST (1 << 2)
+#define FTMAC100_MACCR_LOOP_EN (1 << 3)
+#define FTMAC100_MACCR_CRC_DIS (1 << 4)
+#define FTMAC100_MACCR_XMT_EN (1 << 5)
+#define FTMAC100_MACCR_ENRX_IN_HALFTX (1 << 6)
+#define FTMAC100_MACCR_RCV_EN (1 << 8)
+#define FTMAC100_MACCR_HT_MULTI_EN (1 << 9)
+#define FTMAC100_MACCR_RX_RUNT (1 << 10)
+#define FTMAC100_MACCR_RX_FTL (1 << 11)
+#define FTMAC100_MACCR_RCV_ALL (1 << 12)
+#define FTMAC100_MACCR_CRC_APD (1 << 14)
+#define FTMAC100_MACCR_FULLDUP (1 << 15)
+#define FTMAC100_MACCR_RX_MULTIPKT (1 << 16)
+#define FTMAC100_MACCR_RX_BROADPKT (1 << 17)
+
+/*
+ * PHY control register
+ */
+#define FTMAC100_PHYCR_MIIRDATA 0xffff
+#define FTMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16)
+#define FTMAC100_PHYCR_REGAD(x) (((x) & 0x1f) << 21)
+#define FTMAC100_PHYCR_MIIRD (1 << 26)
+#define FTMAC100_PHYCR_MIIWR (1 << 27)
+
+/*
+ * PHY write data register
+ */
+#define FTMAC100_PHYWDATA_MIIWDATA(x) ((x) & 0xffff)
+
+/*
+ * Transmit descriptor, aligned to 16 bytes
+ */
+struct ftmac100_txdes {
+ unsigned int txdes0;
+ unsigned int txdes1;
+ unsigned int txdes2; /* TXBUF_BADR */
+ unsigned int txdes3; /* not used by HW */
+} __attribute__ ((aligned(16)));
+
+#define FTMAC100_TXDES0_TXPKT_LATECOL (1 << 0)
+#define FTMAC100_TXDES0_TXPKT_EXSCOL (1 << 1)
+#define FTMAC100_TXDES0_TXDMA_OWN (1 << 31)
+
+#define FTMAC100_TXDES1_TXBUF_SIZE(x) ((x) & 0x7ff)
+#define FTMAC100_TXDES1_LTS (1 << 27)
+#define FTMAC100_TXDES1_FTS (1 << 28)
+#define FTMAC100_TXDES1_TX2FIC (1 << 29)
+#define FTMAC100_TXDES1_TXIC (1 << 30)
+#define FTMAC100_TXDES1_EDOTR (1 << 31)
+
+/*
+ * Receive descriptor, aligned to 16 bytes
+ */
+struct ftmac100_rxdes {
+ unsigned int rxdes0;
+ unsigned int rxdes1;
+ unsigned int rxdes2; /* RXBUF_BADR */
+ unsigned int rxdes3; /* not used by HW */
+} __attribute__ ((aligned(16)));
+
+#define FTMAC100_RXDES0_RFL 0x7ff
+#define FTMAC100_RXDES0_MULTICAST (1 << 16)
+#define FTMAC100_RXDES0_BROADCAST (1 << 17)
+#define FTMAC100_RXDES0_RX_ERR (1 << 18)
+#define FTMAC100_RXDES0_CRC_ERR (1 << 19)
+#define FTMAC100_RXDES0_FTL (1 << 20)
+#define FTMAC100_RXDES0_RUNT (1 << 21)
+#define FTMAC100_RXDES0_RX_ODD_NB (1 << 22)
+#define FTMAC100_RXDES0_LRS (1 << 28)
+#define FTMAC100_RXDES0_FRS (1 << 29)
+#define FTMAC100_RXDES0_RXDMA_OWN (1 << 31)
+
+#define FTMAC100_RXDES1_RXBUF_SIZE(x) ((x) & 0x7ff)
+#define FTMAC100_RXDES1_EDORR (1 << 31)
+
+#endif /* __FTMAC100_H */
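The descriptor bit definitions above mirror what the driver helpers encode and decode. As a quick illustration, checking a completed single-segment receive descriptor by hand would look like this sketch:

        /* sketch only: manual decode of a completed receive descriptor */
        static unsigned int example_rx_frame_length(const struct ftmac100_rxdes *rxdes)
        {
                u32 des0 = le32_to_cpu(rxdes->rxdes0);

                if ((des0 & FTMAC100_RXDES0_RXDMA_OWN) ||       /* still owned by DMA  */
                    !(des0 & FTMAC100_RXDES0_FRS) ||            /* not the first ...   */
                    !(des0 & FTMAC100_RXDES0_LRS))              /* ... or last segment */
                        return 0;

                return des0 & FTMAC100_RXDES0_RFL;              /* frame length in bytes */
        }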
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index d1bec6269173..ccb231c4d933 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -95,6 +95,7 @@
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
+#include <linux/of_net.h>
#include "gianfar.h"
#include "fsl_pq_mdio.h"
@@ -122,8 +123,7 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
-static int gfar_probe(struct platform_device *ofdev,
- const struct of_device_id *match);
+static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
@@ -143,7 +143,8 @@ void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
-static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
+static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+ const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
MODULE_AUTHOR("Freescale Semiconductor, Inc");
@@ -432,7 +433,6 @@ static void gfar_init_mac(struct net_device *ndev)
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct netdev_queue *txq;
unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
unsigned long tx_packets = 0, tx_bytes = 0;
int i = 0;
@@ -448,9 +448,8 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
dev->stats.rx_dropped = rx_dropped;
for (i = 0; i < priv->num_tx_queues; i++) {
- txq = netdev_get_tx_queue(dev, i);
- tx_bytes += txq->tx_bytes;
- tx_packets += txq->tx_packets;
+ tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+ tx_packets += priv->tx_queue[i]->stats.tx_packets;
}
dev->stats.tx_bytes = tx_bytes;
@@ -957,8 +956,7 @@ static void gfar_detect_errata(struct gfar_private *priv)
/* Set up the ethernet device structure, private data,
* and anything else we need before we start */
-static int gfar_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int gfar_probe(struct platform_device *ofdev)
{
u32 tempval;
struct net_device *dev = NULL;
@@ -1920,7 +1918,7 @@ int startup_gfar(struct net_device *ndev)
if (err) {
for (j = 0; j < i; j++)
free_grp_irqs(&priv->gfargrp[j]);
- goto irq_fail;
+ goto irq_fail;
}
}
@@ -2107,8 +2105,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Update transmit stats */
- txq->tx_bytes += skb->len;
- txq->tx_packets ++;
+ tx_queue->stats.tx_bytes += skb->len;
+ tx_queue->stats.tx_packets++;
txbdp = txbdp_start = tx_queue->cur_tx;
lstatus = txbdp->lstatus;
@@ -3094,10 +3092,10 @@ static void gfar_set_multi(struct net_device *dev)
static void gfar_clear_exact_match(struct net_device *dev)
{
int idx;
- u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};
+ static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
- gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
+ gfar_set_mac_for_addr(dev, idx, zero_arr);
}
/* Set the appropriate hash bit for the given addr */
@@ -3132,7 +3130,8 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
/* There are multiple MAC Address register pairs on some controllers
* This function sets the numth pair to a given address
*/
-static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
+static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+ const u8 *addr)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
@@ -3255,7 +3254,7 @@ static struct of_device_id gfar_match[] =
MODULE_DEVICE_TABLE(of, gfar_match);
/* Structure for a device driver */
-static struct of_platform_driver gfar_driver = {
+static struct platform_driver gfar_driver = {
.driver = {
.name = "fsl-gianfar",
.owner = THIS_MODULE,
@@ -3268,12 +3267,12 @@ static struct of_platform_driver gfar_driver = {
static int __init gfar_init(void)
{
- return of_register_platform_driver(&gfar_driver);
+ return platform_driver_register(&gfar_driver);
}
static void __exit gfar_exit(void)
{
- of_unregister_platform_driver(&gfar_driver);
+ platform_driver_unregister(&gfar_driver);
}
module_init(gfar_init);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 68984eb88ae0..54de4135e932 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -907,12 +907,21 @@ enum {
MQ_MG_MODE
};
+/*
+ * Per TX queue stats
+ */
+struct tx_q_stats {
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
/**
* struct gfar_priv_tx_q - per tx queue structure
* @txlock: per queue tx spin lock
* @tx_skbuff:skb pointers
* @skb_curtx: to be used skb pointer
* @skb_dirtytx:the last used skb pointer
+ * @stats: bytes/packets stats
* @qindex: index of this queue
* @dev: back pointer to the dev structure
* @grp: back pointer to the group to which this queue belongs
@@ -934,6 +943,7 @@ struct gfar_priv_tx_q {
struct txbd8 *tx_bd_base;
struct txbd8 *cur_tx;
struct txbd8 *dirty_tx;
+ struct tx_q_stats stats;
struct net_device *dev;
struct gfar_priv_grp *grp;
u16 skb_curtx;
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 27d6960ce09e..396ff7d785d1 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1,7 +1,7 @@
/*
* Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
*
- * 2005-2009 (c) Aeroflex Gaisler AB
+ * 2005-2010 (c) Aeroflex Gaisler AB
*
* This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
* available in the GRLIB VHDL IP core library.
@@ -356,6 +356,8 @@ static int greth_open(struct net_device *dev)
dev_dbg(&dev->dev, " starting queue\n");
netif_start_queue(dev);
+ GRETH_REGSAVE(greth->regs->status, 0xFF);
+
napi_enable(&greth->napi);
greth_enable_irqs(greth);
@@ -371,7 +373,9 @@ static int greth_close(struct net_device *dev)
napi_disable(&greth->napi);
+ greth_disable_irqs(greth);
greth_disable_tx(greth);
+ greth_disable_rx(greth);
netif_stop_queue(dev);
@@ -388,12 +392,20 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct greth_private *greth = netdev_priv(dev);
struct greth_bd *bdp;
int err = NETDEV_TX_OK;
- u32 status, dma_addr;
+ u32 status, dma_addr, ctrl;
+ unsigned long flags;
- bdp = greth->tx_bd_base + greth->tx_next;
+ /* Clean TX Ring */
+ greth_clean_tx(greth->netdev);
if (unlikely(greth->tx_free <= 0)) {
+ spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ /* Enable TX IRQ only if not already in poll() routine */
+ if (ctrl & GRETH_RXI)
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
netif_stop_queue(dev);
+ spin_unlock_irqrestore(&greth->devlock, flags);
return NETDEV_TX_BUSY;
}
@@ -406,13 +418,14 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ bdp = greth->tx_bd_base + greth->tx_next;
dma_addr = greth_read_bd(&bdp->addr);
memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
- status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);
+ status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
/* Wrap around descriptor ring */
if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -422,22 +435,11 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
greth->tx_next = NEXT_TX(greth->tx_next);
greth->tx_free--;
- /* No more descriptors */
- if (unlikely(greth->tx_free == 0)) {
-
- /* Free transmitted descriptors */
- greth_clean_tx(dev);
-
- /* If nothing was cleaned, stop queue & wait for irq */
- if (unlikely(greth->tx_free == 0)) {
- status |= GRETH_BD_IE;
- netif_stop_queue(dev);
- }
- }
-
/* Write descriptor control word and enable transmission */
greth_write_bd(&bdp->stat, status);
+ spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
greth_enable_tx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
out:
dev_kfree_skb(skb);
@@ -450,13 +452,23 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
struct greth_bd *bdp;
- u32 status = 0, dma_addr;
+ u32 status = 0, dma_addr, ctrl;
int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+ unsigned long flags;
nr_frags = skb_shinfo(skb)->nr_frags;
+ /* Clean TX Ring */
+ greth_clean_tx_gbit(dev);
+
if (greth->tx_free < nr_frags + 1) {
+ spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ /* Enable TX IRQ only if not already in poll() routine */
+ if (ctrl & GRETH_RXI)
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
netif_stop_queue(dev);
+ spin_unlock_irqrestore(&greth->devlock, flags);
err = NETDEV_TX_BUSY;
goto out;
}
@@ -499,7 +511,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
greth->tx_skbuff[curr_tx] = NULL;
bdp = greth->tx_bd_base + curr_tx;
- status = GRETH_TXBD_CSALL;
+ status = GRETH_TXBD_CSALL | GRETH_BD_EN;
status |= frag->size & GRETH_BD_LEN;
/* Wrap around descriptor ring */
@@ -509,14 +521,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
/* More fragments left */
if (i < nr_frags - 1)
status |= GRETH_TXBD_MORE;
-
- /* ... last fragment, check if out of descriptors */
- else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
-
- /* Enable interrupts and stop queue */
- status |= GRETH_BD_IE;
- netif_stop_queue(dev);
- }
+ else
+ status |= GRETH_BD_IE; /* enable IRQ on last fragment */
greth_write_bd(&bdp->stat, status);
@@ -536,26 +542,29 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
wmb();
- /* Enable the descriptors that we configured ... */
- for (i = 0; i < nr_frags + 1; i++) {
- bdp = greth->tx_bd_base + greth->tx_next;
- greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
- greth->tx_next = NEXT_TX(greth->tx_next);
- greth->tx_free--;
- }
+ /* Enable the descriptor chain by enabling the first descriptor */
+ bdp = greth->tx_bd_base + greth->tx_next;
+ greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+ greth->tx_next = curr_tx;
+ greth->tx_free -= nr_frags + 1;
+ wmb();
+
+ spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
greth_enable_tx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
return NETDEV_TX_OK;
frag_map_error:
- /* Unmap SKB mappings that succeeded */
+ /* Unmap SKB mappings that succeeded and disable descriptor */
for (i = 0; greth->tx_next + i != curr_tx; i++) {
bdp = greth->tx_bd_base + greth->tx_next + i;
dma_unmap_single(greth->dev,
greth_read_bd(&bdp->addr),
greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
DMA_TO_DEVICE);
+ greth_write_bd(&bdp->stat, 0);
}
map_error:
if (net_ratelimit())
@@ -565,12 +574,11 @@ out:
return err;
}
-
static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct greth_private *greth;
- u32 status;
+ u32 status, ctrl;
irqreturn_t retval = IRQ_NONE;
greth = netdev_priv(dev);
@@ -580,13 +588,15 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id)
/* Get the interrupt events that caused us to be here. */
status = GRETH_REGLOAD(greth->regs->status);
- /* Handle rx and tx interrupts through poll */
- if (status & (GRETH_INT_RX | GRETH_INT_TX)) {
-
- /* Clear interrupt status */
- GRETH_REGORIN(greth->regs->status,
- status & (GRETH_INT_RX | GRETH_INT_TX));
+ /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
+ * set regardless of whether IRQ is enabled or not. Especially
+ * important when shared IRQ.
+ */
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ /* Handle rx and tx interrupts through poll */
+ if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
+ ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
retval = IRQ_HANDLED;
/* Disable interrupts and schedule poll() */
@@ -610,6 +620,8 @@ static void greth_clean_tx(struct net_device *dev)
while (1) {
bdp = greth->tx_bd_base + greth->tx_last;
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+ mb();
stat = greth_read_bd(&bdp->stat);
if (unlikely(stat & GRETH_BD_EN))
@@ -670,7 +682,10 @@ static void greth_clean_tx_gbit(struct net_device *dev)
/* We only clean fully completed SKBs */
bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
- stat = bdp_last_frag->stat;
+
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+ mb();
+ stat = greth_read_bd(&bdp_last_frag->stat);
if (stat & GRETH_BD_EN)
break;
@@ -702,21 +717,9 @@ static void greth_clean_tx_gbit(struct net_device *dev)
greth->tx_free += nr_frags+1;
dev_kfree_skb(skb);
}
- if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
- netif_wake_queue(dev);
- }
-}
-static int greth_pending_packets(struct greth_private *greth)
-{
- struct greth_bd *bdp;
- u32 status;
- bdp = greth->rx_bd_base + greth->rx_cur;
- status = greth_read_bd(&bdp->stat);
- if (status & GRETH_BD_EN)
- return 0;
- else
- return 1;
+ if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
+ netif_wake_queue(dev);
}
static int greth_rx(struct net_device *dev, int limit)
@@ -727,20 +730,24 @@ static int greth_rx(struct net_device *dev, int limit)
int pkt_len;
int bad, count;
u32 status, dma_addr;
+ unsigned long flags;
greth = netdev_priv(dev);
for (count = 0; count < limit; ++count) {
bdp = greth->rx_bd_base + greth->rx_cur;
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+ mb();
status = greth_read_bd(&bdp->stat);
- dma_addr = greth_read_bd(&bdp->addr);
- bad = 0;
if (unlikely(status & GRETH_BD_EN)) {
break;
}
+ dma_addr = greth_read_bd(&bdp->addr);
+ bad = 0;
+
/* Check status for errors. */
if (unlikely(status & GRETH_RXBD_STATUS)) {
if (status & GRETH_RXBD_ERR_FT) {
@@ -802,7 +809,9 @@ static int greth_rx(struct net_device *dev, int limit)
dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+ spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
greth_enable_rx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
greth->rx_cur = NEXT_RX(greth->rx_cur);
}
@@ -836,6 +845,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
int pkt_len;
int bad, count = 0;
u32 status, dma_addr;
+ unsigned long flags;
greth = netdev_priv(dev);
@@ -843,6 +853,8 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
bdp = greth->rx_bd_base + greth->rx_cur;
skb = greth->rx_skbuff[greth->rx_cur];
+ GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+ mb();
status = greth_read_bd(&bdp->stat);
bad = 0;
@@ -865,10 +877,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
}
}
- /* Allocate new skb to replace current */
- newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);
-
- if (!bad && newskb) {
+ /* Allocate new skb to replace current, not needed if the
+ * current skb can be reused */
+ if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
skb_reserve(newskb, NET_IP_ALIGN);
dma_addr = dma_map_single(greth->dev,
@@ -905,11 +916,22 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
if (net_ratelimit())
dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
dev_kfree_skb(newskb);
+ /* reusing current skb, so it is a drop */
dev->stats.rx_dropped++;
}
+ } else if (bad) {
+ /* Bad Frame transfer, the skb is reused */
+ dev->stats.rx_dropped++;
} else {
+ /* Failed Allocating a new skb. This is rather stupid
+ * but the current "filled" skb is reused, as if
+ * transfer failure. One could argue that RX descriptor
+ * table handling should be divided into cleaning and
+ * filling as the TX part of the driver
+ */
if (net_ratelimit())
dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
+ /* reusing current skb, so it is a drop */
dev->stats.rx_dropped++;
}
@@ -920,7 +942,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
wmb();
greth_write_bd(&bdp->stat, status);
+ spin_lock_irqsave(&greth->devlock, flags);
greth_enable_rx(greth);
+ spin_unlock_irqrestore(&greth->devlock, flags);
greth->rx_cur = NEXT_RX(greth->rx_cur);
}
@@ -932,15 +956,18 @@ static int greth_poll(struct napi_struct *napi, int budget)
{
struct greth_private *greth;
int work_done = 0;
+ unsigned long flags;
+ u32 mask, ctrl;
greth = container_of(napi, struct greth_private, napi);
- if (greth->gbit_mac) {
- greth_clean_tx_gbit(greth->netdev);
- } else {
- greth_clean_tx(greth->netdev);
+restart_txrx_poll:
+ if (netif_queue_stopped(greth->netdev)) {
+ if (greth->gbit_mac)
+ greth_clean_tx_gbit(greth->netdev);
+ else
+ greth_clean_tx(greth->netdev);
}
-restart_poll:
if (greth->gbit_mac) {
work_done += greth_rx_gbit(greth->netdev, budget - work_done);
} else {
@@ -949,15 +976,29 @@ restart_poll:
if (work_done < budget) {
- napi_complete(napi);
+ spin_lock_irqsave(&greth->devlock, flags);
+
+ ctrl = GRETH_REGLOAD(greth->regs->control);
+ if (netif_queue_stopped(greth->netdev)) {
+ GRETH_REGSAVE(greth->regs->control,
+ ctrl | GRETH_TXI | GRETH_RXI);
+ mask = GRETH_INT_RX | GRETH_INT_RE |
+ GRETH_INT_TX | GRETH_INT_TE;
+ } else {
+ GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
+ mask = GRETH_INT_RX | GRETH_INT_RE;
+ }
- if (greth_pending_packets(greth)) {
- napi_reschedule(napi);
- goto restart_poll;
+ if (GRETH_REGLOAD(greth->regs->status) & mask) {
+ GRETH_REGSAVE(greth->regs->control, ctrl);
+ spin_unlock_irqrestore(&greth->devlock, flags);
+ goto restart_txrx_poll;
+ } else {
+ __napi_complete(napi);
+ spin_unlock_irqrestore(&greth->devlock, flags);
}
}
- greth_enable_irqs(greth);
return work_done;
}
@@ -1152,11 +1193,11 @@ static const struct ethtool_ops greth_ethtool_ops = {
};
static struct net_device_ops greth_netdev_ops = {
- .ndo_open = greth_open,
- .ndo_stop = greth_close,
- .ndo_start_xmit = greth_start_xmit,
- .ndo_set_mac_address = greth_set_mac_add,
- .ndo_validate_addr = eth_validate_addr,
+ .ndo_open = greth_open,
+ .ndo_stop = greth_close,
+ .ndo_start_xmit = greth_start_xmit,
+ .ndo_set_mac_address = greth_set_mac_add,
+ .ndo_validate_addr = eth_validate_addr,
};
static inline int wait_for_mdio(struct greth_private *greth)
@@ -1217,29 +1258,26 @@ static void greth_link_change(struct net_device *dev)
struct greth_private *greth = netdev_priv(dev);
struct phy_device *phydev = greth->phy;
unsigned long flags;
-
int status_change = 0;
+ u32 ctrl;
spin_lock_irqsave(&greth->devlock, flags);
if (phydev->link) {
if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
-
- GRETH_REGANDIN(greth->regs->control,
- ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));
+ ctrl = GRETH_REGLOAD(greth->regs->control) &
+ ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
if (phydev->duplex)
- GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD);
-
- if (phydev->speed == SPEED_100) {
-
- GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
- }
+ ctrl |= GRETH_CTRL_FD;
+ if (phydev->speed == SPEED_100)
+ ctrl |= GRETH_CTRL_SP;
else if (phydev->speed == SPEED_1000)
- GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB);
+ ctrl |= GRETH_CTRL_GB;
+ GRETH_REGSAVE(greth->regs->control, ctrl);
greth->speed = phydev->speed;
greth->duplex = phydev->duplex;
status_change = 1;
@@ -1373,7 +1411,7 @@ error:
}
/* Initialize the GRETH MAC */
-static int __devinit greth_of_probe(struct platform_device *ofdev, const struct of_device_id *match)
+static int __devinit greth_of_probe(struct platform_device *ofdev)
{
struct net_device *dev;
struct greth_private *greth;
@@ -1600,12 +1638,15 @@ static struct of_device_id greth_of_match[] = {
{
.name = "GAISLER_ETHMAC",
},
+ {
+ .name = "01_01d",
+ },
{},
};
MODULE_DEVICE_TABLE(of, greth_of_match);
-static struct of_platform_driver greth_of_driver = {
+static struct platform_driver greth_of_driver = {
.driver = {
.name = "grlib-greth",
.owner = THIS_MODULE,
@@ -1617,12 +1658,12 @@ static struct of_platform_driver greth_of_driver = {
static int __init greth_init(void)
{
- return of_register_platform_driver(&greth_of_driver);
+ return platform_driver_register(&greth_of_driver);
}
static void __exit greth_cleanup(void)
{
- of_unregister_platform_driver(&greth_of_driver);
+ platform_driver_unregister(&greth_of_driver);
}
module_init(greth_init);
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index 03ad903cd676..be0f2062bd14 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -23,6 +23,7 @@
#define GRETH_BD_LEN 0x7FF
#define GRETH_TXEN 0x1
+#define GRETH_INT_TE 0x2
#define GRETH_INT_TX 0x8
#define GRETH_TXI 0x4
#define GRETH_TXBD_STATUS 0x0001C000
@@ -35,6 +36,7 @@
#define GRETH_TXBD_ERR_UE 0x4000
#define GRETH_TXBD_ERR_AL 0x8000
+#define GRETH_INT_RE 0x1
#define GRETH_INT_RX 0x4
#define GRETH_RXEN 0x2
#define GRETH_RXI 0x8
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 9a6485892b3d..80d25ed53344 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1202,7 +1202,7 @@ static void hamachi_init_ring(struct net_device *dev)
}
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
hmp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
@@ -1669,7 +1669,7 @@ static int hamachi_rx(struct net_device *dev)
entry = hmp->dirty_rx % RX_RING_SIZE;
desc = &(hmp->rx_ring[entry]);
if (hmp->rx_skbuff[entry] == NULL) {
- struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
hmp->rx_skbuff[entry] = skb;
if (skb == NULL)
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ac1d323c5eb5..8931168d3e74 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -400,13 +400,14 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *p;
+ struct bpqdev *bpqdev = v;
++*pos;
if (v == SEQ_START_TOKEN)
- p = rcu_dereference(bpq_devices.next);
+ p = rcu_dereference(list_next_rcu(&bpq_devices));
else
- p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next);
+ p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
return (p == &bpq_devices) ? NULL
: list_entry(p, struct bpqdev, bpq_list);
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 4e7d1d0a2340..7d9ced0738c5 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -396,7 +396,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
while (p) {
if (p->bitrate == bitrate) {
memcpy(p->bits, bits, YAM_FPGA_SIZE);
- return p->bits;
+ goto out;
}
p = p->next;
}
@@ -411,7 +411,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
p->bitrate = bitrate;
p->next = yam_data;
yam_data = p;
-
+ out:
release_firmware(fw);
return p->bits;
}
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index d15d2f2ba78e..ef2014375e62 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -162,9 +162,9 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
/* Snarf the interrupt now. Someday this could be moved to open(). */
if (dev->irq < 2) {
- int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0};
- int irq_8list[] = { 7, 5, 3, 4, 9, 0};
- int *irqp = wordmode ? irq_16list : irq_8list;
+ static const int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0};
+ static const int irq_8list[] = { 7, 5, 3, 4, 9, 0};
+ const int *irqp = wordmode ? irq_16list : irq_8list;
do {
int irq = *irqp;
if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) {
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 06bb9b799458..3bb990b6651a 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1279,7 +1279,7 @@ static void emac_force_link_update(struct emac_instance *dev)
netif_carrier_off(dev->ndev);
smp_rmb();
if (dev->link_polling) {
- cancel_rearming_delayed_work(&dev->link_work);
+ cancel_delayed_work_sync(&dev->link_work);
if (dev->link_polling)
schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
}
@@ -1294,7 +1294,7 @@ static int emac_close(struct net_device *ndev)
if (dev->phy.address >= 0) {
dev->link_polling = 0;
- cancel_rearming_delayed_work(&dev->link_work);
+ cancel_delayed_work_sync(&dev->link_work);
}
mutex_lock(&dev->link_lock);
emac_netif_stop(dev);
@@ -2719,8 +2719,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
.ndo_change_mtu = emac_change_mtu,
};
-static int __devinit emac_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit emac_probe(struct platform_device *ofdev)
{
struct net_device *ndev;
struct emac_instance *dev;
@@ -2950,7 +2949,7 @@ static int __devexit emac_remove(struct platform_device *ofdev)
unregister_netdev(dev->ndev);
- flush_scheduled_work();
+ cancel_work_sync(&dev->reset_work);
if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
tah_detach(dev->tah_dev, dev->tah_port);
@@ -2994,7 +2993,7 @@ static struct of_device_id emac_match[] =
};
MODULE_DEVICE_TABLE(of, emac_match);
-static struct of_platform_driver emac_driver = {
+static struct platform_driver emac_driver = {
.driver = {
.name = "emac",
.owner = THIS_MODULE,
@@ -3069,7 +3068,7 @@ static int __init emac_init(void)
rc = tah_init();
if (rc)
goto err_rgmii;
- rc = of_register_platform_driver(&emac_driver);
+ rc = platform_driver_register(&emac_driver);
if (rc)
goto err_tah;
@@ -3091,7 +3090,7 @@ static void __exit emac_exit(void)
{
int i;
- of_unregister_platform_driver(&emac_driver);
+ platform_driver_unregister(&emac_driver);
tah_exit();
rgmii_exit();
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index d5717e2123e1..d268f404b7b0 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -517,8 +517,7 @@ void *mal_dump_regs(struct mal_instance *mal, void *buf)
return regs + 1;
}
-static int __devinit mal_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit mal_probe(struct platform_device *ofdev)
{
struct mal_instance *mal;
int err = 0, i, bd_size;
@@ -789,7 +788,7 @@ static struct of_device_id mal_platform_match[] =
{},
};
-static struct of_platform_driver mal_of_driver = {
+static struct platform_driver mal_of_driver = {
.driver = {
.name = "mcmal",
.owner = THIS_MODULE,
@@ -801,10 +800,10 @@ static struct of_platform_driver mal_of_driver = {
int __init mal_init(void)
{
- return of_register_platform_driver(&mal_of_driver);
+ return platform_driver_register(&mal_of_driver);
}
void mal_exit(void)
{
- of_unregister_platform_driver(&mal_of_driver);
+ platform_driver_unregister(&mal_of_driver);
}
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index dd61798897ac..4fa53f3def64 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -228,8 +228,7 @@ void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
}
-static int __devinit rgmii_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit rgmii_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct rgmii_instance *dev;
@@ -318,7 +317,7 @@ static struct of_device_id rgmii_match[] =
{},
};
-static struct of_platform_driver rgmii_driver = {
+static struct platform_driver rgmii_driver = {
.driver = {
.name = "emac-rgmii",
.owner = THIS_MODULE,
@@ -330,10 +329,10 @@ static struct of_platform_driver rgmii_driver = {
int __init rgmii_init(void)
{
- return of_register_platform_driver(&rgmii_driver);
+ return platform_driver_register(&rgmii_driver);
}
void rgmii_exit(void)
{
- of_unregister_platform_driver(&rgmii_driver);
+ platform_driver_unregister(&rgmii_driver);
}
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index 299aa49490c0..8ead6a96abaa 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -87,8 +87,7 @@ void *tah_dump_regs(struct platform_device *ofdev, void *buf)
return regs + 1;
}
-static int __devinit tah_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit tah_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct tah_instance *dev;
@@ -165,7 +164,7 @@ static struct of_device_id tah_match[] =
{},
};
-static struct of_platform_driver tah_driver = {
+static struct platform_driver tah_driver = {
.driver = {
.name = "emac-tah",
.owner = THIS_MODULE,
@@ -177,10 +176,10 @@ static struct of_platform_driver tah_driver = {
int __init tah_init(void)
{
- return of_register_platform_driver(&tah_driver);
+ return platform_driver_register(&tah_driver);
}
void tah_exit(void)
{
- of_unregister_platform_driver(&tah_driver);
+ platform_driver_unregister(&tah_driver);
}
diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
index 34ed6ee8ca8a..97449e786d61 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ibm_newemac/zmii.c
@@ -231,8 +231,7 @@ void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
return regs + 1;
}
-static int __devinit zmii_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit zmii_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct zmii_instance *dev;
@@ -312,7 +311,7 @@ static struct of_device_id zmii_match[] =
{},
};
-static struct of_platform_driver zmii_driver = {
+static struct platform_driver zmii_driver = {
.driver = {
.name = "emac-zmii",
.owner = THIS_MODULE,
@@ -324,10 +323,10 @@ static struct of_platform_driver zmii_driver = {
int __init zmii_init(void)
{
- return of_register_platform_driver(&zmii_driver);
+ return platform_driver_register(&zmii_driver);
}
void zmii_exit(void)
{
- of_unregister_platform_driver(&zmii_driver);
+ platform_driver_unregister(&zmii_driver);
}
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index c454b45ca7ec..5522d459654c 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -729,11 +729,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
sizeof(info->version) - 1);
}
-static u32 netdev_get_link(struct net_device *dev)
-{
- return 1;
-}
-
static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
@@ -918,7 +913,7 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_settings = netdev_get_settings,
- .get_link = netdev_get_link,
+ .get_link = ethtool_op_get_link,
.set_tx_csum = ibmveth_set_tx_csum,
.get_rx_csum = ibmveth_get_rx_csum,
.set_rx_csum = ibmveth_set_rx_csum,
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index ab9f675c5b8b..e07d487f015a 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -36,22 +36,10 @@
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
-#define TX_TIMEOUT (2*HZ)
-
#define TX_Q_LIMIT 32
struct ifb_private {
struct tasklet_struct ifb_tasklet;
int tasklet_pending;
- /* mostly debug stats leave in for now */
- unsigned long st_task_enter; /* tasklet entered */
- unsigned long st_txq_refl_try; /* transmit queue refill attempt */
- unsigned long st_rxq_enter; /* receive queue entered */
- unsigned long st_rx2tx_tran; /* receive to trasmit transfers */
- unsigned long st_rxq_notenter; /*receiveQ not entered, resched */
- unsigned long st_rx_frm_egr; /* received from egress path */
- unsigned long st_rx_frm_ing; /* received from ingress path */
- unsigned long st_rxq_check;
- unsigned long st_rxq_rsch;
struct sk_buff_head rq;
struct sk_buff_head tq;
};
@@ -73,24 +61,17 @@ static void ri_tasklet(unsigned long dev)
struct sk_buff *skb;
txq = netdev_get_tx_queue(_dev, 0);
- dp->st_task_enter++;
if ((skb = skb_peek(&dp->tq)) == NULL) {
- dp->st_txq_refl_try++;
if (__netif_tx_trylock(txq)) {
- dp->st_rxq_enter++;
- while ((skb = skb_dequeue(&dp->rq)) != NULL) {
- skb_queue_tail(&dp->tq, skb);
- dp->st_rx2tx_tran++;
- }
+ skb_queue_splice_tail_init(&dp->rq, &dp->tq);
__netif_tx_unlock(txq);
} else {
/* reschedule */
- dp->st_rxq_notenter++;
goto resched;
}
}
- while ((skb = skb_dequeue(&dp->tq)) != NULL) {
+ while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
u32 from = G_TC_FROM(skb->tc_verd);
skb->tc_verd = 0;
@@ -104,30 +85,28 @@ static void ri_tasklet(unsigned long dev)
rcu_read_unlock();
dev_kfree_skb(skb);
stats->tx_dropped++;
+ if (skb_queue_len(&dp->tq) != 0)
+ goto resched;
break;
}
rcu_read_unlock();
skb->skb_iif = _dev->ifindex;
if (from & AT_EGRESS) {
- dp->st_rx_frm_egr++;
dev_queue_xmit(skb);
} else if (from & AT_INGRESS) {
- dp->st_rx_frm_ing++;
skb_pull(skb, skb->dev->hard_header_len);
- netif_rx(skb);
+ netif_receive_skb(skb);
} else
BUG();
}
if (__netif_tx_trylock(txq)) {
- dp->st_rxq_check++;
if ((skb = skb_peek(&dp->rq)) == NULL) {
dp->tasklet_pending = 0;
if (netif_queue_stopped(_dev))
netif_wake_queue(_dev);
} else {
- dp->st_rxq_rsch++;
__netif_tx_unlock(txq);
goto resched;
}
@@ -147,6 +126,10 @@ static const struct net_device_ops ifb_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+#define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
+ NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
+ NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX)
+
static void ifb_setup(struct net_device *dev)
{
/* Initialize the device structure. */
@@ -157,6 +140,9 @@ static void ifb_setup(struct net_device *dev)
ether_setup(dev);
dev->tx_queue_len = TX_Q_LIMIT;
+ dev->features |= IFB_FEATURES;
+ dev->vlan_features |= IFB_FEATURES;
+
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@ -182,7 +168,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- skb_queue_tail(&dp->rq, skb);
+ __skb_queue_tail(&dp->rq, skb);
if (!dp->tasklet_pending) {
dp->tasklet_pending = 1;
tasklet_schedule(&dp->ifb_tasklet);
@@ -197,8 +183,8 @@ static int ifb_close(struct net_device *dev)
tasklet_kill(&dp->ifb_tasklet);
netif_stop_queue(dev);
- skb_queue_purge(&dp->rq);
- skb_queue_purge(&dp->tq);
+ __skb_queue_purge(&dp->rq);
+ __skb_queue_purge(&dp->tq);
return 0;
}
@@ -207,8 +193,8 @@ static int ifb_open(struct net_device *dev)
struct ifb_private *dp = netdev_priv(dev);
tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
- skb_queue_head_init(&dp->rq);
- skb_queue_head_init(&dp->tq);
+ __skb_queue_head_init(&dp->rq);
+ __skb_queue_head_init(&dp->tq);
netif_start_queue(dev);
return 0;
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index bc183f5487cb..6b256c275e10 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -64,7 +64,14 @@ static s32 igb_reset_init_script_82575(struct e1000_hw *);
static s32 igb_read_mac_addr_82575(struct e1000_hw *);
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
-
+static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw,
+ u16 offset);
+static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+ u16 offset);
+static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
+static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
static const u16 e1000_82580_rxpbs_table[] =
{ 36, 72, 144, 1, 2, 4, 8, 16,
35, 70, 140 };
@@ -129,11 +136,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
case E1000_DEV_ID_82580_COPPER:
case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
case E1000_DEV_ID_82580_SERDES:
case E1000_DEV_ID_82580_SGMII:
case E1000_DEV_ID_82580_COPPER_DUAL:
case E1000_DEV_ID_DH89XXCC_SGMII:
case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
mac->type = e1000_82580;
break;
case E1000_DEV_ID_I350_COPPER:
@@ -192,7 +202,11 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
mac->arc_subsystem_valid =
(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
-
+ /* enable EEE on i350 parts */
+ if (mac->type == e1000_i350)
+ dev_spec->eee_disable = false;
+ else
+ dev_spec->eee_disable = true;
/* physical interface link setup */
mac->ops.setup_physical_interface =
(hw->phy.media_type == e1000_media_type_copper)
@@ -230,14 +244,42 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
- /* EEPROM access above 16k is unsupported */
- if (size > 14)
- size = 14;
nvm->word_size = 1 << size;
+ if (nvm->word_size == (1 << 15))
+ nvm->page_size = 128;
- /* if 82576 then initialize mailbox parameters */
- if (mac->type == e1000_82576)
+ /* NVM Function Pointers */
+ nvm->ops.acquire = igb_acquire_nvm_82575;
+ if (nvm->word_size < (1 << 15))
+ nvm->ops.read = igb_read_nvm_eerd;
+ else
+ nvm->ops.read = igb_read_nvm_spi;
+
+ nvm->ops.release = igb_release_nvm_82575;
+ switch (hw->mac.type) {
+ case e1000_82580:
+ nvm->ops.validate = igb_validate_nvm_checksum_82580;
+ nvm->ops.update = igb_update_nvm_checksum_82580;
+ break;
+ case e1000_i350:
+ nvm->ops.validate = igb_validate_nvm_checksum_i350;
+ nvm->ops.update = igb_update_nvm_checksum_i350;
+ break;
+ default:
+ nvm->ops.validate = igb_validate_nvm_checksum;
+ nvm->ops.update = igb_update_nvm_checksum;
+ }
+ nvm->ops.write = igb_write_nvm_spi;
+
+ /* if part supports SR-IOV then initialize mailbox parameters */
+ switch (mac->type) {
+ case e1000_82576:
+ case e1000_i350:
igb_init_mbx_params_pf(hw);
+ break;
+ default:
+ break;
+ }
/* setup PHY parameters */
if (phy->media_type != e1000_media_type_copper) {
@@ -1478,6 +1520,39 @@ out:
}
/**
+ * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ * @pf: Physical Function pool - do not set anti-spoofing for the PF
+ *
+ * enables/disables L2 switch anti-spoofing functionality.
+ **/
+void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
+{
+ u32 dtxswc;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ dtxswc = rd32(E1000_DTXSWC);
+ if (enable) {
+ dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ /* The PF can spoof - it has to in order to
+ * support emulation mode NICs */
+ dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+ } else {
+ dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+ E1000_DTXSWC_VLAN_SPOOF_MASK);
+ }
+ wr32(E1000_DTXSWC, dtxswc);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
* igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
* @hw: pointer to the hardware struct
* @enable: state to enter, either enabled or disabled
@@ -1578,7 +1653,7 @@ static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
{
s32 ret_val = 0;
u32 mdicnfg;
- u16 nvm_data;
+ u16 nvm_data = 0;
if (hw->mac.type != e1000_82580)
goto out;
@@ -1712,6 +1787,248 @@ u16 igb_rxpbs_adjust_82580(u32 data)
return ret_val;
}
+/**
+ * igb_validate_nvm_checksum_with_offset - Validate EEPROM
+ * checksum
+ * @hw: pointer to the HW structure
+ * @offset: offset in words of the checksum protected region
+ *
+ * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ * and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+ s32 ret_val = 0;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+
+ if (checksum != (u16) NVM_SUM) {
+ hw_dbg("NVM Checksum Invalid\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_update_nvm_checksum_with_offset - Update EEPROM
+ * checksum
+ * @hw: pointer to the HW structure
+ * @offset: offset in words of the checksum protected region
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+ s32 ret_val;
+ u16 checksum = 0;
+ u16 i, nvm_data;
+
+ for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error while updating checksum.\n");
+ goto out;
+ }
+ checksum += nvm_data;
+ }
+ checksum = (u16) NVM_SUM - checksum;
+ ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
+ &checksum);
+ if (ret_val)
+ hw_dbg("NVM Write Error while updating checksum.\n");
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM section checksum by reading/adding each word of
+ * the EEPROM and then verifies that the sum of the EEPROM is
+ * equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 eeprom_regions_count = 1;
+ u16 j, nvm_data;
+ u16 nvm_offset;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
+ /* if checksum compatibility bit is set, validate checksums
+ * for all 4 ports. */
+ eeprom_regions_count = 4;
+ }
+
+ for (j = 0; j < eeprom_regions_count; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = igb_validate_nvm_checksum_with_offset(hw,
+ nvm_offset);
+ if (ret_val != 0)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_update_nvm_checksum_82580 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM section checksums for all 4 ports by reading/adding
+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+ * checksum and writes the value to the EEPROM.
+ **/
+static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 j, nvm_data;
+ u16 nvm_offset;
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error while updating checksum"
+ " compatibility bit.\n");
+ goto out;
+ }
+
+ if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
+ /* set compatibility bit to validate checksums appropriately */
+ nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
+ ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
+ &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Write Error while updating checksum"
+ " compatibility bit.\n");
+ goto out;
+ }
+ }
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM section checksum by reading/adding each word of
+ * the EEPROM and then verifies that the sum of the EEPROM is
+ * equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 j;
+ u16 nvm_offset;
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = igb_validate_nvm_checksum_with_offset(hw,
+ nvm_offset);
+ if (ret_val != 0)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * igb_update_nvm_checksum_i350 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM section checksums for all 4 ports by reading/adding
+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+ * checksum and writes the value to the EEPROM.
+ **/
+static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 j;
+ u16 nvm_offset;
+
+ for (j = 0; j < 4; j++) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+ ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
+ if (ret_val != 0)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+/**
+ * igb_set_eee_i350 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ *
+ * Enable/disable EEE based on setting in dev_spec structure.
+ *
+ **/
+s32 igb_set_eee_i350(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u32 ipcnfg, eeer, ctrl_ext;
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ if ((hw->mac.type != e1000_i350) ||
+ (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
+ goto out;
+ ipcnfg = rd32(E1000_IPCNFG);
+ eeer = rd32(E1000_EEER);
+
+ /* enable or disable per user setting */
+ if (!(hw->dev_spec._82575.eee_disable)) {
+ ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
+ E1000_IPCNFG_EEE_100M_AN);
+ eeer |= (E1000_EEER_TX_LPI_EN |
+ E1000_EEER_RX_LPI_EN |
+ E1000_EEER_LPI_FC);
+
+ } else {
+ ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
+ E1000_IPCNFG_EEE_100M_AN);
+ eeer &= ~(E1000_EEER_TX_LPI_EN |
+ E1000_EEER_RX_LPI_EN |
+ E1000_EEER_LPI_FC);
+ }
+ wr32(E1000_IPCNFG, ipcnfg);
+ wr32(E1000_EEER, eeer);
+out:
+
+ return ret_val;
+}
+
static struct e1000_mac_operations e1000_mac_ops_82575 = {
.init_hw = igb_init_hw_82575,
.check_for_link = igb_check_for_link_82575,
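The 82580/i350 checksum helpers added above all follow one convention: the 0x40 words of a checksum-protected region, including the checksum word at NVM_CHECKSUM_REG, must sum to NVM_SUM (0xBABA), so the update path stores NVM_SUM minus the sum of the other words. A minimal stand-alone sketch of that rule (illustrative only, not driver code; nvm_checksum() is a hypothetical helper reusing the NVM_CHECKSUM_REG/NVM_SUM values from e1000_defines.h):

#include <stdint.h>
#include <stdio.h>

#define NVM_CHECKSUM_REG	0x003F
#define NVM_SUM			0xBABA

/* checksum word that makes the 0x40-word region sum to NVM_SUM */
static uint16_t nvm_checksum(const uint16_t *region)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_CHECKSUM_REG; i++)
		sum += region[i];
	return (uint16_t)(NVM_SUM - sum);
}

int main(void)
{
	uint16_t region[NVM_CHECKSUM_REG + 1] = { 0x1234, 0x5678 };

	region[NVM_CHECKSUM_REG] = nvm_checksum(region);
	printf("checksum word = 0x%04x\n", region[NVM_CHECKSUM_REG]); /* 0x520e */
	return 0;
}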
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index cbd1e1259e4d..dd6df3498998 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -194,6 +194,10 @@ struct e1000_adv_tx_context_desc {
#define E1000_NVM_APME_82575 0x0400
#define MAX_NUM_VFS 8
+#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */
+#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
+#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
+#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
/* Easy defines for setting default pool, would normally be left a zero */
@@ -243,8 +247,10 @@ struct e1000_adv_tx_context_desc {
/* RX packet buffer size defines */
#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
+void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
u16 igb_rxpbs_adjust_82580(u32 data);
+s32 igb_set_eee_i350(struct e1000_hw *);
#endif
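igb_vmdq_set_anti_spoofing_pf() above sets one MAC-spoof-check bit and one VLAN-spoof-check bit per pool and then clears the two bits that belong to the PF pool (the XOR acts as a clear here because the preceding OR just set those bits). A user-space sketch of the resulting register value, assuming the E1000_DTXSWC_* masks and MAX_NUM_VFS defined just above; dtxswc_enable() is a hypothetical helper, not driver code:

#include <stdint.h>
#include <stdio.h>

#define DTXSWC_MAC_SPOOF_MASK	0x000000FFu	/* per-VF MAC spoof check, bits 7:0 */
#define DTXSWC_VLAN_SPOOF_MASK	0x0000FF00u	/* per-VF VLAN spoof check, bits 15:8 */
#define MAX_NUM_VFS		8

/* value written to DTXSWC when anti-spoofing is enabled and 'pf' is the PF pool */
static uint32_t dtxswc_enable(uint32_t dtxswc, int pf)
{
	dtxswc |= DTXSWC_MAC_SPOOF_MASK | DTXSWC_VLAN_SPOOF_MASK;
	dtxswc &= ~((1u << pf) | (1u << (pf + MAX_NUM_VFS)));
	return dtxswc;
}

int main(void)
{
	/* PF in pool 0: bits 0 and 8 stay clear -> 0x0000fefe */
	printf("0x%08x\n", dtxswc_enable(0, 0));
	return 0;
}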
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 62222796a8b3..6b80d40110ca 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -51,6 +51,7 @@
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
#define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_IRCA 0x00000001
/* Interrupt delay cancellation */
@@ -110,6 +111,7 @@
/* Management Control */
#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */
/* Enable Neighbor Discovery Filtering */
#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
@@ -286,7 +288,34 @@
#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
-/* Transmit Arbitration Count */
+/* DMA Coalescing register fields */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
+ * Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
+ * Threshold */
+#define E1000_DMACR_DMACTHR_SHIFT 16
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
+ * transactions */
+#define E1000_DMACR_DMAC_LX_SHIFT 28
+#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
+
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
+ * Threshold */
+
+#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
+
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
+ * Threshold */
+#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
+ * current window */
+
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
+ * Current Cnt */
+
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
+ * High val */
+#define E1000_FCRTC_RTH_COAL_SHIFT 4
+#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
@@ -419,6 +448,9 @@
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
#define E1000_ERR_MBX 15
+#define E1000_ERR_INVALID_ARGUMENT 16
+#define E1000_ERR_NO_SPACE 17
+#define E1000_ERR_NVM_PBA_SECTION 18
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
@@ -562,6 +594,8 @@
#define NVM_INIT_CONTROL3_PORT_A 0x0024
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
+#define NVM_COMPATIBILITY_REG_3 0x0003
+#define NVM_COMPATIBILITY_BIT_MASK 0x8000
#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
@@ -580,11 +614,15 @@
/* Mask bits for fields in Word 0x1a of the NVM */
+/* length of string needed to store part num */
+#define E1000_PBANUM_LENGTH 11
+
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
+#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_WORD_SIZE_BASE_SHIFT 6
/* NVM Commands - Microwire */
@@ -592,6 +630,7 @@
/* NVM Commands - SPI */
#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
+#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
@@ -750,6 +789,17 @@
#define E1000_MDIC_ERROR 0x40000000
#define E1000_MDIC_DEST 0x80000000
+/* Thermal Sensor */
+#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
+#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */
+
+/* Energy Efficient Ethernet */
+#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */
+#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */
+#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */
+#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */
+#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
+
/* SerDes Control */
#define E1000_GEN_CTL_READY 0x80000000
#define E1000_GEN_CTL_ADDRESS_SHIFT 8
@@ -763,4 +813,11 @@
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
on DMA coal */
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA 0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT 14
+#define E1000_RTTBCNRC_RF_INT_MASK \
+ (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+
#endif
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index c0b017f8d782..27153e8d7b16 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,8 +54,11 @@ struct e1000_hw;
#define E1000_DEV_ID_82580_SERDES 0x1510
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
-#define E1000_DEV_ID_DH89XXCC_SGMII 0x0436
-#define E1000_DEV_ID_DH89XXCC_SERDES 0x0438
+#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
+#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
+#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
+#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
+#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
#define E1000_DEV_ID_I350_COPPER 0x1521
#define E1000_DEV_ID_I350_FIBER 0x1522
#define E1000_DEV_ID_I350_SERDES 0x1523
@@ -245,6 +248,10 @@ struct e1000_hw_stats {
u64 scvpc;
u64 hrmpc;
u64 doosync;
+ u64 o2bgptc;
+ u64 o2bspc;
+ u64 b2ospc;
+ u64 b2ogprc;
};
struct e1000_phy_stats {
@@ -329,6 +336,8 @@ struct e1000_nvm_operations {
s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
void (*release)(struct e1000_hw *);
s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*update)(struct e1000_hw *);
+ s32 (*validate)(struct e1000_hw *);
};
struct e1000_info {
@@ -415,7 +424,6 @@ struct e1000_phy_info {
struct e1000_nvm_info {
struct e1000_nvm_operations ops;
-
enum e1000_nvm_type type;
enum e1000_nvm_override override;
@@ -481,6 +489,7 @@ struct e1000_mbx_info {
struct e1000_dev_spec_82575 {
bool sgmii_active;
bool global_device_reset;
+ bool eee_disable;
};
struct e1000_hw {
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index c474cdb70047..78d48c7fa859 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -422,26 +422,24 @@ s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
{
struct e1000_mbx_info *mbx = &hw->mbx;
- if (hw->mac.type == e1000_82576) {
- mbx->timeout = 0;
- mbx->usec_delay = 0;
-
- mbx->size = E1000_VFMAILBOX_SIZE;
-
- mbx->ops.read = igb_read_mbx_pf;
- mbx->ops.write = igb_write_mbx_pf;
- mbx->ops.read_posted = igb_read_posted_mbx;
- mbx->ops.write_posted = igb_write_posted_mbx;
- mbx->ops.check_for_msg = igb_check_for_msg_pf;
- mbx->ops.check_for_ack = igb_check_for_ack_pf;
- mbx->ops.check_for_rst = igb_check_for_rst_pf;
-
- mbx->stats.msgs_tx = 0;
- mbx->stats.msgs_rx = 0;
- mbx->stats.reqs = 0;
- mbx->stats.acks = 0;
- mbx->stats.rsts = 0;
- }
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = E1000_VFMAILBOX_SIZE;
+
+ mbx->ops.read = igb_read_mbx_pf;
+ mbx->ops.write = igb_write_mbx_pf;
+ mbx->ops.read_posted = igb_read_posted_mbx;
+ mbx->ops.write_posted = igb_write_posted_mbx;
+ mbx->ops.check_for_msg = igb_check_for_msg_pf;
+ mbx->ops.check_for_ack = igb_check_for_ack_pf;
+ mbx->ops.check_for_rst = igb_check_for_rst_pf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
return 0;
}
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index d83b77fa4038..75bf36a4baee 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -318,6 +318,68 @@ out:
}
/**
+ * igb_read_nvm_spi - Read EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM.
+ **/
+s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 i = 0;
+ s32 ret_val;
+ u16 word_in;
+ u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+ /*
+ * A check for invalid values: offset too large, too many words,
+ * and not enough words.
+ */
+ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+ (words == 0)) {
+ hw_dbg("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ ret_val = nvm->ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = igb_ready_nvm_eeprom(hw);
+ if (ret_val)
+ goto release;
+
+ igb_standby_nvm(hw);
+
+ if ((nvm->address_bits == 8) && (offset >= 128))
+ read_opcode |= NVM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+ igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+ /*
+ * Read the data. SPI NVMs increment the address with each byte
+ * read and will roll over if reading beyond the end. This allows
+ * us to read the whole NVM from any offset
+ */
+ for (i = 0; i < words; i++) {
+ word_in = igb_shift_in_eec_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+
+release:
+ nvm->ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
* igb_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
@@ -353,7 +415,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
break;
data[i] = (rd32(E1000_EERD) >>
- E1000_NVM_RW_REG_DATA);
+ E1000_NVM_RW_REG_DATA);
}
out:
@@ -445,31 +507,112 @@ out:
}
/**
- * igb_read_part_num - Read device part number
+ * igb_read_part_string - Read device part number
* @hw: pointer to the HW structure
* @part_num: pointer to device part number
+ * @part_num_size: size of part number buffer
*
* Reads the product board assembly (PBA) number from the EEPROM and stores
* the value in part_num.
**/
-s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num)
+s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
{
- s32 ret_val;
+ s32 ret_val;
u16 nvm_data;
+ u16 pointer;
+ u16 offset;
+ u16 length;
+
+ if (part_num == NULL) {
+ hw_dbg("PBA string buffer was null\n");
+ ret_val = E1000_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
- *part_num = (u32)(nvm_data << 16);
- ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ /*
+ * if nvm_data is not the pointer guard the PBA must be in legacy format,
+ * which means pointer is actually our second data word for the PBA number
+ * and we can decode it into an ASCII string
+ */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ hw_dbg("NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (part_num_size < 11) {
+ hw_dbg("PBA string buffer too small\n");
+ return E1000_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pointer */
+ part_num[0] = (nvm_data >> 12) & 0xF;
+ part_num[1] = (nvm_data >> 8) & 0xF;
+ part_num[2] = (nvm_data >> 4) & 0xF;
+ part_num[3] = nvm_data & 0xF;
+ part_num[4] = (pointer >> 12) & 0xF;
+ part_num[5] = (pointer >> 8) & 0xF;
+ part_num[6] = '-';
+ part_num[7] = 0;
+ part_num[8] = (pointer >> 4) & 0xF;
+ part_num[9] = pointer & 0xF;
+
+ /* put a null character on the end of our string */
+ part_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (part_num[offset] < 0xA)
+ part_num[offset] += '0';
+ else if (part_num[offset] < 0x10)
+ part_num[offset] += 'A' - 0xA;
+ }
+
+ goto out;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
- *part_num |= nvm_data;
+
+ if (length == 0xFFFF || length == 0) {
+ hw_dbg("NVM PBA number section invalid length\n");
+ ret_val = E1000_ERR_NVM_PBA_SECTION;
+ goto out;
+ }
+ /* check if part_num buffer is big enough */
+ if (part_num_size < (((u32)length * 2) - 1)) {
+ hw_dbg("PBA string buffer too small\n");
+ ret_val = E1000_ERR_NO_SPACE;
+ goto out;
+ }
+
+ /* trim pba length from start of string */
+ pointer++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
+ if (ret_val) {
+ hw_dbg("NVM Read Error\n");
+ goto out;
+ }
+ part_num[offset * 2] = (u8)(nvm_data >> 8);
+ part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ }
+ part_num[offset * 2] = '\0';
out:
return ret_val;
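For the legacy layout handled by igb_read_part_string() above, the two NVM words decode directly into the usual Intel PBA string form. Assuming, purely for illustration, nvm_data = 0x1234 and pointer = 0x5678, the nibble extraction and hex-character conversion produce "123456-078": the constant stored at index 7 becomes the leading '0' of the three-digit suffix, and the '-' at index 6 is left untouched by the conversion loop.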
diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h
index 1041c34dcbe1..7f43564c4bcc 100644
--- a/drivers/net/igb/e1000_nvm.h
+++ b/drivers/net/igb/e1000_nvm.h
@@ -32,7 +32,10 @@ s32 igb_acquire_nvm(struct e1000_hw *hw);
void igb_release_nvm(struct e1000_hw *hw);
s32 igb_read_mac_addr(struct e1000_hw *hw);
s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
+s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
+ u32 part_num_size);
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
s32 igb_update_nvm_checksum(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index ddd036a78999..6694bf3e5ad9 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -1757,11 +1757,12 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
u16 phy_data, i, agc_value = 0;
u16 cur_agc_index, max_agc_index = 0;
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
- u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
- {IGP02E1000_PHY_AGC_A,
- IGP02E1000_PHY_AGC_B,
- IGP02E1000_PHY_AGC_C,
- IGP02E1000_PHY_AGC_D};
+ static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
+ };
/* Read the AGC registers for all channels */
for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index abb7333a1fbf..958ca3bda482 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -106,6 +106,19 @@
#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
+/* DMA Coalescing registers */
+#define E1000_DMACR 0x02508 /* Control Register */
+#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
+#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
+#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
+
+/* TX Rate Limit Registers */
+#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
+
/* Split and Replication RX Control - RW */
#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
/*
@@ -301,6 +314,7 @@
#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
+#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
@@ -323,4 +337,18 @@
/* DMA Coalescing registers */
#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
+
+/* Energy Efficient Ethernet "EEE" register */
+#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
+#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
+
+/* Thermal Sensor Register */
+#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
+
#endif
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index edab9c442399..1c687e298d5e 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -77,6 +77,7 @@ struct vf_data_storage {
unsigned long last_nack;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
+ u16 tx_rate;
};
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -323,13 +324,21 @@ struct igb_adapter {
u16 rx_ring_count;
unsigned int vfs_allocated_count;
struct vf_data_storage *vf_data;
+ int vf_rate_link_speed;
u32 rss_queues;
+ u32 wvbr;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
#define IGB_FLAG_DCA_ENABLED (1 << 1)
#define IGB_FLAG_QUAD_PORT_A (1 << 2)
#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
+#define IGB_FLAG_DMAC (1 << 4)
+
+/* DMA Coalescing defines */
+#define IGB_MIN_TXPBSIZE 20408
+#define IGB_TX_BUF_4096 4096
+#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
#define IGB_82576_TSYNC_SHIFT 19
#define IGB_82580_TSYNC_SHIFT 24
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index a70e16bcfa7e..d976733bbcc2 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -86,6 +86,10 @@ static const struct igb_stats igb_gstrings_stats[] = {
IGB_STAT("tx_smbus", stats.mgptc),
IGB_STAT("rx_smbus", stats.mgprc),
IGB_STAT("dropped_smbus", stats.mgpdc),
+ IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+ IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+ IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
+ IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
};
#define IGB_NETDEV_STAT(_net_stat) { \
@@ -603,7 +607,10 @@ static void igb_get_regs(struct net_device *netdev,
regs_buff[548] = rd32(E1000_TDFT);
regs_buff[549] = rd32(E1000_TDFHS);
regs_buff[550] = rd32(E1000_TDFPC);
-
+ regs_buff[551] = adapter->stats.o2bgptc;
+ regs_buff[552] = adapter->stats.b2ospc;
+ regs_buff[553] = adapter->stats.o2bspc;
+ regs_buff[554] = adapter->stats.b2ogprc;
}
static int igb_get_eeprom_len(struct net_device *netdev)
@@ -714,7 +721,7 @@ static int igb_set_eeprom(struct net_device *netdev,
/* Update the checksum over the first part of the EEPROM if needed
* and flush shadow RAM for 82573 controllers */
if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
- igb_update_nvm_checksum(hw);
+ hw->nvm.ops.update(hw);
kfree(eeprom_buff);
return ret_val;
@@ -727,8 +734,9 @@ static void igb_get_drvinfo(struct net_device *netdev,
char firmware_version[32];
u16 eeprom_data;
- strncpy(drvinfo->driver, igb_driver_name, 32);
- strncpy(drvinfo->version, igb_driver_version, 32);
+ strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
+ strncpy(drvinfo->version, igb_driver_version,
+ sizeof(drvinfo->version) - 1);
/* EEPROM image version # is reported as firmware version # for
* 82575 controllers */
@@ -738,8 +746,10 @@ static void igb_get_drvinfo(struct net_device *netdev,
(eeprom_data & 0x0FF0) >> 4,
eeprom_data & 0x000F);
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strncpy(drvinfo->fw_version, firmware_version,
+ sizeof(drvinfo->fw_version) - 1);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info) - 1);
drvinfo->n_stats = IGB_STATS_LEN;
drvinfo->testinfo_len = IGB_TEST_LEN;
drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -1070,7 +1080,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
wr32(reg, (_test[pat] & write));
- val = rd32(reg);
+ val = rd32(reg) & mask;
if (val != (_test[pat] & write & mask)) {
dev_err(&adapter->pdev->dev, "pattern test reg %04X "
"failed: got 0x%08X expected 0x%08X\n",
@@ -1999,6 +2009,12 @@ static int igb_set_coalesce(struct net_device *netdev,
if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
return -EINVAL;
+ /* If ITR is disabled, disable DMAC */
+ if (ec->rx_coalesce_usecs == 0) {
+ if (adapter->flags & IGB_FLAG_DMAC)
+ adapter->flags &= ~IGB_FLAG_DMAC;
+ }
+
/* convert to rate of irq's per second */
if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
adapter->rx_itr_setting = ec->rx_coalesce_usecs;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 892d196f17ac..3d850af0cdda 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -50,12 +50,17 @@
#endif
#include "igb.h"
-#define DRV_VERSION "2.1.0-k2"
+#define MAJ 3
+#define MIN 0
+#define BUILD 6
+#define KFIX 2
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+__stringify(BUILD) "-k" __stringify(KFIX)
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
+static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
@@ -68,11 +73,14 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
@@ -98,6 +106,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
+static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
@@ -147,6 +156,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *ivi);
+static void igb_check_vf_rate_limit(struct igb_adapter *);
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
@@ -1654,7 +1664,7 @@ void igb_reset(struct igb_adapter *adapter)
if (adapter->vfs_allocated_count) {
int i;
for (i = 0 ; i < adapter->vfs_allocated_count; i++)
- adapter->vf_data[i].flags = 0;
+ adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
/* ping all the active vfs to let them know we are going down */
igb_ping_all_vfs(adapter);
@@ -1670,7 +1680,58 @@ void igb_reset(struct igb_adapter *adapter)
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
+ if (hw->mac.type > e1000_82580) {
+ if (adapter->flags & IGB_FLAG_DMAC) {
+ u32 reg;
+ /*
+ * DMA Coalescing high water mark needs to be higher
+ * than the Rx threshold. The Rx threshold is
+ * currently pba - 6, so we should use a high water
+ * mark of pba - 4. */
+ hwm = (pba - 4) << 10;
+
+ reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
+ & E1000_DMACR_DMACTHR_MASK);
+
+ /* transition to L0x or L1 if available..*/
+ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+ /* watchdog timer = +/- 1000 usec in 32 usec intervals */
+ reg |= (1000 >> 5);
+ wr32(E1000_DMACR, reg);
+
+ /* no lower threshold to disable coalescing (smart fifo)
+ * - UTRESH=0 */
+ wr32(E1000_DMCRTRH, 0);
+
+ /* set hwm to PBA - 2 * max frame size */
+ wr32(E1000_FCRTC, hwm);
+
+ /*
+ * This sets the time to wait before requesting transition to
+ * low power state to the number of usecs needed to receive
+ * one 512 byte frame at gigabit line rate
+ */
+ reg = rd32(E1000_DMCTLX);
+ reg |= IGB_DMCTLX_DCFLUSH_DIS;
+
+ /* Delay 255 usec before entering Lx state. */
+ reg |= 0xFF;
+ wr32(E1000_DMCTLX, reg);
+
+ /* free space in Tx packet buffer to wake from DMAC */
+ wr32(E1000_DMCTXTH,
+ (IGB_MIN_TXPBSIZE -
+ (IGB_TX_BUF_4096 + adapter->max_frame_size))
+ >> 6);
+
+ /* make low power state decision controlled by DMAC */
+ reg = rd32(E1000_PCIEMISC);
+ reg |= E1000_PCIEMISC_LX_DECISION;
+ wr32(E1000_PCIEMISC, reg);
+ } /* end if IGB_FLAG_DMAC set */
+ }
if (hw->mac.type == e1000_82580) {
u32 reg = rd32(E1000_PCIEMISC);
wr32(E1000_PCIEMISC,
@@ -1729,12 +1790,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
struct igb_adapter *adapter;
struct e1000_hw *hw;
u16 eeprom_data = 0;
+ s32 ret_val;
static int global_quad_port_a; /* global quad port a indication */
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
unsigned long mmio_start, mmio_len;
int err, pci_using_dac;
u16 eeprom_apme_mask = IGB_EEPROM_APME;
- u32 part_num;
+ u8 part_str[E1000_PBANUM_LENGTH];
/* Catch broken hardware that put the wrong VF device ID in
* the PCIe SR-IOV capability.
@@ -1879,7 +1941,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
hw->mac.ops.reset_hw(hw);
/* make sure the NVM is good */
- if (igb_validate_nvm_checksum(hw) < 0) {
+ if (hw->nvm.ops.validate(hw) < 0) {
dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
@@ -1987,6 +2049,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
}
#endif
+ /* do hw tstamp init after resetting */
+ igb_init_hw_timer(adapter);
+
dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
/* print bus type/speed/width info */
dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2000,16 +2065,22 @@ static int __devinit igb_probe(struct pci_dev *pdev,
"unknown"),
netdev->dev_addr);
- igb_read_part_num(hw, &part_num);
- dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
- (part_num >> 8), (part_num & 0xff));
-
+ ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
+ if (ret_val)
+ strcpy(part_str, "Unknown");
+ dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
dev_info(&pdev->dev,
"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
adapter->msix_entries ? "MSI-X" :
(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
adapter->num_rx_queues, adapter->num_tx_queues);
-
+ switch (hw->mac.type) {
+ case e1000_i350:
+ igb_set_eee_i350(hw);
+ break;
+ default:
+ break;
+ }
return 0;
err_register:
@@ -2049,13 +2120,16 @@ static void __devexit igb_remove(struct pci_dev *pdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- /* flush_scheduled work may reschedule our watchdog task, so
- * explicitly disable watchdog tasks from being rescheduled */
+ /*
+ * The watchdog timer may be rescheduled, so explicitly
+ * disable watchdog from being rescheduled.
+ */
set_bit(__IGB_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
- flush_scheduled_work();
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
#ifdef CONFIG_IGB_DCA
if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
@@ -2143,6 +2217,9 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
random_ether_addr(mac_addr);
igb_set_vf_mac(adapter, i, mac_addr);
}
+ /* DMA Coalescing is not supported in IOV mode. */
+ if (adapter->flags & IGB_FLAG_DMAC)
+ adapter->flags &= ~IGB_FLAG_DMAC;
}
#endif /* CONFIG_PCI_IOV */
}
@@ -2280,9 +2357,19 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
- if (hw->mac.type == e1000_82576)
- adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
-
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ if (max_vfs > 7) {
+ dev_warn(&pdev->dev,
+ "Maximum of 7 VFs per PF, using max\n");
+ adapter->vfs_allocated_count = 7;
+ } else
+ adapter->vfs_allocated_count = max_vfs;
+ break;
+ default:
+ break;
+ }
#endif /* CONFIG_PCI_IOV */
adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
@@ -2301,12 +2388,14 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
return -ENOMEM;
}
- igb_init_hw_timer(adapter);
igb_probe_vfs(adapter);
/* Explicitly disable IRQ since the NIC can be in any state. */
igb_irq_disable(adapter);
+ if (hw->mac.type == e1000_i350)
+ adapter->flags &= ~IGB_FLAG_DMAC;
+
set_bit(__IGB_DOWN, &adapter->state);
return 0;
}
@@ -2436,10 +2525,9 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
int size;
size = sizeof(struct igb_buffer) * tx_ring->count;
- tx_ring->buffer_info = vmalloc(size);
+ tx_ring->buffer_info = vzalloc(size);
if (!tx_ring->buffer_info)
goto err;
- memset(tx_ring->buffer_info, 0, size);
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -2587,10 +2675,9 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
int size, desc_len;
size = sizeof(struct igb_buffer) * rx_ring->count;
- rx_ring->buffer_info = vmalloc(size);
+ rx_ring->buffer_info = vzalloc(size);
if (!rx_ring->buffer_info)
goto err;
- memset(rx_ring->buffer_info, 0, size);
desc_len = sizeof(union e1000_adv_rx_desc);
@@ -3362,6 +3449,45 @@ static void igb_set_rx_mode(struct net_device *netdev)
igb_restore_vf_multicasts(adapter);
}
+static void igb_check_wvbr(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 wvbr = 0;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ if (!(wvbr = rd32(E1000_WVBR)))
+ return;
+ break;
+ default:
+ break;
+ }
+
+ adapter->wvbr |= wvbr;
+}
+
+#define IGB_STAGGERED_QUEUE_OFFSET 8
+
+static void igb_spoof_check(struct igb_adapter *adapter)
+{
+ int j;
+
+ if (!adapter->wvbr)
+ return;
+
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
+ if (adapter->wvbr & (1 << j) ||
+ adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
+ dev_warn(&adapter->pdev->dev,
+ "Spoof event(s) detected on VF %d\n", j);
+ adapter->wvbr &=
+ ~((1 << j) |
+ (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+ }
+ }
+}
+
/* Need to wait a few seconds after link up to get diagnostic information from
* the phy */
static void igb_update_phy_info(unsigned long data)
@@ -3424,7 +3550,7 @@ static void igb_watchdog_task(struct work_struct *work)
watchdog_task);
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- u32 link;
+ u32 link, ctrl_ext, thstat;
int i;
link = igb_has_link(adapter);
@@ -3448,6 +3574,25 @@ static void igb_watchdog_task(struct work_struct *work)
((ctrl & E1000_CTRL_RFCE) ? "RX" :
((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
+ /* check for thermal sensor event on i350,
+ * copper only */
+ if (hw->mac.type == e1000_i350) {
+ thstat = rd32(E1000_THSTAT);
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ if ((hw->phy.media_type ==
+ e1000_media_type_copper) && !(ctrl_ext &
+ E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ if (thstat &
+ E1000_THSTAT_LINK_THROTTLE) {
+ printk(KERN_INFO "igb: %s The "
+ "network adapter link "
+ "speed was downshifted "
+ "because it "
+ "overheated.\n",
+ netdev->name);
+ }
+ }
+ }
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
@@ -3462,6 +3607,7 @@ static void igb_watchdog_task(struct work_struct *work)
netif_carrier_on(netdev);
igb_ping_all_vfs(adapter);
+ igb_check_vf_rate_limit(adapter);
/* link state has changed, schedule phy info update */
if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -3472,6 +3618,22 @@ static void igb_watchdog_task(struct work_struct *work)
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
+ /* check for thermal sensor event on i350
+ * copper only */
+ if (hw->mac.type == e1000_i350) {
+ thstat = rd32(E1000_THSTAT);
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ if ((hw->phy.media_type ==
+ e1000_media_type_copper) && !(ctrl_ext &
+ E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ if (thstat & E1000_THSTAT_PWR_DOWN) {
+ printk(KERN_ERR "igb: %s The "
+ "network adapter was stopped "
+ "because it overheated.\n",
+ netdev->name);
+ }
+ }
+ }
/* Links status message must follow this format */
printk(KERN_INFO "igb: %s NIC Link is Down\n",
netdev->name);
@@ -3521,6 +3683,8 @@ static void igb_watchdog_task(struct work_struct *work)
wr32(E1000_ICS, E1000_ICS_RXDMT0);
}
+ igb_spoof_check(adapter);
+
/* Reset the timer */
if (!test_bit(__IGB_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer,
@@ -4502,6 +4666,15 @@ void igb_update_stats(struct igb_adapter *adapter,
adapter->stats.mgptc += rd32(E1000_MGTPTC);
adapter->stats.mgprc += rd32(E1000_MGTPRC);
adapter->stats.mgpdc += rd32(E1000_MGTPDC);
+
+ /* OS2BMC Stats */
+ reg = rd32(E1000_MANC);
+ if (reg & E1000_MANC_EN_BMC2OS) {
+ adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
+ adapter->stats.o2bspc += rd32(E1000_O2BSPC);
+ adapter->stats.b2ospc += rd32(E1000_B2OSPC);
+ adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
+ }
}
static irqreturn_t igb_msix_other(int irq, void *data)
@@ -4517,6 +4690,10 @@ static irqreturn_t igb_msix_other(int irq, void *data)
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
+ /* The DMA Out of Sync is also an indication of a spoof event
+ * in IOV mode. Check the Wrong VM Behavior register to
+ * see if it is really a spoof event. */
+ igb_check_wvbr(adapter);
}
/* Check for a mailbox event */
@@ -4969,8 +5146,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
- /* clear flags */
- adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
+ /* clear flags - except flag that indicates PF has set the MAC */
+ adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
adapter->vf_data[vf].last_nack = jiffies;
/* reset offloads to defaults */
@@ -5024,7 +5201,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
reg = rd32(E1000_VFRE);
wr32(E1000_VFRE, reg | (1 << vf));
- adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
/* reply to reset with ack and vf mac address */
msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
@@ -5103,7 +5280,14 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
switch ((msgbuf[0] & 0xFFFF)) {
case E1000_VF_SET_MAC_ADDR:
- retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
+ retval = -EINVAL;
+ if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
+ retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
+ else
+ dev_warn(&pdev->dev,
+ "VF %d attempted to override administratively "
+ "set MAC address\nReload the VF driver to "
+ "resume operations\n", vf);
break;
case E1000_VF_SET_PROMISC:
retval = igb_set_vf_promisc(adapter, msgbuf, vf);
@@ -5115,8 +5299,12 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
break;
case E1000_VF_SET_VLAN:
- if (adapter->vf_data[vf].pf_vlan)
- retval = -1;
+ retval = -1;
+ if (vf_data->pf_vlan)
+ dev_warn(&pdev->dev,
+ "VF %d attempted to override administratively "
+ "set VLAN tag\nReload the VF driver to "
+ "resume operations\n", vf);
else
retval = igb_set_vf_vlan(adapter, msgbuf, vf);
break;
@@ -6533,9 +6721,91 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
return igb_set_vf_mac(adapter, vf, mac);
}
+static int igb_link_mbps(int internal_link_speed)
+{
+ switch (internal_link_speed) {
+ case SPEED_100:
+ return 100;
+ case SPEED_1000:
+ return 1000;
+ default:
+ return 0;
+ }
+}
+
+static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
+ int link_speed)
+{
+ int rf_dec, rf_int;
+ u32 bcnrc_val;
+
+ if (tx_rate != 0) {
+ /* Calculate the rate factor values to set */
+ rf_int = link_speed / tx_rate;
+ rf_dec = (link_speed - (rf_int * tx_rate));
+ rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+
+ bcnrc_val = E1000_RTTBCNRC_RS_ENA;
+ bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
+ E1000_RTTBCNRC_RF_INT_MASK);
+ bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
+ } else {
+ bcnrc_val = 0;
+ }
+
+ wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
+ wr32(E1000_RTTBCNRC, bcnrc_val);
+}
+
+static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
+{
+ int actual_link_speed, i;
+ bool reset_rate = false;
+
+ /* VF TX rate limit was not set or not supported */
+ if ((adapter->vf_rate_link_speed == 0) ||
+ (adapter->hw.mac.type != e1000_82576))
+ return;
+
+ actual_link_speed = igb_link_mbps(adapter->link_speed);
+ if (actual_link_speed != adapter->vf_rate_link_speed) {
+ reset_rate = true;
+ adapter->vf_rate_link_speed = 0;
+ dev_info(&adapter->pdev->dev,
+ "Link speed has been changed. VF Transmit "
+ "rate is disabled\n");
+ }
+
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+ if (reset_rate)
+ adapter->vf_data[i].tx_rate = 0;
+
+ igb_set_vf_rate_limit(&adapter->hw, i,
+ adapter->vf_data[i].tx_rate,
+ actual_link_speed);
+ }
+}
+
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
- return -EOPNOTSUPP;
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int actual_link_speed;
+
+ if (hw->mac.type != e1000_82576)
+ return -EOPNOTSUPP;
+
+ actual_link_speed = igb_link_mbps(adapter->link_speed);
+ if ((vf >= adapter->vfs_allocated_count) ||
+ (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
+ (tx_rate < 0) || (tx_rate > actual_link_speed))
+ return -EINVAL;
+
+ adapter->vf_rate_link_speed = actual_link_speed;
+ adapter->vf_data[vf].tx_rate = (u16)tx_rate;
+ igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+
+ return 0;
}
static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -6546,7 +6816,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
- ivi->tx_rate = 0;
+ ivi->tx_rate = adapter->vf_data[vf].tx_rate;
ivi->vlan = adapter->vf_data[vf].pf_vlan;
ivi->qos = adapter->vf_data[vf].pf_qos;
return 0;
@@ -6580,6 +6850,8 @@ static void igb_vmm_control(struct igb_adapter *adapter)
if (adapter->vfs_allocated_count) {
igb_vmdq_set_loopback_pf(hw, true);
igb_vmdq_set_replication_pf(hw, true);
+ igb_vmdq_set_anti_spoofing_pf(hw, true,
+ adapter->vfs_allocated_count);
} else {
igb_vmdq_set_loopback_pf(hw, false);
igb_vmdq_set_replication_pf(hw, false);
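The igb_set_vf_rate_limit() hunk above programs the per-VF transmit rate limiter by splitting link_speed/tx_rate into an integer part and a fixed-point fractional part before writing RTTBCNRC. The standalone C sketch below reproduces only that arithmetic; it is not driver code, and RF_INT_SHIFT, the two masks and the RS_ENA bit position are illustrative placeholders rather than the real E1000_RTTBCNRC_* values.

/* Userspace sketch of the rate-factor math in igb_set_vf_rate_limit(). */
#include <stdio.h>

#define RF_INT_SHIFT 14                          /* assumed fractional width */
#define RF_INT_MASK  (0x3FFu << RF_INT_SHIFT)    /* placeholder mask */
#define RF_DEC_MASK  ((1u << RF_INT_SHIFT) - 1)  /* placeholder mask */
#define RS_ENA       (1u << 31)                  /* placeholder enable bit */

static unsigned int rate_factor(unsigned int link_speed, unsigned int tx_rate)
{
    unsigned int rf_int, rf_dec;

    if (tx_rate == 0)
        return 0;                            /* a rate of 0 disables the limiter */

    rf_int = link_speed / tx_rate;               /* integer part of link/rate */
    rf_dec = link_speed - rf_int * tx_rate;      /* remainder ...             */
    rf_dec = (rf_dec << RF_INT_SHIFT) / tx_rate; /* ... as a fixed-point fraction */

    return RS_ENA | ((rf_int << RF_INT_SHIFT) & RF_INT_MASK) |
           (rf_dec & RF_DEC_MASK);
}

int main(void)
{
    /* e.g. a 1000 Mb/s link limited to 300 Mb/s -> factor of about 3.33 */
    printf("RTTBCNRC-style value: 0x%08x\n", rate_factor(1000, 300));
    return 0;
}

For the example above the integer part is 3 and the scaled remainder is 5461, so the program prints 0x8000d555 with this placeholder bit layout.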
diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile
index c2f150d8f2d9..0fa3db3dd8b6 100644
--- a/drivers/net/igbvf/Makefile
+++ b/drivers/net/igbvf/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel(R) 82576 Virtual Function Linux driver
-# Copyright(c) 2009 Intel Corporation.
+# Copyright(c) 2009 - 2010 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h
index 88a47537518a..79f2604673fe 100644
--- a/drivers/net/igbvf/defines.h
+++ b/drivers/net/igbvf/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 33add708bcbe..1d943aa7c7a6 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -110,11 +110,6 @@ static int igbvf_get_settings(struct net_device *netdev,
return 0;
}
-static u32 igbvf_get_link(struct net_device *netdev)
-{
- return netif_carrier_ok(netdev);
-}
-
static int igbvf_set_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
@@ -206,13 +201,11 @@ static void igbvf_get_regs(struct net_device *netdev,
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 *regs_buff = p;
- u8 revision_id;
memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
- pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id);
-
- regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
+ regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
@@ -515,7 +508,7 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
.get_msglevel = igbvf_get_msglevel,
.set_msglevel = igbvf_set_msglevel,
.nway_reset = igbvf_nway_reset,
- .get_link = igbvf_get_link,
+ .get_link = ethtool_op_get_link,
.get_eeprom_len = igbvf_get_eeprom_len,
.get_eeprom = igbvf_get_eeprom,
.set_eeprom = igbvf_set_eeprom,
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index debeee2dc717..d5dad5d607d6 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -97,6 +97,7 @@ struct igbvf_adapter;
enum igbvf_boards {
board_vf,
+ board_i350_vf,
};
struct igbvf_queue_stats {
@@ -126,7 +127,6 @@ struct igbvf_buffer {
unsigned int page_offset;
};
};
- struct page *page;
};
union igbvf_desc {
@@ -201,9 +201,6 @@ struct igbvf_adapter {
unsigned int restart_queue;
u32 txd_cmd;
- bool detect_tx_hung;
- u8 tx_timeout_factor;
-
u32 tx_int_delay;
u32 tx_abs_int_delay;
diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c
index 819a8ec901dc..3d6f4cc3998a 100644
--- a/drivers/net/igbvf/mbx.c
+++ b/drivers/net/igbvf/mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h
index 4938609dbfb5..c2883c45d477 100644
--- a/drivers/net/igbvf/mbx.h
+++ b/drivers/net/igbvf/mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 28af019c97bb..6ccc32fd7338 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -44,12 +44,13 @@
#include "igbvf.h"
-#define DRV_VERSION "1.0.0-k0"
+#define DRV_VERSION "1.0.8-k0"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
"Intel(R) Virtual Function Network Driver";
-static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+static const char igbvf_copyright[] =
+ "Copyright (c) 2009 - 2010 Intel Corporation.";
static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
@@ -63,8 +64,16 @@ static struct igbvf_info igbvf_vf_info = {
.init_ops = e1000_init_function_pointers_vf,
};
+static struct igbvf_info igbvf_i350_vf_info = {
+ .mac = e1000_vfadapt_i350,
+ .flags = 0,
+ .pba = 10,
+ .init_ops = e1000_init_function_pointers_vf,
+};
+
static const struct igbvf_info *igbvf_info_tbl[] = {
[board_vf] = &igbvf_vf_info,
+ [board_i350_vf] = &igbvf_i350_vf_info,
};
/**
@@ -387,35 +396,6 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
buffer_info->time_stamp = 0;
}
-static void igbvf_print_tx_hang(struct igbvf_adapter *adapter)
-{
- struct igbvf_ring *tx_ring = adapter->tx_ring;
- unsigned int i = tx_ring->next_to_clean;
- unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
- union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
-
- /* detected Tx unit hang */
- dev_err(&adapter->pdev->dev,
- "Detected Tx Unit Hang:\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]:\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n",
- readl(adapter->hw.hw_addr + tx_ring->head),
- readl(adapter->hw.hw_addr + tx_ring->tail),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
- eop,
- jiffies,
- eop_desc->wb.status);
-}
-
/**
* igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
* @adapter: board private structure
@@ -429,10 +409,9 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
int size;
size = sizeof(struct igbvf_buffer) * tx_ring->count;
- tx_ring->buffer_info = vmalloc(size);
+ tx_ring->buffer_info = vzalloc(size);
if (!tx_ring->buffer_info)
goto err;
- memset(tx_ring->buffer_info, 0, size);
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -469,10 +448,9 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
int size, desc_len;
size = sizeof(struct igbvf_buffer) * rx_ring->count;
- rx_ring->buffer_info = vmalloc(size);
+ rx_ring->buffer_info = vzalloc(size);
if (!rx_ring->buffer_info)
goto err;
- memset(rx_ring->buffer_info, 0, size);
desc_len = sizeof(union e1000_adv_rx_desc);
@@ -764,7 +742,6 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
struct igbvf_adapter *adapter = tx_ring->adapter;
- struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct igbvf_buffer *buffer_info;
struct sk_buff *skb;
@@ -825,22 +802,6 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
}
}
- if (adapter->detect_tx_hung) {
- /* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i */
- adapter->detect_tx_hung = false;
- if (tx_ring->buffer_info[i].time_stamp &&
- time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
- (adapter->tx_timeout_factor * HZ)) &&
- !(er32(STATUS) & E1000_STATUS_TXOFF)) {
-
- tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
- /* detected Tx unit hang */
- igbvf_print_tx_hang(adapter);
-
- netif_stop_queue(netdev);
- }
- }
adapter->net_stats.tx_bytes += total_bytes;
adapter->net_stats.tx_packets += total_packets;
return count < tx_ring->count;
@@ -1851,26 +1812,11 @@ static void igbvf_watchdog_task(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
- bool txb2b = 1;
-
mac->ops.get_link_up_info(&adapter->hw,
&adapter->link_speed,
&adapter->link_duplex);
igbvf_print_link_info(adapter);
- /* adjust timeout factor according to speed/duplex */
- adapter->tx_timeout_factor = 1;
- switch (adapter->link_speed) {
- case SPEED_10:
- txb2b = 0;
- adapter->tx_timeout_factor = 16;
- break;
- case SPEED_100:
- txb2b = 0;
- /* maybe add some timeout factor ? */
- break;
- }
-
netif_carrier_on(netdev);
netif_wake_queue(netdev);
}
@@ -1904,9 +1850,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
/* Cause software interrupt to ensure Rx ring is cleaned */
ew32(EICS, adapter->rx_ring->eims_value);
- /* Force detection of hung controller every watchdog period */
- adapter->detect_tx_hung = 1;
-
/* Reset the timer */
if (!test_bit(__IGBVF_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer,
@@ -2696,8 +2639,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
-
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->revision_id = pdev->revision;
err = -EIO;
adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
@@ -2830,13 +2772,14 @@ static void __devexit igbvf_remove(struct pci_dev *pdev)
struct e1000_hw *hw = &adapter->hw;
/*
- * flush_scheduled work may reschedule our watchdog task, so
- * explicitly disable watchdog tasks from being rescheduled
+ * The watchdog timer may be rescheduled, so explicitly
+ * disable it from being rescheduled.
*/
set_bit(__IGBVF_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
- flush_scheduled_work();
+ cancel_work_sync(&adapter->reset_task);
+ cancel_work_sync(&adapter->watchdog_task);
unregister_netdev(netdev);
@@ -2869,6 +2812,7 @@ static struct pci_error_handlers igbvf_err_handler = {
static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h
index b9e24ed70d0a..77e18d3d6b15 100644
--- a/drivers/net/igbvf/regs.h
+++ b/drivers/net/igbvf/regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index a9a61efa964c..af3822f9ea9a 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
-void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
{
@@ -362,8 +362,8 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
* or a virtual function reset
*/
- /* If we were hit with a reset drop the link */
- if (!mbx->ops.check_for_rst(hw))
+ /* If we were hit with a reset or timeout drop the link */
+ if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
mac->get_link_status = true;
if (!mac->get_link_status)
diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
index 1e8ce3741a67..d7ed58fcd9bb 100644
--- a/drivers/net/igbvf/vf.h
+++ b/drivers/net/igbvf/vf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 Intel Corporation.
+ Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -39,6 +39,7 @@
struct e1000_hw;
#define E1000_DEV_ID_82576_VF 0x10CA
+#define E1000_DEV_ID_I350_VF 0x1520
#define E1000_REVISION_0 0
#define E1000_REVISION_1 1
#define E1000_REVISION_2 2
@@ -133,6 +134,7 @@ struct e1000_adv_tx_context_desc {
enum e1000_mac_type {
e1000_undefined = 0,
e1000_vfadapt,
+ e1000_vfadapt_i350,
e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
};
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index aa93655c3aa7..a5b0f0e194bb 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -2025,7 +2025,6 @@ static void ipg_init_mii(struct net_device *dev)
if (phyaddr != 0x1f) {
u16 mii_phyctrl, mii_1000cr;
- u8 revisionid = 0;
mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
@@ -2035,8 +2034,7 @@ static void ipg_init_mii(struct net_device *dev)
mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
/* Set default phyparam */
- pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid);
- ipg_set_phy_default_param(revisionid, dev, phyaddr);
+ ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
/* Reset PHY */
mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
diff --git a/drivers/net/irda/act200l-sir.c b/drivers/net/irda/act200l-sir.c
index 37ab8c855719..8ff084f1d236 100644
--- a/drivers/net/irda/act200l-sir.c
+++ b/drivers/net/irda/act200l-sir.c
@@ -199,7 +199,7 @@ static int act200l_reset(struct sir_dev *dev)
{
unsigned state = dev->fsm.substate;
unsigned delay = 0;
- u8 control[9] = {
+ static const u8 control[9] = {
ACT200L_REG15,
ACT200L_REG13 | ACT200L_SHDW,
ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL,
diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h
index b54a6f08db45..e3b285a67734 100644
--- a/drivers/net/irda/bfin_sir.h
+++ b/drivers/net/irda/bfin_sir.h
@@ -26,6 +26,8 @@
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/portmux.h>
+#include <mach/bfin_serial_5xx.h>
+#undef DRIVER_NAME
#ifdef CONFIG_SIR_BFIN_DMA
struct dma_rx_buf {
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index b626cccbccd1..f81d944fc360 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -818,9 +818,9 @@ toshoboe_probe (struct toshoboe_cb *self)
{
int i, j, n;
#ifdef USE_MIR
- int bauds[] = { 9600, 115200, 4000000, 1152000 };
+ static const int bauds[] = { 9600, 115200, 4000000, 1152000 };
#else
- int bauds[] = { 9600, 115200, 4000000 };
+ static const int bauds[] = { 9600, 115200, 4000000 };
#endif
unsigned long flags;
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 4dc39e5f0156..77fcf4459161 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -30,7 +30,7 @@
* or the type-DO IR port.
*
* IrDA chip set list from Toshiba Computer Engineering Corp.
- * model method maker controler Version
+ * model method maker controller Version
* Portege 320CT FIR,SIR Toshiba Oboe(Triangle)
* Portege 3010CT FIR,SIR Toshiba Oboe(Sydney)
* Portege 3015CT FIR,SIR Toshiba Oboe(Sydney)
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index ee1dde52e8fc..3352b2443e58 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -167,7 +167,7 @@ static int irtty_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
* let's be careful... Jean II
*/
IRDA_ASSERT(priv->tty->ops->tiocmset != NULL, return -1;);
- priv->tty->ops->tiocmset(priv->tty, NULL, set, clear);
+ priv->tty->ops->tiocmset(priv->tty, set, clear);
return 0;
}
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 74b20f179cea..cc821de2c966 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -959,7 +959,7 @@ static void mcs_disconnect(struct usb_interface *intf)
if (!mcs)
return;
- flush_scheduled_work();
+ cancel_work_sync(&mcs->work);
unregister_netdev(mcs->netdev);
free_netdev(mcs->netdev);
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 9e3f4f54281d..4488bd581eca 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -635,7 +635,7 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
ret = sh_irda_set_baudrate(self, speed);
if (ret < 0)
- return ret;
+ goto sh_irda_hard_xmit_end;
self->tx_buff.len = 0;
if (skb->len) {
@@ -652,11 +652,21 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
sh_irda_write(self, IRTFLR, self->tx_buff.len);
sh_irda_write(self, IRTCTR, ARMOD | TE);
- }
+ } else
+ goto sh_irda_hard_xmit_end;
dev_kfree_skb(skb);
return 0;
+
+sh_irda_hard_xmit_end:
+ sh_irda_set_baudrate(self, 9600);
+ netif_wake_queue(self->ndev);
+ sh_irda_rcv_ctrl(self, 1);
+ dev_kfree_skb(skb);
+
+ return ret;
+
}
static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 8c57bfb5f098..1c1677cfea29 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -376,7 +376,7 @@ MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
static int pnp_driver_registered;
#ifdef CONFIG_PNP
-static int __init smsc_ircc_pnp_probe(struct pnp_dev *dev,
+static int __devinit smsc_ircc_pnp_probe(struct pnp_dev *dev,
const struct pnp_device_id *dev_id)
{
unsigned int firbase, sirbase;
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 8df645e78f2e..9ece1fd9889d 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -885,17 +885,8 @@ static void veth_stop_connection(struct veth_lpar_connection *cnx)
veth_kick_statemachine(cnx);
spin_unlock_irq(&cnx->lock);
- /* There's a slim chance the reset code has just queued the
- * statemachine to run in five seconds. If so we need to cancel
- * that and requeue the work to run now. */
- if (cancel_delayed_work(&cnx->statemachine_wq)) {
- spin_lock_irq(&cnx->lock);
- veth_kick_statemachine(cnx);
- spin_unlock_irq(&cnx->lock);
- }
-
- /* Wait for the state machine to run. */
- flush_scheduled_work();
+ /* ensure the statemachine runs now and waits for its completion */
+ flush_delayed_work_sync(&cnx->statemachine_wq);
}
static void veth_destroy_connection(struct veth_lpar_connection *cnx)
@@ -1009,15 +1000,10 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return 0;
}
-static u32 veth_get_link(struct net_device *dev)
-{
- return 1;
-}
-
static const struct ethtool_ops ops = {
.get_drvinfo = veth_get_drvinfo,
.get_settings = veth_get_settings,
- .get_link = veth_get_link,
+ .get_link = ethtool_op_get_link,
};
static const struct net_device_ops veth_netdev_ops = {
@@ -1605,7 +1591,7 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
}
veth_dev[i] = dev;
- port = (struct veth_port*)netdev_priv(dev);
+ port = netdev_priv(dev);
/* Start the state machine on each connection on this vlan. If we're
* the first dev to do so this will commence link negotiation */
@@ -1658,15 +1644,14 @@ static void __exit veth_module_cleanup(void)
/* Disconnect our "irq" to stop events coming from the Hypervisor. */
HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
- /* Make sure any work queued from Hypervisor callbacks is finished. */
- flush_scheduled_work();
-
for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
cnx = veth_cnx[i];
if (!cnx)
continue;
+ /* Cancel work queued from Hypervisor callbacks */
+ cancel_delayed_work_sync(&cnx->statemachine_wq);
/* Remove the connection from sysfs */
kobject_del(&cnx->kobject);
/* Drop the driver's reference to the connection */
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 521c0c732998..8f3df044e81e 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -149,7 +149,7 @@ struct ixgb_desc_ring {
struct ixgb_adapter {
struct timer_list watchdog_timer;
- struct vlan_group *vlgrp;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 bd_number;
u32 rx_buffer_len;
u32 part_num;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 43994c199991..cc53aa1541ba 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -706,6 +706,43 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
+static int ixgb_set_flags(struct net_device *netdev, u32 data)
+{
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+ bool need_reset;
+ int rc;
+
+ /*
+ * Tx VLAN insertion does not work per HW design when Rx stripping is
+ * disabled. Disable txvlan when rxvlan is turned off, and enable
+ * rxvlan when txvlan is turned on.
+ */
+ if (!(data & ETH_FLAG_RXVLAN) &&
+ (netdev->features & NETIF_F_HW_VLAN_TX))
+ data &= ~ETH_FLAG_TXVLAN;
+ else if (data & ETH_FLAG_TXVLAN)
+ data |= ETH_FLAG_RXVLAN;
+
+ need_reset = (data & ETH_FLAG_RXVLAN) !=
+ (netdev->features & NETIF_F_HW_VLAN_RX);
+
+ rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN |
+ ETH_FLAG_TXVLAN);
+ if (rc)
+ return rc;
+
+ if (need_reset) {
+ if (netif_running(netdev)) {
+ ixgb_down(adapter, true);
+ ixgb_up(adapter);
+ ixgb_set_speed_duplex(netdev);
+ } else
+ ixgb_reset(adapter);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops ixgb_ethtool_ops = {
.get_settings = ixgb_get_settings,
.set_settings = ixgb_set_settings,
@@ -732,6 +769,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
.phys_id = ixgb_phys_id,
.get_sset_count = ixgb_get_sset_count,
.get_ethtool_stats = ixgb_get_ethtool_stats,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = ixgb_set_flags,
};
void ixgb_set_ethtool_ops(struct net_device *netdev)
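The new ixgb_set_flags() above keeps Tx VLAN insertion coupled to Rx stripping, since per the hunk's comment the hardware cannot insert tags while stripping is disabled. The small sketch below models only that flag normalization; the two flag bits are stand-ins, not the real ETH_FLAG_RXVLAN/ETH_FLAG_TXVLAN values.

#include <stdio.h>

#define FLAG_RXVLAN 0x1u   /* stand-in for ETH_FLAG_RXVLAN */
#define FLAG_TXVLAN 0x2u   /* stand-in for ETH_FLAG_TXVLAN */

static unsigned int normalize(unsigned int requested, int txvlan_currently_on)
{
    if (!(requested & FLAG_RXVLAN) && txvlan_currently_on)
        requested &= ~FLAG_TXVLAN;  /* turning rxvlan off also drops txvlan   */
    else if (requested & FLAG_TXVLAN)
        requested |= FLAG_RXVLAN;   /* turning txvlan on also enables rxvlan  */
    return requested;
}

int main(void)
{
    printf("%u\n", normalize(FLAG_TXVLAN, 0)); /* 3: txvlan pulls rxvlan in    */
    printf("%u\n", normalize(0, 1));           /* 0: dropping rxvlan drops txvlan */
    return 0;
}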
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index caa8192fff2a..0f681ac2da8d 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -98,8 +98,8 @@ static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);
-static void ixgb_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp);
+static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
+static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
@@ -334,7 +334,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
.ndo_set_mac_address = ixgb_set_mac,
.ndo_change_mtu = ixgb_change_mtu,
.ndo_tx_timeout = ixgb_tx_timeout,
- .ndo_vlan_rx_register = ixgb_vlan_rx_register,
.ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -525,7 +524,7 @@ ixgb_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev_priv(netdev);
- flush_scheduled_work();
+ cancel_work_sync(&adapter->tx_timeout_task);
unregister_netdev(netdev);
@@ -669,13 +668,12 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
int size;
size = sizeof(struct ixgb_buffer) * txdr->count;
- txdr->buffer_info = vmalloc(size);
+ txdr->buffer_info = vzalloc(size);
if (!txdr->buffer_info) {
netif_err(adapter, probe, adapter->netdev,
"Unable to allocate transmit descriptor ring memory\n");
return -ENOMEM;
}
- memset(txdr->buffer_info, 0, size);
/* round up to nearest 4K */
@@ -759,13 +757,12 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
int size;
size = sizeof(struct ixgb_buffer) * rxdr->count;
- rxdr->buffer_info = vmalloc(size);
+ rxdr->buffer_info = vzalloc(size);
if (!rxdr->buffer_info) {
netif_err(adapter, probe, adapter->netdev,
"Unable to allocate receive descriptor ring\n");
return -ENOMEM;
}
- memset(rxdr->buffer_info, 0, size);
/* Round up to nearest 4K */
@@ -1078,6 +1075,8 @@ ixgb_set_multi(struct net_device *netdev)
if (netdev->flags & IFF_PROMISC) {
rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
+ /* disable VLAN filtering */
+ rctl &= ~IXGB_RCTL_CFIEN;
rctl &= ~IXGB_RCTL_VFE;
} else {
if (netdev->flags & IFF_ALLMULTI) {
@@ -1086,7 +1085,9 @@ ixgb_set_multi(struct net_device *netdev)
} else {
rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
}
+ /* enable VLAN filtering */
rctl |= IXGB_RCTL_VFE;
+ rctl &= ~IXGB_RCTL_CFIEN;
}
if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
@@ -1105,6 +1106,12 @@ ixgb_set_multi(struct net_device *netdev)
ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
}
+
+ if (netdev->features & NETIF_F_HW_VLAN_RX)
+ ixgb_vlan_strip_enable(adapter);
+ else
+ ixgb_vlan_strip_disable(adapter);
+
}
/**
@@ -1252,7 +1259,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
struct ixgb_buffer *buffer_info;
- css = skb_transport_offset(skb);
+ css = skb_checksum_start_offset(skb);
cso = css + skb->csum_offset;
i = adapter->tx_ring.next_to_use;
@@ -1498,7 +1505,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
DESC_NEEDED)))
return NETDEV_TX_BUSY;
- if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+ if (vlan_tx_tag_present(skb)) {
tx_flags |= IXGB_TX_FLAGS_VLAN;
vlan_id = vlan_tx_tag_get(skb);
}
@@ -2039,12 +2046,11 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
ixgb_rx_checksum(adapter, rx_desc, skb);
skb->protocol = eth_type_trans(skb, netdev);
- if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
- le16_to_cpu(rx_desc->special));
- } else {
- netif_receive_skb(skb);
- }
+ if (status & IXGB_RX_DESC_STATUS_VP)
+ __vlan_hwaccel_put_tag(skb,
+ le16_to_cpu(rx_desc->special));
+
+ netif_receive_skb(skb);
rxdesc_done:
/* clean up descriptor, might be written over by hw */
@@ -2142,43 +2148,26 @@ map_skb:
}
}
-/**
- * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
- *
- * @param netdev network interface device structure
- * @param grp indicates to enable or disable tagging/stripping
- **/
static void
-ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 ctrl, rctl;
-
- ixgb_irq_disable(adapter);
- adapter->vlgrp = grp;
-
- if (grp) {
- /* enable VLAN tag insert/strip */
- ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
- ctrl |= IXGB_CTRL0_VME;
- IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
+ u32 ctrl;
- /* enable VLAN receive filtering */
-
- rctl = IXGB_READ_REG(&adapter->hw, RCTL);
- rctl &= ~IXGB_RCTL_CFIEN;
- IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
- } else {
- /* disable VLAN tag insert/strip */
+ /* enable VLAN tag insert/strip */
+ ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
+ ctrl |= IXGB_CTRL0_VME;
+ IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
+}
- ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
- ctrl &= ~IXGB_CTRL0_VME;
- IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
- }
+static void
+ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
+{
+ u32 ctrl;
- /* don't enable interrupts unless we are UP */
- if (adapter->netdev->flags & IFF_UP)
- ixgb_irq_enable(adapter);
+ /* disable VLAN tag insert/strip */
+ ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
+ ctrl &= ~IXGB_CTRL0_VME;
+ IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}
static void
@@ -2193,6 +2182,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
vfta |= (1 << (vid & 0x1F));
ixgb_write_vfta(&adapter->hw, index, vfta);
+ set_bit(vid, adapter->active_vlans);
}
static void
@@ -2201,35 +2191,22 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
struct ixgb_adapter *adapter = netdev_priv(netdev);
u32 vfta, index;
- ixgb_irq_disable(adapter);
-
- vlan_group_set_device(adapter->vlgrp, vid, NULL);
-
- /* don't enable interrupts unless we are UP */
- if (adapter->netdev->flags & IFF_UP)
- ixgb_irq_enable(adapter);
-
/* remove VID from filter table */
index = (vid >> 5) & 0x7F;
vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
vfta &= ~(1 << (vid & 0x1F));
ixgb_write_vfta(&adapter->hw, index, vfta);
+ clear_bit(vid, adapter->active_vlans);
}
static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
- ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
-
- if (adapter->vlgrp) {
- u16 vid;
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- if (!vlan_group_get_device(adapter->vlgrp, vid))
- continue;
- ixgb_vlan_rx_add_vid(adapter->netdev, vid);
- }
- }
+ u16 vid;
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ ixgb_vlan_rx_add_vid(adapter->netdev, vid);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
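The ixgb VLAN hunks above replace the vlan_group pointer with a plain bitmap of active VIDs: add sets a bit, kill clears it, and restore walks the set bits and re-programs the filter table. A self-contained sketch of that pattern follows; the helpers are simplified stand-ins for the kernel's set_bit()/clear_bit()/for_each_set_bit(), not the real implementations.

#include <stdio.h>

#define VLAN_N_VID       4096
#define BITS_PER_LONG    (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

static void vlan_add(unsigned int vid)
{
    active_vlans[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
}

static void vlan_kill(unsigned int vid)
{
    active_vlans[vid / BITS_PER_LONG] &= ~(1UL << (vid % BITS_PER_LONG));
}

static void vlan_restore(void)
{
    /* replay every VID still marked active, as ixgb_restore_vlan() now does */
    for (unsigned int vid = 0; vid < VLAN_N_VID; vid++)
        if (active_vlans[vid / BITS_PER_LONG] & (1UL << (vid % BITS_PER_LONG)))
            printf("re-add VID %u to the VLAN filter table\n", vid);
}

int main(void)
{
    vlan_add(100);
    vlan_add(200);
    vlan_kill(100);
    vlan_restore();   /* prints only VID 200 */
    return 0;
}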
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index 88a08f056241..dd7fbeb1f7d1 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -191,9 +191,9 @@ struct ixgb_option {
} r;
struct { /* list_option info */
int nr;
- struct ixgb_opt_list {
+ const struct ixgb_opt_list {
int i;
- char *str;
+ const char *str;
} *p;
} l;
} arg;
@@ -226,7 +226,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
break;
case list_option: {
int i;
- struct ixgb_opt_list *ent;
+ const struct ixgb_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
@@ -322,14 +322,15 @@ ixgb_check_options(struct ixgb_adapter *adapter)
}
{ /* Flow Control */
- struct ixgb_opt_list fc_list[] =
- {{ ixgb_fc_none, "Flow Control Disabled" },
- { ixgb_fc_rx_pause,"Flow Control Receive Only" },
- { ixgb_fc_tx_pause,"Flow Control Transmit Only" },
- { ixgb_fc_full, "Flow Control Enabled" },
- { ixgb_fc_default, "Flow Control Hardware Default" }};
+ static const struct ixgb_opt_list fc_list[] = {
+ { ixgb_fc_none, "Flow Control Disabled" },
+ { ixgb_fc_rx_pause, "Flow Control Receive Only" },
+ { ixgb_fc_tx_pause, "Flow Control Transmit Only" },
+ { ixgb_fc_full, "Flow Control Enabled" },
+ { ixgb_fc_default, "Flow Control Hardware Default" }
+ };
- const struct ixgb_option opt = {
+ static const struct ixgb_option opt = {
.type = list_option,
.name = "Flow Control",
.err = "reading default settings from EEPROM",
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 8f81efb49169..7d7387fbdecd 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o
+ ixgbe_mbx.o ixgbe_x540.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ed8703cfffb7..8d468028bb55 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -61,10 +61,8 @@
#define IXGBE_MIN_RXD 64
/* flow control */
-#define IXGBE_DEFAULT_FCRTL 0x10000
#define IXGBE_MIN_FCRTL 0x40
#define IXGBE_MAX_FCRTL 0x7FF80
-#define IXGBE_DEFAULT_FCRTH 0x20000
#define IXGBE_MIN_FCRTH 0x600
#define IXGBE_MAX_FCRTH 0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
@@ -120,6 +118,7 @@ struct vf_data_storage {
bool pf_set_mac;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
+ u16 tx_rate;
};
/* wrapper around a pointer to a socket buffer,
@@ -130,7 +129,9 @@ struct ixgbe_tx_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
- u16 mapped_as_page;
+ unsigned int bytecount;
+ u16 gso_segs;
+ u8 mapped_as_page;
};
struct ixgbe_rx_buffer {
@@ -146,12 +147,56 @@ struct ixgbe_queue_stats {
u64 bytes;
};
+struct ixgbe_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 completed;
+ u64 tx_done_old;
+};
+
+struct ixgbe_rx_queue_stats {
+ u64 rsc_count;
+ u64 rsc_flush;
+ u64 non_eop_descs;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+};
+
+enum ixbge_ring_state_t {
+ __IXGBE_TX_FDIR_INIT_DONE,
+ __IXGBE_TX_DETECT_HANG,
+ __IXGBE_HANG_CHECK_ARMED,
+ __IXGBE_RX_PS_ENABLED,
+ __IXGBE_RX_RSC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+ test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+ set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+ clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+ test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_rsc_enabled(ring) \
+ test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define set_ring_rsc_enabled(ring) \
+ set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define clear_ring_rsc_enabled(ring) \
+ clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
struct ixgbe_ring {
void *desc; /* descriptor ring memory */
+ struct device *dev; /* device for DMA mapping */
+ struct net_device *netdev; /* netdev ring belongs to */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
};
+ unsigned long state;
u8 atr_sample_rate;
u8 atr_count;
u16 count; /* amount of descriptors */
@@ -160,38 +205,31 @@ struct ixgbe_ring {
u16 next_to_clean;
u8 queue_index; /* needed for multiqueue queue management */
-
-#define IXGBE_RING_RX_PS_ENABLED (u8)(1)
- u8 flags; /* per ring feature flags */
- u16 head;
- u16 tail;
-
- unsigned int total_bytes;
- unsigned int total_packets;
-
-#ifdef CONFIG_IXGBE_DCA
- /* cpu for tx queue */
- int cpu;
-#endif
-
- u16 work_limit; /* max work per interrupt */
- u16 reg_idx; /* holds the special value that gets
+ u8 reg_idx; /* holds the special value that gets
* the hardware register offset
* associated with this ring, which is
* different for DCB and RSS modes
*/
+ u8 dcb_tc;
+
+ u16 work_limit; /* max work per interrupt */
+
+ u8 __iomem *tail;
+
+ unsigned int total_bytes;
+ unsigned int total_packets;
struct ixgbe_queue_stats stats;
struct u64_stats_sync syncp;
+ union {
+ struct ixgbe_tx_queue_stats tx_stats;
+ struct ixgbe_rx_queue_stats rx_stats;
+ };
int numa_node;
- unsigned long reinit_state;
- u64 rsc_count; /* stat for coalesced packets */
- u64 rsc_flush; /* stats for flushed packets */
- u32 restart_queue; /* track tx queue restarts */
- u32 non_eop_descs; /* track hardware descriptor chaining */
-
unsigned int size; /* length in bytes */
dma_addr_t dma; /* phys. address of descriptor ring */
+ struct rcu_head rcu;
+ struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
@@ -207,7 +245,7 @@ enum ixgbe_ring_f_enum {
RING_F_ARRAY_SIZE /* must be last in enum set */
};
-#define IXGBE_MAX_DCB_INDICES 8
+#define IXGBE_MAX_DCB_INDICES 64
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 64
@@ -237,6 +275,9 @@ struct ixgbe_q_vector {
unsigned int v_idx; /* index of q_vector within array, also used for
* finding the bit in EICR and friends that
* represents the vector for this ring */
+#ifdef CONFIG_IXGBE_DCA
+ int cpu; /* CPU for DCA */
+#endif
struct napi_struct napi;
DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
@@ -246,6 +287,7 @@ struct ixgbe_q_vector {
u8 rx_itr;
u32 eitr;
cpumask_var_t affinity_mask;
+ char name[IFNAMSIZ + 9];
};
/* Helper macros to switch between ints/sec and what the register uses.
@@ -294,10 +336,14 @@ struct ixgbe_adapter {
u16 bd_number;
struct work_struct reset_task;
struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
- char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+ /* DCB parameters */
+ struct ieee_pfc *ixgbe_ieee_pfc;
+ struct ieee_ets *ixgbe_ieee_ets;
struct ixgbe_dcb_config dcb_cfg;
struct ixgbe_dcb_config temp_dcb_cfg;
u8 dcb_set_bitmap;
+ u8 dcbx_cap;
enum ixgbe_fc_mode last_lfc_mode;
/* Interrupt Throttle Rate */
@@ -417,28 +463,38 @@ struct ixgbe_adapter {
int node;
struct work_struct check_overtemp_task;
u32 interrupt_event;
+ char lsc_int_name[IFNAMSIZ + 9];
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
unsigned int num_vfs;
struct vf_data_storage *vfinfo;
+ int vf_rate_link_speed;
};
enum ixbge_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
- __IXGBE_FDIR_INIT_DONE,
__IXGBE_SFP_MODULE_NOT_FOUND
};
+struct ixgbe_rsc_cb {
+ dma_addr_t dma;
+ u16 skb_cnt;
+ bool delay_unmap;
+};
+#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+
enum ixgbe_boards {
board_82598,
board_82599,
+ board_X540,
};
extern struct ixgbe_info ixgbe_82598_info;
extern struct ixgbe_info ixgbe_82599_info;
+extern struct ixgbe_info ixgbe_X540_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
@@ -454,51 +510,42 @@ extern void ixgbe_down(struct ixgbe_adapter *adapter);
extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
extern void ixgbe_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
- struct net_device *,
struct ixgbe_adapter *,
struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *,
+extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- int cleaned_count);
+extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
extern int ethtool_ioctl(struct ifreq *ifr);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
u8 queue);
extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_input *input,
struct ixgbe_atr_input_masks *input_masks,
u16 soft_id, u8 queue);
-extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
- u16 vlan_id);
-extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
- u32 src_addr);
-extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
- u32 dst_addr);
-extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
- u16 src_port);
-extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
- u16 dst_port);
-extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
- u16 flex_byte);
-extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
- u8 l4type);
+extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring);
+extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_adapter *adapter,
@@ -510,6 +557,8 @@ extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
struct sk_buff *skb);
extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc);
+extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
extern int ixgbe_fcoe_enable(struct net_device *netdev);
extern int ixgbe_fcoe_disable(struct net_device *netdev);
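The ixgbe.h hunk above folds several per-ring booleans and the old reinit_state field into a single state word manipulated through small test/set/clear wrappers (ring_is_*, set_*, clear_*). Below is a minimal userspace sketch of that idiom; the bit helpers are plain C stand-ins for the kernel's atomic test_bit()/set_bit()/clear_bit().

#include <stdio.h>

enum ring_state_bit {            /* same idea as the ring state enum above */
    RING_TX_DETECT_HANG,
    RING_HANG_CHECK_ARMED,
    RING_RX_PS_ENABLED,
    RING_RX_RSC_ENABLED,
};

struct ring { unsigned long state; };

#define check_for_tx_hang(r)       (((r)->state >> RING_TX_DETECT_HANG) & 1UL)
#define set_check_for_tx_hang(r)   ((r)->state |=  1UL << RING_TX_DETECT_HANG)
#define clear_check_for_tx_hang(r) ((r)->state &= ~(1UL << RING_TX_DETECT_HANG))

int main(void)
{
    struct ring r = { 0 };

    set_check_for_tx_hang(&r);
    printf("hang check armed: %lu\n", check_for_tx_hang(&r)); /* 1 */
    clear_check_for_tx_hang(&r);
    printf("hang check armed: %lu\n", check_for_tx_hang(&r)); /* 0 */
    return 0;
}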
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 9c02d6014cc4..845c679c8b87 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -38,9 +38,6 @@
#define IXGBE_82598_MC_TBL_SIZE 128
#define IXGBE_82598_VFT_TBL_SIZE 128
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
@@ -156,11 +153,12 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_82598;
+ &ixgbe_get_copper_link_capabilities_generic;
}
switch (hw->phy.type) {
case ixgbe_phy_tn:
+ phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
&ixgbe_get_phy_firmware_version_tnx;
@@ -274,37 +272,6 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
}
/**
- * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
- *
- * Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
-{
- s32 status = IXGBE_ERR_LINK_SETUP;
- u16 speed_ability;
-
- *speed = 0;
- *autoneg = true;
-
- status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
- &speed_ability);
-
- if (status == 0) {
- if (speed_ability & MDIO_SPEED_10G)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (speed_ability & MDIO_PMA_SPEED_1000)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
- }
-
- return status;
-}
-
-/**
* ixgbe_get_media_type_82598 - Determines media type
* @hw: pointer to hardware structure
*
@@ -314,10 +281,22 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
enum ixgbe_media_type media_type;
+ /* Detect if there is a copper PHY attached. */
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
+ media_type = ixgbe_media_type_copper;
+ goto out;
+ default:
+ break;
+ }
+
/* Media type for I82598 is based on device ID */
switch (hw->device_id) {
case IXGBE_DEV_ID_82598:
case IXGBE_DEV_ID_82598_BX:
+ /* Default device ID is mezzanine card KX/KX4 */
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_82598AF_DUAL_PORT:
@@ -340,7 +319,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
media_type = ixgbe_media_type_unknown;
break;
}
-
+out:
return media_type;
}
@@ -357,6 +336,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 fctrl_reg;
u32 rmcs_reg;
u32 reg;
+ u32 rx_pba_size;
u32 link_speed = 0;
bool link_up;
@@ -387,7 +367,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
/* Negotiate the fc mode to use */
ret_val = ixgbe_fc_autoneg(hw);
- if (ret_val)
+ if (ret_val == IXGBE_ERR_FLOW_CONTROL)
goto out;
/* Disable any previous flow control settings */
@@ -405,10 +385,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
* 2: Tx flow control is enabled (we can send pause frames but
* we do not support receiving pause frames).
* 3: Both Rx and Tx flow control (symmetric) are enabled.
- * other: Invalid.
#ifdef CONFIG_DCB
* 4: Priority Flow Control is enabled.
#endif
+ * other: Invalid.
*/
switch (hw->fc.current_mode) {
case ixgbe_fc_none:
@@ -459,16 +439,19 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- if (hw->fc.send_xon) {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
- (hw->fc.low_water | IXGBE_FCRTL_XONE));
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
- hw->fc.low_water);
- }
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+
+ reg = (rx_pba_size - hw->fc.low_water) << 6;
+ if (hw->fc.send_xon)
+ reg |= IXGBE_FCRTL_XONE;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
- (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
+
+ reg = (rx_pba_size - hw->fc.high_water) << 6;
+ reg |= IXGBE_FCRTH_FCEN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
}
/* Configure pause time (2 TCs per register) */
@@ -658,13 +641,12 @@ out:
return 0;
}
-
/**
* ixgbe_setup_mac_link_82598 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: true if auto-negotiation enabled
- * @autoneg_wait_to_complete: true if waiting is needed to complete
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
@@ -703,7 +685,8 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
* ixgbe_hw This will write the AUTOC register based on the new
* stored values
*/
- status = ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
+ status = ixgbe_start_mac_link_82598(hw,
+ autoneg_wait_to_complete);
}
return status;
@@ -729,7 +712,6 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
autoneg_wait_to_complete);
-
/* Set up MAC */
ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
@@ -801,7 +783,6 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
goto no_phy_reset;
-
hw->phy.ops.reset(hw);
}
@@ -810,12 +791,9 @@ no_phy_reset:
* Prevent the PCI-E bus from from hanging by disabling PCI-E master
* access and verify no pending requests before reset
*/
- status = ixgbe_disable_pcie_master(hw);
- if (status != 0) {
- status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
- hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
- }
+ ixgbe_disable_pcie_master(hw);
+mac_reset_top:
/*
* Issue global reset to the MAC. This needs to be a SW reset.
* If link reset is used, it might reset the MAC when mng is using it
@@ -836,6 +814,19 @@ no_phy_reset:
hw_dbg(hw, "Reset polling failed to complete.\n");
}
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete. We use 1usec since that is
+ * what is needed for ixgbe_disable_pcie_master(). The second reset
+ * then clears out any effects of those events.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ udelay(1);
+ goto mac_reset_top;
+ }
+
msleep(50);
gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
@@ -855,15 +846,15 @@ no_phy_reset:
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
}
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
/*
* Store MAC address from RAR0, clear receive address registers, and
* clear the multicast table
*/
hw->mac.ops.init_rx_addrs(hw);
- /* Store the permanent mac address */
- hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
reset_hw_out:
if (phy_status)
status = phy_status;
@@ -880,6 +871,13 @@ reset_hw_out:
static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
+ u32 rar_entries = hw->mac.num_rar_entries;
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
rar_high &= ~IXGBE_RAH_VIND_MASK;
@@ -899,14 +897,17 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
- if (rar < rar_entries) {
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
- if (rar_high & IXGBE_RAH_VIND_MASK) {
- rar_high &= ~IXGBE_RAH_VIND_MASK;
- IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
- }
- } else {
+
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+ if (rar_high & IXGBE_RAH_VIND_MASK) {
+ rar_high &= ~IXGBE_RAH_VIND_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
}
return 0;
@@ -1025,13 +1026,12 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
}
/**
- * ixgbe_read_i2c_eeprom_82598 - Read 8 bit EEPROM word of an SFP+ module
- * over I2C interface through an intermediate phy.
+ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
* @byte_offset: EEPROM byte offset to read
* @eeprom_data: value read
*
- * Performs byte read operation to SFP module's EEPROM over I2C interface.
+ * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
**/
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data)
@@ -1105,10 +1105,12 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
/* Copper PHY must be checked before AUTOC LMS to determine correct
* physical layer because 10GBase-T PHYs use LMS = KX4/KX */
- if (hw->phy.type == ixgbe_phy_tn ||
- hw->phy.type == ixgbe_phy_cu_unknown) {
- hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
- &ext_ability);
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
+ case ixgbe_phy_cu_unknown:
+ hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
+ MDIO_MMD_PMAPMD, &ext_ability);
if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1116,6 +1118,8 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
goto out;
+ default:
+ break;
}
switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1210,18 +1214,20 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.set_vmdq = &ixgbe_set_vmdq_82598,
.clear_vmdq = &ixgbe_clear_vmdq_82598,
.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
- .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
.enable_mc = &ixgbe_enable_mc_generic,
.disable_mc = &ixgbe_disable_mc_generic,
.clear_vfta = &ixgbe_clear_vfta_82598,
.set_vfta = &ixgbe_set_vfta_82598,
.fc_enable = &ixgbe_fc_enable_82598,
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
+ .release_swfw_sync = &ixgbe_release_swfw_sync,
};
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eerd_generic,
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 0bd8fbb5bfd0..00aeba385a2f 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -56,9 +56,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
-static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
@@ -68,9 +65,9 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
- if (hw->phy.multispeed_fiber) {
- /* Set up dual speed SFP+ support */
- mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+
+ /* enable the laser control functions for SFP+ fiber */
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
mac->ops.disable_tx_laser =
&ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
@@ -80,6 +77,12 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
mac->ops.disable_tx_laser = NULL;
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
+ }
+
+ if (hw->phy.multispeed_fiber) {
+ /* Set up dual speed SFP+ support */
+ mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+ } else {
if ((mac->ops.get_media_type(hw) ==
ixgbe_media_type_backplane) &&
(hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -93,6 +96,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
+ u32 reg_anlp1 = 0;
+ u32 i = 0;
u16 list_offset, data_offset, data_value;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
@@ -107,7 +112,8 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
goto setup_sfp_out;
/* PHY config will finish before releasing the semaphore */
- ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
if (ret_val != 0) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto setup_sfp_out;
@@ -119,14 +125,34 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
hw->eeprom.ops.read(hw, ++data_offset, &data_value);
}
- /* Now restart DSP by setting Restart_AN */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
- (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
/* Release the semaphore */
ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* Delay obtaining semaphore again to allow FW access */
msleep(hw->eeprom.semaphore_delay);
+
+ /* Now restart DSP by setting Restart_AN and clearing LMS */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
+ IXGBE_AUTOC_AN_RESTART));
+
+ /* Wait for AN to leave state 0 */
+ for (i = 0; i < 10; i++) {
+ msleep(4);
+ reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
+ break;
+ }
+ if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+ hw_dbg(hw, "sfp module setup not complete\n");
+ ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+ goto setup_sfp_out;
+ }
+
+ /* Restart DSP by setting Restart_AN and return to SFI mode */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
+ IXGBE_AUTOC_AN_RESTART));
}
setup_sfp_out:
@@ -174,7 +200,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
mac->ops.get_link_capabilities =
- &ixgbe_get_copper_link_capabilities_82599;
+ &ixgbe_get_copper_link_capabilities_generic;
}
/* Set necessary function pointers based on phy type */
@@ -184,6 +210,10 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
phy->ops.get_firmware_version =
&ixgbe_get_phy_firmware_version_tnx;
break;
+ case ixgbe_phy_aq:
+ phy->ops.get_firmware_version =
+ &ixgbe_get_phy_firmware_version_generic;
+ break;
default:
break;
}
@@ -290,37 +320,6 @@ out:
}
/**
- * ixgbe_get_copper_link_capabilities_82599 - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
- *
- * Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
-{
- s32 status = IXGBE_ERR_LINK_SETUP;
- u16 speed_ability;
-
- *speed = 0;
- *autoneg = true;
-
- status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
- &speed_ability);
-
- if (status == 0) {
- if (speed_ability & MDIO_SPEED_10G)
- *speed |= IXGBE_LINK_SPEED_10GB_FULL;
- if (speed_ability & MDIO_PMA_SPEED_1000)
- *speed |= IXGBE_LINK_SPEED_1GB_FULL;
- }
-
- return status;
-}
-
-/**
* ixgbe_get_media_type_82599 - Get media type
* @hw: pointer to hardware structure
*
@@ -331,10 +330,14 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
enum ixgbe_media_type media_type;
/* Detect if there is a copper PHY attached. */
- if (hw->phy.type == ixgbe_phy_cu_unknown ||
- hw->phy.type == ixgbe_phy_tn) {
+ switch (hw->phy.type) {
+ case ixgbe_phy_cu_unknown:
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
media_type = ixgbe_media_type_copper;
goto out;
+ default:
+ break;
}
switch (hw->device_id) {
@@ -342,17 +345,22 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_KX4_MEZZ:
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
case IXGBE_DEV_ID_82599_KR:
+ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
case IXGBE_DEV_ID_82599_XAUI_LOM:
/* Default device ID is mezzanine card KX/KX4 */
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_82599_SFP:
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_82599_CX4:
media_type = ixgbe_media_type_cx4;
break;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ media_type = ixgbe_media_type_copper;
+ break;
default:
media_type = ixgbe_media_type_unknown;
break;
@@ -410,14 +418,14 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
return status;
}
- /**
- * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
- * @hw: pointer to hardware structure
- *
- * The base drivers may require better control over SFP+ module
- * PHY states. This includes selectively shutting down the Tx
- * laser on the PHY, effectively halting physical link.
- **/
+/**
+ * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ * @hw: pointer to hardware structure
+ *
+ * The base drivers may require better control over SFP+ module
+ * PHY states. This includes selectively shutting down the Tx
+ * laser on the PHY, effectively halting physical link.
+ **/
static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
@@ -462,8 +470,6 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
**/
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
- hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
-
if (hw->mac.autotry_restart) {
ixgbe_disable_tx_laser_multispeed_fiber(hw);
ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -486,17 +492,21 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
s32 status = 0;
- ixgbe_link_speed phy_link_speed;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
u32 speedcnt = 0;
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ u32 i = 0;
bool link_up = false;
bool negotiation;
- int i;
/* Mask off requested but non-supported speeds */
- hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation);
- speed &= phy_link_speed;
+ status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
+ &negotiation);
+ if (status != 0)
+ return status;
+
+ speed &= link_speed;
/*
* Try each speed one by one, highest priority first. We do this in
@@ -507,9 +517,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
/* If we already have link at this speed, just jump out */
- hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
+ status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+ false);
+ if (status != 0)
+ return status;
- if ((phy_link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+ if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
goto out;
/* Set the module link speed */
@@ -521,9 +534,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
msleep(40);
status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_10GB_FULL,
- autoneg,
- autoneg_wait_to_complete);
+ IXGBE_LINK_SPEED_10GB_FULL,
+ autoneg,
+ autoneg_wait_to_complete);
if (status != 0)
return status;
@@ -535,14 +548,16 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
* Section 73.10.2, we may have to wait up to 500ms if KR is
* attempted. 82599 uses the same timing for 10g SFI.
*/
-
for (i = 0; i < 5; i++) {
/* Wait for the link partner to also set speed */
msleep(100);
/* If we have link, just jump out */
- hw->mac.ops.check_link(hw, &phy_link_speed,
- &link_up, false);
+ status = hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+ if (status != 0)
+ return status;
+
if (link_up)
goto out;
}
@@ -554,9 +569,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
/* If we already have link at this speed, just jump out */
- hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
+ status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+ false);
+ if (status != 0)
+ return status;
- if ((phy_link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+ if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
goto out;
/* Set the module link speed */
@@ -569,9 +587,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
msleep(40);
status = ixgbe_setup_mac_link_82599(hw,
- IXGBE_LINK_SPEED_1GB_FULL,
- autoneg,
- autoneg_wait_to_complete);
+ IXGBE_LINK_SPEED_1GB_FULL,
+ autoneg,
+ autoneg_wait_to_complete);
if (status != 0)
return status;
@@ -582,7 +600,11 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
msleep(100);
/* If we have link, just jump out */
- hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false);
+ status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
+ false);
+ if (status != 0)
+ return status;
+
if (link_up)
goto out;
}
@@ -625,13 +647,10 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
s32 status = 0;
- ixgbe_link_speed link_speed;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
s32 i, j;
bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- struct ixgbe_adapter *adapter = hw->back;
-
- hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
/* Set autoneg_advertised value based on input link speed */
hw->phy.autoneg_advertised = 0;
@@ -657,7 +676,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
autoneg_wait_to_complete);
- if (status)
+ if (status != 0)
goto out;
/*
@@ -670,8 +689,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
mdelay(100);
/* If we have link, just jump out */
- hw->mac.ops.check_link(hw, &link_speed,
- &link_up, false);
+ status = hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+ if (status != 0)
+ goto out;
+
if (link_up)
goto out;
}
@@ -689,7 +711,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
hw->phy.smart_speed_active = true;
status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
autoneg_wait_to_complete);
- if (status)
+ if (status != 0)
goto out;
/*
@@ -702,8 +724,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
mdelay(100);
/* If we have link, just jump out */
- hw->mac.ops.check_link(hw, &link_speed,
- &link_up, false);
+ status = hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+ if (status != 0)
+ goto out;
+
if (link_up)
goto out;
}
@@ -715,7 +740,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
out:
if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
- e_info(hw, "Smartspeed has downgraded the link speed from "
+ hw_dbg(hw, "Smartspeed has downgraded the link speed from "
"the maximum advertised\n");
return status;
}
@@ -747,6 +772,9 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* Check to see if speed passed in is supported. */
hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg);
+ if (status != 0)
+ goto out;
+
speed &= link_capabilities;
if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
@@ -760,7 +788,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
else
orig_autoc = autoc;
-
if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
@@ -877,7 +904,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
/* PHY ops must be identified and initialized prior to reset */
- /* Init PHY and function pointers, perform SFP setup */
+ /* Identify PHY and related function pointers */
status = hw->phy.ops.init(hw);
if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
@@ -889,6 +916,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->phy.sfp_setup_needed = false;
}
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto reset_hw_out;
+
/* Reset PHY */
if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
hw->phy.ops.reset(hw);
@@ -897,12 +927,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
* Prevent the PCI-E bus from from hanging by disabling PCI-E master
* access and verify no pending requests before reset
*/
- status = ixgbe_disable_pcie_master(hw);
- if (status != 0) {
- status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
- hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
- }
+ ixgbe_disable_pcie_master(hw);
+mac_reset_top:
/*
* Issue global reset to the MAC. This needs to be a SW reset.
* If link reset is used, it might reset the MAC when mng is using it
@@ -923,6 +950,19 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw_dbg(hw, "Reset polling failed to complete.\n");
}
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete. We use 1usec since that is
+ * what is needed for ixgbe_disable_pcie_master(). The second reset
+ * then clears out any effects of those events.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ udelay(1);
+ goto mac_reset_top;
+ }
+
msleep(50);
/*
@@ -950,6 +990,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
}
}
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
/*
* Store MAC address from RAR0, clear receive address registers, and
* clear the multicast table. Also reset num_rar_entries to 128,
@@ -958,9 +1001,6 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
- /* Store the permanent mac address */
- hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
-
/* Store the permanent SAN mac address */
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
@@ -1002,7 +1042,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
udelay(10);
}
if (i >= IXGBE_FDIRCMD_CMD_POLL) {
- hw_dbg(hw ,"Flow Director previous command isn't complete, "
+ hw_dbg(hw, "Flow Director previous command isn't complete, "
"aborting table re-initialization.\n");
return IXGBE_ERR_FDIR_REINIT_FAILED;
}
@@ -1078,7 +1118,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
/*
* The defaults in the HW for RX PB 1-7 are not zero and so should be
- * intialized to zero for non DCB mode otherwise actual total RX PB
+ * initialized to zero for non DCB mode otherwise actual total RX PB
* would be bigger than programmed and filter space would run into
* the PB 0 region.
*/
@@ -1112,13 +1152,10 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
/* Move the flexible bytes to use the ethertype - shift 6 words */
fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
- fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
/* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
- htonl(IXGBE_ATR_BUCKET_HASH_KEY));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
- htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
/*
* Poll init-done after we write the register. Estimated times:
@@ -1169,7 +1206,7 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
/*
* The defaults in the HW for RX PB 1-7 are not zero and so should be
- * intialized to zero for non DCB mode otherwise actual total RX PB
+ * initialized to zero for non DCB mode otherwise actual total RX PB
* would be bigger than programmed and filter space would run into
* the PB 0 region.
*/
@@ -1208,10 +1245,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
/* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
- htonl(IXGBE_ATR_BUCKET_HASH_KEY));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
- htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
/*
* Poll init-done after we write the register. Estimated times:
@@ -1250,8 +1285,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
* @stream: input bitstream to compute the hash on
* @key: 32-bit hash key
**/
-static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
- u32 key)
+static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+ u32 key)
{
/*
* The algorithm is as follows:
@@ -1271,410 +1306,250 @@ static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
* To simplify for programming, the algorithm is implemented
* in software this way:
*
- * Key[31:0], Stream[335:0]
+ * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+ *
+ * for (i = 0; i < 352; i+=32)
+ * hi_hash_dword[31:0] ^= Stream[(i+31):i];
+ *
+ * lo_hash_dword[15:0] ^= Stream[15:0];
+ * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
+ * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
*
- * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
- * int_key[350:0] = tmp_key[351:1]
- * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
+ * hi_hash_dword[31:0] ^= Stream[351:320];
*
- * hash[15:0] = 0;
- * for (i = 0; i < 351; i++) {
- * if (int_key[i])
- * hash ^= int_stream[(i + 15):i];
+ * if(key[0])
+ * hash[15:0] ^= Stream[15:0];
+ *
+ * for (i = 0; i < 16; i++) {
+ * if (key[i])
+ * hash[15:0] ^= lo_hash_dword[(i+15):i];
+ * if (key[i + 16])
+ * hash[15:0] ^= hi_hash_dword[(i+15):i];
* }
+ *
*/
+ __be32 common_hash_dword = 0;
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 hash_result = 0;
+ u8 i;
- union {
- u64 fill[6];
- u32 key[11];
- u8 key_stream[44];
- } tmp_key;
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
- u8 *stream = (u8 *)atr_input;
- u8 int_key[44]; /* upper-most bit unused */
- u8 hash_str[46]; /* upper-most 2 bits unused */
- u16 hash_result = 0;
- int i, j, k, h;
+ /* generate common hash dword */
+ for (i = 10; i; i -= 2)
+ common_hash_dword ^= atr_input->dword_stream[i] ^
+ atr_input->dword_stream[i - 1];
- /*
- * Initialize the fill member to prevent warnings
- * on some compilers
- */
- tmp_key.fill[0] = 0;
+ hi_hash_dword = ntohl(common_hash_dword);
- /* First load the temporary key stream */
- for (i = 0; i < 6; i++) {
- u64 fillkey = ((u64)key << 32) | key;
- tmp_key.fill[i] = fillkey;
- }
-
- /*
- * Set the interim key for the hashing. Bit 352 is unused, so we must
- * shift and compensate when building the key.
- */
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
- int_key[0] = tmp_key.key_stream[0] >> 1;
- for (i = 1, j = 0; i < 44; i++) {
- unsigned int this_key = tmp_key.key_stream[j] << 7;
- j++;
- int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
- }
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
- /*
- * Set the interim bit string for the hashing. Bits 368 and 367 are
- * unused, so shift and compensate when building the string.
- */
- hash_str[0] = (stream[40] & 0x7f) >> 1;
- for (i = 1, j = 40; i < 46; i++) {
- unsigned int this_str = stream[j] << 7;
- j++;
- if (j > 41)
- j = 0;
- hash_str[i] = (u8)(this_str | (stream[j] >> 1));
- }
+ /* Process bits 0 and 16 */
+ if (key & 0x0001) hash_result ^= lo_hash_dword;
+ if (key & 0x00010000) hash_result ^= hi_hash_dword;
/*
- * Now compute the hash. i is the index into hash_str, j is into our
- * key stream, k is counting the number of bits, and h interates within
- * each byte.
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the vlan until after bit 0 was processed
*/
- for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
- for (h = 0; h < 8 && k < 351; h++, k++) {
- if (int_key[j] & (1 << h)) {
- /*
- * Key bit is set, XOR in the current 16-bit
- * string. Example of processing:
- * h = 0,
- * tmp = (hash_str[i - 2] & 0 << 16) |
- * (hash_str[i - 1] & 0xff << 8) |
- * (hash_str[i] & 0xff >> 0)
- * So tmp = hash_str[15 + k:k], since the
- * i + 2 clause rolls off the 16-bit value
- * h = 7,
- * tmp = (hash_str[i - 2] & 0x7f << 9) |
- * (hash_str[i - 1] & 0xff << 1) |
- * (hash_str[i] & 0x80 >> 7)
- */
- int tmp = (hash_str[i] >> h);
- tmp |= (hash_str[i - 1] << (8 - h));
- tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
- << (16 - h);
- hash_result ^= (u16)tmp;
- }
- }
- }
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
- return hash_result;
-}
-/**
- * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
- * @input: input stream to modify
- * @vlan: the VLAN id to load
- **/
-s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
-{
- input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
- input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
- * @input: input stream to modify
- * @src_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
-{
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
- (src_addr >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
- (src_addr >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
- * @input: input stream to modify
- * @dst_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
-{
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
- (dst_addr >> 16) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
- (dst_addr >> 8) & 0xff;
- input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_src_port_82599 - Sets the source port
- * @input: input stream to modify
- * @src_port: the source port to load
- **/
-s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
-{
- input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
- input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_dst_port_82599 - Sets the destination port
- * @input: input stream to modify
- * @dst_port: the destination port to load
- **/
-s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
-{
- input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
- input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
- * @input: input stream to modify
- * @flex_bytes: the flexible bytes to load
- **/
-s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
-{
- input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
- input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
- * @input: input stream to modify
- * @l4type: the layer 4 type value to load
- **/
-s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
-{
- input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
- * @input: input stream to search
- * @vlan: the VLAN id to load
- **/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
-{
- *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
- *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
- * @input: input stream to search
- * @src_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
- u32 *src_addr)
-{
- *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
- *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
+ /* process the remaining 30 bits in the key 2 bits at a time */
+	for (i = 15; i; i--) {
+ if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
+ if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
+ }
- return 0;
+ return hash_result & IXGBE_ATR_HASH_MASK;
}
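
For reference, here is a minimal standalone sketch of the simplified hash the comment above describes: fold dwords 1-10 of the input stream into a common dword, derive a word-swapped low dword, then XOR in one shifted window per set key bit. The names atr_hash_sketch and ATR_HASH_MASK_SKETCH are illustrative only and are not part of the driver; the 15-bit mask width is an assumption for the sketch.

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() */

#define ATR_HASH_MASK_SKETCH	0x7fff	/* assumed 15-bit hash mask for this sketch */

/* input_dwords[] holds the 11-dword ATR stream in network (big-endian) order */
static uint32_t atr_hash_sketch(const uint32_t input_dwords[11], uint32_t key)
{
	uint32_t common = 0, hi, lo, flow_vm_vlan, hash = 0;
	int i;

	/* dword 0 (flow type/VM pool/VLAN) is folded in separately */
	flow_vm_vlan = ntohl(input_dwords[0]);

	/* XOR dwords 1..10 together to build the common hash dword */
	for (i = 10; i; i -= 2)
		common ^= input_dwords[i] ^ input_dwords[i - 1];

	hi = ntohl(common);
	lo = (hi >> 16) | (hi << 16);		/* word-swapped copy */

	hi ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* key bits 0 and 16 select the unshifted windows */
	if (key & 0x00000001)
		hash ^= lo;
	if (key & 0x00010000)
		hash ^= hi;

	/* bit 0 of the stream must not see the VLAN bits, so fold them in now */
	lo ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* remaining 30 key bits, two per shift position */
	for (i = 1; i < 16; i++) {
		if (key & (0x00000001u << i))
			hash ^= lo >> i;
		if (key & (0x00010000u << i))
			hash ^= hi >> i;
	}

	return hash & ATR_HASH_MASK_SKETCH;
}
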
-/**
- * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
- * @input: input stream to search
- * @dst_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
- u32 *dst_addr)
-{
- *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
- *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
-
- return 0;
-}
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+ (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ common_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ sig_hash ^= lo_hash_dword << (16 - n); \
+ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ common_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0);
/**
- * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
- * @input: input stream to search
- * @src_addr_1: the first 4 bytes of the IP address to load
- * @src_addr_2: the second 4 bytes of the IP address to load
- * @src_addr_3: the third 4 bytes of the IP address to load
- * @src_addr_4: the fourth 4 bytes of the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
- u32 *src_addr_1, u32 *src_addr_2,
- u32 *src_addr_3, u32 *src_addr_4)
-{
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
- *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
-
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
- *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
-
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
- *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
-
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
- *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
-
- return 0;
-}
-
-/**
- * ixgbe_atr_get_src_port_82599 - Gets the source port
- * @input: input stream to modify
- * @src_port: the source port to load
+ * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ * @stream: input bitstream to compute the hash on
*
- * Even though the input is given in big-endian, the FDIRPORT registers
- * expect the ports to be programmed in little-endian. Hence the need to swap
- * endianness when retrieving the data. This can be confusing since the
- * internal hash engine expects it to be big-endian.
+ * This function is almost identical to the function above but contains
+ * several optimizations such as unwinding all of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing two keys at once since the hashed dword stream
+ * will be the same for both keys.
**/
-static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input,
- u16 *src_port)
+static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common)
{
- *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
- *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
- return 0;
-}
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = ntohl(input.dword);
-/**
- * ixgbe_atr_get_dst_port_82599 - Gets the destination port
- * @input: input stream to modify
- * @dst_port: the destination port to load
- *
- * Even though the input is given in big-endian, the FDIRPORT registers
- * expect the ports to be programmed in little-endian. Hence the need to swap
- * endianness when retrieving the data. This can be confusing since the
- * internal hash engine expects it to be big-endian.
- **/
-static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
- u16 *dst_port)
-{
- *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
- *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
+ /* generate common hash dword */
+ hi_hash_dword = ntohl(common.dword);
- return 0;
-}
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
-/**
- * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
- * @input: input stream to modify
- * @flex_bytes: the flexible bytes to load
- **/
-static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
- u16 *flex_byte)
-{
- *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
- *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
- return 0;
-}
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
-/**
- * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
- * @input: input stream to modify
- * @l4type: the layer 4 type value to load
- **/
-static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
- u8 *l4type)
-{
- *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the vlan until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process remaining 30 bit of the key */
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+ IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+ /* combine common_hash result with signature and bucket hashes */
+ bucket_hash ^= common_hash;
+ bucket_hash &= IXGBE_ATR_HASH_MASK;
- return 0;
+ sig_hash ^= common_hash << 16;
+ sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+ /* return completed signature hash */
+ return sig_hash ^ bucket_hash;
}
/**
* ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
* @hw: pointer to hardware structure
- * @stream: input bitstream
+ * @input: unique input dword
+ * @common: compressed common input dword
* @queue: queue index to direct traffic to
**/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
u8 queue)
{
u64 fdirhashcmd;
- u64 fdircmd;
- u32 fdirhash;
- u16 bucket_hash, sig_hash;
- u8 l4type;
-
- bucket_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY);
-
- /* bucket_hash is only 15 bits */
- bucket_hash &= IXGBE_ATR_HASH_MASK;
-
- sig_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_SIGNATURE_HASH_KEY);
-
- /* Get the l4type in order to program FDIRCMD properly */
- /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
- ixgbe_atr_get_l4type_82599(input, &l4type);
+ u32 fdircmd;
/*
- * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
- * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ * Get the flow_type in order to program FDIRCMD properly
+ * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
*/
- fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
- fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
-
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
- break;
- case IXGBE_ATR_L4TYPE_UDP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
- break;
- case IXGBE_ATR_L4TYPE_SCTP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+ switch (input.formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case IXGBE_ATR_FLOW_TYPE_TCPV6:
+ case IXGBE_ATR_FLOW_TYPE_UDPV6:
+ case IXGBE_ATR_FLOW_TYPE_SCTPV6:
break;
default:
- hw_dbg(hw, "Error on l4type input\n");
+ hw_dbg(hw, " Error on flow type input\n");
return IXGBE_ERR_CONFIG;
}
- if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
- fdircmd |= IXGBE_FDIRCMD_IPV6;
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
- fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
- fdirhashcmd = ((fdircmd << 32) | fdirhash);
+ /*
+ * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+ * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
+ */
+ fdirhashcmd = (u64)fdircmd << 32;
+ fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+ hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
return 0;
}
/**
+ * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
+ * @input_mask: mask to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit swapped
+ * in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc. In order to
+ * generate a correctly swapped value we need to bit swap the mask and that
+ * is what is accomplished by this function.
+ **/
+static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+{
+ u32 mask = ntohs(input_masks->dst_port_mask);
+ mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+ mask |= ntohs(input_masks->src_port_mask);
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
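
A standalone sanity check (not driver code) of the swap sequence above: the four masked shift steps reverse the bit order within each 16-bit half of the word, which is exactly the transformation the port masks need before they are written to FDIRTCPM/FDIRUDPM. The helper name below is made up for the example.

#include <assert.h>
#include <stdint.h>

/* Reverse the bit order inside each 16-bit half of a 32-bit word */
static uint32_t reverse_bits_per_halfword(uint32_t mask)
{
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}

int main(void)
{
	/* bit 0 lands in bit 15 of the low half, bit 16 in bit 31 of the high half */
	assert(reverse_bits_per_halfword(0x00000001) == 0x00008000);
	assert(reverse_bits_per_halfword(0x00010000) == 0x80000000);
	assert(reverse_bits_per_halfword(0x0000FFFF) == 0x0000FFFF);
	return 0;
}
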
+
+/*
+ * These two macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian. As a result on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+ (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+ (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+ IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+ (((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
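
As a hedged illustration of the macros above: IXGBE_STORE_AS_BE32 is a plain 32-bit byte swap, so paired with ntohl() it is a no-op on little-endian hosts and a byte swap on big-endian hosts, which cancels the swap the register-write path would otherwise apply, matching the comment above. The sketch below only demonstrates the byte swap itself; the function name is illustrative.

#include <assert.h>
#include <stdint.h>

/* Same operation as IXGBE_STORE_AS_BE32: swap the four bytes of a 32-bit value */
static uint32_t store_as_be32_sketch(uint32_t v)
{
	return (v >> 24) | ((v & 0x00FF0000) >> 8) |
	       ((v & 0x0000FF00) << 8) | (v << 24);
}

int main(void)
{
	assert(store_as_be32_sketch(0x11223344) == 0x44332211);
	/* swapping twice restores the value, which is why the macro is
	 * paired with ntohl() when writing big-endian register data */
	assert(store_as_be32_sketch(store_as_be32_sketch(0xA1B2C3D4)) == 0xA1B2C3D4);
	return 0;
}
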
+/**
* ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
* @hw: pointer to hardware structure
* @input: input bitstream
@@ -1686,135 +1561,139 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* hardware writes must be protected from one another.
**/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
+ union ixgbe_atr_input *input,
struct ixgbe_atr_input_masks *input_masks,
u16 soft_id, u8 queue)
{
- u32 fdircmd = 0;
u32 fdirhash;
- u32 src_ipv4 = 0, dst_ipv4 = 0;
- u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
- u16 src_port, dst_port, vlan_id, flex_bytes;
- u16 bucket_hash;
- u8 l4type;
- u8 fdirm = 0;
-
- /* Get our input values */
- ixgbe_atr_get_l4type_82599(input, &l4type);
+ u32 fdircmd;
+ u32 fdirport, fdirtcpm;
+ u32 fdirvlan;
+ /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
+ u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
+ IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
/*
- * Check l4type formatting, and bail out before we touch the hardware
+ * Check flow_type formatting, and bail out before we touch the hardware
* if there's a configuration issue
*/
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
- break;
- case IXGBE_ATR_L4TYPE_UDP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
- break;
- case IXGBE_ATR_L4TYPE_SCTP:
- fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+ switch (input->formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_IPV4:
+ /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+ fdirm |= IXGBE_FDIRM_L4P;
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ if (input_masks->dst_port_mask || input_masks->src_port_mask) {
+ hw_dbg(hw, " Error on src/dst port mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
break;
default:
- hw_dbg(hw, "Error on l4type input\n");
+ hw_dbg(hw, " Error on flow type input\n");
return IXGBE_ERR_CONFIG;
}
- bucket_hash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY);
-
- /* bucket_hash is only 15 bits */
- bucket_hash &= IXGBE_ATR_HASH_MASK;
-
- ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
- ixgbe_atr_get_src_port_82599(input, &src_port);
- ixgbe_atr_get_dst_port_82599(input, &dst_port);
- ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
-
- fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
- /* Now figure out if we're IPv4 or IPv6 */
- if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
- /* IPv6 */
- ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
- &src_ipv6_3, &src_ipv6_4);
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
- /* The last 4 bytes is the same register as IPv4 */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
-
- fdircmd |= IXGBE_FDIRCMD_IPV6;
- fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
- } else {
- /* IPv4 */
- ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
- }
-
- ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
- (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
- IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
- (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
-
/*
- * Program the relevant mask registers. L4type cannot be
- * masked out in this implementation.
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
*
* This also assumes IPv4 only. IPv6 masking isn't supported at this
* point in time.
*/
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
-
- switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
- case IXGBE_ATR_L4TYPE_TCP:
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
- (input_masks->dst_port_mask << 16)));
+
+ /* Program FDIRM */
+ switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
+ case 0xEFFF:
+ /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
+ fdirm &= ~IXGBE_FDIRM_VLANID;
+ case 0xE000:
+ /* Unmask VLAN prio - bit 1 */
+ fdirm &= ~IXGBE_FDIRM_VLANP;
break;
- case IXGBE_ATR_L4TYPE_UDP:
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
- (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
- (input_masks->src_port_mask << 16)));
+ case 0x0FFF:
+ /* Unmask VLAN ID - bit 0 */
+ fdirm &= ~IXGBE_FDIRM_VLANID;
break;
- default:
- /* this already would have failed above */
+ case 0x0000:
+ /* do nothing, vlans already masked */
break;
+ default:
+ hw_dbg(hw, " Error on VLAN mask\n");
+ return IXGBE_ERR_CONFIG;
}
- /* Program the last mask register, FDIRM */
- if (input_masks->vlan_id_mask)
- /* Mask both VLAN and VLANP - bits 0 and 1 */
- fdirm |= 0x3;
-
- if (input_masks->data_mask)
- /* Flex bytes need masking, so mask the whole thing - bit 4 */
- fdirm |= 0x10;
+ if (input_masks->flex_mask & 0xFFFF) {
+ if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
+ hw_dbg(hw, " Error on flexible byte mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ /* Unmask Flex Bytes - bit 4 */
+ fdirm &= ~IXGBE_FDIRM_FLEX;
+ }
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
- fdirm |= 0x24;
-
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
- fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
- fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
- fdircmd |= IXGBE_FDIRCMD_LAST;
- fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
- fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+	/* store source and destination IP masks (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+ ~input_masks->src_ip_mask[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+ ~input_masks->dst_ip_mask[0]);
+
+ /* Apply masks to input data */
+ input->formatted.vlan_id &= input_masks->vlan_id_mask;
+ input->formatted.flex_bytes &= input_masks->flex_mask;
+ input->formatted.src_port &= input_masks->src_port_mask;
+ input->formatted.dst_port &= input_masks->dst_port_mask;
+ input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
+ input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+
+ /* record vlan (little-endian) and flex_bytes(big-endian) */
+ fdirvlan =
+ IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= ntohs(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+ /* record source and destination port (little-endian)*/
+ fdirport = ntohs(input->formatted.dst_port);
+ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+ fdirport |= ntohs(input->formatted.src_port);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+ /* record the first 32 bits of the destination address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+ /* configure FDIRCMD register */
+ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+ fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+ /* we only want the bucket hash so drop the upper 16 bits */
+ fdirhash = ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY);
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
return 0;
}
+
/**
* ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
* @hw: pointer to hardware structure
@@ -1893,13 +1772,34 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
* Determines the physical layer module found on the current adapter.
+ * If PHY already detected, maintains current PHY type in hw struct,
+ * otherwise executes the PHY detection routine.
**/
-static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+
+ /* Detect PHY if not unknown - returns success if already detected. */
status = ixgbe_identify_phy_generic(hw);
- if (status != 0)
- status = ixgbe_identify_sfp_module_generic(hw);
+ if (status != 0) {
+ /* 82599 10GBASE-T requires an external PHY */
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
+ goto out;
+ else
+ status = ixgbe_identify_sfp_module_generic(hw);
+ }
+
+ /* Set PHY type none if no PHY detected */
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.type = ixgbe_phy_none;
+ status = 0;
+ }
+
+ /* Return error if SFP module has been detected but is not supported */
+ if (hw->phy.type == ixgbe_phy_sfp_unsupported)
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+out:
return status;
}
@@ -1923,10 +1823,12 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
hw->phy.ops.identify(hw);
- if (hw->phy.type == ixgbe_phy_tn ||
- hw->phy.type == ixgbe_phy_cu_unknown) {
+ switch (hw->phy.type) {
+ case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
+ case ixgbe_phy_cu_unknown:
hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
- &ext_ability);
+ &ext_ability);
if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1934,6 +1836,8 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
goto out;
+ default:
+ break;
}
switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -2045,6 +1949,7 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
break;
else
+ /* Use interrupt-safe sleep just in case */
udelay(10);
}
@@ -2125,51 +2030,6 @@ fw_version_out:
return status;
}
-/**
- * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
- * the EEPROM
- * @hw: pointer to hardware structure
- * @wwnn_prefix: the alternative WWNN prefix
- * @wwpn_prefix: the alternative WWPN prefix
- *
- * This function will read the EEPROM from the alternative SAN MAC address
- * block to check the support for the alternative WWNN/WWPN prefix support.
- **/
-static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
-{
- u16 offset, caps;
- u16 alt_san_mac_blk_offset;
-
- /* clear output first */
- *wwnn_prefix = 0xFFFF;
- *wwpn_prefix = 0xFFFF;
-
- /* check if alternative SAN MAC is supported */
- hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
- &alt_san_mac_blk_offset);
-
- if ((alt_san_mac_blk_offset == 0) ||
- (alt_san_mac_blk_offset == 0xFFFF))
- goto wwn_prefix_out;
-
- /* check capability in alternative san mac address block */
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
- hw->eeprom.ops.read(hw, offset, &caps);
- if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
- goto wwn_prefix_out;
-
- /* get the corresponding prefix for WWNN/WWPN */
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwnn_prefix);
-
- offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwpn_prefix);
-
-wwn_prefix_out:
- return 0;
-}
-
static struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
@@ -2181,7 +2041,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
.get_device_caps = &ixgbe_get_device_caps_82599,
- .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
+ .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
.stop_adapter = &ixgbe_stop_adapter_generic,
.get_bus_info = &ixgbe_get_bus_info_generic,
.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
@@ -2199,7 +2059,6 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.set_vmdq = &ixgbe_set_vmdq_generic,
.clear_vmdq = &ixgbe_clear_vmdq_generic,
.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
- .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
.enable_mc = &ixgbe_enable_mc_generic,
.disable_mc = &ixgbe_disable_mc_generic,
@@ -2208,30 +2067,36 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.fc_enable = &ixgbe_fc_enable_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = &ixgbe_setup_sfp_modules_82599,
+ .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
+ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
+ .release_swfw_sync = &ixgbe_release_swfw_sync,
+
};
static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
- .init_params = &ixgbe_init_eeprom_params_generic,
- .read = &ixgbe_read_eerd_generic,
- .write = &ixgbe_write_eeprom_generic,
- .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
- .update_checksum = &ixgbe_update_eeprom_checksum_generic,
+ .init_params = &ixgbe_init_eeprom_params_generic,
+ .read = &ixgbe_read_eerd_generic,
+ .write = &ixgbe_write_eeprom_generic,
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
+ .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
+ .update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
static struct ixgbe_phy_operations phy_ops_82599 = {
- .identify = &ixgbe_identify_phy_82599,
- .identify_sfp = &ixgbe_identify_sfp_module_generic,
- .init = &ixgbe_init_phy_ops_82599,
- .reset = &ixgbe_reset_phy_generic,
- .read_reg = &ixgbe_read_phy_reg_generic,
- .write_reg = &ixgbe_write_phy_reg_generic,
- .setup_link = &ixgbe_setup_phy_link_generic,
- .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
- .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
- .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
- .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
- .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
- .check_overtemp = &ixgbe_tn_check_overtemp,
+ .identify = &ixgbe_identify_phy_82599,
+ .identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .init = &ixgbe_init_phy_ops_82599,
+ .reset = &ixgbe_reset_phy_generic,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
+ .setup_link = &ixgbe_setup_phy_link_generic,
+ .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
+ .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
+ .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
+ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
};
struct ixgbe_info ixgbe_82599_info = {
@@ -2240,5 +2105,5 @@ struct ixgbe_info ixgbe_82599_info = {
.mac_ops = &mac_ops_82599,
.eeprom_ops = &eeprom_ops_82599,
.phy_ops = &phy_ops_82599,
- .mbx_ops = &mbx_ops_82599,
+ .mbx_ops = &mbx_ops_generic,
};
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index e3eca1316389..bcd952916eb2 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -45,14 +45,15 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
-static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
-static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
-static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
/**
* ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -141,17 +142,29 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
IXGBE_READ_REG(hw, IXGBE_MRFC);
IXGBE_READ_REG(hw, IXGBE_RLEC);
IXGBE_READ_REG(hw, IXGBE_LXONTXC);
- IXGBE_READ_REG(hw, IXGBE_LXONRXC);
IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
- IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ }
for (i = 0; i < 8; i++) {
IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
- IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+ IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ }
}
-
+ if (hw->mac.type >= ixgbe_mac_82599EB)
+ for (i = 0; i < 8; i++)
+ IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
IXGBE_READ_REG(hw, IXGBE_PRC64);
IXGBE_READ_REG(hw, IXGBE_PRC127);
IXGBE_READ_REG(hw, IXGBE_PRC255);
@@ -189,39 +202,136 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
IXGBE_READ_REG(hw, IXGBE_BPTC);
for (i = 0; i < 16; i++) {
IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- IXGBE_READ_REG(hw, IXGBE_QBRC(i));
IXGBE_READ_REG(hw, IXGBE_QPTC(i));
- IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ if (hw->mac.type >= ixgbe_mac_82599EB) {
+ IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
+ IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ } else {
+ IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+ IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+ }
+ }
+
+ if (hw->mac.type == ixgbe_mac_X540) {
+ if (hw->phy.id == 0)
+ hw->phy.ops.identify(hw);
+ hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
+ hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
+ hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
+ hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
}
return 0;
}
/**
- * ixgbe_read_pba_num_generic - Reads part number from EEPROM
+ * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
* @hw: pointer to hardware structure
- * @pba_num: stores the part number from the EEPROM
+ * @pba_num: stores the part number string from the EEPROM
+ * @pba_num_size: part number string buffer length
*
- * Reads the part number from the EEPROM.
+ * Reads the part number string from the EEPROM.
**/
-s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
{
s32 ret_val;
u16 data;
+ u16 pba_ptr;
+ u16 offset;
+ u16 length;
+
+ if (pba_num == NULL) {
+ hw_dbg(hw, "PBA string buffer was null\n");
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
if (ret_val) {
hw_dbg(hw, "NVM Read Error\n");
return ret_val;
}
- *pba_num = (u32)(data << 16);
- ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
+ if (ret_val) {
+ hw_dbg(hw, "NVM Read Error\n");
+ return ret_val;
+ }
+
+ /*
+ * if data is not the ptr guard, the PBA must be in legacy format, which
+ * means pba_ptr is actually our second data word for the PBA number
+ * and we can decode it into an ascii string
+ */
+ if (data != IXGBE_PBANUM_PTR_GUARD) {
+ hw_dbg(hw, "NVM PBA number is not stored as string\n");
+
+ /* we will need 11 characters to store the PBA */
+ if (pba_num_size < 11) {
+ hw_dbg(hw, "PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (data >> 12) & 0xF;
+ pba_num[1] = (data >> 8) & 0xF;
+ pba_num[2] = (data >> 4) & 0xF;
+ pba_num[3] = data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
+
+ /* put a null character on the end of our string */
+ pba_num[10] = '\0';
+
+ /* switch all the data but the '-' to hex char */
+ for (offset = 0; offset < 10; offset++) {
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
+ }
+
+ return 0;
+ }
+
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
if (ret_val) {
hw_dbg(hw, "NVM Read Error\n");
return ret_val;
}
- *pba_num |= data;
+
+ if (length == 0xFFFF || length == 0) {
+ hw_dbg(hw, "NVM PBA number section invalid length\n");
+ return IXGBE_ERR_PBA_SECTION;
+ }
+
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ hw_dbg(hw, "PBA string buffer too small\n");
+ return IXGBE_ERR_NO_SPACE;
+ }
+
+ /* trim pba length from start of string */
+ pba_ptr++;
+ length--;
+
+ for (offset = 0; offset < length; offset++) {
+ ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
+ if (ret_val) {
+ hw_dbg(hw, "NVM Read Error\n");
+ return ret_val;
+ }
+ pba_num[offset * 2] = (u8)(data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
+ }
+ pba_num[offset * 2] = '\0';
return 0;
}
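Editorial aside (not part of the patch): a worked illustration of the legacy PBA decode added above. With hypothetical EEPROM words data = 0xE686 and pba_ptr = 0x1062, the nibble-to-ASCII conversion yields the string "E68610-062". A minimal sketch of that conversion:

	/* Illustration only: the same nibble-to-ASCII mapping used in the
	 * legacy branch of ixgbe_read_pba_string_generic() above.
	 */
	static char ixgbe_nibble_to_ascii(u8 nibble)
	{
		return (nibble < 0xA) ? '0' + nibble : 'A' + (nibble - 0xA);
	}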
@@ -376,8 +486,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
 * Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests
*/
- if (ixgbe_disable_pcie_master(hw) != 0)
- hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+ ixgbe_disable_pcie_master(hw);
return 0;
}
@@ -525,7 +634,6 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
ixgbe_shift_out_eeprom_bits(hw, data, 16);
ixgbe_standby_eeprom(hw);
- msleep(hw->eeprom.semaphore_delay);
/* Done with writing - release the EEPROM */
ixgbe_release_eeprom(hw);
}
@@ -638,7 +746,7 @@ out:
* Polls the status bit (bit 1) of the EERD or EEWR to determine when the
* read or write is done respectively.
**/
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
u32 i;
u32 reg;
@@ -669,10 +777,10 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
s32 status = 0;
- u32 eec = 0;
+ u32 eec;
u32 i;
- if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
status = IXGBE_ERR_SWFW_SYNC;
if (status == 0) {
@@ -695,18 +803,18 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
hw_dbg(hw, "Could not acquire EEPROM grant\n");
- ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
status = IXGBE_ERR_EEPROM;
}
- }
- /* Setup EEPROM for Read/Write */
- if (status == 0) {
- /* Clear CS and SK */
- eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
- IXGBE_WRITE_FLUSH(hw);
- udelay(1);
+ /* Setup EEPROM for Read/Write */
+ if (status == 0) {
+ /* Clear CS and SK */
+ eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_FLUSH(hw);
+ udelay(1);
+ }
}
return status;
}
@@ -720,13 +828,10 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_EEPROM;
- u32 timeout;
+ u32 timeout = 2000;
u32 i;
u32 swsm;
- /* Set timeout value based on size of EEPROM */
- timeout = hw->eeprom.word_size + 1;
-
/* Get SMBI software semaphore between device drivers first */
for (i = 0; i < timeout; i++) {
/*
@@ -738,7 +843,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
status = 0;
break;
}
- msleep(1);
+ udelay(50);
}
/* Now get the semaphore between SW/FW through the SWESMBI bit */
@@ -766,11 +871,14 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* was not granted because we don't have access to the EEPROM
*/
if (i >= timeout) {
- hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
+ hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
"not granted.\n");
ixgbe_release_eeprom_semaphore(hw);
status = IXGBE_ERR_EEPROM;
}
+ } else {
+ hw_dbg(hw, "Software semaphore SMBI between device drivers "
+ "not granted.\n");
}
return status;
@@ -1002,14 +1110,17 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
eec &= ~IXGBE_EEC_REQ;
IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
- ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+	/* Delay before attempting to obtain the semaphore again to allow FW access */
+ msleep(hw->eeprom.semaphore_delay);
}
/**
- * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
+ * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
* @hw: pointer to hardware structure
**/
-static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -1072,7 +1183,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status == 0) {
- checksum = ixgbe_calc_eeprom_checksum(hw);
+ checksum = hw->eeprom.ops.calc_checksum(hw);
hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
@@ -1110,9 +1221,9 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status == 0) {
- checksum = ixgbe_calc_eeprom_checksum(hw);
+ checksum = hw->eeprom.ops.calc_checksum(hw);
status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
- checksum);
+ checksum);
} else {
hw_dbg(hw, "EEPROM read failed\n");
}
@@ -1160,37 +1271,37 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 rar_low, rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
+ /* Make sure we are using a valid rar index range */
+ if (index >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", index);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
/* setup VMDq pool selection before this RAR gets enabled */
hw->mac.ops.set_vmdq(hw, index, vmdq);
- /* Make sure we are using a valid rar index range */
- if (index < rar_entries) {
- /*
- * HW expects these in little endian so we reverse the byte
- * order from network order (big endian) to little endian
- */
- rar_low = ((u32)addr[0] |
- ((u32)addr[1] << 8) |
- ((u32)addr[2] << 16) |
- ((u32)addr[3] << 24));
- /*
- * Some parts put the VMDq setting in the extra RAH bits,
- * so save everything except the lower 16 bits that hold part
- * of the address and the address valid bit.
- */
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
- rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
- rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+ /*
+ * HW expects these in little endian so we reverse the byte
+ * order from network order (big endian) to little endian
+ */
+ rar_low = ((u32)addr[0] |
+ ((u32)addr[1] << 8) |
+ ((u32)addr[2] << 16) |
+ ((u32)addr[3] << 24));
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+ rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
- if (enable_addr != 0)
- rar_high |= IXGBE_RAH_AV;
+ if (enable_addr != 0)
+ rar_high |= IXGBE_RAH_AV;
- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
- } else {
- hw_dbg(hw, "RAR index %d is out of range.\n", index);
- return IXGBE_ERR_RAR_INDEX;
- }
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
return 0;
}
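Editorial aside (not part of the patch): a minimal sketch of the RAL/RAH packing performed by ixgbe_set_rar_generic() above. For a hypothetical address 00:1B:21:AA:BB:CC this yields ral = 0xAA211B00 and the low 16 bits of rah = 0xCCBB, before IXGBE_RAH_AV is OR'd in.

	/* Illustration only: pack a network-order MAC the same way the
	 * function above does before writing RAL/RAH.
	 */
	static void ixgbe_pack_rar_sketch(const u8 *addr, u32 *ral, u32 *rah_lo16)
	{
		/* bytes 0-3 go into RAL, least-significant byte first */
		*ral = (u32)addr[0] | ((u32)addr[1] << 8) |
		       ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
		/* bytes 4-5 occupy the low 16 bits of RAH */
		*rah_lo16 = (u32)addr[4] | ((u32)addr[5] << 8);
	}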
@@ -1208,58 +1319,26 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
u32 rar_entries = hw->mac.num_rar_entries;
/* Make sure we are using a valid rar index range */
- if (index < rar_entries) {
- /*
- * Some parts put the VMDq setting in the extra RAH bits,
- * so save everything except the lower 16 bits that hold part
- * of the address and the address valid bit.
- */
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
- rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-
- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
- } else {
+ if (index >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", index);
- return IXGBE_ERR_RAR_INDEX;
+ return IXGBE_ERR_INVALID_ARGUMENT;
}
- /* clear VMDq pool/queue selection for this RAR */
- hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
-
- return 0;
-}
-
-/**
- * ixgbe_enable_rar - Enable Rx address register
- * @hw: pointer to hardware structure
- * @index: index into the RAR table
- *
- * Enables the select receive address register.
- **/
-static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
-{
- u32 rar_high;
-
+ /*
+ * Some parts put the VMDq setting in the extra RAH bits,
+ * so save everything except the lower 16 bits that hold part
+ * of the address and the address valid bit.
+ */
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
- rar_high |= IXGBE_RAH_AV;
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-}
-/**
- * ixgbe_disable_rar - Disable Rx address register
- * @hw: pointer to hardware structure
- * @index: index into the RAR table
- *
- * Disables the select receive address register.
- **/
-static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
-{
- u32 rar_high;
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
- rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
- rar_high &= (~IXGBE_RAH_AV);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+ return 0;
}
/**
@@ -1292,6 +1371,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+ /* clear VMDq pool/queue selection for RAR 0 */
+ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
}
hw->addr_ctrl.overflow_promisc = 0;
@@ -1305,7 +1387,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
}
/* Clear the MTA */
- hw->addr_ctrl.mc_addr_in_rar_count = 0;
hw->addr_ctrl.mta_in_use = 0;
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
@@ -1320,105 +1401,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
}
/**
- * ixgbe_add_uc_addr - Adds a secondary unicast address.
- * @hw: pointer to hardware structure
- * @addr: new address
- *
- * Adds it to unused receive address register or goes into promiscuous mode.
- **/
-static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
-{
- u32 rar_entries = hw->mac.num_rar_entries;
- u32 rar;
-
- hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
-
- /*
- * Place this address in the RAR if there is room,
- * else put the controller into promiscuous mode
- */
- if (hw->addr_ctrl.rar_used_count < rar_entries) {
- rar = hw->addr_ctrl.rar_used_count -
- hw->addr_ctrl.mc_addr_in_rar_count;
- hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
- hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
- hw->addr_ctrl.rar_used_count++;
- } else {
- hw->addr_ctrl.overflow_promisc++;
- }
-
- hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
-}
-
-/**
- * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
- * @hw: pointer to hardware structure
- * @netdev: pointer to net device structure
- *
- * The given list replaces any existing list. Clears the secondary addrs from
- * receive address registers. Uses unused receive address registers for the
- * first secondary addresses, and falls back to promiscuous mode as needed.
- *
- * Drivers using secondary unicast addresses must set user_set_promisc when
- * manually putting the device into promiscuous mode.
- **/
-s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
- struct net_device *netdev)
-{
- u32 i;
- u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
- u32 uc_addr_in_use;
- u32 fctrl;
- struct netdev_hw_addr *ha;
-
- /*
- * Clear accounting of old secondary address list,
- * don't count RAR[0]
- */
- uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
- hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
- hw->addr_ctrl.overflow_promisc = 0;
-
- /* Zero out the other receive addresses */
- hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
- for (i = 0; i < uc_addr_in_use; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
- }
-
- /* Add the new addresses */
- netdev_for_each_uc_addr(ha, netdev) {
- hw_dbg(hw, " Adding the secondary addresses:\n");
- ixgbe_add_uc_addr(hw, ha->addr, 0);
- }
-
- if (hw->addr_ctrl.overflow_promisc) {
- /* enable promisc if not already in overflow or set by user */
- if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
- hw_dbg(hw, " Entering address overflow promisc mode\n");
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl |= IXGBE_FCTRL_UPE;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- hw->addr_ctrl.uc_set_promisc = true;
- }
- } else {
- /* only disable if set by overflow, not by user */
- if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
- !(hw->addr_ctrl.user_set_promisc)) {
- hw_dbg(hw, " Leaving address overflow promisc mode\n");
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl &= ~IXGBE_FCTRL_UPE;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- hw->addr_ctrl.uc_set_promisc = false;
- }
- }
-
- hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
- return 0;
-}
-
-/**
* ixgbe_mta_vector - Determines bit-vector in multicast table to set
* @hw: pointer to hardware structure
* @mc_addr: the multicast address
@@ -1469,7 +1451,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
u32 vector;
u32 vector_bit;
u32 vector_reg;
- u32 mta_reg;
hw->addr_ctrl.mta_in_use++;
@@ -1487,9 +1468,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
*/
vector_reg = (vector >> 5) & 0x7F;
vector_bit = vector & 0x1F;
- mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
- mta_reg |= (1 << vector_bit);
- IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
/**
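Editorial aside (not part of the patch): in ixgbe_set_mta() above, the 12-bit hash vector from ixgbe_mta_vector() selects one of 128 32-bit shadow-table words and a bit within that word; the whole shadow table is then flushed to the MTA register array in a single pass by ixgbe_update_mc_addr_list_generic().

	/* Hypothetical example: vector = 0x0A53
	 *   word index = (0x0A53 >> 5) & 0x7F = 0x52
	 *   bit index  =  0x0A53 & 0x1F       = 19
	 * so mta_shadow[0x52] |= (1 << 19).
	 */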
@@ -1515,18 +1494,21 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
hw->addr_ctrl.mta_in_use = 0;
- /* Clear the MTA */
+ /* Clear mta_shadow */
hw_dbg(hw, " Clearing MTA\n");
- for (i = 0; i < hw->mac.mcft_size; i++)
- IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
- /* Add the new addresses */
+ /* Update mta shadow */
netdev_for_each_mc_addr(ha, netdev) {
hw_dbg(hw, " Adding the multicast addresses:\n");
ixgbe_set_mta(hw, ha->addr);
}
/* Enable mta */
+ for (i = 0; i < hw->mac.mcft_size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
+ hw->mac.mta_shadow[i]);
+
if (hw->addr_ctrl.mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
@@ -1543,15 +1525,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
**/
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
- u32 i;
- u32 rar_entries = hw->mac.num_rar_entries;
struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
- if (a->mc_addr_in_rar_count > 0)
- for (i = (rar_entries - a->mc_addr_in_rar_count);
- i < rar_entries; i++)
- ixgbe_enable_rar(hw, i);
-
if (a->mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
hw->mac.mc_filter_type);
@@ -1567,15 +1542,8 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
**/
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
- u32 i;
- u32 rar_entries = hw->mac.num_rar_entries;
struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
- if (a->mc_addr_in_rar_count > 0)
- for (i = (rar_entries - a->mc_addr_in_rar_count);
- i < rar_entries; i++)
- ixgbe_disable_rar(hw, i);
-
if (a->mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
@@ -1595,6 +1563,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 mflcn_reg, fccfg_reg;
u32 reg;
u32 rx_pba_size;
+ u32 fcrtl, fcrth;
#ifdef CONFIG_DCB
if (hw->fc.requested_mode == ixgbe_fc_pfc)
@@ -1603,7 +1572,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
#endif /* CONFIG_DCB */
/* Negotiate the fc mode to use */
ret_val = ixgbe_fc_autoneg(hw);
- if (ret_val)
+ if (ret_val == IXGBE_ERR_FLOW_CONTROL)
goto out;
/* Disable any previous flow control settings */
@@ -1621,7 +1590,9 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
* 2: Tx flow control is enabled (we can send pause frames but
* we do not support receiving pause frames).
* 3: Both Rx and Tx flow control (symmetric) are enabled.
+#ifdef CONFIG_DCB
* 4: Priority Flow Control is enabled.
+#endif
* other: Invalid.
*/
switch (hw->fc.current_mode) {
@@ -1671,41 +1642,21 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
- /* Thresholds are different for link flow control when in DCB mode */
- if (reg & IXGBE_MTQC_RT_ENA) {
- rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
- /* Always disable XON for LFC when in DCB mode */
- reg = (rx_pba_size >> 5) & 0xFFE0;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
+ fcrth = (rx_pba_size - hw->fc.high_water) << 10;
+ fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
- reg = (rx_pba_size >> 2) & 0xFFE0;
- if (hw->fc.current_mode & ixgbe_fc_tx_pause)
- reg |= IXGBE_FCRTH_FCEN;
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
- } else {
- /*
- * Set up and enable Rx high/low water mark thresholds,
- * enable XON.
- */
- if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
- if (hw->fc.send_xon) {
- IXGBE_WRITE_REG(hw,
- IXGBE_FCRTL_82599(packetbuf_num),
- (hw->fc.low_water |
- IXGBE_FCRTL_XONE));
- } else {
- IXGBE_WRITE_REG(hw,
- IXGBE_FCRTL_82599(packetbuf_num),
- hw->fc.low_water);
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
- (hw->fc.high_water | IXGBE_FCRTH_FCEN));
- }
+ if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+ fcrth |= IXGBE_FCRTH_FCEN;
+ if (hw->fc.send_xon)
+ fcrtl |= IXGBE_FCRTL_XONE;
}
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
+
/* Configure pause time (2 TCs per register) */
reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
if ((packetbuf_num & 1) == 0)
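Editorial aside on the new threshold math (not part of the patch): assuming IXGBE_RXPBSIZE_SHIFT converts RXPBSIZE to kilobytes and fc.high_water/low_water are in the same units, the register values are simply (buffer size - watermark) converted back to bytes.

	/* Hypothetical numbers: a 512 KB Rx packet buffer with
	 * high_water = 480 and low_water = 448 gives
	 *   FCRTH = (512 - 480) << 10 = 0x08000 (plus IXGBE_FCRTH_FCEN)
	 *   FCRTL = (512 - 448) << 10 = 0x10000 (plus IXGBE_FCRTL_XONE)
	 */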
@@ -1729,12 +1680,13 @@ out:
**/
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
ixgbe_link_speed speed;
- u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
- u32 links2, anlp1_reg, autoc_reg, links;
bool link_up;
+ if (hw->fc.disable_fc_autoneg)
+ goto out;
+
/*
* AN should have completed when the cable was plugged in.
* Look for reasons to bail out. Bail out if:
@@ -1745,153 +1697,199 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
* So use link_up_wait_to_complete=false.
*/
hw->mac.ops.check_link(hw, &speed, &link_up, false);
-
- if (hw->fc.disable_fc_autoneg || (!link_up)) {
- hw->fc.fc_was_autonegged = false;
- hw->fc.current_mode = hw->fc.requested_mode;
+ if (!link_up) {
+ ret_val = IXGBE_ERR_FLOW_CONTROL;
goto out;
}
- /*
- * On backplane, bail out if
- * - backplane autoneg was not completed, or if
- * - we are 82599 and link partner is not AN enabled
- */
- if (hw->phy.media_type == ixgbe_media_type_backplane) {
- links = IXGBE_READ_REG(hw, IXGBE_LINKS);
- if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
- hw->fc.fc_was_autonegged = false;
- hw->fc.current_mode = hw->fc.requested_mode;
- goto out;
- }
+ switch (hw->phy.media_type) {
+ /* Autoneg flow control on fiber adapters */
+ case ixgbe_media_type_fiber:
+ if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+ ret_val = ixgbe_fc_autoneg_fiber(hw);
+ break;
- if (hw->mac.type == ixgbe_mac_82599EB) {
- links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
- if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
- hw->fc.fc_was_autonegged = false;
- hw->fc.current_mode = hw->fc.requested_mode;
- goto out;
- }
- }
+ /* Autoneg flow control on backplane adapters */
+ case ixgbe_media_type_backplane:
+ ret_val = ixgbe_fc_autoneg_backplane(hw);
+ break;
+
+ /* Autoneg flow control on copper adapters */
+ case ixgbe_media_type_copper:
+ if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+ ret_val = ixgbe_fc_autoneg_copper(hw);
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ if (ret_val == 0) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
}
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control on 1 gig fiber.
+ **/
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+{
+ u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+ s32 ret_val;
/*
* On multispeed fiber at 1g, bail out if
* - link is up but AN did not complete, or if
* - link is up and AN completed but timed out
*/
- if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
- linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
- if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
- ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
- hw->fc.fc_was_autonegged = false;
- hw->fc.current_mode = hw->fc.requested_mode;
- goto out;
- }
+
+ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+ ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
}
+ pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+
+ ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
+ pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE,
+ IXGBE_PCS1GANA_SYM_PAUSE,
+ IXGBE_PCS1GANA_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_backplane - Enable flow control per IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+{
+ u32 links2, anlp1_reg, autoc_reg, links;
+ s32 ret_val;
+
/*
- * Bail out on
- * - copper or CX4 adapters
- * - fiber adapters running at 10gig
+ * On backplane, bail out if
+ * - backplane autoneg was not completed, or if
+ * - we are 82599 and link partner is not AN enabled
*/
- if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->phy.media_type == ixgbe_media_type_cx4) ||
- ((hw->phy.media_type == ixgbe_media_type_fiber) &&
- (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
+ links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
hw->fc.fc_was_autonegged = false;
hw->fc.current_mode = hw->fc.requested_mode;
+ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
goto out;
}
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+ if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+ }
/*
- * Read the AN advertisement and LP ability registers and resolve
+ * Read the 10g AN autoc and LP ability registers and resolve
* local flow control settings accordingly
*/
- if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
- (hw->phy.media_type != ixgbe_media_type_backplane)) {
- pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
- if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
- /*
- * Now we need to check if the user selected Rx ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.requested_mode == ixgbe_fc_full) {
- hw->fc.current_mode = ixgbe_fc_full;
- hw_dbg(hw, "Flow Control = FULL.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- hw_dbg(hw, "Flow Control=RX PAUSE only\n");
- }
- } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_tx_pause;
- hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
- } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
- !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_none;
- hw_dbg(hw, "Flow Control = NONE.\n");
- }
- }
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
- if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
+ anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
+ IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_fc_autoneg_copper - Enable flow control per IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+{
+ u16 technology_ability_reg = 0;
+ u16 lp_technology_ability_reg = 0;
+
+ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN,
+ &technology_ability_reg);
+ hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
+ MDIO_MMD_AN,
+ &lp_technology_ability_reg);
+
+ return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
+ (u32)lp_technology_ability_reg,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
+ IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ * ixgbe_negotiate_fc - Negotiate flow control
+ * @hw: pointer to hardware structure
+ * @adv_reg: flow control advertised settings
+ * @lp_reg: link partner's flow control settings
+ * @adv_sym: symmetric pause bit in advertisement
+ * @adv_asm: asymmetric pause bit in advertisement
+ * @lp_sym: symmetric pause bit in link partner advertisement
+ * @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ * Find the intersection between advertised settings and link partner's
+ * advertised settings
+ **/
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+ if ((!(adv_reg)) || (!(lp_reg)))
+ return IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+ if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
/*
- * Read the 10g AN autoc and LP ability registers and resolve
- * local flow control settings accordingly
+ * Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
*/
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-
- if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
- (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
- /*
- * Now we need to check if the user selected Rx ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->fc.requested_mode == ixgbe_fc_full) {
- hw->fc.current_mode = ixgbe_fc_full;
- hw_dbg(hw, "Flow Control = FULL.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- hw_dbg(hw, "Flow Control=RX PAUSE only\n");
- }
- } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
- (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
- (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
- (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_tx_pause;
- hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
- } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
- (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
- !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
- (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+ if (hw->fc.requested_mode == ixgbe_fc_full) {
+ hw->fc.current_mode = ixgbe_fc_full;
+ hw_dbg(hw, "Flow Control = FULL.\n");
} else {
- hw->fc.current_mode = ixgbe_fc_none;
- hw_dbg(hw, "Flow Control = NONE.\n");
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
}
+ } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
+ hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+ } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
+ !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw_dbg(hw, "Flow Control = NONE.\n");
}
- /* Record that current_mode is the result of a successful autoneg */
- hw->fc.fc_was_autonegged = true;
-
-out:
- return ret_val;
+ return 0;
}
/**
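Editorial aside (not part of the patch): a self-contained sketch of the PAUSE resolution implemented by ixgbe_negotiate_fc() above, with the register bit tests reduced to booleans and want_full mirroring (requested_mode == ixgbe_fc_full).

	#include <stdbool.h>

	enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

	/* loc_sym/loc_asm and lp_sym/lp_asm are the local and link-partner
	 * symmetric/asymmetric PAUSE advertisement bits.
	 */
	static enum fc_mode resolve_fc(bool loc_sym, bool loc_asm,
				       bool lp_sym, bool lp_asm, bool want_full)
	{
		if (loc_sym && lp_sym)
			return want_full ? FC_FULL : FC_RX_PAUSE;
		if (!loc_sym && loc_asm && lp_sym && lp_asm)
			return FC_TX_PAUSE;
		if (loc_sym && loc_asm && !lp_sym && lp_asm)
			return FC_RX_PAUSE;
		return FC_NONE;
	}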
@@ -1903,7 +1901,8 @@ out:
static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
{
s32 ret_val = 0;
- u32 reg;
+ u32 reg = 0, reg_bp = 0;
+ u16 reg_cu = 0;
#ifdef CONFIG_DCB
if (hw->fc.requested_mode == ixgbe_fc_pfc) {
@@ -1911,7 +1910,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
goto out;
}
-#endif
+#endif /* CONFIG_DCB */
/* Validate the packetbuf configuration */
if (packetbuf_num < 0 || packetbuf_num > 7) {
hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
@@ -1949,11 +1948,26 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
hw->fc.requested_mode = ixgbe_fc_full;
/*
- * Set up the 1G flow control advertisement registers so the HW will be
- * able to do fc autoneg once the cable is plugged in. If we end up
- * using 10g instead, this is harmless.
+ * Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do fc autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
*/
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_backplane:
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ break;
+
+ case ixgbe_media_type_copper:
+ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN, &reg_cu);
+ break;
+
+ default:
+ ;
+ }
/*
* The possible values of fc.requested_mode are:
@@ -1972,6 +1986,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
case ixgbe_fc_none:
/* Flow control completely disabled by software override. */
reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
break;
case ixgbe_fc_rx_pause:
/*
@@ -1983,6 +2002,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
* disable the adapter's ability to send PAUSE frames.
*/
reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
break;
case ixgbe_fc_tx_pause:
/*
@@ -1991,10 +2015,22 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
*/
reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
+ reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
+ } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+ reg_cu |= (IXGBE_TAF_ASM_PAUSE);
+ reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
+ }
break;
case ixgbe_fc_full:
/* Flow control (both Rx and Tx) is enabled by SW override. */
reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+ if (hw->phy.media_type == ixgbe_media_type_backplane)
+ reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+ IXGBE_AUTOC_ASM_PAUSE);
+ else if (hw->phy.media_type == ixgbe_media_type_copper)
+ reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
break;
#ifdef CONFIG_DCB
case ixgbe_fc_pfc:
@@ -2008,80 +2044,37 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
break;
}
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
- reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
- /* Disable AN timeout */
- if (hw->fc.strict_ieee)
- reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+ if (hw->mac.type != ixgbe_mac_X540) {
+ /*
+ * Enable auto-negotiation between the MAC & PHY;
+ * the MAC will advertise clause 37 flow control.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
- IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
- hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+ /* Disable AN timeout */
+ if (hw->fc.strict_ieee)
+ reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
- /*
- * Set up the 10G flow control advertisement registers so the HW
- * can do fc autoneg once the cable is plugged in. If we end up
- * using 1g instead, this is harmless.
- */
- reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+ hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+ }
/*
- * The possible values of fc.requested_mode are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames,
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but
- * we do not support receiving pause frames).
- * 3: Both Rx and Tx flow control (symmetric) are enabled.
- * other: Invalid.
+ * AUTOC restart handles negotiation of 1G and 10G on backplane
+ * and copper. There is no need to set the PCS1GCTL register.
+ *
*/
- switch (hw->fc.requested_mode) {
- case ixgbe_fc_none:
- /* Flow control completely disabled by software override. */
- reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
- break;
- case ixgbe_fc_rx_pause:
- /*
- * Rx Flow control is enabled and Tx Flow control is
- * disabled by software override. Since there really
- * isn't a way to advertise that we are capable of RX
- * Pause ONLY, we will advertise that we support both
- * symmetric and asymmetric Rx PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
- */
- reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
- break;
- case ixgbe_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is
- * disabled by software override.
- */
- reg |= (IXGBE_AUTOC_ASM_PAUSE);
- reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
- break;
- case ixgbe_fc_full:
- /* Flow control (both Rx and Tx) is enabled by SW override. */
- reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
- break;
-#ifdef CONFIG_DCB
- case ixgbe_fc_pfc:
- goto out;
- break;
-#endif /* CONFIG_DCB */
- default:
- hw_dbg(hw, "Flow control param set incorrectly\n");
- ret_val = IXGBE_ERR_CONFIG;
- goto out;
- break;
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ reg_bp |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+ } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+ (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
+ hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN, reg_cu);
}
- /*
- * AUTOC restart handles negotiation of 1G and 10G. There is
- * no need to set the PCS1GCTL register.
- */
- reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
- hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+ hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
out:
return ret_val;
}
@@ -2097,10 +2090,16 @@ out:
**/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
+ struct ixgbe_adapter *adapter = hw->back;
u32 i;
u32 reg_val;
u32 number_of_queues;
- s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ s32 status = 0;
+ u16 dev_status = 0;
+
+ /* Just jump out if bus mastering is already disabled */
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ goto out;
/* Disable the receive unit by stopping each queue */
number_of_queues = hw->mac.max_rx_queues;
@@ -2117,13 +2116,43 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
- if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
- status = 0;
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ goto check_device_status;
+ udelay(100);
+ }
+
+ hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
+ status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+ /*
+ * Before proceeding, make sure that the PCIe block does not have
+ * transactions pending.
+ */
+check_device_status:
+ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
+ &dev_status);
+ if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
break;
- }
udelay(100);
}
+ if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
+ hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
+ else
+ goto out;
+
+ /*
+ * Two consecutive resets are required via CTRL.RST per datasheet
+ * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new master requests from
+ * being issued by our device. We then must wait 1usec for any
+ * remaining completions from the PCIe bus to trickle in, and then reset
+ * again to clear out any effects they may have had on our device.
+ */
+ hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+
+out:
return status;
}
@@ -2133,7 +2162,7 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to acquire
*
- * Acquires the SWFW semaphore thought the GSSR register for the specified
+ * Acquires the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2144,6 +2173,10 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
s32 timeout = 200;
while (timeout) {
+ /*
+ * SW EEPROM semaphore bit is used for access to all
+ * SW_FW_SYNC/GSSR bits (not just EEPROM)
+ */
if (ixgbe_get_eeprom_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
@@ -2161,7 +2194,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
}
if (!timeout) {
- hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
+ hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
return IXGBE_ERR_SWFW_SYNC;
}
@@ -2177,7 +2210,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to release
*
- * Releases the SWFW semaphore thought the GSSR register for the specified
+ * Releases the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -2365,37 +2398,38 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
u32 mpsar_lo, mpsar_hi;
u32 rar_entries = hw->mac.num_rar_entries;
- if (rar < rar_entries) {
- mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
+ hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
- if (!mpsar_lo && !mpsar_hi)
- goto done;
+ mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
- if (mpsar_lo) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
- mpsar_lo = 0;
- }
- if (mpsar_hi) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
- mpsar_hi = 0;
- }
- } else if (vmdq < 32) {
- mpsar_lo &= ~(1 << vmdq);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
- } else {
- mpsar_hi &= ~(1 << (vmdq - 32));
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
- }
+ if (!mpsar_lo && !mpsar_hi)
+ goto done;
- /* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
- hw->mac.ops.clear_rar(hw, rar);
+ if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+ if (mpsar_lo) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ mpsar_lo = 0;
+ }
+ if (mpsar_hi) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ mpsar_hi = 0;
+ }
+ } else if (vmdq < 32) {
+ mpsar_lo &= ~(1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
} else {
- hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ mpsar_hi &= ~(1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
}
+ /* was that the last pool using this rar? */
+ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ hw->mac.ops.clear_rar(hw, rar);
done:
return 0;
}
@@ -2411,18 +2445,20 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
u32 mpsar;
u32 rar_entries = hw->mac.num_rar_entries;
- if (rar < rar_entries) {
- if (vmdq < 32) {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar |= 1 << vmdq;
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
- } else {
- mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- mpsar |= 1 << (vmdq - 32);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
- }
- } else {
+ /* Make sure we are using a valid rar index range */
+ if (rar >= rar_entries) {
hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+ return IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ if (vmdq < 32) {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+ mpsar |= 1 << vmdq;
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+ } else {
+ mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ mpsar |= 1 << (vmdq - 32);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
}
return 0;
}
@@ -2435,7 +2471,6 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
int i;
-
for (i = 0; i < 128; i++)
IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
@@ -2664,12 +2699,21 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
* Reads the links register to determine if link is up and the current speed
**/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete)
+ bool *link_up, bool link_up_wait_to_complete)
{
- u32 links_reg;
+ u32 links_reg, links_orig;
u32 i;
+ /* clear the old state */
+ links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+ if (links_orig != links_reg) {
+ hw_dbg(hw, "LINKS changed from %08X to %08X\n",
+ links_orig, links_reg);
+ }
+
if (link_up_wait_to_complete) {
for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
if (links_reg & IXGBE_LINKS_UP) {
@@ -2692,10 +2736,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
IXGBE_LINKS_SPEED_10G_82599)
*speed = IXGBE_LINK_SPEED_10GB_FULL;
else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
- IXGBE_LINKS_SPEED_1G_82599)
+ IXGBE_LINKS_SPEED_1G_82599)
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- else
+ else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+ IXGBE_LINKS_SPEED_100_82599)
*speed = IXGBE_LINK_SPEED_100_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
/* if link is down, zero out the current_mode */
if (*link_up == false) {
@@ -2705,3 +2752,134 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
return 0;
}
+
+/**
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ * the EEPROM
+ * @hw: pointer to hardware structure
+ * @wwnn_prefix: the alternative WWNN prefix
+ * @wwpn_prefix: the alternative WWPN prefix
+ *
+ * This function will read the alternative SAN MAC address block in the
+ * EEPROM to check support for the alternative WWNN/WWPN prefix.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
+{
+ u16 offset, caps;
+ u16 alt_san_mac_blk_offset;
+
+ /* clear output first */
+ *wwnn_prefix = 0xFFFF;
+ *wwpn_prefix = 0xFFFF;
+
+ /* check if alternative SAN MAC is supported */
+ hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+ &alt_san_mac_blk_offset);
+
+ if ((alt_san_mac_blk_offset == 0) ||
+ (alt_san_mac_blk_offset == 0xFFFF))
+ goto wwn_prefix_out;
+
+ /* check capability in alternative san mac address block */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+ hw->eeprom.ops.read(hw, offset, &caps);
+ if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+ goto wwn_prefix_out;
+
+ /* get the corresponding prefix for WWNN/WWPN */
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+ hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+ return 0;
+}
+
+/**
+ * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
+ * control
+ * @hw: pointer to hardware structure
+ *
+ * There are several phys that do not support autoneg flow control. This
+ * function checks the device id to see if the associated phy supports
+ * autoneg flow control.
+ **/
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X540T:
+ return 0;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ return 0;
+ default:
+ return IXGBE_ERR_FC_NOT_SUPPORTED;
+ }
+}
+
+/**
+ * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for anti-spoofing
+ * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ *
+ **/
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+{
+ int j;
+ int pf_target_reg = pf >> 3;
+ int pf_target_shift = pf % 8;
+ u32 pfvfspoof = 0;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ if (enable)
+ pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
+
+ /*
+ * PFVFSPOOF register array is size 8 with 8 bits assigned to
+ * MAC anti-spoof enables in each register array element.
+ */
+ for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+ /* If not enabling anti-spoofing then done */
+ if (!enable)
+ return;
+
+ /*
+ * The PF should be allowed to spoof so that it can support
+ * emulation mode NICs. Reset the bit assigned to the PF
+ */
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
+ pfvfspoof ^= (1 << pf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
+}
+
+/**
+ * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
+ * @hw: pointer to hardware structure
+ * @enable: enable or disable switch for VLAN anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ *
+ **/
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
+ u32 pfvfspoof;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+ if (enable)
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
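Editorial aside (not part of the patch): both anti-spoofing helpers above index the PFVFSPOOF array the same way — eight pools per 32-bit register, with the MAC enable bit at (pool % 8) and the VLAN enable bit shifted up by IXGBE_SPOOF_VLANAS_SHIFT.

	/* Hypothetical example: pool 21
	 *   register = PFVFSPOOF(21 >> 3) = PFVFSPOOF(2)
	 *   MAC bit  = 21 % 8 = 5
	 *   VLAN bit = 5 + IXGBE_SPOOF_VLANAS_SHIFT
	 */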
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 424c223437dc..508f635fc2ca 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -29,13 +29,15 @@
#define _IXGBE_COMMON_H_
#include "ixgbe_type.h"
+#include "ixgbe.h"
u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@@ -49,9 +51,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
@@ -59,8 +63,6 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev);
-s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
- struct net_device *netdev);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
@@ -81,9 +83,12 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
-
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix);
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
@@ -104,9 +109,8 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
-extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
- netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg)
+ netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
#define e_dev_info(format, arg...) \
dev_info(&adapter->pdev->dev, format, ## arg)
#define e_dev_warn(format, arg...) \
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 0d44c6470ca3..41c529fac0ab 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,42 @@
#include "ixgbe_dcb_82599.h"
/**
+ * ixgbe_ieee_credits - This calculates the ieee traffic class
+ * credits from the configured bandwidth percentages. Credits
+ * are the smallest unit programmable into the underlying
+ * hardware. The IEEE 802.1Qaz specification does not use bandwidth
+ * groups so this is much simplified from the CEE case.
+ */
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
+{
+ int min_percent = 100;
+ int min_credit, multiplier;
+ int i;
+
+ min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+ DCB_CREDIT_QUANTUM;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if (bw[i] < min_percent && bw[i])
+ min_percent = bw[i];
+ }
+
+ multiplier = (min_credit / min_percent) + 1;
+
+ /* Find out the hw credits for each TC */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
+
+ if (val < min_credit)
+ val = min_credit;
+ refill[i] = val;
+
+ max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit;
+ }
+ return 0;
+}
+
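Editorial aside (not part of the patch): a rough numeric illustration of ixgbe_ieee_credits() above. The 64-byte credit quantum used here is an assumption for the example; the real value is DCB_CREDIT_QUANTUM from the DCB headers, and refill values are additionally capped at MAX_CREDIT_REFILL.

	/* Hypothetical inputs: max_frame = 1522, bw = {60, 40, 0, ...}
	 *   min_credit  = ((1522 / 2) + 64 - 1) / 64 = 12
	 *   min_percent = 40, multiplier = 12 / 40 + 1 = 1
	 *   refill[0]   = max(60 * 1, 12) = 60
	 *   refill[1]   = max(40 * 1, 12) = 40
	 *   max[i]      = bw[i] * MAX_CREDIT / 100, or min_credit when bw[i] == 0
	 */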
+/**
* ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
* @ixgbe_dcb_config: Struct containing DCB settings.
* @direction: Configuring either Tx or Rx.
@@ -42,7 +78,8 @@
* It should be called only after the rules are checked by
* ixgbe_dcb_check_config().
*/
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
+ struct ixgbe_dcb_config *dcb_config,
int max_frame, u8 direction)
{
struct tc_bw_alloc *p;
@@ -124,7 +161,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
* credit may not be enough to send out a TSO
* packet in descriptor plane arbitration.
*/
- if (credit_max &&
+ if ((hw->mac.type == ixgbe_mac_82598EB) &&
+ credit_max &&
(credit_max < MINIMUM_CREDIT_FOR_TSO))
credit_max = MINIMUM_CREDIT_FOR_TSO;
@@ -139,6 +177,59 @@ out:
return ret_val;
}
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
+{
+ int i;
+
+ *pfc_en = 0;
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ *pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i;
+}
+
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
+ u16 *refill)
+{
+ struct tc_bw_alloc *p;
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &cfg->tc_config[i].path[direction];
+ refill[i] = p->data_credits_refill;
+ }
+}
+
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
+{
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ max[i] = cfg->tc_config[i].desc_credits_max;
+}
+
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *bwgid)
+{
+ struct tc_bw_alloc *p;
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &cfg->tc_config[i].path[direction];
+ bwgid[i] = p->bwg_id;
+ }
+}
+
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *ptype)
+{
+ struct tc_bw_alloc *p;
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &cfg->tc_config[i].path[direction];
+ ptype[i] = p->prio_type;
+ }
+}
+
/**
* ixgbe_dcb_hw_config - Config and enable DCB
* @hw: pointer to hardware structure
@@ -150,10 +241,82 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
- if (hw->mac.type == ixgbe_mac_82598EB)
- ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
- else if (hw->mac.type == ixgbe_mac_82599EB)
- ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
+ u8 pfc_en;
+ u8 ptype[MAX_TRAFFIC_CLASS];
+ u8 bwgid[MAX_TRAFFIC_CLASS];
+ u16 refill[MAX_TRAFFIC_CLASS];
+ u16 max[MAX_TRAFFIC_CLASS];
+ /* CEE does not define a priority to tc mapping so map 1:1 */
+ u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+ /* Unpack CEE standard containers */
+ ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
+ ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg,
+ pfc_en, refill, max, bwgid,
+ ptype);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
+ pfc_en, refill, max, bwgid,
+ ptype, prio_tc);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
+{
+ int ret = -EINVAL;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en);
+ break;
+ default:
+ break;
+ }
return ret;
}
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+ u16 *refill, u16 *max, u8 *bwg_id,
+ u8 *prio_type, u8 *prio_tc)
+{
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
+ prio_type);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type, prio_tc);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
+ prio_type, prio_tc);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
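In these helpers the pfc_en argument is consumed as a simple per-TC bitmask: the per-MAC PFC routines test pfc_en & (1 << i) for each traffic class. A trivial sketch of building such a mask by hand (the choice of TC3 and TC4 is an arbitrary example):

#include <stdio.h>

#define MAX_TRAFFIC_CLASS 8

int main(void)
{
	unsigned char pfc_en = 0;
	int i;

	/* Enable PFC on the traffic classes carrying lossless traffic,
	 * here TC3 and TC4 as an example choice. */
	pfc_en |= 1 << 3;
	pfc_en |= 1 << 4;

	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		printf("TC%d: PFC %s\n", i,
		       (pfc_en & (1 << i)) ? "enabled" : "disabled");

	return 0;	/* pfc_en ends up as 0x18 */
}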
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 0208a87b129e..944838fc7b59 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -139,7 +139,6 @@ struct ixgbe_dcb_config {
struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
bool pfc_mode_enable;
- bool round_robin_enable;
enum dcb_rx_pba_cfg rx_pba_cfg;
@@ -148,11 +147,21 @@ struct ixgbe_dcb_config {
};
/* DCB driver APIs */
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
/* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8);
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame);
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
+ struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
+ u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
/* DCB definitions for credit calculation */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 50288bcadc59..1bc57e52cee3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -38,15 +38,14 @@
*
* Configure packet buffers for DCB mode.
*/
-static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
{
s32 ret_val = 0;
u32 value = IXGBE_RXPBSIZE_64KB;
u8 i = 0;
/* Setup Rx packet buffer sizes */
- switch (dcb_config->rx_pba_cfg) {
+ switch (rx_pba) {
case pba_80_48:
/* Setup the first four at 80KB */
value = IXGBE_RXPBSIZE_80KB;
@@ -78,10 +77,11 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
@@ -102,13 +102,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
- credit_refill = p->data_credits_refill;
- credit_max = p->data_credits_max;
+ credit_refill = refill[i];
+ credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_RT2CR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
@@ -135,10 +134,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg, max_credits;
u8 i;
@@ -146,10 +147,8 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/* Enable arbiter */
reg &= ~IXGBE_DPMCS_ARBDIS;
- if (!(dcb_config->round_robin_enable)) {
- /* Enable DFP and Recycle mode */
- reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
- }
+ /* Enable DFP and Recycle mode */
+ reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
reg |= IXGBE_DPMCS_TSOEF;
/* Configure Max TSO packet size 34KB including payload and headers */
reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
@@ -158,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
- max_credits = dcb_config->tc_config[i].desc_credits_max;
+ max_credits = max[i];
reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
- reg |= p->data_credits_refill;
- reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
+ reg |= refill[i];
+ reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
- if (p->prio_type == prio_group)
+ if (prio_type[i] == prio_group)
reg |= IXGBE_TDTQ2TCCR_GSP;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_TDTQ2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
@@ -183,10 +181,12 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg;
u8 i;
@@ -200,15 +200,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
- reg = p->data_credits_refill;
- reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT;
- reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
+ reg = refill[i];
+ reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
- if (p->prio_type == prio_group)
+ if (prio_type[i] == prio_group)
reg |= IXGBE_TDPT2TCCR_GSP;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_TDPT2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
@@ -229,63 +228,57 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Priority Flow Control for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 reg, rx_pba_size;
u8 i;
- if (!dcb_config->pfc_mode_enable)
- goto out;
-
- /* Enable Transmit Priority Flow Control */
- reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
- reg &= ~IXGBE_RMCS_TFCE_802_3X;
- /* correct the reporting of our flow control status */
- reg |= IXGBE_RMCS_TFCE_PRIORITY;
- IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
-
- /* Enable Receive Priority Flow Control */
- reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- reg &= ~IXGBE_FCTRL_RFCE;
- reg |= IXGBE_FCTRL_RPFCE;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
+ if (pfc_en) {
+ /* Enable Transmit Priority Flow Control */
+ reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ reg &= ~IXGBE_RMCS_TFCE_802_3X;
+ /* correct the reporting of our flow control status */
+ reg |= IXGBE_RMCS_TFCE_PRIORITY;
+ IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
+
+ /* Enable Receive Priority Flow Control */
+ reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ reg &= ~IXGBE_FCTRL_RFCE;
+ reg |= IXGBE_FCTRL_RPFCE;
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
+
+ /* Configure pause time */
+ for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
+ }
/*
* Configure flow control thresholds and enable priority flow control
* for each traffic class.
*/
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- if (dcb_config->rx_pba_cfg == pba_equal) {
- rx_pba_size = IXGBE_RXPBSIZE_64KB;
- } else {
- rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
- : IXGBE_RXPBSIZE_48KB;
- }
+ int enabled = pfc_en & (1 << i);
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+ reg = (rx_pba_size - hw->fc.low_water) << 10;
- reg = ((rx_pba_size >> 5) & 0xFFF0);
- if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
- dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+ if (enabled == pfc_enabled_tx ||
+ enabled == pfc_enabled_full)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
- reg = ((rx_pba_size >> 2) & 0xFFF0);
- if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
- dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+ reg = (rx_pba_size - hw->fc.high_water) << 10;
+ if (enabled == pfc_enabled_tx ||
+ enabled == pfc_enabled_full)
reg |= IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
}
- /* Configure pause time */
- for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
- IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
-
- /* Configure flow control refresh threshold value */
- IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
-
-out:
return 0;
}
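The reworked thresholds above read the per-TC packet buffer size back from RXPBSIZE, shift it down by IXGBE_RXPBSIZE_SHIFT, subtract the flow-control water marks and convert back to bytes with << 10, which suggests the water marks are tracked in kilobytes here. A small sketch of just that conversion, with purely illustrative buffer and water-mark numbers:

#include <stdio.h>

/* Assumed from ixgbe_type.h: RXPBSIZE holds the buffer size as KB << 10. */
#define IXGBE_RXPBSIZE_SHIFT	10

int main(void)
{
	unsigned int rxpbsize_reg = 64 << IXGBE_RXPBSIZE_SHIFT;	/* 64 KB buffer */
	unsigned int low_water = 40;	/* stand-in for hw->fc.low_water, KB */
	unsigned int high_water = 48;	/* stand-in for hw->fc.high_water, KB */
	unsigned int rx_pba_size, fcrtl, fcrth;

	rx_pba_size = rxpbsize_reg >> IXGBE_RXPBSIZE_SHIFT;	/* back to KB */

	/* Same conversion as the PFC code: KB -> bytes for FCRTL/FCRTH. */
	fcrtl = (rx_pba_size - low_water) << 10;	/* 24 KB -> 24576 */
	fcrth = (rx_pba_size - high_water) << 10;	/* 16 KB -> 16384 */

	printf("FCRTL=%u FCRTH=%u\n", fcrtl, fcrth);
	return 0;
}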
@@ -296,7 +289,7 @@ out:
* Configure queue statistics registers, all queues belonging to same traffic
* class uses a single set of queue statistics counters.
*/
-static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -329,13 +322,16 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ u8 rx_pba, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
- ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config);
- ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
- ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
- ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
- ixgbe_dcb_config_pfc_82598(hw, dcb_config);
+ ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_pfc_82598(hw, pfc_en);
ixgbe_dcb_config_tc_stats_82598(hw);
return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index abc03ccfa088..1e9750c2b46b 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -71,9 +71,28 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
/* DCB hw initialization */
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
+ u8 rx_pba, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 05f224715073..025af8c53ddb 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -33,19 +33,18 @@
/**
* ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method to distribute packet buffer
*
* Configure packet buffers for DCB mode.
*/
-static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
{
s32 ret_val = 0;
u32 value = IXGBE_RXPBSIZE_64KB;
u8 i = 0;
/* Setup Rx packet buffer sizes */
- switch (dcb_config->rx_pba_cfg) {
+ switch (rx_pba) {
case pba_80_48:
/* Setup the first four at 80KB */
value = IXGBE_RXPBSIZE_80KB;
@@ -75,14 +74,20 @@ static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type,
+ u8 *prio_tc)
{
- struct tc_bw_alloc *p;
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
@@ -98,20 +103,18 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
/* Map all traffic classes to their UP, 1 to 1 */
reg = 0;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
- reg |= (i << (i * IXGBE_RTRUP2TC_UP_SHIFT));
+ reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
-
- credit_refill = p->data_credits_refill;
- credit_max = p->data_credits_max;
+ credit_refill = refill[i];
+ credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
- reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_RTRPT4C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
@@ -130,14 +133,19 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg, max_credits;
u8 i;
@@ -149,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
- max_credits = dcb_config->tc_config[i].desc_credits_max;
+ max_credits = max[i];
reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
- reg |= p->data_credits_refill;
- reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT;
+ reg |= refill[i];
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
- if (p->prio_type == prio_group)
+ if (prio_type[i] == prio_group)
reg |= IXGBE_RTTDT2C_GSP;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_RTTDT2C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
@@ -177,14 +184,20 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type,
+ u8 *prio_tc)
{
- struct tc_bw_alloc *p;
u32 reg;
u8 i;
@@ -200,20 +213,19 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
/* Map all traffic classes to their UP, 1 to 1 */
reg = 0;
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
- reg |= (i << (i * IXGBE_RTTUP2TC_UP_SHIFT));
+ reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
- reg = p->data_credits_refill;
- reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT;
- reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;
+ reg = refill[i];
+ reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
- if (p->prio_type == prio_group)
+ if (prio_type[i] == prio_group)
reg |= IXGBE_RTTPT2C_GSP;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_RTTPT2C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
@@ -233,65 +245,59 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_pfc_82599 - Configure priority flow control
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
*
* Configure Priority Flow Control (PFC) for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 i, reg, rx_pba_size;
- /* If PFC is disabled globally then fall back to LFC. */
- if (!dcb_config->pfc_mode_enable) {
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
- hw->mac.ops.fc_enable(hw, i);
- goto out;
- }
-
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- if (dcb_config->rx_pba_cfg == pba_equal)
- rx_pba_size = IXGBE_RXPBSIZE_64KB;
- else
- rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
- : IXGBE_RXPBSIZE_48KB;
+ int enabled = pfc_en & (1 << i);
+ rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+ rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+
+ reg = (rx_pba_size - hw->fc.low_water) << 10;
- reg = ((rx_pba_size >> 5) & 0xFFE0);
- if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
- dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+ if (enabled)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
- reg = ((rx_pba_size >> 2) & 0xFFE0);
- if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
- dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+ reg = (rx_pba_size - hw->fc.high_water) << 10;
+ if (enabled)
reg |= IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
}
- /* Configure pause time (2 TCs per register) */
- reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
- for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
- IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
-
- /* Configure flow control refresh threshold value */
- IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
-
- /* Enable Transmit PFC */
- reg = IXGBE_FCCFG_TFCE_PRIORITY;
- IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
+ if (pfc_en) {
+ /* Configure pause time (2 TCs per register) */
+ reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+ for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+ /* Configure flow control refresh threshold value */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+ /* Enable Transmit PFC */
+ reg = IXGBE_FCCFG_TFCE_PRIORITY;
+ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
+ /*
+ * Enable Receive PFC
+ * We will always honor XOFF frames we receive when
+ * we are in PFC mode.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ reg &= ~IXGBE_MFLCN_RFCE;
+ reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
+ IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+
+ } else {
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ hw->mac.ops.fc_enable(hw, i);
+ }
- /*
- * Enable Receive PFC
- * We will always honor XOFF frames we receive when
- * we are in PFC mode.
- */
- reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
- reg &= ~IXGBE_MFLCN_RFCE;
- reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
- IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
-out:
return 0;
}
@@ -351,7 +357,6 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_config_82599 - Configure general DCB parameters
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
*
* Configure general DCB parameters.
*/
@@ -408,19 +413,28 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_hw_config_82599 - Configure and enable DCB
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method to distribute packet buffer
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
+ * @pfc_en: enabled pfc bitmask
*
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ u8 rx_pba, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
- ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config);
+ ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
ixgbe_dcb_config_82599(hw);
- ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
- ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
- ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
- ixgbe_dcb_config_pfc_82599(hw, dcb_config);
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+ prio_type, prio_tc);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type, prio_tc);
+ ixgbe_dcb_config_pfc_82599(hw, pfc_en);
ixgbe_dcb_config_tc_stats_82599(hw);
return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 3841649fb954..148fd8b477a9 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -102,11 +102,32 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en);
/* DCB hw initialization */
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type,
+ u8 *prio_tc);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type,
+ u8 *prio_tc);
+
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *config);
+ u8 rx_pba, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type,
+ u8 *prio_tc);
#endif /* _DCB_82599_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index b53b465e24af..fec4c724c37a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -37,7 +37,6 @@
#define BIT_PG_RX 0x04
#define BIT_PG_TX 0x08
#define BIT_APP_UPCHG 0x10
-#define BIT_RESETLINK 0x40
#define BIT_LINKSPEED 0x80
/* Responses for the DCB_C_SET_ALL command */
@@ -130,16 +129,24 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
netdev->netdev_ops->ndo_stop(netdev);
ixgbe_clear_interrupt_scheme(adapter);
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
adapter->last_lfc_mode = adapter->hw.fc.current_mode;
adapter->hw.fc.requested_mode = ixgbe_fc_none;
- }
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ break;
+ default:
+ break;
}
+
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
+ if (!netdev_get_num_tc(netdev))
+ ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
+
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
@@ -154,9 +161,16 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
adapter->temp_dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ break;
+ default:
+ break;
+ }
+
+ ixgbe_setup_tc(netdev, 0);
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
@@ -178,9 +192,14 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
for (i = 0; i < netdev->addr_len; i++)
perm_addr[i] = adapter->hw.mac.perm_addr[i];
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
for (j = 0; j < netdev->addr_len; j++, i++)
perm_addr[i] = adapter->hw.mac.san_addr[j];
+ break;
+ default:
+ break;
}
}
@@ -208,10 +227,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
(adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
- adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) {
+ adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
adapter->dcb_set_bitmap |= BIT_PG_TX;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -222,10 +239,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
- adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
+ adapter->dcb_cfg.bw_percentage[0][bwg_id])
adapter->dcb_set_bitmap |= BIT_PG_TX;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -252,10 +267,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
(adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
- adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) {
+ adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
adapter->dcb_set_bitmap |= BIT_PG_RX;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -266,10 +279,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
- adapter->dcb_cfg.bw_percentage[1][bwg_id]) {
+ adapter->dcb_cfg.bw_percentage[1][bwg_id])
adapter->dcb_set_bitmap |= BIT_PG_RX;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -338,68 +349,103 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int ret;
- if (!adapter->dcb_set_bitmap)
+ if (!adapter->dcb_set_bitmap ||
+ !(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return DCB_NO_HW_CHG;
ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
- adapter->ring_feature[RING_F_DCB].indices);
+ MAX_TRAFFIC_CLASS);
if (ret)
return DCB_NO_HW_CHG;
/*
- * Only take down the adapter if the configuration change
- * requires a reset.
+ * Only take down the adapter if an app change occurred. FCoE
+ * may shuffle tx rings in this case and this cannot be done
+ * without a reset currently.
*/
- if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
+ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
msleep(1);
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
- } else {
- if (netif_running(netdev))
- ixgbe_down(adapter);
- }
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_stop(netdev);
+ ixgbe_clear_interrupt_scheme(adapter);
}
if (adapter->dcb_cfg.pfc_mode_enable) {
- if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
- (adapter->hw.fc.current_mode != ixgbe_fc_pfc))
- adapter->last_lfc_mode = adapter->hw.fc.current_mode;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
+ adapter->last_lfc_mode =
+ adapter->hw.fc.current_mode;
+ break;
+ default:
+ break;
+ }
adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
} else {
- if (adapter->hw.mac.type != ixgbe_mac_82598EB)
- adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
- else
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
adapter->hw.fc.requested_mode = ixgbe_fc_none;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+ break;
+ default:
+ break;
+ }
}
- if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- ixgbe_init_interrupt_scheme(adapter);
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_open(netdev);
- } else {
- if (netif_running(netdev))
- ixgbe_up(adapter);
- }
+ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+ ixgbe_init_interrupt_scheme(adapter);
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_open(netdev);
ret = DCB_HW_CHG_RST;
- } else if (adapter->dcb_set_bitmap & BIT_PFC) {
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
- ixgbe_dcb_config_pfc_82598(&adapter->hw,
- &adapter->dcb_cfg);
- else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- ixgbe_dcb_config_pfc_82599(&adapter->hw,
- &adapter->dcb_cfg);
+ }
+
+ if (adapter->dcb_set_bitmap & BIT_PFC) {
+ u8 pfc_en;
+ ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
+ ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en);
ret = DCB_HW_CHG;
}
+
+ if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
+ u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
+ u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
+ /* Priority to TC mapping in the CEE case defaults to 1:1 */
+ u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7};
+ int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+#ifdef CONFIG_FCOE
+ if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+ ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+ max_frame, DCB_TX_CONFIG);
+ ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+ max_frame, DCB_RX_CONFIG);
+
+ ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
+ ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, bwg_id);
+ ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, prio_type);
+
+ ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+ bwg_id, prio_type, prio_tc);
+ }
+
if (adapter->dcb_cfg.pfc_mode_enable)
adapter->hw.fc.current_mode = ixgbe_fc_pfc;
- if (adapter->dcb_set_bitmap & BIT_RESETLINK)
+ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
clear_bit(__IXGBE_RESETTING, &adapter->state);
adapter->dcb_set_bitmap = 0x00;
return ret;
@@ -408,40 +454,38 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u8 rval = 0;
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- switch (capid) {
- case DCB_CAP_ATTR_PG:
- *cap = true;
- break;
- case DCB_CAP_ATTR_PFC:
- *cap = true;
- break;
- case DCB_CAP_ATTR_UP2TC:
- *cap = false;
- break;
- case DCB_CAP_ATTR_PG_TCS:
- *cap = 0x80;
- break;
- case DCB_CAP_ATTR_PFC_TCS:
- *cap = 0x80;
- break;
- case DCB_CAP_ATTR_GSP:
- *cap = true;
- break;
- case DCB_CAP_ATTR_BCN:
- *cap = false;
- break;
- default:
- rval = -EINVAL;
- break;
- }
- } else {
- rval = -EINVAL;
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_BCN:
+ *cap = false;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = adapter->dcbx_cap;
+ break;
+ default:
+ *cap = false;
+ break;
}
- return rval;
+ return 0;
}
static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
@@ -502,21 +546,16 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
*/
static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
{
- u8 rval = 0;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
- switch (idtype) {
- case DCB_APP_IDTYPE_ETHTYPE:
-#ifdef IXGBE_FCOE
- if (id == ETH_P_FCOE)
- rval = ixgbe_fcoe_getapp(netdev_priv(netdev));
-#endif
- break;
- case DCB_APP_IDTYPE_PORTNUM:
- break;
- default:
- break;
- }
- return rval;
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 0;
+
+ return dcb_getapp(netdev, &app);
}
/**
@@ -531,24 +570,45 @@ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
u8 idtype, u16 id, u8 up)
{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
u8 rval = 1;
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ .priority = up
+ };
+
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return rval;
+
+ rval = dcb_setapp(netdev, &app);
switch (idtype) {
case DCB_APP_IDTYPE_ETHTYPE:
#ifdef IXGBE_FCOE
if (id == ETH_P_FCOE) {
- u8 tc;
- struct ixgbe_adapter *adapter;
+ u8 old_tc;
- adapter = netdev_priv(netdev);
- tc = adapter->fcoe.tc;
+ /* Get current programmed tc */
+ old_tc = adapter->fcoe.tc;
rval = ixgbe_fcoe_setapp(adapter, up);
- if ((!rval) && (tc != adapter->fcoe.tc) &&
- (adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+
+ if (rval ||
+ !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) ||
+ !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ break;
+
+ /* The FCoE application priority may be changed multiple
+ * times in quick succession with switches that build up
+ * TLVs. To avoid creating unneeded device resets this
+ * checks the actual HW configuration and clears
+ * BIT_APP_UPCHG if a HW configuration change is not
+ * needed.
+ */
+ if (old_tc == adapter->fcoe.tc)
+ adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG;
+ else
adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
#endif
break;
@@ -560,7 +620,204 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
return rval;
}
+static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
+
+ /* No IEEE ETS settings available */
+ if (!my_ets)
+ return -EINVAL;
+
+ ets->ets_cap = MAX_TRAFFIC_CLASS;
+ ets->cbs = my_ets->cbs;
+ memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+ return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
+ __u8 prio_type[IEEE_8021QAZ_MAX_TCS];
+ int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i, err;
+ __u64 *p = (__u64 *) ets->prio_tc;
+ /* naively give each TC a bwg to map onto CEE hardware */
+ __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ if (!adapter->ixgbe_ieee_ets) {
+ adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
+ GFP_KERNEL);
+ if (!adapter->ixgbe_ieee_ets)
+ return -ENOMEM;
+ }
+
+ memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+
+ /* Map TSA onto CEE prio type */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ prio_type[i] = 2;
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ prio_type[i] = 0;
+ break;
+ default:
+ /* Hardware only supports the strict priority and ETS
+ * transmission selection algorithms; if dcbnl hands us
+ * any other value, return an error.
+ */
+ return -EINVAL;
+ }
+ }
+
+ if (*p)
+ ixgbe_dcbnl_set_state(dev, 1);
+ else
+ ixgbe_dcbnl_set_state(dev, 0);
+
+ ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
+ err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+ bwg_id, prio_type, ets->prio_tc);
+ return err;
+}
+
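As a usage illustration, a minimal sketch of the struct ieee_ets an IEEE-DCBX application would hand down through dcbnl to reach this handler, assuming the uapi definitions in linux/dcbnl.h; the 60/40 split and the priority mapping are made-up example values:

#include <stdio.h>
#include <string.h>
#include <linux/dcbnl.h>

int main(void)
{
	struct ieee_ets ets;
	int i;

	memset(&ets, 0, sizeof(ets));
	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;

	/* Two ETS classes sharing the link 60/40; the rest stay unused. */
	ets.tc_tx_bw[0] = 60;
	ets.tc_tx_bw[1] = 40;
	ets.tc_tsa[0] = IEEE_8021QAZ_TSA_ETS;
	ets.tc_tsa[1] = IEEE_8021QAZ_TSA_ETS;
	for (i = 2; i < IEEE_8021QAZ_MAX_TCS; i++)
		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;

	/* Priorities 0-3 ride TC0, 4-7 ride TC1; a non-zero prio_tc map
	 * is what makes the handler above enable the DCB state. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		ets.prio_tc[i] = (i < 4) ? 0 : 1;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		printf("TC%d: bw=%d%% tsa=%d prio_tc=%d\n", i,
		       ets.tc_tx_bw[i], ets.tc_tsa[i], ets.prio_tc[i]);
	return 0;
}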
+static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
+ int i;
+
+ /* No IEEE PFC settings available */
+ if (!my_pfc)
+ return -EINVAL;
+
+ pfc->pfc_cap = MAX_TRAFFIC_CLASS;
+ pfc->pfc_en = my_pfc->pfc_en;
+ pfc->mbc = my_pfc->mbc;
+ pfc->delay = my_pfc->delay;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ pfc->requests[i] = adapter->stats.pxoffrxc[i];
+ pfc->indications[i] = adapter->stats.pxofftxc[i];
+ }
+
+ return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int err;
+
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ if (!adapter->ixgbe_ieee_pfc) {
+ adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
+ GFP_KERNEL);
+ if (!adapter->ixgbe_ieee_pfc)
+ return -ENOMEM;
+ }
+
+ memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
+ err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
+ return err;
+}
+
+static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+#ifdef IXGBE_FCOE
+ if (app->selector == 1 && app->protocol == ETH_P_FCOE) {
+ if (adapter->fcoe.tc == app->priority)
+ goto setapp;
+
+ /* In IEEE mode map up to tc 1:1 */
+ adapter->fcoe.tc = app->priority;
+ adapter->fcoe.up = app->priority;
+
+ /* Force the hardware reset required to push the FCoE
+ * setup onto the {tx|rx}_rings
+ */
+ adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
+ ixgbe_dcbnl_set_all(dev);
+ }
+
+setapp:
+#endif
+ dcb_setapp(dev, app);
+ return 0;
+}
+
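For the FCoE branch above, the APP table entry involved looks roughly like the following sketch; selector 1 is the IEEE ethertype selector checked by the handler, ETH_P_FCOE and struct dcb_app come from the uapi headers, and the priority value is an arbitrary example:

#include <stdio.h>
#include <linux/dcbnl.h>
#include <linux/if_ether.h>

int main(void)
{
	struct dcb_app app = {
		.selector = 1,			/* IEEE ethertype selector */
		.protocol = ETH_P_FCOE,		/* 0x8906 */
		.priority = 3,			/* example user priority */
	};

	printf("sel=%d proto=0x%04x prio=%d\n",
	       app.selector, app.protocol, app.priority);
	return 0;
}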
+static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ return adapter->dcbx_cap;
+}
+
+static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ieee_ets ets = {0};
+ struct ieee_pfc pfc = {0};
+
+ /* no support for LLD_MANAGED modes or CEE+IEEE */
+ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
+ !(mode & DCB_CAP_DCBX_HOST))
+ return 1;
+
+ if (mode == adapter->dcbx_cap)
+ return 0;
+
+ adapter->dcbx_cap = mode;
+
+ /* ETS and PFC defaults */
+ ets.ets_cap = 8;
+ pfc.pfc_cap = 8;
+
+ if (mode & DCB_CAP_DCBX_VER_IEEE) {
+ ixgbe_dcbnl_ieee_setets(dev, &ets);
+ ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
+ } else if (mode & DCB_CAP_DCBX_VER_CEE) {
+ adapter->dcb_set_bitmap |= (BIT_PFC | BIT_PG_TX | BIT_PG_RX);
+ ixgbe_dcbnl_set_all(dev);
+ } else {
+ /* Drop into single TC mode strict priority as this
+ * indicates CEE and IEEE versions are disabled
+ */
+ ixgbe_dcbnl_ieee_setets(dev, &ets);
+ ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
+ ixgbe_dcbnl_set_state(dev, 0);
+ }
+
+ return 0;
+}
+
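A small sketch of the mode filter implemented above, assuming the DCB_CAP_DCBX_* flag values from linux/dcbnl.h; it reports which example dcbx mode words the handler would accept:

#include <stdio.h>
#include <linux/dcbnl.h>

/* Mirrors the acceptance test in ixgbe_dcbnl_setdcbx(). */
static int dcbx_mode_ok(unsigned char mode)
{
	if (mode & DCB_CAP_DCBX_LLD_MANAGED)
		return 0;			/* LLD managed: not supported */
	if ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE))
		return 0;			/* CEE and IEEE at once: no */
	if (!(mode & DCB_CAP_DCBX_HOST))
		return 0;			/* must be host managed */
	return 1;
}

int main(void)
{
	unsigned char modes[] = {
		DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE,	/* accepted */
		DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE,	/* accepted */
		DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE |
				    DCB_CAP_DCBX_VER_IEEE,	/* rejected */
		DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE,/* rejected */
	};
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		printf("mode 0x%02x -> %s\n", modes[i],
		       dcbx_mode_ok(modes[i]) ? "accepted" : "rejected");
	return 0;
}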
const struct dcbnl_rtnl_ops dcbnl_ops = {
+ .ieee_getets = ixgbe_dcbnl_ieee_getets,
+ .ieee_setets = ixgbe_dcbnl_ieee_setets,
+ .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
+ .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc,
+ .ieee_setapp = ixgbe_dcbnl_ieee_setapp,
.getstate = ixgbe_dcbnl_get_state,
.setstate = ixgbe_dcbnl_set_state,
.getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
@@ -582,5 +839,6 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
.setpfcstate = ixgbe_dcbnl_setpfcstate,
.getapp = ixgbe_dcbnl_getapp,
.setapp = ixgbe_dcbnl_setapp,
+ .getdcbx = ixgbe_dcbnl_getdcbx,
+ .setdcbx = ixgbe_dcbnl_setdcbx,
};
-
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3dc731c22ff2..76380a2b35aa 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -152,20 +152,35 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->supported |= (SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+ break;
+ default:
+ break;
+ }
+
ecmd->advertising = ADVERTISED_Autoneg;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
- /*
- * It's possible that phy.autoneg_advertised may not be
- * set yet. If so display what the default would be -
- * both 1G and 10G supported.
- */
- if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full)))
+ if (hw->phy.autoneg_advertised) {
+ if (hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_100_FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ } else {
+ /*
+ * Default advertised modes in case
+ * phy.autoneg_advertised isn't set.
+ */
ecmd->advertising |= (ADVERTISED_10000baseT_Full |
ADVERTISED_1000baseT_Full);
+ if (hw->mac.type == ixgbe_mac_X540)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ }
if (hw->phy.media_type == ixgbe_media_type_copper) {
ecmd->supported |= SUPPORTED_TP;
@@ -185,6 +200,16 @@ static int ixgbe_get_settings(struct net_device *netdev,
ADVERTISED_FIBRE);
ecmd->port = PORT_FIBRE;
ecmd->autoneg = AUTONEG_DISABLE;
+ } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
+ (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
+ ecmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_Autoneg |
+ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
} else {
ecmd->supported |= (SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE);
@@ -204,6 +229,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
/* Get PHY type */
switch (adapter->hw.phy.type) {
case ixgbe_phy_tn:
+ case ixgbe_phy_aq:
case ixgbe_phy_cu_unknown:
/* Copper 10G-BASET */
ecmd->port = PORT_TP;
@@ -260,8 +286,19 @@ static int ixgbe_get_settings(struct net_device *netdev,
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (link_up) {
- ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
- SPEED_10000 : SPEED_1000;
+ switch (link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ecmd->speed = SPEED_10000;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ecmd->speed = SPEED_1000;
+ break;
+ case IXGBE_LINK_SPEED_100_FULL:
+ ecmd->speed = SPEED_100;
+ break;
+ default:
+ break;
+ }
ecmd->duplex = DUPLEX_FULL;
} else {
ecmd->speed = -1;
@@ -295,6 +332,9 @@ static int ixgbe_set_settings(struct net_device *netdev,
if (ecmd->advertising & ADVERTISED_1000baseT_Full)
advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (ecmd->advertising & ADVERTISED_100baseT_Full)
+ advertised |= IXGBE_LINK_SPEED_100_FULL;
+
if (old == advertised)
return err;
/* this sets the link speed and restarts auto-neg */
@@ -332,13 +372,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
else
pause->autoneg = 1;
-#ifdef CONFIG_DCB
- if (hw->fc.current_mode == ixgbe_fc_pfc) {
- pause->rx_pause = 0;
- pause->tx_pause = 0;
- }
-
-#endif
if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
pause->rx_pause = 1;
} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
@@ -346,6 +379,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
} else if (hw->fc.current_mode == ixgbe_fc_full) {
pause->rx_pause = 1;
pause->tx_pause = 1;
+#ifdef CONFIG_DCB
+ } else if (hw->fc.current_mode == ixgbe_fc_pfc) {
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+#endif
}
}
@@ -363,7 +401,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
return -EINVAL;
#endif
-
fc = hw->fc;
if (pause->autoneg != AUTONEG_ENABLE)
@@ -412,11 +449,6 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
else
adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
-
return 0;
}
@@ -428,16 +460,21 @@ static u32 ixgbe_get_tx_csum(struct net_device *netdev)
static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ u32 feature_list;
- if (data) {
- netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- netdev->features |= NETIF_F_SCTP_CSUM;
- } else {
- netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- netdev->features &= ~NETIF_F_SCTP_CSUM;
+ feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ feature_list |= NETIF_F_SCTP_CSUM;
+ break;
+ default:
+ break;
}
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
return 0;
}
@@ -530,10 +567,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
- for (i = 0; i < 8; i++)
- regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
- for (i = 0; i < 8; i++)
- regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+ for (i = 0; i < 8; i++) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+ break;
+ case ixgbe_mac_82599EB:
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+ break;
+ default:
+ break;
+ }
+ }
regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
@@ -615,6 +662,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
+ /* DCB */
regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -820,9 +868,10 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
char firmware_version[32];
- strncpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+ strncpy(drvinfo->driver, ixgbe_driver_name,
+ sizeof(drvinfo->driver) - 1);
strncpy(drvinfo->version, ixgbe_driver_version,
- sizeof(drvinfo->version));
+ sizeof(drvinfo->version) - 1);
snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
(adapter->eeprom_version & 0xF000) >> 12,
@@ -905,13 +954,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
sizeof(struct ixgbe_ring));
temp_tx_ring[i].count = new_tx_count;
- err = ixgbe_setup_tx_resources(adapter,
- &temp_tx_ring[i]);
+ err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_tx_resources(adapter,
- &temp_tx_ring[i]);
+ ixgbe_free_tx_resources(&temp_tx_ring[i]);
}
goto clear_reset;
}
@@ -930,13 +977,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
sizeof(struct ixgbe_ring));
temp_rx_ring[i].count = new_rx_count;
- err = ixgbe_setup_rx_resources(adapter,
- &temp_rx_ring[i]);
+ err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_rx_resources(adapter,
- &temp_rx_ring[i]);
+ ixgbe_free_rx_resources(&temp_rx_ring[i]);
}
goto err_setup;
}
@@ -951,8 +996,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* tx */
if (new_tx_count != adapter->tx_ring_count) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- ixgbe_free_tx_resources(adapter,
- adapter->tx_ring[i]);
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
sizeof(struct ixgbe_ring));
}
@@ -962,8 +1006,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* rx */
if (new_rx_count != adapter->rx_ring_count) {
for (i = 0; i < adapter->num_rx_queues; i++) {
- ixgbe_free_rx_resources(adapter,
- adapter->rx_ring[i]);
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
sizeof(struct ixgbe_ring));
}
@@ -1144,7 +1187,7 @@ struct ixgbe_reg_test {
#define TABLE64_TEST_HI 6
/* default 82599 register test */
-static struct ixgbe_reg_test reg_test_82599[] = {
+static const struct ixgbe_reg_test reg_test_82599[] = {
{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1168,7 +1211,7 @@ static struct ixgbe_reg_test reg_test_82599[] = {
};
/* default 82598 register test */
-static struct ixgbe_reg_test reg_test_82598[] = {
+static const struct ixgbe_reg_test reg_test_82598[] = {
{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1195,18 +1238,22 @@ static struct ixgbe_reg_test reg_test_82598[] = {
{ 0, 0, 0, 0 }
};
+static const u32 register_test_patterns[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+};
+
#define REG_PATTERN_TEST(R, M, W) \
{ \
u32 pat, val, before; \
- const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
- for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
+ for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
before = readl(adapter->hw.hw_addr + R); \
- writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
+ writel((register_test_patterns[pat] & W), \
+ (adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
- if (val != (_test[pat] & W & M)) { \
- e_err(drv, "pattern test reg %04X failed: got " \
- "0x%08X expected 0x%08X\n", \
- R, val, (_test[pat] & W & M)); \
+ if (val != (register_test_patterns[pat] & W & M)) { \
+ e_err(drv, "pattern test reg %04X failed: got " \
+ "0x%08X expected 0x%08X\n", \
+ R, val, (register_test_patterns[pat] & W & M)); \
*data = R; \
writel(before, adapter->hw.hw_addr + R); \
return 1; \
@@ -1233,16 +1280,24 @@ static struct ixgbe_reg_test reg_test_82598[] = {
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
- struct ixgbe_reg_test *test;
+ const struct ixgbe_reg_test *test;
u32 value, before, after;
u32 i, toggle;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- toggle = 0x7FFFF30F;
- test = reg_test_82599;
- } else {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
toggle = 0x7FFFF3FF;
test = reg_test_82598;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ toggle = 0x7FFFF30F;
+ test = reg_test_82599;
+ break;
+ default:
+ *data = 1;
+ return 1;
+ break;
}
/*
@@ -1451,25 +1506,28 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
reg_ctl &= ~IXGBE_RXCTRL_RXEN;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
- reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
- reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
+ ixgbe_disable_rx_queue(adapter, rx_ring);
/* now Tx */
reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
- if (hw->mac.type == ixgbe_mac_82599EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
reg_ctl &= ~IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
+ break;
+ default:
+ break;
}
ixgbe_reset(adapter);
- ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring);
- ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring);
+ ixgbe_free_tx_resources(&adapter->test_tx_ring);
+ ixgbe_free_rx_resources(&adapter->test_rx_ring);
}
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1483,17 +1541,24 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
tx_ring->count = IXGBE_DEFAULT_TXD;
tx_ring->queue_index = 0;
+ tx_ring->dev = &adapter->pdev->dev;
+ tx_ring->netdev = adapter->netdev;
tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
tx_ring->numa_node = adapter->node;
- err = ixgbe_setup_tx_resources(adapter, tx_ring);
+ err = ixgbe_setup_tx_resources(tx_ring);
if (err)
return 1;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
+ break;
+ default:
+ break;
}
ixgbe_configure_tx_ring(adapter, tx_ring);
@@ -1501,11 +1566,13 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
/* Setup Rx Descriptor ring and Rx buffers */
rx_ring->count = IXGBE_DEFAULT_RXD;
rx_ring->queue_index = 0;
+ rx_ring->dev = &adapter->pdev->dev;
+ rx_ring->netdev = adapter->netdev;
rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
rx_ring->numa_node = adapter->node;
- err = ixgbe_setup_rx_resources(adapter, rx_ring);
+ err = ixgbe_setup_rx_resources(rx_ring);
if (err) {
ret_val = 4;
goto err_nomem;
@@ -1604,8 +1671,7 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
return 13;
}
-static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
+static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
struct ixgbe_ring *tx_ring,
unsigned int size)
{
@@ -1627,7 +1693,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
/* unmap Rx buffer, will be remapped by alloc_rx_buffers */
- dma_unmap_single(&adapter->pdev->dev,
+ dma_unmap_single(rx_ring->dev,
rx_buffer_info->dma,
bufsz,
DMA_FROM_DEVICE);
@@ -1639,7 +1705,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
/* unmap buffer on Tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment Rx/Tx next to clean counters */
rx_ntc++;
@@ -1655,7 +1721,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
}
/* re-map buffers to ring, store next to clean values */
- ixgbe_alloc_rx_buffers(adapter, rx_ring, count);
+ ixgbe_alloc_rx_buffers(rx_ring, count);
rx_ring->next_to_clean = rx_ntc;
tx_ring->next_to_clean = tx_ntc;
@@ -1699,7 +1765,6 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
for (i = 0; i < 64; i++) {
skb_get(skb);
tx_ret_val = ixgbe_xmit_frame_ring(skb,
- adapter->netdev,
adapter,
tx_ring);
if (tx_ret_val == NETDEV_TX_OK)
@@ -1714,8 +1779,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
/* allow 200 milliseconds for packets to go from Tx to Rx */
msleep(200);
- good_cnt = ixgbe_clean_test_rings(adapter, rx_ring,
- tx_ring, size);
+ good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
if (good_cnt != 64) {
ret_val = 13;
break;
@@ -1847,7 +1911,25 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
int retval = 1;
+ /* WOL not supported except for the following */
switch(hw->device_id) {
+ case IXGBE_DEV_ID_82599_SFP:
+ /* Only this subdevice supports WOL */
+ if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
+ wol->supported = 0;
+ break;
+ }
+ retval = 0;
+ break;
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ /* All except this subdevice support WOL */
+ if (hw->subsystem_device_id ==
+ IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+ wol->supported = 0;
+ break;
+ }
+ retval = 0;
+ break;
case IXGBE_DEV_ID_82599_KX4:
retval = 0;
break;
@@ -1985,6 +2067,41 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
return 0;
}
+/*
+ * this function must be called before setting the new value of
+ * rx_itr_setting
+ */
+static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
+ struct ethtool_coalesce *ec)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
+ return false;
+
+ /* if interrupt rate is too high then disable RSC */
+ if (ec->rx_coalesce_usecs != 1 &&
+ ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ e_info(probe, "rx-usecs set too low, "
+ "disabling RSC\n");
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+ return true;
+ }
+ } else {
+ /* check the feature flag value and enable RSC if necessary */
+ if ((netdev->features & NETIF_F_LRO) &&
+ !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+ e_info(probe, "rx-usecs set to %d, "
+ "re-enabling RSC\n",
+ ec->rx_coalesce_usecs);
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ return true;
+ }
+ }
+ return false;
+}
+
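[Editor's note] The test in ixgbe_update_rsc() compares an interval (rx-usecs) against a rate limit, so the "<= 1000000/IXGBE_MAX_RSC_INT_RATE" form can look backwards at first glance. A minimal stand-alone sketch of the same cut-over follows; the 162760 interrupts/second value is assumed from ixgbe.h of this period and rsc_allowed() is a made-up helper, not a driver function.

	#include <stdio.h>
	#include <stdbool.h>

	#define MAX_RSC_INT_RATE 162760	/* assumed IXGBE_MAX_RSC_INT_RATE */

	static bool rsc_allowed(unsigned int usecs)
	{
		/* usecs == 1 selects dynamic ITR, which never conflicts with RSC */
		if (usecs == 1)
			return true;
		/* same test as above: a short interval means the resulting
		 * interrupt rate is higher than RSC can tolerate */
		return usecs > 1000000 / MAX_RSC_INT_RATE;
	}

	int main(void)
	{
		unsigned int samples[] = { 1, 2, 6, 7, 100 };
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("rx-usecs=%u -> RSC %s\n", samples[i],
			       rsc_allowed(samples[i]) ? "stays enabled" : "disabled");
		return 0;
	}

With the assumed constant, 1000000/162760 is 6, so settings of 2 to 6 usecs disable RSC while 7 and above (or dynamic mode) keep it.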
static int ixgbe_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -2002,17 +2119,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
- u32 max_int;
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
- max_int = IXGBE_MAX_RSC_INT_RATE;
- else
- max_int = IXGBE_MAX_INT_RATE;
-
/* check the limits */
- if ((1000000/ec->rx_coalesce_usecs > max_int) ||
+ if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
(1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
return -EINVAL;
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_update_rsc(adapter, ec);
+
/* store the value in ints/second */
adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
@@ -2021,32 +2135,21 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
/* clear the lower bit as its used for dynamic state */
adapter->rx_itr_setting &= ~1;
} else if (ec->rx_coalesce_usecs == 1) {
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_update_rsc(adapter, ec);
+
/* 1 means dynamic mode */
adapter->rx_eitr_param = 20000;
adapter->rx_itr_setting = 1;
} else {
+ /* check the old value and enable RSC if necessary */
+ need_reset = ixgbe_update_rsc(adapter, ec);
/*
* any other value means disable eitr, which is best
* served by setting the interrupt rate very high
*/
adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
adapter->rx_itr_setting = 0;
-
- /*
- * if hardware RSC is enabled, disable it when
- * setting low latency mode, to avoid errata, assuming
- * that when the user set low latency mode they want
- * it at the cost of anything else
- */
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
- if (netdev->features & NETIF_F_LRO) {
- netdev->features &= ~NETIF_F_LRO;
- e_info(probe, "rx-usecs set to 0, "
- "disabling RSC\n");
- }
- need_reset = true;
- }
}
if (ec->tx_coalesce_usecs > 1) {
@@ -2127,34 +2230,45 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
need_reset = (data & ETH_FLAG_RXVLAN) !=
(netdev->features & NETIF_F_HW_VLAN_RX);
- rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO |
+ rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
if (rc)
return rc;
/* if state changes we need to update adapter->flags and reset */
- if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
- /*
- * cast both to bool and verify if they are set the same
- * but only enable RSC if itr is non-zero, as
- * itr=0 and RSC are mutually exclusive
- */
- if (((!!(data & ETH_FLAG_LRO)) !=
- (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
- adapter->rx_itr_setting) {
+ if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
+ (!!(data & ETH_FLAG_LRO) !=
+ !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
+ if ((data & ETH_FLAG_LRO) &&
+ (!adapter->rx_itr_setting ||
+ (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
+ e_info(probe, "rx-usecs set too low, "
+ "not enabling RSC.\n");
+ } else {
adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
need_reset = true;
break;
+ case ixgbe_mac_X540: {
+ int i;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring =
+ adapter->rx_ring[i];
+ if (adapter->flags2 &
+ IXGBE_FLAG2_RSC_ENABLED) {
+ ixgbe_configure_rscctl(adapter,
+ ring);
+ } else {
+ ixgbe_clear_rscctl(adapter,
+ ring);
+ }
+ }
+ }
+ break;
default:
break;
}
- } else if (!adapter->rx_itr_setting) {
- netdev->features &= ~NETIF_F_LRO;
- if (data & ETH_FLAG_LRO)
- e_info(probe, "rx-usecs set to 0, "
- "LRO/RSC cannot be enabled.\n");
}
}
@@ -2192,10 +2306,11 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
struct ethtool_rx_ntuple *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
- struct ixgbe_atr_input input_struct;
+ struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
+ union ixgbe_atr_input input_struct;
struct ixgbe_atr_input_masks input_masks;
int target_queue;
+ int err;
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
return -EOPNOTSUPP;
@@ -2204,67 +2319,122 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
* Don't allow programming if the action is a queue greater than
* the number of online Tx queues.
*/
- if ((fs.action >= adapter->num_tx_queues) ||
- (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+ if ((fs->action >= adapter->num_tx_queues) ||
+ (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
return -EINVAL;
- memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+ memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
- input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
- input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
- input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
- input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
- input_masks.vlan_id_mask = fs.vlan_tag_mask;
- /* only use the lowest 2 bytes for flex bytes */
- input_masks.data_mask = (fs.data_mask & 0xffff);
-
- switch (fs.flow_type) {
+ /* record flow type */
+ switch (fs->flow_type) {
+ case IPV4_FLOW:
+ input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+ break;
case TCP_V4_FLOW:
- ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+ input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
break;
case UDP_V4_FLOW:
- ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+ input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
break;
case SCTP_V4_FLOW:
- ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+ input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
break;
default:
return -1;
}
- /* Mask bits from the inputs based on user-supplied mask */
- ixgbe_atr_set_src_ipv4_82599(&input_struct,
- (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
- ixgbe_atr_set_dst_ipv4_82599(&input_struct,
- (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
- /* 82599 expects these to be byte-swapped for perfect filtering */
- ixgbe_atr_set_src_port_82599(&input_struct,
- ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
- ixgbe_atr_set_dst_port_82599(&input_struct,
- ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
-
- /* VLAN and Flex bytes are either completely masked or not */
- if (!fs.vlan_tag_mask)
- ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
-
- if (!input_masks.data_mask)
- /* make sure we only use the first 2 bytes of user data */
- ixgbe_atr_set_flex_byte_82599(&input_struct,
- (fs.data & 0xffff));
+ /* copy vlan tag minus the CFI bit */
+ if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
+ input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
+ if (!fs->vlan_tag_mask) {
+ input_masks.vlan_id_mask = htons(0xEFFF);
+ } else {
+ switch (~fs->vlan_tag_mask & 0xEFFF) {
+ /* all of these are valid vlan-mask values */
+ case 0xEFFF:
+ case 0xE000:
+ case 0x0FFF:
+ case 0x0000:
+ input_masks.vlan_id_mask =
+ htons(~fs->vlan_tag_mask);
+ break;
+ /* exit with error if vlan-mask is invalid */
+ default:
+ e_err(drv, "Partial VLAN ID or "
+ "priority mask in vlan-mask is not "
+ "supported by hardware\n");
+ return -1;
+ }
+ }
+ }
+
+ /* make sure we only use the first 2 bytes of user data */
+ if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
+ input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
+ if (!(fs->data_mask & 0xFFFF)) {
+ input_masks.flex_mask = 0xFFFF;
+ } else if (~fs->data_mask & 0xFFFF) {
+ e_err(drv, "Partial user-def-mask is not "
+ "supported by hardware\n");
+ return -1;
+ }
+ }
+
+ /*
+ * Copy input into formatted structures
+ *
+ * These assignments are based on the following logic:
+ * If neither input nor mask are set, assume the value is masked out.
+ * If input is set but mask is not, the mask should default to accept all.
+ * If input is not set but mask is set, the mask likely results in 0.
+ * If both input and mask are set, assign both.
+ */
+ if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
+ input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
+ if (!fs->m_u.tcp_ip4_spec.ip4src)
+ input_masks.src_ip_mask[0] = 0xFFFFFFFF;
+ else
+ input_masks.src_ip_mask[0] =
+ ~fs->m_u.tcp_ip4_spec.ip4src;
+ }
+ if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
+ input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
+ if (!fs->m_u.tcp_ip4_spec.ip4dst)
+ input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
+ else
+ input_masks.dst_ip_mask[0] =
+ ~fs->m_u.tcp_ip4_spec.ip4dst;
+ }
+ if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
+ input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
+ if (!fs->m_u.tcp_ip4_spec.psrc)
+ input_masks.src_port_mask = 0xFFFF;
+ else
+ input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
+ }
+ if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
+ input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
+ if (!fs->m_u.tcp_ip4_spec.pdst)
+ input_masks.dst_port_mask = 0xFFFF;
+ else
+ input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
+ }
/* determine if we need to drop or route the packet */
- if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+ if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
target_queue = MAX_RX_QUEUES - 1;
else
- target_queue = fs.action;
+ target_queue = fs->action;
spin_lock(&adapter->fdir_perfect_lock);
- ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
- &input_masks, 0, target_queue);
+ err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
+ &input_struct,
+ &input_masks, 0,
+ target_queue);
spin_unlock(&adapter->fdir_perfect_lock);
- return 0;
+ return err ? -1 : 0;
}
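[Editor's note] The mask handling in the hunk above inverts ethtool's ignore-style masks (set bits mean "don't care") into the match-style masks the perfect filters expect, and defaults to "match every bit" when a value is supplied without a mask. A small stand-alone sketch of that conversion; to_match_mask() is a hypothetical helper, not a driver function.

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t to_match_mask(uint32_t value, uint32_t ignore_mask)
	{
		if (!value && !(uint32_t)~ignore_mask)
			return 0;		/* field not specified: leave it masked out */
		if (!ignore_mask)
			return 0xFFFFFFFF;	/* value given, no mask: match every bit */
		return ~ignore_mask;		/* otherwise invert the user's ignore mask */
	}

	int main(void)
	{
		printf("%#010x\n", to_match_mask(0xC0A80001, 0x00000000)); /* 0xffffffff */
		printf("%#010x\n", to_match_mask(0xC0A80000, 0x000000FF)); /* 0xffffff00 */
		printf("%#010x\n", to_match_mask(0x00000000, 0xFFFFFFFF)); /* 0x00000000 */
		return 0;
	}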
static const struct ethtool_ops ixgbe_ethtool_ops = {
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 05efa6a8ce8e..dba7d77588ef 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -68,7 +68,7 @@ static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
ddp->len = 0;
- ddp->err = 0;
+ ddp->err = 1;
ddp->udl = NULL;
ddp->udp = 0UL;
ddp->sgl = NULL;
@@ -92,6 +92,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
struct ixgbe_fcoe *fcoe;
struct ixgbe_adapter *adapter;
struct ixgbe_fcoe_ddp *ddp;
+ u32 fcbuff;
if (!netdev)
goto out_ddp_put;
@@ -115,7 +116,14 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
(xid | IXGBE_FCDMARW_WE));
+
+ /* guaranteed to be invalidated after 100us */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+ (xid | IXGBE_FCDMARW_RE));
+ fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
spin_unlock_bh(&fcoe->lock);
+ if (fcbuff & IXGBE_FCBUFF_VALID)
+ udelay(100);
}
if (ddp->sgl)
pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
@@ -127,22 +135,19 @@ out_ddp_put:
return len;
}
+
/**
- * ixgbe_fcoe_ddp_get - called to set up ddp context
+ * ixgbe_fcoe_ddp_setup - called to set up ddp context
* @netdev: the corresponding net_device
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
*
- * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
- * and is expected to be called from ULD, e.g., FCP layer of libfc
- * to set up ddp for the corresponding xid of the given sglist for
- * the corresponding I/O.
- *
* Returns : 1 for success and 0 for no ddp
*/
-int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
- struct scatterlist *sgl, unsigned int sgc)
+static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc,
+ int target_mode)
{
struct ixgbe_adapter *adapter;
struct ixgbe_hw *hw;
@@ -151,13 +156,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sg;
unsigned int i, j, dmacount;
unsigned int len;
- static const unsigned int bufflen = 4096;
+ static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
unsigned int firstoff = 0;
unsigned int lastsize;
unsigned int thisoff = 0;
unsigned int thislen = 0;
- u32 fcbuff, fcdmarw, fcfltrw;
- dma_addr_t addr;
+ u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
+ dma_addr_t addr = 0;
if (!netdev || !sgl)
return 0;
@@ -168,6 +173,11 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
return 0;
}
+ /* no DDP if we are already down or resetting */
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return 0;
+
fcoe = &adapter->fcoe;
if (!fcoe->pool) {
e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
@@ -241,9 +251,30 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
/* only the last buffer may have non-full bufflen */
lastsize = thisoff + thislen;
+ /*
+ * lastsize can not be bufflen.
+ * If it is, chain one more buffer and set lastsize to 1.
+ */
+ if (lastsize == bufflen) {
+ if (j >= IXGBE_BUFFCNT_MAX) {
+ e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+ "not enough user buffers. We need an extra "
+ "buffer because lastsize is bufflen.\n",
+ xid, i, j, dmacount, (u64)addr);
+ goto out_noddp_free;
+ }
+
+ ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+ j++;
+ lastsize = 1;
+ }
+
fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
+ /* Set WRCONTX bit to allow DDP for target */
+ if (target_mode)
+ fcbuff |= (IXGBE_FCBUFF_WRCONTX);
fcbuff |= (IXGBE_FCBUFF_VALID);
fcdmarw = xid;
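[Editor's note] A stand-alone illustration of when the extra-buffer workaround in the previous hunk kicks in: an I/O that ends exactly on a DDP buffer boundary leaves lastsize equal to bufflen, so the shared extra buffer is appended and lastsize drops to 1. The arithmetic below is a simplification that assumes contiguous, 4 KB-aligned user buffers; the real code walks a scatter-gather list.

	#include <stdio.h>

	#define BUFFLEN 4096u	/* DDP buffer size, IXGBE_FCBUFF_MIN */

	int main(void)
	{
		unsigned int lengths[] = { 8192, 8200, 4196 };
		unsigned int firstoff = 0;	/* offset of the first byte in its buffer */
		unsigned int i;

		for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
			unsigned int len = lengths[i];
			unsigned int bufcnt = (firstoff + len + BUFFLEN - 1) / BUFFLEN;
			unsigned int lastsize = firstoff + len - (bufcnt - 1) * BUFFLEN;

			printf("len=%u -> %u buffers, lastsize=%u%s\n",
			       len, bufcnt, lastsize,
			       lastsize == BUFFLEN ?
			       " (would take the extra shared buffer)" : "");
		}
		return 0;
	}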
@@ -256,6 +287,16 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
/* program DMA context */
hw = &adapter->hw;
spin_lock_bh(&fcoe->lock);
+
+ /* turn on last frame indication for target mode as the target is
+ * supposed to send FCP_RSP when it is done. */
+ if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
+ set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
+ fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
+ fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
+ }
+
IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
@@ -264,6 +305,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
+
spin_unlock_bh(&fcoe->lock);
return 1;
@@ -278,6 +320,47 @@ out_noddp_unmap:
}
/**
+ * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
+}
+
+/**
+ * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O. The DDP in target mode is a write I/O request
+ * from the initiator.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ */
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
+}
+
+/**
* ixgbe_fcoe_ddp - check ddp status and mark it done
* @adapter: ixgbe adapter
* @rx_desc: advanced rx descriptor
@@ -300,6 +383,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
struct ixgbe_fcoe *fcoe;
struct ixgbe_fcoe_ddp *ddp;
struct fc_frame_header *fh;
+ struct fcoe_crc_eof *crc;
if (!ixgbe_rx_is_fcoe(rx_desc))
goto ddp_out;
@@ -353,7 +437,18 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
else if (ddp->len)
rc = ddp->len;
}
-
+ /* In target mode, check the last data frame of the sequence.
+ * For DDP in target mode, data is already DDPed but the header
+ * indication of the last data frame would allow us to tell if we
+ * got all the data and the ULP can send FCP_RSP back. As this is
+ * not a full fcoe frame, we fill the trailer here so it won't be
+ * dropped by the ULP stack.
+ */
+ if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
+ (fctl & FC_FC_END_SEQ)) {
+ crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
+ crc->fcoe_eof = FC_EOF_T;
+ }
ddp_out:
return rc;
}
@@ -519,6 +614,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
e_err(drv, "failed to allocated FCoE DDP pool\n");
spin_lock_init(&fcoe->lock);
+
+ /* Extra buffer to be shared by all DDPs for HW work around */
+ fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+ if (fcoe->extra_ddp_buffer == NULL) {
+ e_err(drv, "failed to allocated extra DDP buffer\n");
+ goto out_extra_ddp_buffer_alloc;
+ }
+
+ fcoe->extra_ddp_buffer_dma =
+ dma_map_single(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer,
+ IXGBE_FCBUFF_MIN,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer_dma)) {
+ e_err(drv, "failed to map extra DDP buffer\n");
+ goto out_extra_ddp_buffer_dma;
+ }
}
/* Enable L2 eth type filter for FCoE */
@@ -568,6 +681,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
}
}
#endif
+
+ return;
+
+out_extra_ddp_buffer_dma:
+ kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+ pci_pool_destroy(fcoe->pool);
+ fcoe->pool = NULL;
}
/**
@@ -587,6 +708,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
if (fcoe->pool) {
for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
ixgbe_fcoe_ddp_put(adapter->netdev, i);
+ dma_unmap_single(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer_dma,
+ IXGBE_FCBUFF_MIN,
+ DMA_FROM_DEVICE);
+ kfree(fcoe->extra_ddp_buffer);
pci_pool_destroy(fcoe->pool);
fcoe->pool = NULL;
}
@@ -687,21 +813,6 @@ out_disable:
#ifdef CONFIG_IXGBE_DCB
/**
- * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE
- * @adapter : ixgbe adapter
- *
- * Finds out the corresponding user priority bitmap from the current
- * traffic class that FCoE belongs to. Returns 0 as the invalid user
- * priority bitmap to indicate an error.
- *
- * Returns : 802.1p user priority bitmap for FCoE
- */
-u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter)
-{
- return 1 << adapter->fcoe.up;
-}
-
-/**
* ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
* @adapter : ixgbe adapter
* @up : 802.1p user priority bitmap
@@ -778,5 +889,3 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
}
return rc;
}
-
-
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 4bc2c551c8db..5a650a4ace66 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -52,6 +52,9 @@
/* fcerr */
#define IXGBE_FCERR_BADCRC 0x00100000
+/* FCoE DDP for target mode */
+#define __IXGBE_FCOE_TARGET 1
+
struct ixgbe_fcoe_ddp {
int len;
u32 err;
@@ -66,10 +69,13 @@ struct ixgbe_fcoe {
u8 tc;
u8 up;
#endif
+ unsigned long mode;
atomic_t refcnt;
spinlock_t lock;
struct pci_pool *pool;
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+ unsigned char *extra_ddp_buffer;
+ dma_addr_t extra_ddp_buffer_dma;
};
#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index fbad4d819608..f17e4a7ee731 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -52,13 +52,15 @@ char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "2.0.84-k2"
+#define DRV_VERSION "3.2.9-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
-static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
+static const char ixgbe_copyright[] =
+ "Copyright (c) 1999-2011 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
[board_82599] = &ixgbe_82599_info,
+ [board_X540] = &ixgbe_X540_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -108,10 +110,16 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
+ board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
+ board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
+ board_X540 },
/* required last entry */
{0, }
@@ -560,6 +568,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -589,29 +598,34 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
{
u32 mask;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
- } else {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+ break;
+ default:
+ break;
}
}
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
- struct ixgbe_tx_buffer
- *tx_buffer_info)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *tx_buffer_info)
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_page(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
else
- dma_unmap_single(&adapter->pdev->dev,
+ dma_unmap_single(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
@@ -626,92 +640,166 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
}
/**
- * ixgbe_tx_xon_state - check the tx ring xon state
- * @adapter: the ixgbe adapter
- * @tx_ring: the corresponding tx_ring
+ * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
+ * @adapter: driver private struct
+ * @reg_idx: reg idx of queue to query (0-127)
*
- * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
- * corresponding TC of this tx_ring when checking TFCS.
+ * Helper function to determine the traffic index for a particular
+ * register index.
*
- * Returns : true if in xon state (currently not paused)
+ * Returns : a tc index for use in range 0-7, or 0-3
*/
-static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
{
- u32 txoff = IXGBE_TFCS_TXOFF;
+ int tc = -1;
+ int dcb_i = netdev_get_num_tc(adapter->netdev);
-#ifdef CONFIG_IXGBE_DCB
- if (adapter->dcb_cfg.pfc_mode_enable) {
- int tc;
- int reg_idx = tx_ring->reg_idx;
- int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+ /* if DCB is not enabled the queues have no TC */
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return tc;
+
+ /* check valid range */
+ if (reg_idx >= adapter->hw.mac.max_tx_queues)
+ return tc;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ tc = reg_idx >> 2;
+ break;
+ default:
+ if (dcb_i != 4 && dcb_i != 8)
+ break;
+
+ /* if VMDq is enabled the lowest order bits determine TC */
+ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED)) {
+ tc = reg_idx & (dcb_i - 1);
+ break;
+ }
- switch (adapter->hw.mac.type) {
+ /*
+ * Convert the reg_idx into the correct TC. This bitmask
+ * targets the last full 32 ring traffic class and assigns
+ * it a value of 1. From there the rest of the rings are
+ * based on shifting the mask further up to include the
+ * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
+ * will only ever be 8 or 4 and that reg_idx will never
+ * be greater than 128. The code without the power of 2
+ * optimizations would be:
+ * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
+ */
+ tc = ((reg_idx & 0X1F) + 0x20) * dcb_i;
+ tc >>= 9 - (reg_idx >> 5);
+ }
+
+ return tc;
+}
+
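[Editor's note] A stand-alone check that the shift/mask trick above matches the long-hand formula quoted in its comment, plus a dump of the resulting queue-to-TC boundaries for the 4- and 8-TC layouts. This only models the default (82599-style, no VMDq/SR-IOV) branch of the function.

	#include <stdio.h>
	#include <assert.h>

	/* the shift/mask form used in ixgbe_dcb_txq_to_tc() */
	static int txq_to_tc_fast(int reg_idx, int dcb_i)
	{
		int tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;

		return tc >> (9 - (reg_idx >> 5));
	}

	/* the long-hand form quoted in the comment */
	static int txq_to_tc_slow(int reg_idx, int dcb_i)
	{
		return (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32);
	}

	int main(void)
	{
		int dcb_i, reg_idx;

		for (dcb_i = 4; dcb_i <= 8; dcb_i += 4) {
			int prev = -1;

			printf("dcb_i=%d:", dcb_i);
			for (reg_idx = 0; reg_idx < 128; reg_idx++) {
				int tc = txq_to_tc_fast(reg_idx, dcb_i);

				assert(tc == txq_to_tc_slow(reg_idx, dcb_i));
				if (tc != prev) {
					printf(" TC%d from queue %d", tc, reg_idx);
					prev = tc;
				}
			}
			printf("\n");
		}
		return 0;
	}

For dcb_i=8 this prints TC boundaries at queues 0, 32, 64, 80, 96, 104, 112 and 120 (the 32/32/16/16/8/8/8/8 layout), and for dcb_i=4 at queues 0, 64, 96 and 112.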
+static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ u32 data = 0;
+ u32 xoff[8] = {0};
+ int i;
+
+ if ((hw->fc.current_mode == ixgbe_fc_full) ||
+ (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
+ switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- tc = reg_idx >> 2;
- txoff = IXGBE_TFCS_TXOFF0;
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
break;
- case ixgbe_mac_82599EB:
- tc = 0;
- txoff = IXGBE_TFCS_TXOFF;
- if (dcb_i == 8) {
- /* TC0, TC1 */
- tc = reg_idx >> 5;
- if (tc == 2) /* TC2, TC3 */
- tc += (reg_idx - 64) >> 4;
- else if (tc == 3) /* TC4, TC5, TC6, TC7 */
- tc += 1 + ((reg_idx - 96) >> 3);
- } else if (dcb_i == 4) {
- /* TC0, TC1 */
- tc = reg_idx >> 6;
- if (tc == 1) {
- tc += (reg_idx - 64) >> 5;
- if (tc == 2) /* TC2, TC3 */
- tc += (reg_idx - 96) >> 4;
- }
- }
+ default:
+ data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ }
+ hwstats->lxoffrxc += data;
+
+ /* refill credits (no tx hang) if we received xoff */
+ if (!data)
+ return;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ clear_bit(__IXGBE_HANG_CHECK_ARMED,
+ &adapter->tx_ring[i]->state);
+ return;
+ } else if (!(adapter->dcb_cfg.pfc_mode_enable))
+ return;
+
+ /* update stats for each tc, only valid with PFC enabled */
+ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
break;
default:
- tc = 0;
+ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
}
- txoff <<= tc;
+ hwstats->pxoffrxc[i] += xoff[i];
+ }
+
+ /* disarm tx queues that have received xoff frames */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
+
+ if (xoff[tc])
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
}
-#endif
- return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
}
-static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- unsigned int eop)
+static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
+ return ring->tx_stats.completed;
+}
+
+static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
struct ixgbe_hw *hw = &adapter->hw;
- /* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of eop */
- adapter->detect_tx_hung = false;
- if (tx_ring->tx_buffer_info[eop].time_stamp &&
- time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
- ixgbe_tx_xon_state(adapter, tx_ring)) {
- /* detected Tx unit hang */
- union ixgbe_adv_tx_desc *tx_desc;
- tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
- e_err(drv, "Detected Tx Unit Hang\n"
- " Tx Queue <%d>\n"
- " TDH, TDT <%x>, <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "tx_buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- tx_ring->queue_index,
- IXGBE_READ_REG(hw, tx_ring->head),
- IXGBE_READ_REG(hw, tx_ring->tail),
- tx_ring->next_to_use, eop,
- tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
- return true;
+ u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
+ u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
+}
+
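[Editor's note] The head/tail arithmetic in ixgbe_get_tx_pending(), extracted into a stand-alone sketch to show the ring wrap-around case. Head is the next descriptor the hardware will fetch and tail the next one the driver will write.

	#include <stdio.h>

	static unsigned int tx_pending(unsigned int head, unsigned int tail,
				       unsigned int count)
	{
		if (head == tail)
			return 0;
		return (head < tail) ? tail - head : tail + count - head;
	}

	int main(void)
	{
		printf("%u\n", tx_pending(10, 14, 512));	/* 4, no wrap      */
		printf("%u\n", tx_pending(510, 2, 512));	/* 4, tail wrapped */
		printf("%u\n", tx_pending(7, 7, 512));		/* 0, ring idle    */
		return 0;
	}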
+static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+{
+ u32 tx_done = ixgbe_get_tx_completed(tx_ring);
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+ u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
+ bool ret = false;
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /*
+ * Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang. The
+ * bit is cleared if a pause frame is received to remove
+ * false hang detection due to PFC or 802.3x frames. By
+ * requiring this to fail twice we avoid races with
+ * pfc clearing the ARMED bit and conditions where we
+ * run the check_tx_hang logic with a transmit completion
+ * pending but without time to complete it yet.
+ */
+ if ((tx_done_old == tx_done) && tx_pending) {
+ /* make sure it is true for two checks in a row */
+ ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ } else {
+ /* update completed stats and continue */
+ tx_ring->tx_stats.tx_done_old = tx_done;
+ /* reset the countdown */
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
}
- return false;
+ return ret;
}
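[Editor's note] A user-space approximation of the two-strike arming scheme described in the comment above: a hang is only reported on the second consecutive check that sees no new completions while work is still pending. The real code uses test_and_set_bit() on the ring state and lets the XOFF/PFC path clear the bit asynchronously, which this sketch does not model.

	#include <stdio.h>
	#include <stdbool.h>

	struct ring_state {
		unsigned long tx_done_old;
		bool armed;			/* stands in for __IXGBE_HANG_CHECK_ARMED */
	};

	static bool check_tx_hang(struct ring_state *r, unsigned long tx_done,
				  unsigned long tx_pending)
	{
		if (r->tx_done_old == tx_done && tx_pending) {
			bool was_armed = r->armed;

			r->armed = true;	/* arm on the first miss */
			return was_armed;	/* hang only if already armed */
		}
		r->tx_done_old = tx_done;	/* progress was made: disarm */
		r->armed = false;
		return false;
	}

	int main(void)
	{
		struct ring_state r = { 0, false };

		printf("%d\n", check_tx_hang(&r, 100, 5));	/* 0: progress recorded */
		printf("%d\n", check_tx_hang(&r, 100, 5));	/* 0: first miss, arm   */
		printf("%d\n", check_tx_hang(&r, 100, 5));	/* 1: second miss, hang */
		printf("%d\n", check_tx_hang(&r, 120, 5));	/* 0: progress, disarm  */
		return 0;
	}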
#define IXGBE_MAX_TXD_PWR 14
@@ -734,11 +822,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
- struct net_device *netdev = adapter->netdev;
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
struct ixgbe_tx_buffer *tx_buffer_info;
- unsigned int i, eop, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
+ u16 i, eop, count = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
@@ -749,148 +836,182 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
bool cleaned = false;
rmb(); /* read buffer_info after eop_desc */
for ( ; !cleaned; count++) {
- struct sk_buff *skb;
tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- cleaned = (i == eop);
- skb = tx_buffer_info->skb;
-
- if (cleaned && skb) {
- unsigned int segs, bytecount;
- unsigned int hlen = skb_headlen(skb);
-
- /* gso_segs is currently only valid for tcp */
- segs = skb_shinfo(skb)->gso_segs ?: 1;
-#ifdef IXGBE_FCOE
- /* adjust for FCoE Sequence Offload */
- if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- && skb_is_gso(skb)
- && vlan_get_protocol(skb) ==
- htons(ETH_P_FCOE)) {
- hlen = skb_transport_offset(skb) +
- sizeof(struct fc_frame_header) +
- sizeof(struct fcoe_crc_eof);
- segs = DIV_ROUND_UP(skb->len - hlen,
- skb_shinfo(skb)->gso_size);
- }
-#endif /* IXGBE_FCOE */
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * hlen) + skb->len;
- total_packets += segs;
- total_bytes += bytecount;
- }
-
- ixgbe_unmap_and_free_tx_resource(adapter,
- tx_buffer_info);
tx_desc->wb.status = 0;
+ cleaned = (i == eop);
i++;
if (i == tx_ring->count)
i = 0;
+
+ if (cleaned && tx_buffer_info->skb) {
+ total_bytes += tx_buffer_info->bytecount;
+ total_packets += tx_buffer_info->gso_segs;
+ }
+
+ ixgbe_unmap_and_free_tx_resource(tx_ring,
+ tx_buffer_info);
}
+ tx_ring->tx_stats.completed++;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
}
tx_ring->next_to_clean = i;
+ tx_ring->total_bytes += total_bytes;
+ tx_ring->total_packets += total_packets;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.packets += total_packets;
+ tx_ring->stats.bytes += total_bytes;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
+ /* schedule immediate reset if we believe we hung */
+ struct ixgbe_hw *hw = &adapter->hw;
+ tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+ e_err(drv, "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+ IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
+ tx_ring->next_to_use, eop,
+ tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ e_info(probe,
+ "tx hang %d detected on queue %d, resetting adapter\n",
+ adapter->tx_timeout_count + 1, tx_ring->queue_index);
+
+ /* schedule immediate reset if we believe we hung */
+ ixgbe_tx_timeout(adapter->netdev);
+
+ /* the adapter is about to reset, no point in enabling stuff */
+ return true;
+ }
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(count && netif_carrier_ok(netdev) &&
+ if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
(IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
- if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+ if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
!test_bit(__IXGBE_DOWN, &adapter->state)) {
- netif_wake_subqueue(netdev, tx_ring->queue_index);
- ++tx_ring->restart_queue;
- }
- }
-
- if (adapter->detect_tx_hung) {
- if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
- /* schedule immediate reset if we believe we hung */
- e_info(probe, "tx hang %d detected, resetting "
- "adapter\n", adapter->tx_timeout_count + 1);
- ixgbe_tx_timeout(adapter->netdev);
+ netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
}
}
- /* re-arm the interrupt */
- if (count >= tx_ring->work_limit)
- ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
-
- tx_ring->total_bytes += total_bytes;
- tx_ring->total_packets += total_packets;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.packets += total_packets;
- tx_ring->stats.bytes += total_bytes;
- u64_stats_update_end(&tx_ring->syncp);
return count < tx_ring->work_limit;
}
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring,
+ int cpu)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 rxctrl;
- int cpu = get_cpu();
- int q = rx_ring->reg_idx;
-
- if (rx_ring->cpu != cpu) {
- rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
- rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
- rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
- IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
- }
- rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
- rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
- rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
- rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
- rx_ring->cpu = cpu;
+ u8 reg_idx = rx_ring->reg_idx;
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+ rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
+ rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+ IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+ break;
+ default:
+ break;
}
- put_cpu();
+ rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+ rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
+ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring,
+ int cpu)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 txctrl;
+ u8 reg_idx = tx_ring->reg_idx;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+ txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
+ txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+ IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
int cpu = get_cpu();
- int q = tx_ring->reg_idx;
- struct ixgbe_hw *hw = &adapter->hw;
+ long r_idx;
+ int i;
- if (tx_ring->cpu != cpu) {
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
- txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
- txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
- IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
- }
- tx_ring->cpu = cpu;
+ if (q_vector->cpu == cpu)
+ goto out_no_update;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
}
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ q_vector->cpu = cpu;
+out_no_update:
put_cpu();
}
static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
+ int num_q_vectors;
int i;
if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -899,22 +1020,25 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
/* always use CB2 mode, difference is masked in the CB driver */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
- for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i]->cpu = -1;
- ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i]->cpu = -1;
- ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_q_vectors = 1;
+
+ for (i = 0; i < num_q_vectors; i++) {
+ adapter->q_vector[i]->cpu = -1;
+ ixgbe_update_dca(adapter->q_vector[i]);
}
}
static int __ixgbe_notify_dca(struct device *dev, void *data)
{
- struct net_device *netdev = dev_get_drvdata(dev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
unsigned long event = *(unsigned long *)data;
+ if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+ return 0;
+
switch (event) {
case DCA_PROVIDER_ADD:
/* if we're already enabled, don't do it again */
@@ -1013,8 +1137,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
-static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
- struct ixgbe_ring *rx_ring, u32 val)
+static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
/*
* Force memory writes to complete before letting h/w
@@ -1023,72 +1146,81 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
* such as IA-64).
*/
wmb();
- IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
+ writel(val, rx_ring->tail);
}
/**
* ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
**/
-void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- int cleaned_count)
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
- unsigned int i;
- unsigned int bufsz = rx_ring->rx_buf_len;
+ struct sk_buff *skb;
+ u16 i = rx_ring->next_to_use;
- i = rx_ring->next_to_use;
- bi = &rx_ring->rx_buffer_info[i];
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev)
+ return;
while (cleaned_count--) {
rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ skb = bi->skb;
- if (!bi->page_dma &&
- (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
- if (!bi->page) {
- bi->page = netdev_alloc_page(netdev);
- if (!bi->page) {
- adapter->alloc_rx_page_failed++;
- goto no_buffers;
- }
- bi->page_offset = 0;
- } else {
- /* use a half page if we're re-using */
- bi->page_offset ^= (PAGE_SIZE / 2);
- }
-
- bi->page_dma = dma_map_page(&pdev->dev, bi->page,
- bi->page_offset,
- (PAGE_SIZE / 2),
- DMA_FROM_DEVICE);
- }
-
- if (!bi->skb) {
- struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
- bufsz);
- bi->skb = skb;
-
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
if (!skb) {
- adapter->alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
goto no_buffers;
}
/* initialize queue mapping */
skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
}
if (!bi->dma) {
- bi->dma = dma_map_single(&pdev->dev,
- bi->skb->data,
+ bi->dma = dma_map_single(rx_ring->dev,
+ skb->data,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
}
- /* Refresh the desc even if buffer_addrs didn't change because
- * each write-back erases this info. */
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ if (!bi->page) {
+ bi->page = netdev_alloc_page(rx_ring->netdev);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info. */
rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
} else {
@@ -1099,56 +1231,48 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
i++;
if (i == rx_ring->count)
i = 0;
- bi = &rx_ring->rx_buffer_info[i];
}
no_buffers:
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
-
- ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
+ ixgbe_release_rx_desc(rx_ring, i);
}
}
-static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
{
- return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
-{
- return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-}
-
-static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
-{
- return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
- IXGBE_RXDADV_RSCCNT_MASK) >>
- IXGBE_RXDADV_RSCCNT_SHIFT;
+ /* HW will not DMA in data larger than the given buffer, even if it
+ * parses the (NFS, of course) header to be larger. In that case, it
+ * fills the header buffer and spills the rest into the page.
+ */
+ u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
+ u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > IXGBE_RX_HDR_SIZE)
+ hlen = IXGBE_RX_HDR_SIZE;
+ return hlen;
}
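[Editor's note] A stand-alone sketch of the hdr_info decode done by ixgbe_get_hlen() above. The mask, shift and header-buffer-size values are assumed from ixgbe_type.h and ixgbe.h of this era, so treat them as illustrative rather than authoritative.

	#include <stdio.h>
	#include <stdint.h>

	#define HDRBUFLEN_MASK	0x00007FE0u	/* assumed IXGBE_RXDADV_HDRBUFLEN_MASK */
	#define HDRBUFLEN_SHIFT	5		/* assumed IXGBE_RXDADV_HDRBUFLEN_SHIFT */
	#define RX_HDR_SIZE	512		/* assumed IXGBE_RX_HDR_SIZE */

	static uint16_t get_hlen(uint16_t hdr_info)
	{
		uint16_t hlen = (hdr_info & HDRBUFLEN_MASK) >> HDRBUFLEN_SHIFT;

		return hlen > RX_HDR_SIZE ? RX_HDR_SIZE : hlen;
	}

	int main(void)
	{
		/* a descriptor reporting a 66 byte header: 66 << 5 = 0x840 */
		printf("%u\n", get_hlen(0x0840));	/* 66 */
		/* an over-large value is clamped to the header buffer size */
		printf("%u\n", get_hlen(0x7FE0));	/* 512 */
		return 0;
	}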
/**
* ixgbe_transform_rsc_queue - change rsc queue into a full packet
* @skb: pointer to the last skb in the rsc queue
- * @count: pointer to number of packets coalesced in this context
*
* This function changes a queue full of hw rsc buffers into a completed
* packet. It uses the ->prev pointers to find the first packet and then
* turns it into the frag list owner.
**/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
- u64 *count)
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
unsigned int frag_list_size = 0;
+ unsigned int skb_cnt = 1;
while (skb->prev) {
struct sk_buff *prev = skb->prev;
frag_list_size += skb->len;
skb->prev = NULL;
skb = prev;
- *count += 1;
+ skb_cnt++;
}
skb_shinfo(skb)->frag_list = skb->next;
@@ -1156,68 +1280,59 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
skb->len += frag_list_size;
skb->data_len += frag_list_size;
skb->truesize += frag_list_size;
+ IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
+
return skb;
}
-struct ixgbe_rsc_cb {
- dma_addr_t dma;
- bool delay_unmap;
-};
-
-#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_RSCCNT_MASK);
+}
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
int *work_done, int work_to_do)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
struct sk_buff *skb;
- unsigned int i, rsc_count = 0;
- u32 len, staterr;
- u16 hdr_info;
- bool cleaned = false;
- int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ const int current_node = numa_node_id();
#ifdef IXGBE_FCOE
int ddp_bytes = 0;
#endif /* IXGBE_FCOE */
+ u32 staterr;
+ u16 i;
+ u16 cleaned_count = 0;
+ bool pkt_is_rsc = false;
i = rx_ring->next_to_clean;
rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
while (staterr & IXGBE_RXD_STAT_DD) {
u32 upper_len = 0;
- if (*work_done >= work_to_do)
- break;
- (*work_done)++;
rmb(); /* read descriptor and rx_buffer_info after status DD */
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
- hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
- len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
- IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- upper_len = le16_to_cpu(rx_desc->wb.upper.length);
- if ((len > IXGBE_RX_HDR_SIZE) ||
- (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
- len = IXGBE_RX_HDR_SIZE;
- } else {
- len = le16_to_cpu(rx_desc->wb.upper.length);
- }
- cleaned = true;
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
skb = rx_buffer_info->skb;
- prefetch(skb->data);
rx_buffer_info->skb = NULL;
+ prefetch(skb->data);
+
+ if (ring_is_rsc_enabled(rx_ring))
+ pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
+ /* if this is a skb from a previous receive, DMA will be 0 */
if (rx_buffer_info->dma) {
- if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
- (!(staterr & IXGBE_RXD_STAT_EOP)) &&
- (!(skb->prev))) {
+ u16 hlen;
+ if (pkt_is_rsc &&
+ !(staterr & IXGBE_RXD_STAT_EOP) &&
+ !skb->prev) {
/*
* When HWRSC is enabled, delay unmapping
* of the first packet. It carries the
@@ -1228,29 +1343,42 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
IXGBE_RSC_CB(skb)->delay_unmap = true;
IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
} else {
- dma_unmap_single(&pdev->dev,
+ dma_unmap_single(rx_ring->dev,
rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
}
rx_buffer_info->dma = 0;
- skb_put(skb, len);
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ hlen = ixgbe_get_hlen(rx_desc);
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ } else {
+ hlen = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+
+ skb_put(skb, hlen);
+ } else {
+ /* assume packet split since header is unmapped */
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
}
if (upper_len) {
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_ring->dev,
+ rx_buffer_info->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buffer_info->page,
rx_buffer_info->page_offset,
upper_len);
- if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
- (page_count(rx_buffer_info->page) != 1))
- rx_buffer_info->page = NULL;
- else
+ if ((page_count(rx_buffer_info->page) == 1) &&
+ (page_to_nid(rx_buffer_info->page) == current_node))
get_page(rx_buffer_info->page);
+ else
+ rx_buffer_info->page = NULL;
skb->len += upper_len;
skb->data_len += upper_len;
@@ -1265,10 +1393,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
prefetch(next_rxd);
cleaned_count++;
- if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
- rsc_count = ixgbe_get_rsc_count(rx_desc);
-
- if (rsc_count) {
+ if (pkt_is_rsc) {
u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
IXGBE_RXDADV_NEXTP_SHIFT;
next_buffer = &rx_ring->rx_buffer_info[nextp];
@@ -1276,32 +1401,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
next_buffer = &rx_ring->rx_buffer_info[i];
}
- if (staterr & IXGBE_RXD_STAT_EOP) {
- if (skb->prev)
- skb = ixgbe_transform_rsc_queue(skb,
- &(rx_ring->rsc_count));
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- if (IXGBE_RSC_CB(skb)->delay_unmap) {
- dma_unmap_single(&pdev->dev,
- IXGBE_RSC_CB(skb)->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- IXGBE_RSC_CB(skb)->dma = 0;
- IXGBE_RSC_CB(skb)->delay_unmap = false;
- }
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
- rx_ring->rsc_count +=
- skb_shinfo(skb)->nr_frags;
- else
- rx_ring->rsc_count++;
- rx_ring->rsc_flush++;
- }
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets++;
- rx_ring->stats.bytes += skb->len;
- u64_stats_update_end(&rx_ring->syncp);
- } else {
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+ if (ring_is_ps_enabled(rx_ring)) {
rx_buffer_info->skb = next_buffer->skb;
rx_buffer_info->dma = next_buffer->dma;
next_buffer->skb = skb;
@@ -1310,12 +1411,45 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
skb->next = next_buffer->skb;
skb->next->prev = skb;
}
- rx_ring->non_eop_descs++;
+ rx_ring->rx_stats.non_eop_descs++;
goto next_desc;
}
+ if (skb->prev) {
+ skb = ixgbe_transform_rsc_queue(skb);
+ /* if we got here without RSC, the packet is invalid */
+ if (!pkt_is_rsc) {
+ __pskb_trim(skb, 0);
+ rx_buffer_info->skb = skb;
+ goto next_desc;
+ }
+ }
+
+ if (ring_is_rsc_enabled(rx_ring)) {
+ if (IXGBE_RSC_CB(skb)->delay_unmap) {
+ dma_unmap_single(rx_ring->dev,
+ IXGBE_RSC_CB(skb)->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ IXGBE_RSC_CB(skb)->dma = 0;
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
+ }
+ }
+ if (pkt_is_rsc) {
+ if (ring_is_ps_enabled(rx_ring))
+ rx_ring->rx_stats.rsc_count +=
+ skb_shinfo(skb)->nr_frags;
+ else
+ rx_ring->rx_stats.rsc_count +=
+ IXGBE_RSC_CB(skb)->skb_cnt;
+ rx_ring->rx_stats.rsc_flush++;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
- dev_kfree_skb_irq(skb);
+ /* trim packet back to size 0 and recycle it */
+ __pskb_trim(skb, 0);
+ rx_buffer_info->skb = skb;
goto next_desc;
}
@@ -1325,7 +1459,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
total_rx_bytes += skb->len;
total_rx_packets++;
- skb->protocol = eth_type_trans(skb, adapter->netdev);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
#ifdef IXGBE_FCOE
/* if ddp, not passing to ULD unless for FCP_RSP or error */
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
@@ -1339,16 +1473,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
next_desc:
rx_desc->wb.upper.status_error = 0;
+ (*work_done)++;
+ if (*work_done >= work_to_do)
+ break;
+
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
- ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
/* use prefetched values */
rx_desc = next_rxd;
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
-
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
}
@@ -1356,14 +1492,14 @@ next_desc:
cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
if (cleaned_count)
- ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
#ifdef IXGBE_FCOE
/* include DDPed FCoE data */
if (ddp_bytes > 0) {
unsigned int mss;
- mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
+ mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
sizeof(struct fc_frame_header) -
sizeof(struct fcoe_crc_eof);
if (mss > 512)
@@ -1375,8 +1511,10 @@ next_desc:
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
-
- return cleaned;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
}
static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1390,7 +1528,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector;
- int i, j, q_vectors, v_idx, r_idx;
+ int i, q_vectors, v_idx, r_idx;
u32 mask;
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1406,8 +1544,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- j = adapter->rx_ring[r_idx]->reg_idx;
- ixgbe_set_ivar(adapter, 0, j, v_idx);
+ u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
+ ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
r_idx = find_next_bit(q_vector->rxr_idx,
adapter->num_rx_queues,
r_idx + 1);
@@ -1416,8 +1554,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- j = adapter->tx_ring[r_idx]->reg_idx;
- ixgbe_set_ivar(adapter, 1, j, v_idx);
+ u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
+ ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
r_idx = find_next_bit(q_vector->txr_idx,
adapter->num_tx_queues,
r_idx + 1);
@@ -1448,11 +1586,19 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
}
}
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
v_idx);
- else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
+ break;
+
+ default:
+ break;
+ }
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
/* set up to autoclear timer, and the vectors */
@@ -1548,12 +1694,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
int v_idx = q_vector->v_idx;
u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
/* must write high and low 16 bits to reset counter */
itr_reg |= (itr_reg << 16);
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
/*
- * 82599 can support a value of zero, so allow it for
+ * 82599 and X540 can support a value of zero, so allow it for
* max interrupt rate, but there is an errata where it can
* not be zero with RSC
*/
@@ -1566,6 +1715,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
* immediate assertion of the interrupt
*/
itr_reg |= IXGBE_EITR_CNT_WDIS;
+ break;
+ default:
+ break;
}
IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}
@@ -1573,14 +1725,13 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
+ int i, r_idx;
u32 new_itr;
u8 current_itr, ret_itr;
- int i, r_idx;
- struct ixgbe_ring *rx_ring, *tx_ring;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = adapter->tx_ring[r_idx];
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->tx_itr,
tx_ring->total_packets,
@@ -1595,7 +1746,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = adapter->rx_ring[r_idx];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->rx_itr,
rx_ring->total_packets,
@@ -1626,7 +1777,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
if (new_itr != q_vector->eitr) {
/* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+ new_itr = ((q_vector->eitr * 9) + new_itr)/10;
/* save the algorithm value here, not the smoothed one */
q_vector->eitr = new_itr;
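
The rewritten smoothing keeps the same 90/10 weighting but folds it into a
single integer division, so the result is truncated only once instead of once
per term. A minimal standalone sketch (plain C, not driver code, sample values
are hypothetical) comparing the two forms:

    #include <stdio.h>

    int main(void)
    {
        unsigned int old_itr = 105, sample = 999;
        /* two divisions: each term truncates before the sum */
        unsigned int two_div = (old_itr * 90) / 100 + (sample * 10) / 100;
        /* one division: sum first, truncate once */
        unsigned int one_div = (old_itr * 9 + sample) / 10;

        printf("two divisions: %u, one division: %u\n", two_div, one_div);
        return 0;
    }

With these values the two-division form yields 193 and the single-division
form yields 194; both approximate 0.9 * 105 + 0.1 * 999 = 194.4.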
@@ -1694,17 +1845,18 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
struct ixgbe_hw *hw = &adapter->hw;
+ if (eicr & IXGBE_EICR_GPI_SDP2) {
+ /* Clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ schedule_work(&adapter->sfp_config_module_task);
+ }
+
if (eicr & IXGBE_EICR_GPI_SDP1) {
/* Clear the interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
- schedule_work(&adapter->multispeed_fiber_task);
- } else if (eicr & IXGBE_EICR_GPI_SDP2) {
- /* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
- schedule_work(&adapter->sfp_config_module_task);
- } else {
- /* Interrupt isn't for us... */
- return;
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ schedule_work(&adapter->multispeed_fiber_task);
}
}
@@ -1744,16 +1896,16 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_msg_task(adapter);
- if (hw->mac.type == ixgbe_mac_82598EB)
- ixgbe_check_fan_failure(adapter, eicr);
-
- if (hw->mac.type == ixgbe_mac_82599EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
- adapter->interrupt_event = eicr;
if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
- ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+ adapter->interrupt_event = eicr;
schedule_work(&adapter->check_overtemp_task);
-
+ }
+ /* now fall through to handle Flow Director */
+ case ixgbe_mac_X540:
/* Handle Flow Director Full threshold interrupt */
if (eicr & IXGBE_EICR_FLOW_DIR) {
int i;
@@ -1763,12 +1915,18 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring =
adapter->tx_ring[i];
- if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
- &tx_ring->reinit_state))
+ if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &tx_ring->state))
schedule_work(&adapter->fdir_reinit_task);
}
}
+ break;
+ default:
+ break;
}
+
+ ixgbe_check_fan_failure(adapter, eicr);
+
if (!test_bit(__IXGBE_DOWN, &adapter->state))
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
@@ -1779,15 +1937,24 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
- } else {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
mask = (qmask & 0xFFFFFFFF);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
mask = (qmask >> 32);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ break;
+ default:
+ break;
}
/* skip the flush */
}
@@ -1796,15 +1963,24 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
- } else {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
mask = (qmask & 0xFFFFFFFF);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
mask = (qmask >> 32);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+ break;
+ default:
+ break;
}
/* skip the flush */
}
@@ -1847,8 +2023,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
int r_idx;
int i;
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
+#endif
+
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ for (i = 0; i < q_vector->rxr_count; i++) {
rx_ring = adapter->rx_ring[r_idx];
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
@@ -1859,7 +2040,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
if (!q_vector->rxr_count)
return IRQ_HANDLED;
- /* disable interrupts on this vector only */
/* EIAM disabled interrupts (on this vector) for us */
napi_schedule(&q_vector->napi);
@@ -1918,13 +2098,14 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
int work_done = 0;
long r_idx;
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_rx_dca(adapter, rx_ring);
+ ixgbe_update_dca(q_vector);
#endif
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = adapter->rx_ring[r_idx];
+
ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
/* If all Rx work done, exit the polling mode */
@@ -1958,13 +2139,14 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
long r_idx;
bool tx_clean_complete = true;
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
+#endif
+
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
ring = adapter->tx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_tx_dca(adapter, ring);
-#endif
tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
r_idx + 1);
@@ -1977,10 +2159,6 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
ring = adapter->rx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_rx_dca(adapter, ring);
-#endif
ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
r_idx + 1);
@@ -2019,13 +2197,14 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
int work_done = 0;
long r_idx;
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- tx_ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_tx_dca(adapter, tx_ring);
+ ixgbe_update_dca(q_vector);
#endif
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ tx_ring = adapter->tx_ring[r_idx];
+
if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
work_done = budget;
@@ -2046,24 +2225,27 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
int r_idx)
{
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+ struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
set_bit(r_idx, q_vector->rxr_idx);
q_vector->rxr_count++;
+ rx_ring->q_vector = q_vector;
}
static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
int t_idx)
{
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+ struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
set_bit(t_idx, q_vector->txr_idx);
q_vector->txr_count++;
+ tx_ring->q_vector = q_vector;
}
/**
* ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
* @adapter: board private structure to initialize
- * @vectors: allotted vector count for descriptor rings
*
* This function maps descriptor rings to the queue-specific vectors
* we were allotted through the MSI-X enabling code. Ideally, we'd have
@@ -2071,9 +2253,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
* group the rings as "efficiently" as possible. You would add new
* mapping configurations in here.
**/
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
- int vectors)
+static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
{
+ int q_vectors;
int v_start = 0;
int rxr_idx = 0, txr_idx = 0;
int rxr_remaining = adapter->num_rx_queues;
@@ -2086,11 +2268,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
goto out;
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
/*
* The ideal configuration...
* We have enough vectors to map one per queue.
*/
- if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
map_vector_to_rxq(adapter, v_start, rxr_idx);
@@ -2106,23 +2290,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
* multiple queues per vector.
*/
/* Re-adjusting *qpv takes care of the remainder. */
- for (i = v_start; i < vectors; i++) {
- rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
for (j = 0; j < rqpv; j++) {
map_vector_to_rxq(adapter, i, rxr_idx);
rxr_idx++;
rxr_remaining--;
}
- }
- for (i = v_start; i < vectors; i++) {
- tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
for (j = 0; j < tqpv; j++) {
map_vector_to_txq(adapter, i, txr_idx);
txr_idx++;
txr_remaining--;
}
}
-
out:
return err;
}
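
The remainder handling above relies on recomputing DIV_ROUND_UP against the
vectors that are still unassigned, so earlier vectors absorb any excess rings.
A minimal standalone sketch (plain C, not driver code, queue counts are
hypothetical) of the same distribution:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int remaining = 10, q_vectors = 4;

        for (int i = 0; i < q_vectors; i++) {
            /* re-adjust the per-vector share against what is left */
            int share = DIV_ROUND_UP(remaining, q_vectors - i);

            printf("vector %d gets %d rings\n", i, share);
            remaining -= share;
        }
        return 0;
    }

Ten rings over four vectors come out as 3, 3, 2, 2, matching the comment that
re-adjusting the per-vector count takes care of the remainder.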
@@ -2144,30 +2325,36 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
/* Decrement for Other and TCP Timer vectors */
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- /* Map the Tx/Rx rings to the vectors we were allotted. */
- err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
+ err = ixgbe_map_rings_to_vectors(adapter);
if (err)
- goto out;
+ return err;
-#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
- (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
- &ixgbe_msix_clean_many)
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
+ ? &ixgbe_msix_clean_many : \
+ (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
+ (_v)->txr_count ? &ixgbe_msix_clean_tx : \
+ NULL)
for (vector = 0; vector < q_vectors; vector++) {
- handler = SET_HANDLER(adapter->q_vector[vector]);
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+ handler = SET_HANDLER(q_vector);
if (handler == &ixgbe_msix_clean_rx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "rx", ri++);
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "rx", ri++);
} else if (handler == &ixgbe_msix_clean_tx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "tx", ti++);
- } else
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "TxRx", vector);
-
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "tx", ti++);
+ } else if (handler == &ixgbe_msix_clean_many) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "TxRx", ri++);
+ ti++;
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
err = request_irq(adapter->msix_entries[vector].vector,
- handler, 0, adapter->name[vector],
- adapter->q_vector[vector]);
+ handler, 0, q_vector->name,
+ q_vector);
if (err) {
e_err(probe, "request_irq failed for MSIX interrupt "
"Error: %d\n", err);
@@ -2175,9 +2362,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
}
}
- sprintf(adapter->name[vector], "%s:lsc", netdev->name);
+ sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+ ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
if (err) {
e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
goto free_queue_irqs;
@@ -2193,17 +2380,16 @@ free_queue_irqs:
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
-out:
return err;
}
static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
- u8 current_itr;
- u32 new_itr = q_vector->eitr;
struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+ u32 new_itr = q_vector->eitr;
+ u8 current_itr;
q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
q_vector->tx_itr,
@@ -2233,9 +2419,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
if (new_itr != q_vector->eitr) {
/* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+ new_itr = ((q_vector->eitr * 9) + new_itr)/10;
- /* save the algorithm value here, not the smoothed one */
+ /* save the algorithm value here */
q_vector->eitr = new_itr;
ixgbe_write_eitr(q_vector);
@@ -2256,12 +2442,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
mask |= IXGBE_EIMS_GPI_SDP0;
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
mask |= IXGBE_EIMS_GPI_SDP1;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
if (adapter->num_vfs)
mask |= IXGBE_EIMS_MAILBOX;
+ break;
+ default:
+ break;
}
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -2317,13 +2508,20 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
- if (hw->mac.type == ixgbe_mac_82599EB)
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
+ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+ adapter->interrupt_event = eicr;
+ schedule_work(&adapter->check_overtemp_task);
+ }
+ break;
+ default:
+ break;
+ }
ixgbe_check_fan_failure(adapter, eicr);
- if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
- ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
- schedule_work(&adapter->check_overtemp_task);
if (napi_schedule_prep(&(q_vector->napi))) {
adapter->tx_ring[0]->total_packets = 0;
@@ -2400,6 +2598,11 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
i--;
for (; i >= 0; i--) {
+ /* free only the irqs that were actually requested */
+ if (!adapter->q_vector[i]->rxr_count &&
+ !adapter->q_vector[i]->txr_count)
+ continue;
+
free_irq(adapter->msix_entries[i].vector,
adapter->q_vector[i]);
}
@@ -2416,14 +2619,20 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
**/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
- } else {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
if (adapter->num_vfs > 32)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
+ break;
+ default:
+ break;
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2469,7 +2678,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
u64 tdba = ring->dma;
int wait_loop = 10;
u32 txdctl;
- u16 reg_idx = ring->reg_idx;
+ u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
@@ -2484,8 +2693,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_tx_desc));
IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
- ring->head = IXGBE_TDH(reg_idx);
- ring->tail = IXGBE_TDT(reg_idx);
+ ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
/* configure fetching thresholds */
if (adapter->rx_itr_setting == 0) {
@@ -2501,7 +2709,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
}
/* reinitialize flowdirector state */
- set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+ adapter->atr_sample_rate) {
+ ring->atr_sample_rate = adapter->atr_sample_rate;
+ ring->atr_count = 0;
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+ } else {
+ ring->atr_sample_rate = 0;
+ }
+
+ clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
/* enable queue */
txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -2592,16 +2809,22 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring)
{
u32 srrctl;
- int index;
- struct ixgbe_ring_feature *feature = adapter->ring_feature;
+ u8 reg_idx = rx_ring->reg_idx;
- index = rx_ring->reg_idx;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- unsigned long mask;
- mask = (unsigned long) feature[RING_F_RSS].mask;
- index = index & mask;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB: {
+ struct ixgbe_ring_feature *feature = adapter->ring_feature;
+ const int mask = feature[RING_F_RSS].mask;
+ reg_idx = reg_idx & mask;
}
- srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ default:
+ break;
+ }
+
+ srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
@@ -2611,7 +2834,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK;
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (ring_is_ps_enabled(rx_ring)) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
@@ -2624,7 +2847,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2669,17 +2892,20 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
);
switch (mask) {
+#ifdef CONFIG_IXGBE_DCB
+ case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_RSS_ENABLED):
+ mrqc = IXGBE_MRQC_RTRSS8TCEN;
+ break;
+ case (IXGBE_FLAG_DCB_ENABLED):
+ mrqc = IXGBE_MRQC_RT8TCEN;
+ break;
+#endif /* CONFIG_IXGBE_DCB */
case (IXGBE_FLAG_RSS_ENABLED):
mrqc = IXGBE_MRQC_RSSEN;
break;
case (IXGBE_FLAG_SRIOV_ENABLED):
mrqc = IXGBE_MRQC_VMDQEN;
break;
-#ifdef CONFIG_IXGBE_DCB
- case (IXGBE_FLAG_DCB_ENABLED):
- mrqc = IXGBE_MRQC_RT8TCEN;
- break;
-#endif /* CONFIG_IXGBE_DCB */
default:
break;
}
@@ -2694,19 +2920,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_clear_rscctl - disable RSC for the indicated ring
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
+ **/
+void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 rscctrl;
+ u8 reg_idx = ring->reg_idx;
+
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
+ rscctrl &= ~IXGBE_RSCCTL_RSCEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
+}
+
+/**
* ixgbe_configure_rscctl - enable RSC for the indicated ring
* @adapter: address of board private structure
* @index: index of ring to set
**/
-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rscctrl;
int rx_buf_len;
- u16 reg_idx = ring->reg_idx;
+ u8 reg_idx = ring->reg_idx;
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ if (!ring_is_rsc_enabled(ring))
return;
rx_buf_len = ring->rx_buf_len;
@@ -2717,7 +2960,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
* total size of max desc * buf_len is not greater
* than 65535
*/
- if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (ring_is_ps_enabled(ring)) {
#if (MAX_SKB_FRAGS > 16)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
@@ -2770,9 +3013,9 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
- int reg_idx = ring->reg_idx;
int wait_loop = IXGBE_MAX_RX_DESC_POLL;
u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
if (hw->mac.type == ixgbe_mac_82598EB &&
@@ -2790,19 +3033,47 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
}
}
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+ if (hw->mac.type == ixgbe_mac_82598EB &&
+ !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+ return;
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ do {
+ udelay(10);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+ } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop) {
+ e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
+ "the polling period\n", reg_idx);
+ }
+}
+
void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
u64 rdba = ring->dma;
u32 rxdctl;
- u16 reg_idx = ring->reg_idx;
+ u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
- rxdctl & ~IXGBE_RXDCTL_ENABLE);
- IXGBE_WRITE_FLUSH(hw);
+ ixgbe_disable_rx_queue(adapter, ring);
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -2810,12 +3081,19 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_rx_desc));
IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
- ring->head = IXGBE_RDH(reg_idx);
- ring->tail = IXGBE_RDT(reg_idx);
+ ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
ixgbe_configure_srrctl(adapter, ring);
ixgbe_configure_rscctl(adapter, ring);
+ /* If operating in IOV mode, set RLPML for X540 */
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
+ hw->mac.type == ixgbe_mac_X540) {
+ rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+ rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
+ ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
+ }
+
if (hw->mac.type == ixgbe_mac_82598EB) {
/*
* enable cache line friendly hardware writes:
@@ -2833,7 +3111,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
- ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
+ ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
}
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -2899,6 +3177,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
/* enable Tx loopback for VF/PF communication */
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ /* Enable MAC Anti-Spoofing */
+ hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
+ adapter->num_vfs);
}
static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
@@ -2912,9 +3193,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
u32 mhadd, hlreg0;
/* Decide whether to use packet split mode or not */
+ /* On by default */
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+
/* Do not use packet split if we're in SR-IOV Mode */
- if (!adapter->num_vfs)
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ if (adapter->num_vfs)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+
+ /* Disable packet split due to 82599 erratum #45 */
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2956,24 +3244,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
rx_ring->rx_buf_len = rx_buf_len;
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
- rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+ set_ring_ps_enabled(rx_ring);
+ else
+ clear_ring_ps_enabled(rx_ring);
+
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ set_ring_rsc_enabled(rx_ring);
else
- rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+ clear_ring_rsc_enabled(rx_ring);
#ifdef IXGBE_FCOE
if (netdev->features & NETIF_F_FCOE_MTU) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
if ((i >= f->mask) && (i < f->mask + f->indices)) {
- rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+ clear_ring_ps_enabled(rx_ring);
if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
rx_ring->rx_buf_len =
IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ } else if (!ring_is_rsc_enabled(rx_ring) &&
+ !ring_is_ps_enabled(rx_ring)) {
+ rx_ring->rx_buf_len =
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
}
}
#endif /* IXGBE_FCOE */
}
-
}
static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
@@ -2996,6 +3292,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
rdrxctl |= IXGBE_RDRXCTL_MVMEN;
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
/* Disable RSC for ACK packets */
IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
(IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -3123,6 +3420,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3152,6 +3450,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
break;
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3349,8 +3648,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
- u32 txdctl;
- int i, j;
if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -3361,30 +3658,50 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82598EB)
netif_set_gso_max_size(adapter->netdev, 32768);
-#ifdef CONFIG_FCOE
- if (adapter->netdev->features & NETIF_F_FCOE_MTU)
- max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
-#endif
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
- DCB_TX_CONFIG);
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
- DCB_RX_CONFIG);
+ /* Enable VLAN tag insert/strip */
+ adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
+
+ hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
/* reconfigure the hardware */
- ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
+ if (adapter->dcbx_cap & (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE)) {
+#ifdef CONFIG_FCOE
+ if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
+ DCB_TX_CONFIG);
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
+ DCB_RX_CONFIG);
+ ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
+ } else {
+ struct net_device *dev = adapter->netdev;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i]->reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- /* PThresh workaround for Tx hang with DFP enabled. */
- txdctl |= 32;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+ if (adapter->ixgbe_ieee_ets)
+ dev->dcbnl_ops->ieee_setets(dev,
+ adapter->ixgbe_ieee_ets);
+ if (adapter->ixgbe_ieee_pfc)
+ dev->dcbnl_ops->ieee_setpfc(dev,
+ adapter->ixgbe_ieee_pfc);
}
- /* Enable VLAN tag insert/strip */
- adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
- hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
+ /* Enable RSS Hash per TC */
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ int i;
+ u32 reg = 0;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ u8 msb = 0;
+ u8 cnt = adapter->netdev->tc_to_txq[i].count;
+
+ while (cnt >>= 1)
+ msb++;
+
+ reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
+ }
}
#endif
@@ -3455,7 +3772,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
* We need to try and force an autonegotiation
* session, then bring up link.
*/
- hw->mac.ops.setup_sfp(hw);
+ if (hw->mac.ops.setup_sfp)
+ hw->mac.ops.setup_sfp(hw);
if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
schedule_work(&adapter->multispeed_fiber_task);
} else {
@@ -3487,7 +3805,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
if (ret)
goto link_cfg_out;
- if (hw->mac.ops.get_link_capabilities)
+ autoneg = hw->phy.autoneg_advertised;
+ if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
&negotiation);
if (ret)
@@ -3516,8 +3835,9 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_82598EB:
IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
break;
- default:
case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
break;
@@ -3561,13 +3881,24 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
else
ixgbe_configure_msi_and_legacy(adapter);
- /* enable the optics */
- if (hw->phy.multispeed_fiber)
+ /* enable the optics for both multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.enable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
hw->mac.ops.enable_tx_laser(hw);
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
+ if (ixgbe_is_sfp(hw)) {
+ ixgbe_sfp_link_config(adapter);
+ } else {
+ err = ixgbe_non_sfp_link_config(hw);
+ if (err)
+ e_err(probe, "link_config FAILED %d\n", err);
+ }
+
/* clear any pending interrupts, may auto mask */
IXGBE_READ_REG(hw, IXGBE_EICR);
ixgbe_irq_enable(adapter, true, true);
@@ -3590,26 +3921,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
* If we're not hot-pluggable SFP+, we just need to configure link
* and bring it up.
*/
- if (hw->phy.type == ixgbe_phy_unknown) {
- err = hw->phy.ops.identify(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- /*
- * Take the device down and schedule the sfp tasklet
- * which will unregister_netdev and log it.
- */
- ixgbe_down(adapter);
- schedule_work(&adapter->sfp_config_module_task);
- return err;
- }
- }
-
- if (ixgbe_is_sfp(hw)) {
- ixgbe_sfp_link_config(adapter);
- } else {
- err = ixgbe_non_sfp_link_config(hw);
- if (err)
- e_err(probe, "link_config FAILED %d\n", err);
- }
+ if (hw->phy.type == ixgbe_phy_none)
+ schedule_work(&adapter->sfp_config_module_task);
/* enable transmits */
netif_tx_start_all_queues(adapter->netdev);
@@ -3687,15 +4000,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = rx_ring->dev;
unsigned long size;
- unsigned int i;
+ u16 i;
/* ring already cleared, nothing to do */
if (!rx_ring->rx_buffer_info)
@@ -3707,7 +4018,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
@@ -3718,7 +4029,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
do {
struct sk_buff *this = skb;
if (IXGBE_RSC_CB(this)->delay_unmap) {
- dma_unmap_single(&pdev->dev,
+ dma_unmap_single(dev,
IXGBE_RSC_CB(this)->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
@@ -3732,7 +4043,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
if (!rx_buffer_info->page)
continue;
if (rx_buffer_info->page_dma) {
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+ dma_unmap_page(dev, rx_buffer_info->page_dma,
PAGE_SIZE / 2, DMA_FROM_DEVICE);
rx_buffer_info->page_dma = 0;
}
@@ -3749,24 +4060,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
-
- if (rx_ring->head)
- writel(0, adapter->hw.hw_addr + rx_ring->head);
- if (rx_ring->tail)
- writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
* ixgbe_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned long size;
- unsigned int i;
+ u16 i;
/* ring already cleared, nothing to do */
if (!tx_ring->tx_buffer_info)
@@ -3775,7 +4079,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -3786,11 +4090,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
-
- if (tx_ring->head)
- writel(0, adapter->hw.hw_addr + tx_ring->head);
- if (tx_ring->tail)
- writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
@@ -3802,7 +4101,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
+ ixgbe_clean_rx_ring(adapter->rx_ring[i]);
}
/**
@@ -3814,7 +4113,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
+ ixgbe_clean_tx_ring(adapter->tx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3823,7 +4122,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 rxctrl;
u32 txdctl;
- int i, j;
+ int i;
int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
/* signal that we are down to the interrupt handler */
@@ -3846,7 +4145,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
- IXGBE_WRITE_FLUSH(hw);
+ /* disable all enabled rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ /* this call also flushes the previous write */
+ ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
msleep(10);
netif_tx_stop_all_queues(netdev);
@@ -3881,26 +4184,36 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i]->reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
+ u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
(txdctl & ~IXGBE_TXDCTL_ENABLE));
}
/* Disable the Tx DMA engine on 82599 */
- if (hw->mac.type == ixgbe_mac_82599EB)
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
-
- /* power down the optics */
- if (hw->phy.multispeed_fiber)
- hw->mac.ops.disable_tx_laser(hw);
+ break;
+ default:
+ break;
+ }
/* clear n-tuple filters that are cached */
ethtool_ntuple_flush(netdev);
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
+
+ /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
+ hw->mac.ops.disable_tx_laser(hw);
+
ixgbe_clean_all_tx_rings(adapter);
ixgbe_clean_all_rx_rings(adapter);
@@ -3925,10 +4238,8 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
int tx_clean_complete, work_done = 0;
#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
- ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
- ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
- }
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
#endif
tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
@@ -3956,6 +4267,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ adapter->tx_timeout_count++;
+
/* Do the reset outside of interrupt context */
schedule_work(&adapter->reset_task);
}
@@ -3970,31 +4283,11 @@ static void ixgbe_reset_task(struct work_struct *work)
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
- adapter->tx_timeout_count++;
-
ixgbe_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
ixgbe_reinit_locked(adapter);
}
-#ifdef CONFIG_IXGBE_DCB
-static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
-{
- bool ret = false;
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
-
- if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- return ret;
-
- f->mask = 0x7 << 3;
- adapter->num_rx_queues = f->indices;
- adapter->num_tx_queues = f->indices;
- ret = true;
-
- return ret;
-}
-#endif
-
/**
* ixgbe_set_rss_queues: Allocate queues for RSS
* @adapter: board private structure to initialize
@@ -4065,19 +4358,26 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
**/
static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
{
- bool ret = false;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
- f->indices = min((int)num_online_cpus(), f->indices);
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return false;
+
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
#ifdef CONFIG_IXGBE_DCB
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- e_info(probe, "FCoE enabled with DCB\n");
- ixgbe_set_dcb_queues(adapter);
- }
+ int tc;
+ struct net_device *dev = adapter->netdev;
+
+ tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+ f->indices = dev->tc_to_txq[tc].count;
+ f->mask = dev->tc_to_txq[tc].offset;
#endif
+ } else {
+ f->indices = min((int)num_online_cpus(), f->indices);
+
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
e_info(probe, "FCoE enabled with RSS\n");
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
@@ -4090,14 +4390,45 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
f->mask = adapter->num_rx_queues;
adapter->num_rx_queues += f->indices;
adapter->num_tx_queues += f->indices;
+ }
- ret = true;
+ return true;
+}
+#endif /* IXGBE_FCOE */
+
+#ifdef CONFIG_IXGBE_DCB
+static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+ bool ret = false;
+ struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
+ int i, q;
+
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return ret;
+
+ f->indices = 0;
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS);
+ f->indices += q;
}
+ f->mask = 0x7 << 3;
+ adapter->num_rx_queues = f->indices;
+ adapter->num_tx_queues = f->indices;
+ ret = true;
+
+#ifdef IXGBE_FCOE
+ /* FCoE enabled queues require special configuration done through
+ * configure_fcoe() and others. Here we map FCoE indices onto the
+ * DCB queue pairs allowing FCoE to own configuration later.
+ */
+ ixgbe_set_fcoe_queues(adapter);
+#endif
+
return ret;
}
+#endif
-#endif /* IXGBE_FCOE */
/**
* ixgbe_set_sriov_queues: Allocate queues for IOV use
* @adapter: board private structure to initialize
@@ -4133,16 +4464,16 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
if (ixgbe_set_sriov_queues(adapter))
goto done;
-#ifdef IXGBE_FCOE
- if (ixgbe_set_fcoe_queues(adapter))
- goto done;
-
-#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_DCB
if (ixgbe_set_dcb_queues(adapter))
goto done;
#endif
+#ifdef IXGBE_FCOE
+ if (ixgbe_set_fcoe_queues(adapter))
+ goto done;
+
+#endif /* IXGBE_FCOE */
if (ixgbe_set_fdir_queues(adapter))
goto done;
@@ -4221,22 +4552,123 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
int i;
- bool ret = false;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
- ret = true;
- } else {
- ret = false;
- }
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ return false;
- return ret;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->reg_idx = i;
+
+ return true;
}
#ifdef CONFIG_IXGBE_DCB
+
+/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
+void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
+ unsigned int *tx, unsigned int *rx)
+{
+ struct net_device *dev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 num_tcs = netdev_get_num_tc(dev);
+
+ *tx = 0;
+ *rx = 0;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ *tx = tc << 3;
+ *rx = tc << 2;
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (num_tcs == 8) {
+ if (tc < 3) {
+ *tx = tc << 5;
+ *rx = tc << 4;
+ } else if (tc < 5) {
+ *tx = ((tc + 2) << 4);
+ *rx = tc << 4;
+ } else if (tc < num_tcs) {
+ *tx = ((tc + 8) << 3);
+ *rx = tc << 4;
+ }
+ } else if (num_tcs == 4) {
+ *rx = tc << 5;
+ switch (tc) {
+ case 0:
+ *tx = 0;
+ break;
+ case 1:
+ *tx = 64;
+ break;
+ case 2:
+ *tx = 96;
+ break;
+ case 3:
+ *tx = 112;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+#define IXGBE_MAX_Q_PER_TC (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS)
+
+/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
+ *
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int ixgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+ int i;
+ unsigned int q, offset = 0;
+
+ if (!tc) {
+ netdev_reset_tc(dev);
+ } else {
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ /* Hardware supports up to 8 traffic classes */
+ if (tc > MAX_TRAFFIC_CLASS || netdev_set_num_tc(dev, tc))
+ return -EINVAL;
+
+ /* Partition Tx queues evenly amongst traffic classes */
+ for (i = 0; i < tc; i++) {
+ q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC);
+ netdev_set_prio_tc_map(dev, i, i);
+ netdev_set_tc_queue(dev, i, q, offset);
+ offset += q;
+ }
+
+ /* This enables multiple traffic class support in the hardware,
+ * which defaults to strict priority transmission.
+ * If traffic classes are already enabled, perhaps through the DCB
+ * code path, the existing configuration will be used.
+ */
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+ dev->dcbnl_ops && dev->dcbnl_ops->setdcbx) {
+ struct ieee_ets ets = {
+ .prio_tc = {0, 1, 2, 3, 4, 5, 6, 7},
+ };
+ u8 mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+
+ dev->dcbnl_ops->setdcbx(dev, mode);
+ dev->dcbnl_ops->ieee_setets(dev, &ets);
+ }
+ }
+ return 0;
+}
+
/**
* ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
* @adapter: board private structure to initialize
@@ -4246,76 +4678,27 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
**/
static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
- int i;
- bool ret = false;
- int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+ struct net_device *dev = adapter->netdev;
+ int i, j, k;
+ u8 num_tcs = netdev_get_num_tc(dev);
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- /* the number of queues is assumed to be symmetric */
- for (i = 0; i < dcb_i; i++) {
- adapter->rx_ring[i]->reg_idx = i << 3;
- adapter->tx_ring[i]->reg_idx = i << 2;
- }
- ret = true;
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- if (dcb_i == 8) {
- /*
- * Tx TC0 starts at: descriptor queue 0
- * Tx TC1 starts at: descriptor queue 32
- * Tx TC2 starts at: descriptor queue 64
- * Tx TC3 starts at: descriptor queue 80
- * Tx TC4 starts at: descriptor queue 96
- * Tx TC5 starts at: descriptor queue 104
- * Tx TC6 starts at: descriptor queue 112
- * Tx TC7 starts at: descriptor queue 120
- *
- * Rx TC0-TC7 are offset by 16 queues each
- */
- for (i = 0; i < 3; i++) {
- adapter->tx_ring[i]->reg_idx = i << 5;
- adapter->rx_ring[i]->reg_idx = i << 4;
- }
- for ( ; i < 5; i++) {
- adapter->tx_ring[i]->reg_idx =
- ((i + 2) << 4);
- adapter->rx_ring[i]->reg_idx = i << 4;
- }
- for ( ; i < dcb_i; i++) {
- adapter->tx_ring[i]->reg_idx =
- ((i + 8) << 3);
- adapter->rx_ring[i]->reg_idx = i << 4;
- }
+ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return false;
- ret = true;
- } else if (dcb_i == 4) {
- /*
- * Tx TC0 starts at: descriptor queue 0
- * Tx TC1 starts at: descriptor queue 64
- * Tx TC2 starts at: descriptor queue 96
- * Tx TC3 starts at: descriptor queue 112
- *
- * Rx TC0-TC3 are offset by 32 queues each
- */
- adapter->tx_ring[0]->reg_idx = 0;
- adapter->tx_ring[1]->reg_idx = 64;
- adapter->tx_ring[2]->reg_idx = 96;
- adapter->tx_ring[3]->reg_idx = 112;
- for (i = 0 ; i < dcb_i; i++)
- adapter->rx_ring[i]->reg_idx = i << 5;
-
- ret = true;
- } else {
- ret = false;
- }
- } else {
- ret = false;
+ for (i = 0, k = 0; i < num_tcs; i++) {
+ unsigned int tx_s, rx_s;
+ u16 count = dev->tc_to_txq[i].count;
+
+ ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
+ for (j = 0; j < count; j++, k++) {
+ adapter->tx_ring[k]->reg_idx = tx_s + j;
+ adapter->rx_ring[k]->reg_idx = rx_s + j;
+ adapter->tx_ring[k]->dcb_tc = i;
+ adapter->rx_ring[k]->dcb_tc = i;
}
- } else {
- ret = false;
}
- return ret;
+ return true;
}
#endif
@@ -4354,55 +4737,28 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
*/
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
- int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
- bool ret = false;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+ int i;
+ u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
-#ifdef CONFIG_IXGBE_DCB
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return false;
- ixgbe_cache_ring_dcb(adapter);
- /* find out queues in TC for FCoE */
- fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
- fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
- /*
- * In 82599, the number of Tx queues for each traffic
- * class for both 8-TC and 4-TC modes are:
- * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
- * 8 TCs: 32 32 16 16 8 8 8 8
- * 4 TCs: 64 64 32 32
- * We have max 8 queues for FCoE, where 8 the is
- * FCoE redirection table size. If TC for FCoE is
- * less than or equal to TC3, we have enough queues
- * to add max of 8 queues for FCoE, so we start FCoE
- * tx descriptor from the next one, i.e., reg_idx + 1.
- * If TC for FCoE is above TC3, implying 8 TC mode,
- * and we need 8 for FCoE, we have to take all queues
- * in that traffic class for FCoE.
- */
- if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
- fcoe_tx_i--;
- }
-#endif /* CONFIG_IXGBE_DCB */
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
- ixgbe_cache_ring_fdir(adapter);
- else
- ixgbe_cache_ring_rss(adapter);
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+ (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ ixgbe_cache_ring_fdir(adapter);
+ else
+ ixgbe_cache_ring_rss(adapter);
- fcoe_rx_i = f->mask;
- fcoe_tx_i = f->mask;
- }
- for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
- }
- ret = true;
+ fcoe_rx_i = f->mask;
+ fcoe_tx_i = f->mask;
}
- return ret;
+ for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
+ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+ }
+ return true;
}
#endif /* IXGBE_FCOE */
@@ -4444,16 +4800,16 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
if (ixgbe_cache_ring_sriov(adapter))
return;
+#ifdef CONFIG_IXGBE_DCB
+ if (ixgbe_cache_ring_dcb(adapter))
+ return;
+#endif
+
#ifdef IXGBE_FCOE
if (ixgbe_cache_ring_fcoe(adapter))
return;
-
#endif /* IXGBE_FCOE */
-#ifdef CONFIG_IXGBE_DCB
- if (ixgbe_cache_ring_dcb(adapter))
- return;
-#endif
if (ixgbe_cache_ring_fdir(adapter))
return;
@@ -4471,65 +4827,55 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
**/
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
- int i;
- int orig_node = adapter->node;
+ int rx = 0, tx = 0, nid = adapter->node;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = adapter->tx_ring[i];
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
- adapter->node);
+ if (nid < 0 || !node_online(nid))
+ nid = first_online_node;
+
+ for (; tx < adapter->num_tx_queues; tx++) {
+ struct ixgbe_ring *ring;
+
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
if (!ring)
- ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
- goto err_tx_ring_allocation;
+ goto err_allocation;
ring->count = adapter->tx_ring_count;
- ring->queue_index = i;
- ring->numa_node = adapter->node;
+ ring->queue_index = tx;
+ ring->numa_node = nid;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
- adapter->tx_ring[i] = ring;
+ adapter->tx_ring[tx] = ring;
}
- /* Restore the adapter's original node */
- adapter->node = orig_node;
+ for (; rx < adapter->num_rx_queues; rx++) {
+ struct ixgbe_ring *ring;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
- adapter->node);
+ ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
if (!ring)
- ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
- goto err_rx_ring_allocation;
+ goto err_allocation;
ring->count = adapter->rx_ring_count;
- ring->queue_index = i;
- ring->numa_node = adapter->node;
+ ring->queue_index = rx;
+ ring->numa_node = nid;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
- adapter->rx_ring[i] = ring;
+ adapter->rx_ring[rx] = ring;
}
- /* Restore the adapter's original node */
- adapter->node = orig_node;
-
ixgbe_cache_ring_register(adapter);
return 0;
-err_rx_ring_allocation:
- for (i = 0; i < adapter->num_tx_queues; i++)
- kfree(adapter->tx_ring[i]);
-err_tx_ring_allocation:
+err_allocation:
+ while (tx)
+ kfree(adapter->tx_ring[--tx]);
+
+ while (rx)
+ kfree(adapter->rx_ring[--rx]);
return -ENOMEM;
}
@@ -4580,6 +4926,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+ if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
+ IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ e_err(probe,
+ "Flow Director is not supported while multiple "
+ "queues are disabled. Disabling Flow Director\n");
+ }
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
@@ -4616,16 +4968,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
int q_idx, num_q_vectors;
struct ixgbe_q_vector *q_vector;
- int napi_vectors;
int (*poll)(struct napi_struct *, int);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- napi_vectors = adapter->num_rx_queues;
poll = &ixgbe_clean_rxtx_many;
} else {
num_q_vectors = 1;
- napi_vectors = 1;
poll = &ixgbe_poll;
}
@@ -4751,6 +5100,11 @@ err_set_interrupt:
return err;
}
+static void ring_free_rcu(struct rcu_head *head)
+{
+ kfree(container_of(head, struct ixgbe_ring, rcu));
+}
+
/**
* ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
* @adapter: board private structure to clear interrupt scheme on
@@ -4767,10 +5121,18 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
adapter->tx_ring[i] = NULL;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- kfree(adapter->rx_ring[i]);
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ /* ixgbe_get_stats64() might still be accessing this ring, so
+ * wait a grace period before freeing it.
+ */
+ call_rcu(&ring->rcu, ring_free_rcu);
adapter->rx_ring[i] = NULL;
}
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
}
@@ -4844,6 +5206,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
int j;
struct tc_configuration *tc;
#endif
+ int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
/* PCI config space info */
@@ -4858,26 +5221,24 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->ring_feature[RING_F_RSS].indices = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
- if (hw->mac.type == ixgbe_mac_82598EB) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
- } else if (hw->mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- if (dev->features & NETIF_F_NTUPLE) {
- /* Flow Director perfect filter enabled */
- adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- adapter->atr_sample_rate = 0;
- spin_lock_init(&adapter->fdir_perfect_lock);
- } else {
- /* Flow Director hash filters enabled */
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->atr_sample_rate = 20;
- }
+ /* n-tuple support exists, always init our spinlock */
+ spin_lock_init(&adapter->fdir_perfect_lock);
+ /* Flow Director hash filters enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 20;
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
adapter->fdir_pballoc = 0;
@@ -4891,6 +5252,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif
#endif /* IXGBE_FCOE */
+ break;
+ default:
+ break;
}
#ifdef CONFIG_IXGBE_DCB
@@ -4907,10 +5271,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
adapter->dcb_cfg.rx_pba_cfg = pba_equal;
adapter->dcb_cfg.pfc_mode_enable = false;
- adapter->dcb_cfg.round_robin_enable = false;
adapter->dcb_set_bitmap = 0x00;
+ adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
- adapter->ring_feature[RING_F_DCB].indices);
+ MAX_TRAFFIC_CLASS);
#endif
@@ -4920,8 +5284,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_DCB
adapter->last_lfc_mode = hw->fc.current_mode;
#endif
- hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
- hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
+ hw->fc.high_water = FC_HIGH_WATER(max_frame);
+ hw->fc.low_water = FC_LOW_WATER(max_frame);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
hw->fc.disable_fc_autoneg = false;
@@ -4959,30 +5323,27 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/**
* ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
* @tx_ring: tx descriptor ring (for a specific queue) to setup
*
* Return 0 on success, negative on failure
**/
-int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = tx_ring->dev;
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
+ tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
if (!tx_ring->tx_buffer_info)
- tx_ring->tx_buffer_info = vmalloc(size);
+ tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
- memset(tx_ring->tx_buffer_info, 0, size);
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -4995,7 +5356,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
err:
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
@@ -5014,7 +5375,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
+ err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
e_err(probe, "Allocation for Tx Queue %u failed\n", i);
@@ -5026,48 +5387,40 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
/**
* ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
* @rx_ring: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
-int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = rx_ring->dev;
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+ rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
if (!rx_ring->rx_buffer_info)
- rx_ring->rx_buffer_info = vmalloc(size);
- if (!rx_ring->rx_buffer_info) {
- e_err(probe, "vmalloc allocation failed for the Rx "
- "descriptor ring\n");
- goto alloc_failed;
- }
- memset(rx_ring->rx_buffer_info, 0, size);
+ rx_ring->rx_buffer_info = vzalloc(size);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
- if (!rx_ring->desc) {
- e_err(probe, "Memory allocation failed for the Rx "
- "descriptor ring\n");
- vfree(rx_ring->rx_buffer_info);
- goto alloc_failed;
- }
+ if (!rx_ring->desc)
+ goto err;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
return 0;
-
-alloc_failed:
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
@@ -5081,13 +5434,12 @@ alloc_failed:
*
* Return 0 on success, negative on failure
**/
-
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
if (!err)
continue;
e_err(probe, "Allocation for Rx Queue %u failed\n", i);
@@ -5099,23 +5451,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
/**
* ixgbe_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
* @tx_ring: Tx descriptor ring for a specific queue
*
* Free all transmit software resources
**/
-void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbe_clean_tx_ring(adapter, tx_ring);
+ ixgbe_clean_tx_ring(tx_ring);
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
- tx_ring->dma);
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -5132,28 +5484,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tx_ring[i]->desc)
- ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
}
/**
* ixgbe_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
* @rx_ring: ring to clean the resources from
*
* Free all receive software resources
**/
-void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbe_clean_rx_ring(adapter, rx_ring);
+ ixgbe_clean_rx_ring(rx_ring);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -5170,7 +5522,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++)
if (adapter->rx_ring[i]->desc)
- ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
}
/**
@@ -5183,16 +5535,26 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* MTU < 68 is an error and causes problems on some kernels */
- if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
- return -EINVAL;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
+ hw->mac.type != ixgbe_mac_X540) {
+ if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ return -EINVAL;
+ } else {
+ if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
+ return -EINVAL;
+ }
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
+ hw->fc.high_water = FC_HIGH_WATER(max_frame);
+ hw->fc.low_water = FC_LOW_WATER(max_frame);
+
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
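Worked example for the new watermark sizing (the macro internals are outside this hunk and are assumed, not shown): with the default 1500-byte MTU, max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518, and both ixgbe_sw_init() and ixgbe_change_mtu() now feed that value to FC_HIGH_WATER()/FC_LOW_WATER(), so the flow-control thresholds track the configured MTU instead of the fixed IXGBE_DEFAULT_FCRTH/FCRTL values.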
@@ -5288,8 +5650,8 @@ static int ixgbe_close(struct net_device *netdev)
#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
u32 err;
pci_set_power_state(pdev, PCI_D0);
@@ -5320,7 +5682,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
if (netif_running(netdev)) {
- err = ixgbe_open(adapter->netdev);
+ err = ixgbe_open(netdev);
if (err)
return err;
}
@@ -5333,8 +5695,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u32 ctrl, fctrl;
u32 wufc = adapter->wol;
@@ -5351,6 +5713,12 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
ixgbe_free_all_rx_resources(adapter);
}
+ ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_DCB
+ kfree(adapter->ixgbe_ieee_pfc);
+ kfree(adapter->ixgbe_ieee_ets);
+#endif
+
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
if (retval)
@@ -5377,15 +5745,20 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
}
- if (wufc && hw->mac.type == ixgbe_mac_82599EB)
- pci_wake_from_d3(pdev, true);
- else
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
pci_wake_from_d3(pdev, false);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ pci_wake_from_d3(pdev, !!wufc);
+ break;
+ default:
+ break;
+ }
*enable_wake = !!wufc;
- ixgbe_clear_interrupt_scheme(adapter);
-
ixgbe_release_hw_control(adapter);
pci_disable_device(pdev);
@@ -5434,10 +5807,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
u64 total_mpc = 0;
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
- u64 non_eop_descs = 0, restart_queue = 0;
- struct ixgbe_hw_stats *hwstats = &adapter->stats;
+ u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+ u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+ u64 bytes = 0, packets = 0;
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5450,21 +5825,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
for (i = 0; i < adapter->num_rx_queues; i++) {
- rsc_count += adapter->rx_ring[i]->rsc_count;
- rsc_flush += adapter->rx_ring[i]->rsc_flush;
+ rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+ rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
}
adapter->rsc_total_count = rsc_count;
adapter->rsc_total_flush = rsc_flush;
}
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+ alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+ alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+ bytes += rx_ring->stats.bytes;
+ packets += rx_ring->stats.packets;
+ }
+ adapter->non_eop_descs = non_eop_descs;
+ adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+ adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+ netdev->stats.rx_bytes = bytes;
+ netdev->stats.rx_packets = packets;
+
+ bytes = 0;
+ packets = 0;
/* gather some stats to the adapter struct that are per queue */
- for (i = 0; i < adapter->num_tx_queues; i++)
- restart_queue += adapter->tx_ring[i]->restart_queue;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ restart_queue += tx_ring->tx_stats.restart_queue;
+ tx_busy += tx_ring->tx_stats.tx_busy;
+ bytes += tx_ring->stats.bytes;
+ packets += tx_ring->stats.packets;
+ }
adapter->restart_queue = restart_queue;
-
- for (i = 0; i < adapter->num_rx_queues; i++)
- non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
- adapter->non_eop_descs = non_eop_descs;
+ adapter->tx_busy = tx_busy;
+ netdev->stats.tx_bytes = bytes;
+ netdev->stats.tx_packets = packets;
hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
for (i = 0; i < 8; i++) {
@@ -5479,17 +5874,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
- if (hw->mac.type == ixgbe_mac_82599EB) {
- hwstats->pxonrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
- hwstats->pxoffrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
- hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
- } else {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
- hwstats->pxoffrxc[i] +=
- IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ hwstats->pxonrxc[i] +=
+ IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+ break;
+ default:
+ break;
}
hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
@@ -5498,21 +5894,25 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/* work around hardware counting issue */
hwstats->gprc -= missed_rx;
+ ixgbe_update_xoff_received(adapter);
+
/* 82598 hardware only has a 32 bit counter in the high register */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- u64 tmp;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+ hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
- tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
- /* 4 high bits of GORC */
- hwstats->gorc += (tmp << 32);
+ IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
- tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
- /* 4 high bits of GOTC */
- hwstats->gotc += (tmp << 32);
+ IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
- IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+ IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
- hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
@@ -5523,12 +5923,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
- } else {
- hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
- hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
- hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
- hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+ break;
+ default:
+ break;
}
bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
hwstats->bprc += bprc;
@@ -5679,7 +6076,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
unregister_netdev(adapter->netdev);
return;
}
- hw->mac.ops.setup_sfp(hw);
+ if (hw->mac.ops.setup_sfp)
+ hw->mac.ops.setup_sfp(hw);
if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
/* This will also work for DA Twinax connections */
@@ -5701,8 +6099,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
- set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->reinit_state));
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &(adapter->tx_ring[i]->state));
} else {
e_err(probe, "failed to finish FDIR re-initialization, "
"ignored adding FDIR ATR filters\n");
@@ -5711,6 +6109,26 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
netif_tx_start_all_queues(adapter->netdev);
}
+static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
+{
+ u32 ssvpc;
+
+ /* Do not perform spoof check for 82598 */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ return;
+
+ ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
+
+ /*
+ * The SSVPC register is cleared on read; if it is zero, no
+ * spoofed packets were seen in the last interval.
+ */
+ if (!ssvpc)
+ return;
+
+ e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
+}
+
static DEFINE_MUTEX(ixgbe_watchdog_lock);
/**
@@ -5764,31 +6182,48 @@ static void ixgbe_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
bool flow_rx, flow_tx;
- if (hw->mac.type == ixgbe_mac_82599EB) {
- u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
- u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
- flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
- flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
- } else {
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB: {
u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
}
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540: {
+ u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+ flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
+ }
+ break;
+ default:
+ flow_tx = false;
+ flow_rx = false;
+ break;
+ }
e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
"10 Gbps" :
(link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
- "1 Gbps" : "unknown speed")),
+ "1 Gbps" :
+ (link_speed == IXGBE_LINK_SPEED_100_FULL ?
+ "100 Mbps" :
+ "unknown speed"))),
((flow_rx && flow_tx) ? "RX/TX" :
(flow_rx ? "RX" :
(flow_tx ? "TX" : "None"))));
netif_carrier_on(netdev);
+ ixgbe_check_vf_rate_limit(adapter);
} else {
/* Force detection of hung controller */
- adapter->detect_tx_hung = true;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring = adapter->tx_ring[i];
+ set_check_for_tx_hang(tx_ring);
+ }
}
} else {
adapter->link_up = false;
@@ -5818,6 +6253,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
}
}
+ ixgbe_spoof_check(adapter);
ixgbe_update_stats(adapter);
mutex_unlock(&ixgbe_watchdog_lock);
}
@@ -6000,15 +6436,17 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags,
- unsigned int first)
+ unsigned int first, const u8 hdr_len)
{
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = tx_ring->dev;
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
unsigned int offset = 0, size, count = 0, i;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
+ unsigned int bytecount = skb->len;
+ u16 gso_segs = 1;
i = tx_ring->next_to_use;
@@ -6023,10 +6461,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(&pdev->dev,
+ tx_buffer_info->dma = dma_map_single(dev,
skb->data + offset,
size, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -6059,12 +6497,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
+ tx_buffer_info->dma = dma_map_page(dev,
frag->page,
offset, size,
DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(dev, tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -6078,6 +6516,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
break;
}
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+ /* adjust for FCoE Sequence Offload */
+ else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+ gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+ skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+ bytecount += (gso_segs - 1) * hdr_len;
+
+ /* multiply data chunks by size of headers */
+ tx_ring->tx_buffer_info[i].bytecount = bytecount;
+ tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[first].next_to_watch = i;
@@ -6099,14 +6550,13 @@ dma_error:
i += tx_ring->count;
i--;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
return 0;
}
-static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
+static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
int tx_flags, int count, u32 paylen, u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -6171,60 +6621,100 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
wmb();
tx_ring->next_to_use = i;
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
+ writel(i, tx_ring->tail);
}
-static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- int queue, u32 tx_flags, __be16 protocol)
+static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol)
{
- struct ixgbe_atr_input atr_input;
+ struct ixgbe_q_vector *q_vector = ring->q_vector;
+ union ixgbe_atr_hash_dword input = { .dword = 0 };
+ union ixgbe_atr_hash_dword common = { .dword = 0 };
+ union {
+ unsigned char *network;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
struct tcphdr *th;
- struct iphdr *iph = ip_hdr(skb);
- struct ethhdr *eth = (struct ethhdr *)skb->data;
- u16 vlan_id, src_port, dst_port, flex_bytes;
- u32 src_ipv4_addr, dst_ipv4_addr;
- u8 l4type = 0;
-
- /* Right now, we support IPv4 only */
- if (protocol != htons(ETH_P_IP))
+ __be16 vlan_id;
+
+ /* if ring doesn't have an interrupt vector, cannot perform ATR */
+ if (!q_vector)
return;
- /* check if we're UDP or TCP */
- if (iph->protocol == IPPROTO_TCP) {
- th = tcp_hdr(skb);
- src_port = th->source;
- dst_port = th->dest;
- l4type |= IXGBE_ATR_L4TYPE_TCP;
- /* l4type IPv4 type is 0, no need to assign */
- } else {
- /* Unsupported L4 header, just bail here */
+
+ /* do nothing if sampling is disabled */
+ if (!ring->atr_sample_rate)
return;
- }
- memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+ ring->atr_count++;
- vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
- IXGBE_TX_FLAGS_VLAN_SHIFT;
- src_ipv4_addr = iph->saddr;
- dst_ipv4_addr = iph->daddr;
- flex_bytes = eth->h_proto;
+ /* snag network header to get L4 type and address */
+ hdr.network = skb_network_header(skb);
+
+ /* Currently only IPv4/IPv6 with TCP is supported */
+ if ((protocol != __constant_htons(ETH_P_IPV6) ||
+ hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+ (protocol != __constant_htons(ETH_P_IP) ||
+ hdr.ipv4->protocol != IPPROTO_TCP))
+ return;
+
+ th = tcp_hdr(skb);
+
+ /* skip this packet since the socket is closing */
+ if (th->fin)
+ return;
- ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
- ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
- ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
- ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
- ixgbe_atr_set_l4type_82599(&atr_input, l4type);
- /* src and dst are inverted, think how the receiver sees them */
- ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
- ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+ /* sample on all syn packets or once every atr sample count */
+ if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
+ return;
+
+ /* reset sample count */
+ ring->atr_count = 0;
+
+ vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
+
+ /*
+ * src and dst are inverted; think of how the receiver sees them
+ *
+ * The input is broken into two sections: a non-compressed section
+ * containing vm_pool, vlan_id and flow_type, and a compressed
+ * section where the rest of the data is XORed together and stored
+ * in a single dword.
+ */
+ input.formatted.vlan_id = vlan_id;
+
+ /*
+ * Since the src port and flex bytes occupy the same word, XOR them
+ * together and write the value to the source port portion of the
+ * compressed dword.
+ */
+ if (vlan_id)
+ common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+ else
+ common.port.src ^= th->dest ^ protocol;
+ common.port.dst ^= th->source;
+
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
+ } else {
+ input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
+ common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
+ hdr.ipv6->saddr.s6_addr32[1] ^
+ hdr.ipv6->saddr.s6_addr32[2] ^
+ hdr.ipv6->saddr.s6_addr32[3] ^
+ hdr.ipv6->daddr.s6_addr32[0] ^
+ hdr.ipv6->daddr.s6_addr32[1] ^
+ hdr.ipv6->daddr.s6_addr32[2] ^
+ hdr.ipv6->daddr.s6_addr32[3];
+ }
/* This assumes the Rx queue and Tx queue are bound to the same CPU */
- ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
+ ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
+ input, common, ring->queue_index);
}
-static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
- struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
- netif_stop_subqueue(netdev, tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
* but since that doesn't exist yet, just open code it. */
@@ -6236,17 +6726,16 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(netdev, tx_ring->queue_index);
- ++tx_ring->restart_queue;
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
return 0;
}
-static int ixgbe_maybe_stop_tx(struct net_device *netdev,
- struct ixgbe_ring *tx_ring, int size)
+static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
return 0;
- return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+ return __ixgbe_maybe_stop_tx(tx_ring, size);
}
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
@@ -6258,18 +6747,12 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
protocol = vlan_get_protocol(skb);
- if ((protocol == htons(ETH_P_FCOE)) ||
- (protocol == htons(ETH_P_FIP))) {
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
- txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
- txq += adapter->ring_feature[RING_F_FCOE].mask;
- return txq;
-#ifdef CONFIG_IXGBE_DCB
- } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- txq = adapter->fcoe.up;
- return txq;
-#endif
- }
+ if (((protocol == htons(ETH_P_FCOE)) ||
+ (protocol == htons(ETH_P_FIP))) &&
+ (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+ txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
+ txq += adapter->ring_feature[RING_F_FCOE].mask;
+ return txq;
}
#endif
@@ -6279,23 +6762,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
return txq;
}
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- if (skb->priority == TC_PRIO_CONTROL)
- txq = adapter->ring_feature[RING_F_DCB].indices-1;
- else
- txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
- >> 13;
- return txq;
- }
-
return skb_tx_hash(dev, skb);
}
-netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
- struct netdev_queue *txq;
unsigned int first;
unsigned int tx_flags = 0;
u8 hdr_len = 0;
@@ -6310,13 +6783,13 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
tx_flags |= vlan_tx_tag_get(skb);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
- tx_flags |= ((skb->queue_mapping & 0x7) << 13);
+ tx_flags |= tx_ring->dcb_tc << 13;
}
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
skb->priority != TC_PRIO_CONTROL) {
- tx_flags |= ((skb->queue_mapping & 0x7) << 13);
+ tx_flags |= tx_ring->dcb_tc << 13;
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
@@ -6325,20 +6798,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
/* for FCoE with DCB, we force the priority to what
* was specified by the switch */
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
- (protocol == htons(ETH_P_FCOE) ||
- protocol == htons(ETH_P_FIP))) {
-#ifdef CONFIG_IXGBE_DCB
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
- << IXGBE_TX_FLAGS_VLAN_SHIFT);
- tx_flags |= ((adapter->fcoe.up << 13)
- << IXGBE_TX_FLAGS_VLAN_SHIFT);
- }
-#endif
- /* flag for FCoE offloads */
- if (protocol == htons(ETH_P_FCOE))
- tx_flags |= IXGBE_TX_FLAGS_FCOE;
- }
+ (protocol == htons(ETH_P_FCOE)))
+ tx_flags |= IXGBE_TX_FLAGS_FCOE;
#endif
/* four things can cause us to need a context descriptor */
@@ -6352,8 +6813,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
- if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
- adapter->tx_busy++;
+ if (ixgbe_maybe_stop_tx(tx_ring, count)) {
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
@@ -6387,25 +6848,13 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
- count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
+ count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
if (count) {
/* add the ATR filter if ATR is on */
- if (tx_ring->atr_sample_rate) {
- ++tx_ring->atr_count;
- if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
- test_bit(__IXGBE_FDIR_INIT_DONE,
- &tx_ring->reinit_state)) {
- ixgbe_atr(adapter, skb, tx_ring->queue_index,
- tx_flags, protocol);
- tx_ring->atr_count = 0;
- }
- }
- txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
- txq->tx_bytes += skb->len;
- txq->tx_packets++;
- ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
- hdr_len);
- ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+ if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+ ixgbe_atr(tx_ring, skb, tx_flags, protocol);
+ ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
+ ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
} else {
dev_kfree_skb_any(skb);
@@ -6422,7 +6871,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netd
struct ixgbe_ring *tx_ring;
tx_ring = adapter->tx_ring[skb->queue_mapping];
- return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
+ return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}
/**
@@ -6561,22 +7010,39 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
- /* accurate rx/tx bytes/packets stats */
- dev_txq_stats_fold(netdev, stats);
+ rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
u64 bytes, packets;
unsigned int start;
- do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
- packets = ring->stats.packets;
- bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
- stats->rx_packets += packets;
- stats->rx_bytes += bytes;
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
}
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
+ }
+ rcu_read_unlock();
/* following stats updated by ixgbe_watchdog_task() */
stats->multicast = netdev->stats.multicast;
stats->rx_errors = netdev->stats.rx_errors;
@@ -6606,11 +7072,15 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
+#ifdef CONFIG_IXGBE_DCB
+ .ndo_setup_tc = ixgbe_setup_tc,
+#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
+ .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
.ndo_fcoe_enable = ixgbe_fcoe_enable,
.ndo_fcoe_disable = ixgbe_fcoe_disable,
@@ -6625,7 +7095,7 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
int err;
- if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
+ if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs)
return;
/* The 82599 supports up to 64 VFs per physical function
@@ -6691,11 +7161,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
int i, err, pci_using_dac;
+ u8 part_str[IXGBE_PBANUM_LENGTH];
unsigned int indices = num_possible_cpus();
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
- u32 part_num, eec;
+ u32 eec;
/* Catch broken hardware that put the wrong VF device ID in
* the PCIe SR-IOV capability.
@@ -6745,8 +7216,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
else
indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
+#if defined(CONFIG_DCB)
indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
-#ifdef IXGBE_FCOE
+#elif defined(IXGBE_FCOE)
indices += min_t(unsigned int, num_possible_cpus(),
IXGBE_MAX_FCOE_INDICES);
#endif
@@ -6758,8 +7230,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
- pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
+ pci_set_drvdata(pdev, adapter);
adapter->netdev = netdev;
adapter->pdev = pdev;
@@ -6782,7 +7254,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->netdev_ops = &ixgbe_netdev_ops;
ixgbe_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- strcpy(netdev->name, pci_name(pdev));
+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
adapter->bd_number = cards_found;
@@ -6832,8 +7304,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
/* Make it possible the adapter to be woken up via WOL */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+ break;
+ default:
+ break;
+ }
/*
* If there is a fan on this device and it has failed log the
@@ -6896,8 +7374,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
IXGBE_FLAG_DCB_ENABLED);
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
#ifdef CONFIG_IXGBE_DCB
netdev->dcbnl_ops = &dcbnl_ops;
@@ -6941,8 +7417,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_eeprom;
}
- /* power down the optics */
- if (hw->phy.multispeed_fiber)
+ /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser &&
+ ((hw->phy.multispeed_fiber) ||
+ ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+ (hw->mac.type == ixgbe_mac_82599EB))))
hw->mac.ops.disable_tx_laser(hw);
init_timer(&adapter->watchdog_timer);
@@ -6957,6 +7436,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
switch (pdev->device) {
+ case IXGBE_DEV_ID_82599_SFP:
+ /* Only this subdevice supports WOL */
+ if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
+ adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
+ IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+ break;
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ /* All except this subdevice support WOL */
+ if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
+ adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
+ IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+ break;
case IXGBE_DEV_ID_82599_KX4:
adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
IXGBE_WUFC_MC | IXGBE_WUFC_BC);
@@ -6980,16 +7471,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
"Unknown"),
netdev->dev_addr);
- ixgbe_read_pba_num_generic(hw, &part_num);
+
+ err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+ if (err)
+ strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
- e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
- "PBA No: %06x-%03x\n",
+ e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, hw->phy.sfp_type,
- (part_num >> 8), (part_num & 0xff));
+ part_str);
else
- e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
- hw->mac.type, hw->phy.type,
- (part_num >> 8), (part_num & 0xff));
+ e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, part_str);
if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
e_dev_warn("PCI-Express bandwidth available for this card is "
@@ -7082,17 +7574,19 @@ err_dma:
**/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
set_bit(__IXGBE_DOWN, &adapter->state);
- /* clear the module not found bit to make sure the worker won't
- * reschedule
+
+ /*
+ * The timers may be rescheduled, so explicitly prevent them
+ * from being rescheduled.
*/
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
-
del_timer_sync(&adapter->sfp_timer);
+
cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->sfp_task);
cancel_work_sync(&adapter->multispeed_fiber_task);
@@ -7100,7 +7594,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
cancel_work_sync(&adapter->fdir_reinit_task);
- flush_scheduled_work();
+ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+ cancel_work_sync(&adapter->check_overtemp_task);
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
@@ -7153,8 +7648,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
@@ -7177,8 +7672,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
pci_ers_result_t result;
int err;
@@ -7216,8 +7710,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
*/
static void ixgbe_io_resume(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
if (netif_running(netdev)) {
if (ixgbe_up(adapter)) {
@@ -7282,6 +7776,7 @@ static void __exit ixgbe_exit_module(void)
dca_unregister_notify(&dca_notifier);
#endif
pci_unregister_driver(&ixgbe_driver);
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
#ifdef CONFIG_IXGBE_DCA
@@ -7298,16 +7793,6 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
#endif /* CONFIG_IXGBE_DCA */
-/**
- * ixgbe_get_hw_dev return device
- * used by hardware layer to print debugging information
- **/
-struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
-{
- struct ixgbe_adapter *adapter = hw->back;
- return adapter->netdev;
-}
-
module_exit(ixgbe_exit_module);
/* ixgbe_main.c */
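The ring-freeing changes in ixgbe_main.c above follow the classic RCU deferred-free pattern: ixgbe_clear_interrupt_scheme() hands each Rx ring to call_rcu() instead of kfree(), ixgbe_get_stats64() reads the ring pointers under rcu_read_lock(), and ixgbe_exit_module() adds rcu_barrier() so outstanding callbacks finish before the module is unloaded. A minimal, self-contained sketch of that pattern (all names here are illustrative, not part of the driver):

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_ring {
	u64 packets;		/* read by the stats path without a lock */
	struct rcu_head rcu;	/* storage used for the deferred free */
};

static struct demo_ring __rcu *demo_ring_ptr;

static void demo_ring_free_rcu(struct rcu_head *head)
{
	/* runs after a grace period, once no reader can still see the ring */
	kfree(container_of(head, struct demo_ring, rcu));
}

static void demo_ring_teardown(void)
{
	struct demo_ring *ring = rcu_dereference_protected(demo_ring_ptr, 1);

	RCU_INIT_POINTER(demo_ring_ptr, NULL);
	if (ring)
		call_rcu(&ring->rcu, demo_ring_free_rcu);
}

static u64 demo_ring_read_packets(void)
{
	struct demo_ring *ring;
	u64 packets = 0;

	rcu_read_lock();
	ring = rcu_dereference(demo_ring_ptr);
	if (ring)
		packets = ring->packets;
	rcu_read_unlock();
	return packets;
}

The driver itself keeps a plain ring pointer array and uses ACCESS_ONCE() in ixgbe_get_stats64() rather than an __rcu-annotated pointer, but the lifetime rule is the same: readers stay inside rcu_read_lock()/rcu_read_unlock(), and the memory is only reclaimed after a grace period.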
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index 471f0f2cdb98..1ff0eefcfd0a 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -154,9 +154,6 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
udelay(mbx->usec_delay);
}
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
out:
return countdown ? 0 : IXGBE_ERR_MBX;
}
@@ -183,9 +180,6 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
udelay(mbx->usec_delay);
}
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
out:
return countdown ? 0 : IXGBE_ERR_MBX;
}
@@ -319,8 +313,16 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
u32 vflre = 0;
s32 ret_val = IXGBE_ERR_MBX;
- if (hw->mac.type == ixgbe_mac_82599EB)
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+ break;
+ case ixgbe_mac_X540:
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
+ break;
+ default:
+ break;
+ }
if (vflre & (1 << vf_shift)) {
ret_val = 0;
@@ -429,6 +431,7 @@ out_no_read:
return ret_val;
}
+#ifdef CONFIG_PCI_IOV
/**
* ixgbe_init_mbx_params_pf - set initial values for pf mailbox
* @hw: pointer to the HW structure
@@ -439,22 +442,24 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (hw->mac.type != ixgbe_mac_82599EB)
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540)
return;
mbx->timeout = 0;
mbx->usec_delay = 0;
- mbx->size = IXGBE_VFMAILBOX_SIZE;
-
mbx->stats.msgs_tx = 0;
mbx->stats.msgs_rx = 0;
mbx->stats.reqs = 0;
mbx->stats.acks = 0;
mbx->stats.rsts = 0;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
}
+#endif /* CONFIG_PCI_IOV */
-struct ixgbe_mbx_operations mbx_ops_82599 = {
+struct ixgbe_mbx_operations mbx_ops_generic = {
.read = ixgbe_read_mbx_pf,
.write = ixgbe_write_mbx_pf,
.read_posted = ixgbe_read_posted_mbx,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 7e0d08ff5b53..fe6ea81dc7f8 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -86,8 +86,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+#ifdef CONFIG_PCI_IOV
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+#endif /* CONFIG_PCI_IOV */
-extern struct ixgbe_mbx_operations mbx_ops_82599;
+extern struct ixgbe_mbx_operations mbx_ops_generic;
#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 6c0d42e33f21..f72f705f6183 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -57,6 +57,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
u32 phy_addr;
+ u16 ext_ability = 0;
if (hw->phy.type == ixgbe_phy_unknown) {
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
@@ -65,12 +66,29 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
ixgbe_get_phy_id(hw);
hw->phy.type =
ixgbe_get_phy_type_from_id(hw->phy.id);
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.ops.read_reg(hw,
+ MDIO_PMA_EXTABLE,
+ MDIO_MMD_PMAPMD,
+ &ext_ability);
+ if (ext_ability &
+ (MDIO_PMA_EXTABLE_10GBT |
+ MDIO_PMA_EXTABLE_1000BT))
+ hw->phy.type =
+ ixgbe_phy_cu_unknown;
+ else
+ hw->phy.type =
+ ixgbe_phy_generic;
+ }
+
status = 0;
break;
}
}
/* clear value if nothing found */
- hw->phy.mdio.prtad = 0;
+ if (status != 0)
+ hw->phy.mdio.prtad = 0;
} else {
status = 0;
}
@@ -115,6 +133,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
+ case X540_PHY_ID:
+ phy_type = ixgbe_phy_aq;
+ break;
case QT2022_PHY_ID:
phy_type = ixgbe_phy_qt;
break;
@@ -135,17 +156,51 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
**/
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
+ u32 i;
+ u16 ctrl = 0;
+ s32 status = 0;
+
+ if (hw->phy.type == ixgbe_phy_unknown)
+ status = ixgbe_identify_phy_generic(hw);
+
+ if (status != 0 || hw->phy.type == ixgbe_phy_none)
+ goto out;
+
/* Don't reset PHY if it's shut down due to overtemp. */
if (!hw->phy.reset_if_overtemp &&
(IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
- return 0;
+ goto out;
/*
* Perform soft PHY reset to the PHY_XS.
* This will cause a soft reset to the PHY
*/
- return hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
- MDIO_CTRL1_RESET);
+ hw->phy.ops.write_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_PHYXS,
+ MDIO_CTRL1_RESET);
+
+ /*
+ * Poll for reset bit to self-clear indicating reset is complete.
+ * Some PHYs could take up to 3 seconds to complete and need about
+ * 1.7 usec delay after the reset is complete.
+ */
+ for (i = 0; i < 30; i++) {
+ msleep(100);
+ hw->phy.ops.read_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_PHYXS, &ctrl);
+ if (!(ctrl & MDIO_CTRL1_RESET)) {
+ udelay(2);
+ break;
+ }
+ }
+
+ if (ctrl & MDIO_CTRL1_RESET) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "PHY reset polling failed to complete.\n");
+ }
+
+out:
+ return status;
}
/**
@@ -168,7 +223,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
else
gssr = IXGBE_GSSR_PHY0_SM;
- if (ixgbe_acquire_swfw_sync(hw, gssr) != 0)
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
status = IXGBE_ERR_SWFW_SYNC;
if (status == 0) {
@@ -240,7 +295,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
}
}
- ixgbe_release_swfw_sync(hw, gssr);
+ hw->mac.ops.release_swfw_sync(hw, gssr);
}
return status;
@@ -266,7 +321,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
else
gssr = IXGBE_GSSR_PHY0_SM;
- if (ixgbe_acquire_swfw_sync(hw, gssr) != 0)
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
status = IXGBE_ERR_SWFW_SYNC;
if (status == 0) {
@@ -333,7 +388,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
}
}
- ixgbe_release_swfw_sync(hw, gssr);
+ hw->mac.ops.release_swfw_sync(hw, gssr);
}
return status;
@@ -347,49 +402,89 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
**/
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
- s32 status = IXGBE_NOT_IMPLEMENTED;
+ s32 status = 0;
u32 time_out;
u32 max_time_out = 10;
- u16 autoneg_reg;
+ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+ bool autoneg = false;
+ ixgbe_link_speed speed;
- /*
- * Set advertisement settings in PHY based on autoneg_advertised
- * settings. If autoneg_advertised = 0, then advertise default values
- * tnx devices cannot be "forced" to a autoneg 10G and fail. But can
- * for a 1G.
- */
- hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg);
+ ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
+ MDIO_MMD_AN,
+ &autoneg_reg);
- if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
- else
- autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
+
+ hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
+ MDIO_MMD_AN,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ /* Set or unset auto-negotiation 1G advertisement */
+ hw->phy.ops.read_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
+
+ hw->phy.ops.write_reg(hw,
+ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ MDIO_MMD_AN,
+ autoneg_reg);
+ }
- hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg);
+ if (speed & IXGBE_LINK_SPEED_100_FULL) {
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= ~ADVERTISE_100FULL;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ autoneg_reg |= ADVERTISE_100FULL;
+
+ hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN,
+ autoneg_reg);
+ }
/* Restart PHY autonegotiation and wait for completion */
- hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg);
+ hw->phy.ops.read_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_AN, &autoneg_reg);
autoneg_reg |= MDIO_AN_CTRL1_RESTART;
- hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg);
+ hw->phy.ops.write_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_AN, autoneg_reg);
/* Wait for autonegotiation to finish */
for (time_out = 0; time_out < max_time_out; time_out++) {
udelay(10);
/* Restart PHY autonegotiation and wait for completion */
- status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
- &autoneg_reg);
+ status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
+ MDIO_MMD_AN,
+ &autoneg_reg);
autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) {
- status = 0;
break;
}
}
- if (time_out == max_time_out)
+ if (time_out == max_time_out) {
status = IXGBE_ERR_LINK_SETUP;
+ hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
+ }
return status;
}
@@ -418,6 +513,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (speed & IXGBE_LINK_SPEED_100_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
/* Setup link based on the new speed settings */
hw->phy.ops.setup_link(hw);
@@ -425,6 +523,213 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: boolean auto-negotiation value
+ *
+ * Determines the supported link capabilities by reading the PHY speed
+ * ability register (MDIO_SPEED) in the PMA/PMD MMD.
+ */
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ s32 status = IXGBE_ERR_LINK_SETUP;
+ u16 speed_ability;
+
+ *speed = 0;
+ *autoneg = true;
+
+ status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
+ &speed_ability);
+
+ if (status == 0) {
+ if (speed_ability & MDIO_SPEED_10G)
+ *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (speed_ability & MDIO_PMA_SPEED_1000)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (speed_ability & MDIO_PMA_SPEED_100)
+ *speed |= IXGBE_LINK_SPEED_100_FULL;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_check_phy_link_tnx - Determine link and speed status
+ * @hw: pointer to hardware structure
+ *
+ * Reads the VS1 register to determine if link is up and the current speed for
+ * the PHY.
+ **/
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ s32 status = 0;
+ u32 time_out;
+ u32 max_time_out = 10;
+ u16 phy_link = 0;
+ u16 phy_speed = 0;
+ u16 phy_data = 0;
+
+ /* Initialize speed and link to default case */
+ *link_up = false;
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+ /*
+ * Check current speed and link status of the PHY register.
+ * This is a vendor specific register and may have to
+ * be changed for other copper PHYs.
+ */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ udelay(10);
+ status = hw->phy.ops.read_reg(hw,
+ MDIO_STAT1,
+ MDIO_MMD_VEND1,
+ &phy_data);
+ phy_link = phy_data &
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
+ phy_speed = phy_data &
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
+ if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
+ *link_up = true;
+ if (phy_speed ==
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_phy_link_tnx - Set and restart autoneg
+ * @hw: pointer to hardware structure
+ *
+ * Restarts PHY autonegotiation and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
+{
+ s32 status = 0;
+ u32 time_out;
+ u32 max_time_out = 10;
+ u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+ bool autoneg = false;
+ ixgbe_link_speed speed;
+
+ ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
+
+ hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
+ MDIO_MMD_AN,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+ /* Set or unset auto-negotiation 1G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+ MDIO_MMD_AN,
+ autoneg_reg);
+ }
+
+ if (speed & IXGBE_LINK_SPEED_100_FULL) {
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= ~ADVERTISE_100FULL;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ autoneg_reg |= ADVERTISE_100FULL;
+
+ hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
+ MDIO_MMD_AN,
+ autoneg_reg);
+ }
+
+ /* Restart PHY autonegotiation and wait for completion */
+ hw->phy.ops.read_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_AN, &autoneg_reg);
+
+ autoneg_reg |= MDIO_AN_CTRL1_RESTART;
+
+ hw->phy.ops.write_reg(hw, MDIO_CTRL1,
+ MDIO_MMD_AN, autoneg_reg);
+
+ /* Wait for autonegotiation to finish */
+ for (time_out = 0; time_out < max_time_out; time_out++) {
+ udelay(10);
+ /* Restart PHY autonegotiation and wait for completion */
+ status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
+ MDIO_MMD_AN,
+ &autoneg_reg);
+
+ autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
+ if (autoneg_reg == MDIO_AN_STAT1_COMPLETE)
+ break;
+ }
+
+ if (time_out == max_time_out) {
+ status = IXGBE_ERR_LINK_SETUP;
+ hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ s32 status = 0;
+
+ status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
+ MDIO_MMD_VEND1,
+ firmware_version);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ s32 status = 0;
+
+ status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
+ MDIO_MMD_VEND1,
+ firmware_version);
+
+ return status;
+}
+
+/**
* ixgbe_reset_phy_nl - Performs a PHY reset
* @hw: pointer to hardware structure
**/
@@ -520,11 +825,10 @@ out:
}
/**
- * ixgbe_identify_sfp_module_generic - Identifies SFP module and assigns
- * the PHY type.
+ * ixgbe_identify_sfp_module_generic - Identifies SFP modules
* @hw: pointer to hardware structure
*
- * Searches for and indentifies the SFP module. Assings appropriate PHY type.
+ * Searches for and identifies the SFP module and assigns appropriate PHY type.
**/
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
{
@@ -545,41 +849,62 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
goto out;
}
- status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_IDENTIFIER,
&identifier);
- if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) {
- status = IXGBE_ERR_SFP_NOT_PRESENT;
- hw->phy.sfp_type = ixgbe_sfp_type_not_present;
- if (hw->phy.type != ixgbe_phy_nl) {
- hw->phy.id = 0;
- hw->phy.type = ixgbe_phy_unknown;
- }
- goto out;
- }
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
- if (identifier == IXGBE_SFF_IDENTIFIER_SFP) {
- hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES,
- &comp_codes_1g);
- hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
- &comp_codes_10g);
- hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY,
- &cable_tech);
-
- /* ID Module
- * =========
- * 0 SFP_DA_CU
- * 1 SFP_SR
- * 2 SFP_LR
- * 3 SFP_DA_CORE0 - 82599-specific
- * 4 SFP_DA_CORE1 - 82599-specific
- * 5 SFP_SR/LR_CORE0 - 82599-specific
- * 6 SFP_SR/LR_CORE1 - 82599-specific
- * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
- * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
- * 9 SFP_1g_cu_CORE0 - 82599-specific
- * 10 SFP_1g_cu_CORE1 - 82599-specific
- */
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
+ if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ } else {
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_1GBE_COMP_CODES,
+ &comp_codes_1g);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_10GBE_COMP_CODES,
+ &comp_codes_10g);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_CABLE_TECHNOLOGY,
+ &cable_tech);
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ /* ID Module
+ * =========
+ * 0 SFP_DA_CU
+ * 1 SFP_SR
+ * 2 SFP_LR
+ * 3 SFP_DA_CORE0 - 82599-specific
+ * 4 SFP_DA_CORE1 - 82599-specific
+ * 5 SFP_SR/LR_CORE0 - 82599-specific
+ * 6 SFP_SR/LR_CORE1 - 82599-specific
+ * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
+ * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
+ * 9 SFP_1g_cu_CORE0 - 82599-specific
+ * 10 SFP_1g_cu_CORE1 - 82599-specific
+ */
if (hw->mac.type == ixgbe_mac_82598EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
@@ -611,31 +936,27 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
ixgbe_sfp_type_da_act_lmt_core1;
} else {
hw->phy.sfp_type =
- ixgbe_sfp_type_unknown;
+ ixgbe_sfp_type_unknown;
}
- } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
- if (hw->bus.lan_id == 0)
- hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core0;
- else
- hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core1;
- else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ } else if (comp_codes_10g &
+ (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core1;
- else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_1g_cu_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_cu_core1;
- else
+ } else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+ }
}
if (hw->phy.sfp_type != stored_sfp_type)
@@ -652,16 +973,33 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
/* Determine PHY vendor */
if (hw->phy.type != ixgbe_phy_nl) {
hw->phy.id = identifier;
- hw->phy.ops.read_i2c_eeprom(hw,
+ status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE0,
&oui_bytes[0]);
- hw->phy.ops.read_i2c_eeprom(hw,
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE1,
&oui_bytes[1]);
- hw->phy.ops.read_i2c_eeprom(hw,
+
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE2,
&oui_bytes[2]);
+ if (status == IXGBE_ERR_SWFW_SYNC ||
+ status == IXGBE_ERR_I2C ||
+ status == IXGBE_ERR_SFP_NOT_PRESENT)
+ goto err_read_i2c_eeprom;
+
vendor_oui =
((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
(oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
@@ -671,7 +1009,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
case IXGBE_SFF_VENDOR_OUI_TYCO:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type =
- ixgbe_phy_sfp_passive_tyco;
+ ixgbe_phy_sfp_passive_tyco;
break;
case IXGBE_SFF_VENDOR_OUI_FTL:
if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
@@ -688,7 +1026,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
default:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type =
- ixgbe_phy_sfp_passive_unknown;
+ ixgbe_phy_sfp_passive_unknown;
else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
hw->phy.type =
ixgbe_phy_sfp_active_unknown;
@@ -698,7 +1036,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
}
}
- /* All passive DA cables are supported */
+ /* Allow any DA cable vendor */
if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
IXGBE_SFF_DA_ACTIVE_CABLE)) {
status = 0;
@@ -720,7 +1058,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
goto out;
}
- /* This is guaranteed to be 82599, no need to check for NULL */
hw->mac.ops.get_device_caps(hw, &enforce_sfp);
if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
!((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
@@ -740,15 +1077,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
out:
return status;
+
+err_read_i2c_eeprom:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ if (hw->phy.type != ixgbe_phy_nl) {
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+ }
+ return IXGBE_ERR_SFP_NOT_PRESENT;
}
/**
- * ixgbe_get_sfp_init_sequence_offsets - Checks the MAC's EEPROM to see
- * if it supports a given SFP+ module type, if so it returns the offsets to the
- * phy init sequence block.
+ * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
* @hw: pointer to hardware structure
* @list_offset: offset to the SFP ID list
* @data_offset: offset to the SFP data block
+ *
+ * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
+ * so it returns the offsets to the phy init sequence block.
**/
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
@@ -863,11 +1209,22 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
s32 status = 0;
- u32 max_retry = 1;
+ u32 max_retry = 10;
u32 retry = 0;
+ u16 swfw_mask = 0;
bool nack = 1;
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
do {
+ if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
+ status = IXGBE_ERR_SWFW_SYNC;
+ goto read_byte_out;
+ }
+
ixgbe_i2c_start(hw);
/* Device Address and write indication */
@@ -910,6 +1267,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
break;
fail:
+ ixgbe_release_swfw_sync(hw, swfw_mask);
+ msleep(100);
ixgbe_i2c_bus_clear(hw);
retry++;
if (retry < max_retry)
@@ -919,6 +1278,9 @@ fail:
} while (retry < max_retry);
+ ixgbe_release_swfw_sync(hw, swfw_mask);
+
+read_byte_out:
return status;
}
@@ -937,6 +1299,17 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
s32 status = 0;
u32 max_retry = 1;
u32 retry = 0;
+ u16 swfw_mask = 0;
+
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+ if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
+ status = IXGBE_ERR_SWFW_SYNC;
+ goto write_byte_out;
+ }
do {
ixgbe_i2c_start(hw);
@@ -977,6 +1350,9 @@ fail:
hw_dbg(hw, "I2C byte write error.\n");
} while (retry < max_retry);
+ ixgbe_release_swfw_sync(hw, swfw_mask);
+
+write_byte_out:
return status;
}
@@ -1295,6 +1671,8 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
u32 i;
+ ixgbe_i2c_start(hw);
+
ixgbe_set_i2c_data(hw, &i2cctl, 1);
for (i = 0; i < 9; i++) {
@@ -1309,75 +1687,13 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
udelay(IXGBE_I2C_T_LOW);
}
+ ixgbe_i2c_start(hw);
+
/* Put the i2c bus back to default state */
ixgbe_i2c_stop(hw);
}
/**
- * ixgbe_check_phy_link_tnx - Determine link and speed status
- * @hw: pointer to hardware structure
- *
- * Reads the VS1 register to determine if link is up and the current speed for
- * the PHY.
- **/
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
- bool *link_up)
-{
- s32 status = 0;
- u32 time_out;
- u32 max_time_out = 10;
- u16 phy_link = 0;
- u16 phy_speed = 0;
- u16 phy_data = 0;
-
- /* Initialize speed and link to default case */
- *link_up = false;
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
-
- /*
- * Check current speed and link status of the PHY register.
- * This is a vendor specific register and may have to
- * be changed for other copper PHYs.
- */
- for (time_out = 0; time_out < max_time_out; time_out++) {
- udelay(10);
- status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
- MDIO_MMD_VEND1,
- &phy_data);
- phy_link = phy_data &
- IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
- phy_speed = phy_data &
- IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
- if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
- *link_up = true;
- if (phy_speed ==
- IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- break;
- }
- }
-
- return status;
-}
-
-/**
- * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
- * @hw: pointer to hardware structure
- * @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version)
-{
- s32 status = 0;
-
- status = hw->phy.ops.read_reg(hw, TNX_FW_REV, MDIO_MMD_VEND1,
- firmware_version);
-
- return status;
-}
-
-/**
 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
* @hw: pointer to hardware structure
*
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index fb3898f12fc5..197bdd13106a 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -58,6 +58,10 @@
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+/* Flow control defines */
+#define IXGBE_TAF_SYM_PAUSE 0x400
+#define IXGBE_TAF_ASM_PAUSE 0x800
+
/* Bit-shift macros */
#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
@@ -96,13 +100,19 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
u16 *firmware_version);
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+ u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 5428153af8f3..6e50d8328942 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -68,7 +68,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
* addresses
*/
for (i = 0; i < entries; i++) {
- vfinfo->vf_mc_hashes[i] = hash_list[i];;
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
}
for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
@@ -110,12 +110,37 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
+void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int new_mtu = msgbuf[1];
+ u32 max_frs;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* Only X540 supports jumbo frames in IOV mode */
+ if (adapter->hw.mac.type != ixgbe_mac_X540)
+ return;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
+ e_err(drv, "VF mtu %d out of range\n", new_mtu);
+ return;
+ }
+
+ max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
+ IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
+ if (max_frs < new_mtu) {
+ max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
+ }
+
+ e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+}
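
A quick worked example of the check above: ixgbe_set_vf_lpe() converts the MTU the VF requests into a full frame size, max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN, so a requested MTU of 9000 gives 9000 + 14 + 4 = 9018 bytes. On X540 that passes the range check (MTU >= 68 and frame <= IXGBE_MAX_JUMBO_FRAME_SIZE), and the PF raises MAXFRS only if it currently holds a smaller value; on 82598/82599 the request is simply ignored, since only X540 supports jumbo frames in IOV mode.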
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
vmolr |= (IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_ROPE |
IXGBE_VMOLR_BAM);
if (aupe)
vmolr |= IXGBE_VMOLR_AUPE;
@@ -178,8 +203,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
unsigned char vf_mac_addr[6];
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
unsigned int vfn = (event_mask & 0x3f);
bool enable = ((event_mask & 0x10000000U) != 0);
@@ -216,6 +240,11 @@ static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
reg |= (reg | (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+ /* Enable counting of spoofed packets in the SSVPC register */
+ reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
+ reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
+
ixgbe_vf_reset_event(adapter, vf);
}
@@ -228,6 +257,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
int entries;
u16 *hash_list;
int add, vid;
+ u8 *new_mac;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -245,15 +275,22 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
if (msgbuf[0] == IXGBE_VF_RESET) {
unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
- u8 *addr = (u8 *)(&msgbuf[1]);
+ new_mac = (u8 *)(&msgbuf[1]);
e_info(probe, "VF Reset msg received from vf %d\n", vf);
adapter->vfinfo[vf].clear_to_send = false;
ixgbe_vf_reset_msg(adapter, vf);
adapter->vfinfo[vf].clear_to_send = true;
+ if (is_valid_ether_addr(new_mac) &&
+ !adapter->vfinfo[vf].pf_set_mac)
+ ixgbe_set_vf_mac(adapter, vf, vf_mac);
+ else
+ ixgbe_set_vf_mac(adapter,
+ vf, adapter->vfinfo[vf].vf_mac_addresses);
+
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
/*
* Piggyback the multicast filter type so VF can compute the
* correct vectors
@@ -272,14 +309,16 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
switch ((msgbuf[0] & 0xFFFF)) {
case IXGBE_VF_SET_MAC_ADDR:
- {
- u8 *new_mac = ((u8 *)(&msgbuf[1]));
- if (is_valid_ether_addr(new_mac) &&
- !adapter->vfinfo[vf].pf_set_mac)
- ixgbe_set_vf_mac(adapter, vf, new_mac);
- else
- ixgbe_set_vf_mac(adapter,
- vf, adapter->vfinfo[vf].vf_mac_addresses);
+ new_mac = ((u8 *)(&msgbuf[1]));
+ if (is_valid_ether_addr(new_mac) &&
+ !adapter->vfinfo[vf].pf_set_mac) {
+ ixgbe_set_vf_mac(adapter, vf, new_mac);
+ } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
+ new_mac, ETH_ALEN)) {
+ e_warn(drv, "VF %d attempted to override "
+ "administratively set MAC address\nReload "
+ "the VF driver to resume operations\n", vf);
+ retval = -1;
}
break;
case IXGBE_VF_SET_MULTICAST:
@@ -290,13 +329,21 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
hash_list, vf);
break;
case IXGBE_VF_SET_LPE:
- WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
+ ixgbe_set_vf_lpe(adapter, msgbuf);
break;
case IXGBE_VF_SET_VLAN:
add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
>> IXGBE_VT_MSGINFO_SHIFT;
vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
- retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+ if (adapter->vfinfo[vf].pf_vlan) {
+ e_warn(drv, "VF %d attempted to override "
+ "administratively set VLAN configuration\n"
+ "Reload the VF driver to resume operations\n",
+ vf);
+ retval = -1;
+ } else {
+ retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+ }
break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
@@ -395,6 +442,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
int err = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
@@ -403,7 +451,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
if (err)
goto out;
ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
- ixgbe_set_vmolr(&adapter->hw, vf, false);
+ ixgbe_set_vmolr(hw, vf, false);
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
adapter->vfinfo[vf].pf_vlan = vlan;
adapter->vfinfo[vf].pf_qos = qos;
dev_info(&adapter->pdev->dev,
@@ -420,7 +469,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
err = ixgbe_set_vf_vlan(adapter, false,
adapter->vfinfo[vf].pf_vlan, vf);
ixgbe_set_vmvir(adapter, vlan, vf);
- ixgbe_set_vmolr(&adapter->hw, vf, true);
+ ixgbe_set_vmolr(hw, vf, true);
+ hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
adapter->vfinfo[vf].pf_vlan = 0;
adapter->vfinfo[vf].pf_qos = 0;
}
@@ -428,9 +478,90 @@ out:
return err;
}
+static int ixgbe_link_mbps(int internal_link_speed)
+{
+ switch (internal_link_speed) {
+ case IXGBE_LINK_SPEED_100_FULL:
+ return 100;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ return 1000;
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ return 10000;
+ default:
+ return 0;
+ }
+}
+
+static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
+ int link_speed)
+{
+ int rf_dec, rf_int;
+ u32 bcnrc_val;
+
+ if (tx_rate != 0) {
+ /* Calculate the rate factor values to set */
+ rf_int = link_speed / tx_rate;
+ rf_dec = (link_speed - (rf_int * tx_rate));
+ rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
+
+ bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+ bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+ IXGBE_RTTBCNRC_RF_INT_MASK);
+ bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+ } else {
+ bcnrc_val = 0;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+}
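
The rate limiter above programs RTTBCNRC with a fixed-point rate factor of link_speed / tx_rate: a 14-bit integer part plus a 14-bit fraction. Below is a minimal user-space sketch of the same arithmetic, for illustration only; the register constants are copied from this patch, while the helper name and main() are made up for the example.

	#include <stdint.h>
	#include <stdio.h>

	/* Constants mirrored from the patch (ixgbe_type.h). */
	#define IXGBE_RTTBCNRC_RS_ENA        0x80000000u
	#define IXGBE_RTTBCNRC_RF_DEC_MASK   0x00003FFFu
	#define IXGBE_RTTBCNRC_RF_INT_SHIFT  14
	#define IXGBE_RTTBCNRC_RF_INT_MASK \
		(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)

	/* Same arithmetic as ixgbe_set_vf_rate_limit(): encode
	 * link_speed / tx_rate as a 14.14 fixed-point rate factor. */
	static uint32_t encode_rate_factor(int link_speed, int tx_rate)
	{
		int rf_int, rf_dec;
		uint32_t bcnrc_val;

		if (tx_rate == 0)
			return 0;	/* 0 disables the limiter */

		rf_int = link_speed / tx_rate;
		rf_dec = link_speed - rf_int * tx_rate;
		rf_dec = (rf_dec * (1 << IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;

		bcnrc_val  = IXGBE_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((uint32_t)rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
			     IXGBE_RTTBCNRC_RF_INT_MASK;
		bcnrc_val |= (uint32_t)rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK;
		return bcnrc_val;
	}

	int main(void)
	{
		/* 10 Gb/s link, 300 Mb/s cap: rf_int = 33, rf_dec = 5461,
		 * i.e. a rate factor of ~33.33 = 10000 / 300. */
		printf("RTTBCNRC = 0x%08x\n", encode_rate_factor(10000, 300));
		return 0;
	}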
+
+void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
+{
+ int actual_link_speed, i;
+ bool reset_rate = false;
+
+ /* VF Tx rate limit was not set */
+ if (adapter->vf_rate_link_speed == 0)
+ return;
+
+ actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
+ if (actual_link_speed != adapter->vf_rate_link_speed) {
+ reset_rate = true;
+ adapter->vf_rate_link_speed = 0;
+ dev_info(&adapter->pdev->dev,
+ "Link speed has been changed. VF Transmit rate "
+ "is disabled\n");
+ }
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ if (reset_rate)
+ adapter->vfinfo[i].tx_rate = 0;
+
+ ixgbe_set_vf_rate_limit(&adapter->hw, i,
+ adapter->vfinfo[i].tx_rate,
+ actual_link_speed);
+ }
+}
+
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
{
- return -EOPNOTSUPP;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int actual_link_speed;
+
+ actual_link_speed = ixgbe_link_mbps(adapter->link_speed);
+ if ((vf >= adapter->num_vfs) || (!adapter->link_up) ||
+ (tx_rate > actual_link_speed) || (actual_link_speed != 10000) ||
+ ((tx_rate != 0) && (tx_rate <= 10)))
+ /* rate limit cannot be set to 10Mb or less in 10Gb adapters */
+ return -EINVAL;
+
+ adapter->vf_rate_link_speed = actual_link_speed;
+ adapter->vfinfo[vf].tx_rate = (u16)tx_rate;
+ ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+
+ return 0;
}
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
@@ -441,7 +572,7 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
- ivi->tx_rate = 0;
+ ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
ivi->vlan = adapter->vfinfo[vf].pf_vlan;
ivi->qos = adapter->vfinfo[vf].pf_qos;
return 0;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 49dc14debef7..34175564bb78 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -40,6 +40,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
+void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
#endif /* _IXGBE_SRIOV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index d3cc6ce7c973..25c1fb7eda06 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -54,9 +54,14 @@
#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
+#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
+#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+#define IXGBE_DEV_ID_X540T 0x1528
/* General Registers */
#define IXGBE_CTRL 0x00000
@@ -86,7 +91,7 @@
/* General Receive Control */
#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
-#define IXGBE_GRC_APME 0x00000002 /* Advanced Power Management Enable */
+#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
#define IXGBE_VPDDIAG0 0x10204
#define IXGBE_VPDDIAG1 0x10208
@@ -225,6 +230,7 @@
#define IXGBE_VT_CTL 0x051B0
#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
+#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
#define IXGBE_QDE 0x2F04
#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
@@ -279,7 +285,8 @@
#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
#define IXGBE_DTXCTL 0x07E00
-#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
#define IXGBE_PFDTXGSWC 0x08220
#define IXGBE_DTXMXSZRQ 0x08100
#define IXGBE_DTXTCPFLGL 0x04A88
@@ -293,6 +300,13 @@
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
+
+/* Anti-spoofing defines */
+#define IXGBE_SPOOF_MACAS_MASK 0xFF
+#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
+#define IXGBE_SPOOF_VLANAS_SHIFT 8
+#define IXGBE_PFVFSPOOF_REG_COUNT 8
+
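The implementations behind the new set_mac_anti_spoofing/set_vlan_anti_spoofing MAC ops are not shown in this section, but the masks above imply the register layout: eight PFVFSPOOF registers, each covering eight VFs, with bits 7:0 enabling MAC anti-spoofing and bits 15:8 enabling VLAN anti-spoofing. A hedged sketch of the index math that layout suggests (helper and local names are made up for the example):

	#include <stdio.h>

	#define SPOOF_VLANAS_SHIFT   8	/* from the patch */
	#define PFVFSPOOF_REG_COUNT  8	/* 8 registers x 8 VFs = 64 VFs */

	/* One plausible mapping implied by the masks: register index is
	 * vf / 8, bit index is vf % 8, offset by 8 for the VLAN bits. */
	static void spoof_bit_for_vf(int vf, int vlan, int *reg, int *bit)
	{
		*reg = vf / 8;
		*bit = (vf % 8) + (vlan ? SPOOF_VLANAS_SHIFT : 0);
	}

	int main(void)
	{
		int reg, bit;

		spoof_bit_for_vf(13, 1, &reg, &bit);
		printf("VF 13 VLAN anti-spoof -> PFVFSPOOF[%d] bit %d\n",
		       reg, bit);
		return 0;
	}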
#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
/* Tx DCA Control register : 128 of these (0-127) */
#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
@@ -328,7 +342,7 @@
/* Wake Up Control */
#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
-#define IXGBE_WUC_ADVD3WUC 0x00000010 /* D3Cold wake up cap. enable*/
+#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
/* Wake Up Filter Control */
#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
@@ -519,6 +533,12 @@
#define IXGBE_RTTDTECC 0x04990
#define IXGBE_RTTDTECC_NO_BCN 0x00000100
#define IXGBE_RTTBCNRC 0x04984
+#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
+#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
+#define IXGBE_RTTBCNRC_RF_INT_MASK \
+ (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
+
/* FCoE registers */
#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
@@ -645,6 +665,8 @@
#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
@@ -655,6 +677,11 @@
#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
+#define IXGBE_PCRC8ECL 0x0E810
+#define IXGBE_PCRC8ECH 0x0E811
+#define IXGBE_PCRC8ECH_MASK 0x1F
+#define IXGBE_LDPCECL 0x0E820
+#define IXGBE_LDPCECH 0x0E821
/* Management */
#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -988,14 +1015,23 @@
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
+/* MII clause 22/28 definitions */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_AUTONEG_REG 0x0
+
#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
#define IXGBE_MAX_PHY_ADDR 32
/* PHY IDs*/
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
+#define X540_PHY_ID 0x01540200
#define QT2022_PHY_ID 0x0043A400
#define ATH_PHY_ID 0x03429050
+#define AQ_FW_REV 0x20
/* PHY Types */
#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
@@ -1463,6 +1499,8 @@
#define IXGBE_ANLP1_PAUSE 0x0C00
#define IXGBE_ANLP1_SYM_PAUSE 0x0400
#define IXGBE_ANLP1_ASM_PAUSE 0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
+
/* SW Semaphore Register bitmasks */
#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
@@ -1491,6 +1529,7 @@
#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
/* EEPROM Addressing bits based on type (0-small, 1-large) */
#define IXGBE_EEC_ADDR_SIZE 0x00000400
@@ -1500,12 +1539,18 @@
#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
#define IXGBE_EEPROM_OPCODE_BITS 8
+/* Part Number String Length */
+#define IXGBE_PBANUM_LENGTH 11
+
/* Checksum and EEPROM pointers */
+#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
#define IXGBE_EEPROM_CHECKSUM 0x3F
#define IXGBE_EEPROM_SUM 0xBABA
#define IXGBE_PCIE_ANALOG_PTR 0x03
#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR 0x04
#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR 0x05
#define IXGBE_PCIE_GENERAL_PTR 0x06
#define IXGBE_PCIE_CONFIG0_PTR 0x07
#define IXGBE_PCIE_CONFIG1_PTR 0x08
@@ -1589,6 +1634,8 @@
#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
/* PCI Bus Info */
+#define IXGBE_PCI_DEVICE_STATUS 0xAA
+#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
@@ -1655,6 +1702,8 @@
#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN 0x00008000
#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
@@ -1922,10 +1971,9 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRM_VLANID 0x00000001
#define IXGBE_FDIRM_VLANP 0x00000002
#define IXGBE_FDIRM_POOL 0x00000004
-#define IXGBE_FDIRM_L3P 0x00000008
-#define IXGBE_FDIRM_L4P 0x00000010
-#define IXGBE_FDIRM_FLEX 0x00000020
-#define IXGBE_FDIRM_DIPv6 0x00000040
+#define IXGBE_FDIRM_L4P 0x00000008
+#define IXGBE_FDIRM_FLEX 0x00000010
+#define IXGBE_FDIRM_DIPv6 0x00000020
#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
#define IXGBE_FDIRFREE_FREE_SHIFT 0
@@ -1965,6 +2013,7 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCMD_LAST 0x00000800
#define IXGBE_FDIRCMD_COLLISION 0x00001000
#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
#define IXGBE_FDIR_INIT_DONE_POLL 10
@@ -2113,57 +2162,95 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
+/* Flow Control Macros */
+#define PAUSE_RTT 8
+#define PAUSE_MTU(MTU) ((MTU + 1024 - 1) / 1024)
+
+#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\
+ PAUSE_MTU(MTU))
+#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
+
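Plugging a standard 1500-byte MTU into the new pause macros, all in integer arithmetic: PAUSE_MTU(1500) = (1500 + 1023) / 1024 = 2, so FC_HIGH_WATER(1500) = ((8 + 2) * 144 + 99) / 100 + 2 = 17 and FC_LOW_WATER(1500) = 2 * (2 * 2 + 8) = 24. How these values are scaled into the flow-control watermark registers is up to the callers, which sit outside this hunk.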
/* Software ATR hash keys */
-#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
-#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
-
-/* Software ATR input stream offsets and masks */
-#define IXGBE_ATR_VLAN_OFFSET 0
-#define IXGBE_ATR_SRC_IPV6_OFFSET 2
-#define IXGBE_ATR_SRC_IPV4_OFFSET 14
-#define IXGBE_ATR_DST_IPV6_OFFSET 18
-#define IXGBE_ATR_DST_IPV4_OFFSET 30
-#define IXGBE_ATR_SRC_PORT_OFFSET 34
-#define IXGBE_ATR_DST_PORT_OFFSET 36
-#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
-#define IXGBE_ATR_VM_POOL_OFFSET 40
-#define IXGBE_ATR_L4TYPE_OFFSET 41
+#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK 0x7fff
#define IXGBE_ATR_L4TYPE_MASK 0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
#define IXGBE_ATR_L4TYPE_UDP 0x1
#define IXGBE_ATR_L4TYPE_TCP 0x2
#define IXGBE_ATR_L4TYPE_SCTP 0x3
-#define IXGBE_ATR_HASH_MASK 0x7fff
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+enum ixgbe_atr_flow_type {
+ IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+};
/* Flow Director ATR input struct. */
-struct ixgbe_atr_input {
- /* Byte layout in order, all values with MSB first:
+union ixgbe_atr_input {
+ /*
+ * Byte layout in order, all values with MSB first:
*
+ * vm_pool - 1 byte
+ * flow_type - 1 byte
* vlan_id - 2 bytes
* src_ip - 16 bytes
* dst_ip - 16 bytes
* src_port - 2 bytes
* dst_port - 2 bytes
* flex_bytes - 2 bytes
- * vm_pool - 1 byte
- * l4type - 1 byte
+	 * rsvd0      - 2 bytes - reserved space, must be 0.
*/
- u8 byte_stream[42];
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ __be16 rsvd0;
+ } formatted;
+ __be32 dword_stream[11];
+};
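
The old opaque 42-byte byte_stream becomes a union whose formatted and dword_stream views alias the same 44 bytes, so flow fields can be filled by name and then hashed as dwords. Below is a trimmed user-space copy of the layout, just to show how a TCPv4 tuple lands in it; the local type name, addresses, and ports are made up for the example.

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>
	#include <arpa/inet.h>	/* htons/htonl */

	/* Trimmed copy of the union from the patch, illustration only. */
	union atr_input {
		struct {
			uint8_t  vm_pool;
			uint8_t  flow_type;	/* IXGBE_ATR_FLOW_TYPE_* */
			uint16_t vlan_id;	/* big-endian on the wire */
			uint32_t dst_ip[4];
			uint32_t src_ip[4];
			uint16_t src_port;
			uint16_t dst_port;
			uint16_t flex_bytes;
			uint16_t rsvd0;
		} formatted;
		uint32_t dword_stream[11];	/* 44 bytes, same storage */
	};

	int main(void)
	{
		union atr_input in;

		/* Describe a TCPv4 flow 192.0.2.1:1024 -> 192.0.2.2:80;
		 * the hash code then sees the same bytes as dwords. */
		memset(&in, 0, sizeof(in));
		in.formatted.flow_type = 0x2;	/* IXGBE_ATR_FLOW_TYPE_TCPV4 */
		in.formatted.src_ip[0] = htonl(0xC0000201);
		in.formatted.dst_ip[0] = htonl(0xC0000202);
		in.formatted.src_port  = htons(1024);
		in.formatted.dst_port  = htons(80);

		printf("first dword: 0x%08x\n", in.dword_stream[0]);
		return 0;
	}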
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ } formatted;
+ __be32 ip;
+ struct {
+ __be16 src;
+ __be16 dst;
+ } port;
+ __be16 flex_bytes;
+ __be32 dword;
};
struct ixgbe_atr_input_masks {
- u32 src_ip_mask;
- u32 dst_ip_mask;
- u16 src_port_mask;
- u16 dst_port_mask;
- u16 vlan_id_mask;
- u16 data_mask;
+ __be16 rsvd0;
+ __be16 vlan_id_mask;
+ __be32 dst_ip_mask[4];
+ __be32 src_ip_mask[4];
+ __be16 src_port_mask;
+ __be16 dst_port_mask;
+ __be16 flex_mask;
};
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
+ ixgbe_flash,
ixgbe_eeprom_none /* No NVM support */
};
@@ -2171,12 +2258,15 @@ enum ixgbe_mac_type {
ixgbe_mac_unknown = 0,
ixgbe_mac_82598EB,
ixgbe_mac_82599EB,
+ ixgbe_mac_X540,
ixgbe_num_macs
};
enum ixgbe_phy_type {
ixgbe_phy_unknown = 0,
+ ixgbe_phy_none,
ixgbe_phy_tn,
+ ixgbe_phy_aq,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
ixgbe_phy_xaui,
@@ -2263,32 +2353,31 @@ enum ixgbe_bus_type {
/* PCI bus speeds */
enum ixgbe_bus_speed {
ixgbe_bus_speed_unknown = 0,
- ixgbe_bus_speed_33,
- ixgbe_bus_speed_66,
- ixgbe_bus_speed_100,
- ixgbe_bus_speed_120,
- ixgbe_bus_speed_133,
- ixgbe_bus_speed_2500,
- ixgbe_bus_speed_5000,
+ ixgbe_bus_speed_33 = 33,
+ ixgbe_bus_speed_66 = 66,
+ ixgbe_bus_speed_100 = 100,
+ ixgbe_bus_speed_120 = 120,
+ ixgbe_bus_speed_133 = 133,
+ ixgbe_bus_speed_2500 = 2500,
+ ixgbe_bus_speed_5000 = 5000,
ixgbe_bus_speed_reserved
};
/* PCI bus widths */
enum ixgbe_bus_width {
ixgbe_bus_width_unknown = 0,
- ixgbe_bus_width_pcie_x1,
- ixgbe_bus_width_pcie_x2,
+ ixgbe_bus_width_pcie_x1 = 1,
+ ixgbe_bus_width_pcie_x2 = 2,
ixgbe_bus_width_pcie_x4 = 4,
ixgbe_bus_width_pcie_x8 = 8,
- ixgbe_bus_width_32,
- ixgbe_bus_width_64,
+ ixgbe_bus_width_32 = 32,
+ ixgbe_bus_width_64 = 64,
ixgbe_bus_width_reserved
};
struct ixgbe_addr_filter_info {
u32 num_mc_addrs;
u32 rar_used_count;
- u32 mc_addr_in_rar_count;
u32 mta_in_use;
u32 overflow_promisc;
bool uc_set_promisc;
@@ -2405,6 +2494,7 @@ struct ixgbe_eeprom_operations {
s32 (*write)(struct ixgbe_hw *, u16, u16);
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
+ u16 (*calc_checksum)(struct ixgbe_hw *);
};
struct ixgbe_mac_operations {
@@ -2425,6 +2515,8 @@ struct ixgbe_mac_operations {
s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
s32 (*setup_sfp)(struct ixgbe_hw *);
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
+ s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
+ void (*release_swfw_sync)(struct ixgbe_hw *, u16);
/* Link */
void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2447,13 +2539,14 @@ struct ixgbe_mac_operations {
s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
s32 (*init_uta_tables)(struct ixgbe_hw *);
+ void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
+ void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *, s32);
@@ -2486,6 +2579,7 @@ struct ixgbe_eeprom_info {
u16 address_bits;
};
+#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
struct ixgbe_mac_info {
struct ixgbe_mac_operations ops;
enum ixgbe_mac_type type;
@@ -2496,6 +2590,8 @@ struct ixgbe_mac_info {
u16 wwnn_prefix;
/* prefix for World Wide Port Name (WWPN) */
u16 wwpn_prefix;
+#define IXGBE_MAX_MTA 128
+ u32 mta_shadow[IXGBE_MAX_MTA];
s32 mc_filter_type;
u32 mcft_size;
u32 vft_size;
@@ -2508,6 +2604,7 @@ struct ixgbe_mac_info {
u32 orig_autoc2;
bool orig_link_settings_stored;
bool autotry_restart;
+ u8 flags;
};
struct ixgbe_phy_info {
@@ -2574,6 +2671,7 @@ struct ixgbe_hw {
u16 subsystem_vendor_id;
u8 revision_id;
bool adapter_stopped;
+ bool force_full_reset;
};
struct ixgbe_info {
@@ -2613,7 +2711,12 @@ struct ixgbe_info {
#define IXGBE_ERR_EEPROM_VERSION -24
#define IXGBE_ERR_NO_SPACE -25
#define IXGBE_ERR_OVERTEMP -26
-#define IXGBE_ERR_RAR_INDEX -27
+#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
+#define IXGBE_ERR_FC_NOT_SUPPORTED -28
+#define IXGBE_ERR_FLOW_CONTROL -29
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define IXGBE_ERR_PBA_SECTION -31
+#define IXGBE_ERR_INVALID_ARGUMENT -32
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
new file mode 100644
index 000000000000..f47e93fe32be
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -0,0 +1,737 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2011 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe.h"
+#include "ixgbe_phy.h"
+
+#define IXGBE_X540_MAX_TX_QUEUES 128
+#define IXGBE_X540_MAX_RX_QUEUES 128
+#define IXGBE_X540_RAR_ENTRIES 128
+#define IXGBE_X540_MC_TBL_SIZE 128
+#define IXGBE_X540_VFT_TBL_SIZE 128
+
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
+
+static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+{
+ return ixgbe_media_type_copper;
+}
+
+static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+
+ /* Call PHY identify routine to get the phy type */
+ ixgbe_identify_phy_generic(hw);
+
+ mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
+ mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
+ mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+ mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
+ mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
+ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg: true if autonegotiation enabled
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ **/
+static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ autoneg_wait_to_complete);
+}
+
+/**
+ * ixgbe_reset_hw_X540 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
+ * reset.
+ **/
+static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+{
+ ixgbe_link_speed link_speed;
+ s32 status = 0;
+ u32 ctrl;
+ u32 ctrl_ext;
+ u32 reset_bit;
+ u32 i;
+ u32 autoc;
+ u32 autoc2;
+ bool link_up = false;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ /*
+	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
+ * access and verify no pending requests before reset
+ */
+ ixgbe_disable_pcie_master(hw);
+
+mac_reset_top:
+ /*
+ * Issue global reset to the MAC. Needs to be SW reset if link is up.
+ * If link reset is used when link is up, it might reset the PHY when
+ * mng is using it. If link is down or the flag to force full link
+ * reset is set, then perform link reset.
+ */
+ if (hw->force_full_reset) {
+ reset_bit = IXGBE_CTRL_LNK_RST;
+ } else {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (!link_up)
+ reset_bit = IXGBE_CTRL_LNK_RST;
+ else
+ reset_bit = IXGBE_CTRL_RST;
+ }
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & reset_bit))
+ break;
+ }
+ if (ctrl & reset_bit) {
+ status = IXGBE_ERR_RESET_FAILED;
+ hw_dbg(hw, "Reset polling failed to complete.\n");
+ }
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete. We use 1usec since that is
+ * what is needed for ixgbe_disable_pcie_master(). The second reset
+ * then clears out any effects of those events.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ udelay(1);
+ goto mac_reset_top;
+ }
+
+ /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+ msleep(50);
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store the original AUTOC/AUTOC2 values if they have not been
+ * stored off yet. Otherwise restore the stored original
+ * values since the reset operation sets back to defaults.
+ */
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ if (hw->mac.orig_link_settings_stored == false) {
+ hw->mac.orig_autoc = autoc;
+ hw->mac.orig_autoc2 = autoc2;
+ hw->mac.orig_link_settings_stored = true;
+ } else {
+ if (autoc != hw->mac.orig_autoc)
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
+ IXGBE_AUTOC_AN_RESTART));
+
+ if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
+ (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
+ autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
+ autoc2 |= (hw->mac.orig_autoc2 &
+ IXGBE_AUTOC2_UPPER_MASK);
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+ }
+ }
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Store the permanent SAN mac address */
+ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+ /* Add the SAN MAC address to the RAR only if it's a valid address */
+ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* Reserve the last RAR for the SAN MAC address */
+ hw->mac.num_rar_entries--;
+ }
+
+ /* Store the alternative WWNN/WWPN prefix */
+ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+ &hw->mac.wwpn_prefix);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ **/
+static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+{
+ u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u16 ext_ability = 0;
+
+ hw->phy.ops.identify(hw);
+
+ hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
+ &ext_ability);
+ if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ **/
+static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 eec;
+ u16 eeprom_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->semaphore_delay = 10;
+ eeprom->type = ixgbe_flash;
+
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+ IXGBE_EEC_SIZE_SHIFT);
+ eeprom->word_size = 1 << (eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ **/
+static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
+ status = ixgbe_read_eerd_generic(hw, offset, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+
+ ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ u32 eewr;
+ s32 status;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+ (data << IXGBE_EEPROM_RW_REG_DATA) |
+ IXGBE_EEPROM_RW_REG_START;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != 0) {
+ hw_dbg(hw, "Eeprom write EEWR timed out\n");
+ goto out;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != 0) {
+ hw_dbg(hw, "Eeprom write EEWR timed out\n");
+ goto out;
+ }
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+out:
+ ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ **/
+static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ u16 i;
+ u16 j;
+ u16 checksum = 0;
+ u16 length = 0;
+ u16 pointer = 0;
+ u16 word = 0;
+
+ /* Include words 0x0 - 0x3E in the checksum; 0x3F is the checksum word itself */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+ if (hw->eeprom.ops.read(hw, i, &word) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+
+ /*
+ * Include all data from pointers 0x3, 0x6-0xE. This excludes the
+ * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+ */
+ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+ continue;
+
+ if (hw->eeprom.ops.read(hw, i, &pointer) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+
+ /* Skip pointer section if the pointer is invalid. */
+ if (pointer == 0xFFFF || pointer == 0 ||
+ pointer >= hw->eeprom.word_size)
+ continue;
+
+ if (hw->eeprom.ops.read(hw, pointer, &length) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+
+ /* Skip pointer section if length is invalid. */
+ if (length == 0xFFFF || length == 0 ||
+ (pointer + length) >= hw->eeprom.word_size)
+ continue;
+
+ for (j = pointer+1; j <= pointer+length; j++) {
+ if (hw->eeprom.ops.read(hw, j, &word) != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ break;
+ }
+ checksum += word;
+ }
+ }
+
+ checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+ return checksum;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to shadow RAM using EEWR register, software calculates
+ * checksum and updates the EEPROM and instructs the hardware to update
+ * the flash.
+ **/
+static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ status = ixgbe_update_eeprom_checksum_generic(hw);
+
+ if (!status)
+ status = ixgbe_update_flash_X540(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
+ * EEPROM from shadow RAM to the flash device.
+ **/
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+{
+ u32 flup;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == IXGBE_ERR_EEPROM) {
+ hw_dbg(hw, "Flash update time out\n");
+ goto out;
+ }
+
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == 0)
+ hw_dbg(hw, "Flash update complete\n");
+ else
+ hw_dbg(hw, "Flash update time out\n");
+
+ if (hw->revision_id == 0) {
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+ if (flup & IXGBE_EEC_SEC1VAL) {
+ flup |= IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ }
+
+ status = ixgbe_poll_flash_update_done_X540(hw);
+ if (status == 0)
+ hw_dbg(hw, "Flash update complete\n");
+ else
+ hw_dbg(hw, "Flash update time out\n");
+
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_poll_flash_update_done_X540 - Poll flash update status
+ * @hw: pointer to hardware structure
+ *
+ * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
+ * flash update is done.
+ **/
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 reg;
+ s32 status = IXGBE_ERR_EEPROM;
+
+ for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
+ reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (reg & IXGBE_EEC_FLUDONE) {
+ status = 0;
+ break;
+ }
+ udelay(5);
+ }
+ return status;
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the SW_FW_SYNC register for
+ * the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 5;
+ u32 hwmask = 0;
+ u32 timeout = 200;
+ u32 i;
+
+ if (swmask == IXGBE_GSSR_EEP_SM)
+ hwmask = IXGBE_GSSR_FLASH_SM;
+
+ for (i = 0; i < timeout; i++) {
+ /*
+ * SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
+ */
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ break;
+ } else {
+ /*
+ * Firmware currently using resource (fwmask),
+ * hardware currently using resource (hwmask),
+ * or other software thread currently using
+ * resource (swmask)
+ */
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msleep(5);
+ }
+ }
+
+ /*
+ * If the resource is not released by the FW/HW the SW can assume that
+ * the FW/HW is malfunctioning. In that case the SW should set the
+ * SW bit(s) of the requested resource(s) while ignoring the
+ * corresponding FW/HW bits in the SW_FW_SYNC register.
+ */
+ if (i >= timeout) {
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (swfw_sync & (fwmask | hwmask)) {
+ if (ixgbe_get_swfw_sync_semaphore(hw))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ swfw_sync |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ ixgbe_release_swfw_sync_semaphore(hw);
+ }
+ }
+
+ msleep(5);
+ return 0;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the SW_FW_SYNC register
+ * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+
+ ixgbe_get_swfw_sync_semaphore(hw);
+
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync &= ~swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+
+ ixgbe_release_swfw_sync_semaphore(hw);
+ msleep(5);
+}
+
+/**
+ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Sets the hardware semaphores so SW/FW can gain control of shared resources
+ **/
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_EEPROM;
+ u32 timeout = 2000;
+ u32 i;
+ u32 swsm;
+
+ /* Get SMBI software semaphore between device drivers first */
+ for (i = 0; i < timeout; i++) {
+ /*
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI)) {
+ status = 0;
+ break;
+ }
+ udelay(50);
+ }
+
+ /* Now get the semaphore between SW/FW through the REGSMP bit */
+ if (!status) {
+ for (i = 0; i < timeout; i++) {
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ if (!(swsm & IXGBE_SWFW_REGSMP))
+ break;
+
+ udelay(50);
+ }
+ } else {
+ hw_dbg(hw, "Software semaphore SMBI between device drivers "
+ "not granted.\n");
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+ u32 swsm;
+
+ /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm &= ~IXGBE_SWSM_SMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm &= ~IXGBE_SWFW_REGSMP;
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+static struct ixgbe_mac_operations mac_ops_X540 = {
+ .init_hw = &ixgbe_init_hw_generic,
+ .reset_hw = &ixgbe_reset_hw_X540,
+ .start_hw = &ixgbe_start_hw_generic,
+ .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
+ .get_media_type = &ixgbe_get_media_type_X540,
+ .get_supported_physical_layer =
+ &ixgbe_get_supported_physical_layer_X540,
+ .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
+ .get_mac_addr = &ixgbe_get_mac_addr_generic,
+ .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
+ .get_device_caps = NULL,
+ .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
+ .stop_adapter = &ixgbe_stop_adapter_generic,
+ .get_bus_info = &ixgbe_get_bus_info_generic,
+ .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
+ .read_analog_reg8 = NULL,
+ .write_analog_reg8 = NULL,
+ .setup_link = &ixgbe_setup_mac_link_X540,
+ .check_link = &ixgbe_check_mac_link_generic,
+ .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
+ .led_on = &ixgbe_led_on_generic,
+ .led_off = &ixgbe_led_off_generic,
+ .blink_led_start = &ixgbe_blink_led_start_generic,
+ .blink_led_stop = &ixgbe_blink_led_stop_generic,
+ .set_rar = &ixgbe_set_rar_generic,
+ .clear_rar = &ixgbe_clear_rar_generic,
+ .set_vmdq = &ixgbe_set_vmdq_generic,
+ .clear_vmdq = &ixgbe_clear_vmdq_generic,
+ .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
+ .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
+ .enable_mc = &ixgbe_enable_mc_generic,
+ .disable_mc = &ixgbe_disable_mc_generic,
+ .clear_vfta = &ixgbe_clear_vfta_generic,
+ .set_vfta = &ixgbe_set_vfta_generic,
+ .fc_enable = &ixgbe_fc_enable_generic,
+ .init_uta_tables = &ixgbe_init_uta_tables_generic,
+ .setup_sfp = NULL,
+ .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
+ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
+ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
+ .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+};
+
+static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
+ .init_params = &ixgbe_init_eeprom_params_X540,
+ .read = &ixgbe_read_eerd_X540,
+ .write = &ixgbe_write_eewr_X540,
+ .calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
+ .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
+ .update_checksum = &ixgbe_update_eeprom_checksum_X540,
+};
+
+static struct ixgbe_phy_operations phy_ops_X540 = {
+ .identify = &ixgbe_identify_phy_generic,
+ .identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .init = NULL,
+ .reset = NULL,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
+ .setup_link = &ixgbe_setup_phy_link_generic,
+ .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
+ .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
+ .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
+ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
+};
+
+struct ixgbe_info ixgbe_X540_info = {
+ .mac = ixgbe_mac_X540,
+ .get_invariants = &ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_X540,
+ .eeprom_ops = &eeprom_ops_X540,
+ .phy_ops = &phy_ops_X540,
+ .mbx_ops = &mbx_ops_generic,
+};
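For readers skimming the X540 EEPROM code above, the checksum rule is: sum the fixed words plus every valid pointed-to section, then store the value that makes the covered image add up to a fixed magic constant. A minimal standalone sketch of that idea follows; the constant values and helper name are assumptions for illustration and are not part of this patch.

#include <stdint.h>

/* Assumed to mirror the driver's IXGBE_EEPROM_SUM / IXGBE_EEPROM_CHECKSUM. */
#define EEPROM_SUM	0xBABAu
#define CHECKSUM_WORD	0x3Fu	/* fixed region is words 0x00..0x3E */

static uint16_t model_eeprom_checksum(const uint16_t *img, uint16_t nwords)
{
	uint16_t sum = 0;
	uint16_t i;

	/* Fixed region, excluding the checksum word itself. */
	for (i = 0; i < CHECKSUM_WORD && i < nwords; i++)
		sum += img[i];

	/*
	 * The real routine also walks pointers 0x3 and 0x6..0xE: the word at
	 * each valid pointer is a section length, and the following 'length'
	 * words are added to 'sum' in the same way.
	 */
	return (uint16_t)(EEPROM_SUM - sum);
}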
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
index dd4e0d27e8cc..1f35d229e71a 100644
--- a/drivers/net/ixgbevf/Makefile
+++ b/drivers/net/ixgbevf/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82599 Virtual Function driver
-# Copyright(c) 1999 - 2009 Intel Corporation.
+# Copyright(c) 1999 - 2010 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index ca2c81f49a05..78abb6f1a866 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,7 @@
/* Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 1
@@ -64,6 +65,8 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN 0x00008000
/* DCA Control */
#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index 4cc817acfb62..0563ab29264e 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -172,7 +172,7 @@ static char *ixgbevf_reg_names[] = {
"IXGBE_VFSTATUS",
"IXGBE_VFLINKS",
"IXGBE_VFRXMEMWRAP",
- "IXGBE_VFRTIMER",
+ "IXGBE_VFFRTIMER",
"IXGBE_VTEICR",
"IXGBE_VTEICS",
"IXGBE_VTEIMS",
@@ -240,7 +240,7 @@ static void ixgbevf_get_regs(struct net_device *netdev,
regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
- regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER);
+ regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);
/* Interrupt */
/* don't read EICR because it can clear interrupt causes, instead
@@ -544,7 +544,7 @@ struct ixgbevf_reg_test {
#define TABLE64_TEST_HI 6
/* default VF register test */
-static struct ixgbevf_reg_test reg_test_vf[] = {
+static const struct ixgbevf_reg_test reg_test_vf[] = {
{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
@@ -557,19 +557,23 @@ static struct ixgbevf_reg_test reg_test_vf[] = {
{ 0, 0, 0, 0 }
};
+static const u32 register_test_patterns[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+};
+
#define REG_PATTERN_TEST(R, M, W) \
{ \
u32 pat, val, before; \
- const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
- for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
+ for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
before = readl(adapter->hw.hw_addr + R); \
- writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
+ writel((register_test_patterns[pat] & W), \
+ (adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
- if (val != (_test[pat] & W & M)) { \
+ if (val != (register_test_patterns[pat] & W & M)) { \
hw_dbg(&adapter->hw, \
"pattern test reg %04X failed: got " \
"0x%08X expected 0x%08X\n", \
- R, val, (_test[pat] & W & M)); \
+ R, val, (register_test_patterns[pat] & W & M)); \
*data = R; \
writel(before, adapter->hw.hw_addr + R); \
return 1; \
@@ -596,7 +600,7 @@ static struct ixgbevf_reg_test reg_test_vf[] = {
static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
- struct ixgbevf_reg_test *test;
+ const struct ixgbevf_reg_test *test;
u32 i;
test = reg_test_vf;
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index da4033c6efa2..b703f60be3b7 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -207,7 +207,6 @@ struct ixgbevf_adapter {
u64 hw_tso_ctxt;
u64 hw_tso6_ctxt;
u32 tx_timeout_count;
- bool detect_tx_hung;
/* RX */
struct ixgbevf_ring *rx_ring; /* One per active queue */
@@ -275,9 +274,11 @@ enum ixbgevf_state_t {
enum ixgbevf_boards {
board_82599_vf,
+ board_X540_vf,
};
-extern struct ixgbevf_info ixgbevf_vf_info;
+extern struct ixgbevf_info ixgbevf_82599_vf_info;
+extern struct ixgbevf_info ixgbevf_X540_vf_info;
extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
/* needed by ethtool.c */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index dc03c9652389..054ab05b7c6a 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -49,14 +49,16 @@
char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
- "Intel(R) 82599 Virtual Function";
+ "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "1.0.0-k0"
+#define DRV_VERSION "2.0.0-k2"
const char ixgbevf_driver_version[] = DRV_VERSION;
-static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+static char ixgbevf_copyright[] =
+ "Copyright (c) 2009 - 2010 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
- [board_82599_vf] = &ixgbevf_vf_info,
+ [board_82599_vf] = &ixgbevf_82599_vf_info,
+ [board_X540_vf] = &ixgbevf_X540_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -70,6 +72,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
static struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
board_82599_vf},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
+ board_X540_vf},
/* required last entry */
{0, }
@@ -103,7 +107,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
}
/*
- * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
* @adapter: pointer to adapter struct
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
* @queue: queue to map the corresponding interrupt to
@@ -158,42 +162,6 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
/* tx_buffer_info must be completely set up in the transmit path */
}
-static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring,
- unsigned int eop)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 head, tail;
-
- /* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of eop */
- head = readl(hw->hw_addr + tx_ring->head);
- tail = readl(hw->hw_addr + tx_ring->tail);
- adapter->detect_tx_hung = false;
- if ((head != tail) &&
- tx_ring->tx_buffer_info[eop].time_stamp &&
- time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
- /* detected Tx unit hang */
- union ixgbe_adv_tx_desc *tx_desc;
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
- printk(KERN_ERR "Detected Tx Unit Hang\n"
- " Tx Queue <%d>\n"
- " TDH, TDT <%x>, <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "tx_buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- tx_ring->queue_index,
- head, tail,
- tx_ring->next_to_use, eop,
- tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
- return true;
- }
-
- return false;
-}
-
#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
@@ -289,16 +257,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
#endif
}
- if (adapter->detect_tx_hung) {
- if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
- /* schedule immediate reset if we believe we hung */
- printk(KERN_INFO
- "tx hang %d detected, resetting adapter\n",
- adapter->tx_timeout_count + 1);
- ixgbevf_tx_timeout(adapter->netdev);
- }
- }
-
/* re-arm the interrupt */
if ((count >= tx_ring->work_limit) &&
(!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
@@ -330,7 +288,6 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_adapter *adapter = q_vector->adapter;
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
- int ret;
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
if (adapter->vlgrp && is_vlan)
@@ -341,9 +298,9 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
napi_gro_receive(&q_vector->napi, skb);
} else {
if (adapter->vlgrp && is_vlan)
- ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+ vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
else
- ret = netif_rx(skb);
+ netif_rx(skb);
}
}
@@ -1013,7 +970,7 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
}
/**
- * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
+ * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
* @irq: unused
* @data: pointer to our q_vector struct for this interrupt vector
**/
@@ -1661,6 +1618,11 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
j = adapter->rx_ring[i].reg_idx;
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
rxdctl |= IXGBE_RXDCTL_ENABLE;
+ if (hw->mac.type == ixgbe_mac_X540_vf) {
+ rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+ rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
+ IXGBE_RXDCTL_RLPML_EN);
+ }
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
ixgbevf_rx_desc_queue_enable(adapter, i);
}
@@ -1963,7 +1925,7 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
}
/*
- * ixgbe_set_num_queues: Allocate queues for device, feature dependant
+ * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
@@ -2212,7 +2174,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->revision_id = pdev->revision;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
@@ -2406,9 +2368,6 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
10 : 1);
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
- } else {
- /* Force detection of hung controller */
- adapter->detect_tx_hung = true;
}
} else {
adapter->link_up = false;
@@ -2423,9 +2382,6 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
ixgbevf_update_stats(adapter);
pf_has_reset:
- /* Force detection of hung controller every watchdog period */
- adapter->detect_tx_hung = true;
-
/* Reset the timer */
if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer,
@@ -2488,10 +2444,9 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
int size;
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vmalloc(size);
+ tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
- memset(tx_ring->tx_buffer_info, 0, size);
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -2555,14 +2510,13 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
int size;
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc(size);
+ rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info) {
hw_dbg(&adapter->hw,
"Unable to vmalloc buffer memory for "
"the receive descriptor ring\n");
goto alloc_failed;
}
- memset(rx_ring->rx_buffer_info, 0, size);
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
@@ -3215,10 +3169,16 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
+ u32 msg[2];
+
+ if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+ max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
/* MTU < 68 is an error and causes problems on some kernels */
- if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ if ((new_mtu < 68) || (max_frame > max_possible_frame))
return -EINVAL;
hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
@@ -3226,6 +3186,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
+ msg[0] = IXGBE_VF_SET_LPE;
+ msg[1] = max_frame;
+ hw->mbx.ops.write_posted(hw, msg, 2);
+
if (netif_running(netdev))
ixgbevf_reinit_locked(adapter);
@@ -3270,8 +3234,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
- struct ixgbevf_adapter *adapter;
- adapter = netdev_priv(dev);
dev->netdev_ops = &ixgbe_netdev_ops;
ixgbevf_set_ethtool_ops(dev);
dev->watchdog_timeo = 5 * HZ;
@@ -3424,10 +3386,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (hw->mac.ops.get_bus_info)
hw->mac.ops.get_bus_info(hw);
-
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
@@ -3436,6 +3394,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
adapter->netdev_registered = true;
+ netif_carrier_off(netdev);
+
ixgbevf_init_last_counter_stats(adapter);
/* print the MAC address */
@@ -3487,10 +3447,9 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
del_timer_sync(&adapter->watchdog_timer);
+ cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
- flush_scheduled_work();
-
if (adapter->netdev_registered) {
unregister_netdev(netdev);
adapter->netdev_registered = false;
@@ -3520,9 +3479,9 @@ static struct pci_driver ixgbevf_driver = {
};
/**
- * ixgbe_init_module - Driver Registration Routine
+ * ixgbevf_init_module - Driver Registration Routine
*
- * ixgbe_init_module is the first routine called when the driver is
+ * ixgbevf_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem.
**/
static int __init ixgbevf_init_module(void)
@@ -3540,9 +3499,9 @@ static int __init ixgbevf_init_module(void)
module_init(ixgbevf_init_module);
/**
- * ixgbe_exit_module - Driver Exit Cleanup Routine
+ * ixgbevf_exit_module - Driver Exit Cleanup Routine
*
- * ixgbe_exit_module is called just before the driver is removed
+ * ixgbevf_exit_module is called just before the driver is removed
* from memory.
**/
static void __exit ixgbevf_exit_module(void)
@@ -3552,7 +3511,7 @@ static void __exit ixgbevf_exit_module(void)
#ifdef DEBUG
/**
- * ixgbe_get_hw_dev_name - return device name string
+ * ixgbevf_get_hw_dev_name - return device name string
* used by hardware layer to print debugging information
**/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
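Two of the ixgbevf_main.c hunks above cooperate to support larger frames on the X540 VF: ixgbevf_change_mtu() posts the new maximum frame size to the PF via an IXGBE_VF_SET_LPE mailbox message, and ixgbevf_up_complete() programs the same length into the RLPML field of each VFRXDCTL register. A small standalone sketch of the register math, using the mask/enable values from defines.h above (the helper name is hypothetical):

#include <stdint.h>

#define ETH_HLEN		14	/* Ethernet header */
#define ETH_FCS_LEN		4	/* frame check sequence */
#define RXDCTL_RLPMLMASK	0x00003FFFu
#define RXDCTL_RLPML_EN		0x00008000u

static uint32_t vf_rxdctl_with_rlpml(uint32_t rxdctl, uint32_t mtu)
{
	uint32_t max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;

	rxdctl &= ~RXDCTL_RLPMLMASK;	/* drop any previously programmed length */
	return rxdctl | max_frame | RXDCTL_RLPML_EN;
}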
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
index 84ac486f4a65..7a8833125770 100644
--- a/drivers/net/ixgbevf/mbx.c
+++ b/drivers/net/ixgbevf/mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
index 8c063bebee7f..b2b5bf5daa3d 100644
--- a/drivers/net/ixgbevf/mbx.h
+++ b/drivers/net/ixgbevf/mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
index 12f75960aec1..189200eeca26 100644
--- a/drivers/net/ixgbevf/regs.h
+++ b/drivers/net/ixgbevf/regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -31,7 +31,7 @@
#define IXGBE_VFCTRL 0x00000
#define IXGBE_VFSTATUS 0x00008
#define IXGBE_VFLINKS 0x00010
-#define IXGBE_VFRTIMER 0x00048
+#define IXGBE_VFFRTIMER 0x00048
#define IXGBE_VFRXMEMWRAP 0x03190
#define IXGBE_VTEICR 0x00100
#define IXGBE_VTEICS 0x00104
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index bfe42c1fcfaf..eecd3bf6833f 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -381,8 +381,12 @@ static struct ixgbe_mac_operations ixgbevf_mac_ops = {
.set_vfta = ixgbevf_set_vfta_vf,
};
-struct ixgbevf_info ixgbevf_vf_info = {
+struct ixgbevf_info ixgbevf_82599_vf_info = {
.mac = ixgbe_mac_82599_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+struct ixgbevf_info ixgbevf_X540_vf_info = {
+ .mac = ixgbe_mac_X540_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 61f9dc831424..23eb114c149f 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -73,6 +73,7 @@ struct ixgbe_mac_operations {
enum ixgbe_mac_type {
ixgbe_mac_unknown = 0,
ixgbe_mac_82599_vf,
+ ixgbe_mac_X540_vf,
ixgbe_num_macs
};
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index c57d9a43ceca..f690474f4409 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -135,7 +135,7 @@ jme_reset_phy_processor(struct jme_adapter *jme)
static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
- u32 *mask, u32 crc, int fnr)
+ const u32 *mask, u32 crc, int fnr)
{
int i;
@@ -161,16 +161,92 @@ jme_setup_wakeup_frame(struct jme_adapter *jme,
}
static inline void
+jme_mac_rxclk_off(struct jme_adapter *jme)
+{
+ jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
+ jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_rxclk_on(struct jme_adapter *jme)
+{
+ jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
+ jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_txclk_off(struct jme_adapter *jme)
+{
+ jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_mac_txclk_on(struct jme_adapter *jme)
+{
+ u32 speed = jme->reg_ghc & GHC_SPEED;
+ if (speed == GHC_SPEED_1000M)
+ jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
+ else
+ jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_ghc_speed(struct jme_adapter *jme)
+{
+ jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_250A2_workaround(struct jme_adapter *jme)
+{
+ jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+ GPREG1_RSSPATCH);
+ jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_assert_ghc_reset(struct jme_adapter *jme)
+{
+ jme->reg_ghc |= GHC_SWRST;
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_clear_ghc_reset(struct jme_adapter *jme)
+{
+ jme->reg_ghc &= ~GHC_SWRST;
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
jme_reset_mac_processor(struct jme_adapter *jme)
{
- u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
+ static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
u32 crc = 0xCDCDCDCD;
u32 gpreg0;
int i;
- jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
- udelay(2);
- jwrite32(jme, JME_GHC, jme->reg_ghc);
+ jme_reset_ghc_speed(jme);
+ jme_reset_250A2_workaround(jme);
+
+ jme_mac_rxclk_on(jme);
+ jme_mac_txclk_on(jme);
+ udelay(1);
+ jme_assert_ghc_reset(jme);
+ udelay(1);
+ jme_mac_rxclk_off(jme);
+ jme_mac_txclk_off(jme);
+ udelay(1);
+ jme_clear_ghc_reset(jme);
+ udelay(1);
+ jme_mac_rxclk_on(jme);
+ jme_mac_txclk_on(jme);
+ udelay(1);
+ jme_mac_rxclk_off(jme);
+ jme_mac_txclk_off(jme);
jwrite32(jme, JME_RXDBA_LO, 0x00000000);
jwrite32(jme, JME_RXDBA_HI, 0x00000000);
@@ -190,14 +266,6 @@ jme_reset_mac_processor(struct jme_adapter *jme)
else
gpreg0 = GPREG0_DEFAULT;
jwrite32(jme, JME_GPREG0, gpreg0);
- jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
-}
-
-static inline void
-jme_reset_ghc_speed(struct jme_adapter *jme)
-{
- jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
- jwrite32(jme, JME_GHC, jme->reg_ghc);
}
static inline void
@@ -336,13 +404,13 @@ jme_linkstat_from_phy(struct jme_adapter *jme)
}
static inline void
-jme_set_phyfifoa(struct jme_adapter *jme)
+jme_set_phyfifo_5level(struct jme_adapter *jme)
{
jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
}
static inline void
-jme_set_phyfifob(struct jme_adapter *jme)
+jme_set_phyfifo_8level(struct jme_adapter *jme)
{
jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
}
@@ -351,7 +419,7 @@ static int
jme_check_link(struct net_device *netdev, int testonly)
{
struct jme_adapter *jme = netdev_priv(netdev);
- u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1;
+ u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
char linkmsg[64];
int rc = 0;
@@ -414,23 +482,21 @@ jme_check_link(struct net_device *netdev, int testonly)
jme->phylink = phylink;
- ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX |
- GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE |
- GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY);
+ /*
+ * The speed/duplex setting of jme->reg_ghc has already been
+ * cleared by jme_reset_mac_processor()
+ */
switch (phylink & PHY_LINK_SPEED_MASK) {
case PHY_LINK_SPEED_10M:
- ghc |= GHC_SPEED_10M |
- GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+ jme->reg_ghc |= GHC_SPEED_10M;
strcat(linkmsg, "10 Mbps, ");
break;
case PHY_LINK_SPEED_100M:
- ghc |= GHC_SPEED_100M |
- GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+ jme->reg_ghc |= GHC_SPEED_100M;
strcat(linkmsg, "100 Mbps, ");
break;
case PHY_LINK_SPEED_1000M:
- ghc |= GHC_SPEED_1000M |
- GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
+ jme->reg_ghc |= GHC_SPEED_1000M;
strcat(linkmsg, "1000 Mbps, ");
break;
default:
@@ -439,42 +505,40 @@ jme_check_link(struct net_device *netdev, int testonly)
if (phylink & PHY_LINK_DUPLEX) {
jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
- ghc |= GHC_DPX;
+ jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
+ jme->reg_ghc |= GHC_DPX;
} else {
jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
TXMCS_BACKOFF |
TXMCS_CARRIERSENSE |
TXMCS_COLLISION);
- jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
- ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
- TXTRHD_TXREN |
- ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
+ jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
}
- gpreg1 = GPREG1_DEFAULT;
+ jwrite32(jme, JME_GHC, jme->reg_ghc);
+
if (is_buggy250(jme->pdev->device, jme->chiprev)) {
+ jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+ GPREG1_RSSPATCH);
if (!(phylink & PHY_LINK_DUPLEX))
- gpreg1 |= GPREG1_HALFMODEPATCH;
+ jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
switch (phylink & PHY_LINK_SPEED_MASK) {
case PHY_LINK_SPEED_10M:
- jme_set_phyfifoa(jme);
- gpreg1 |= GPREG1_RSSPATCH;
+ jme_set_phyfifo_8level(jme);
+ jme->reg_gpreg1 |= GPREG1_RSSPATCH;
break;
case PHY_LINK_SPEED_100M:
- jme_set_phyfifob(jme);
- gpreg1 |= GPREG1_RSSPATCH;
+ jme_set_phyfifo_5level(jme);
+ jme->reg_gpreg1 |= GPREG1_RSSPATCH;
break;
case PHY_LINK_SPEED_1000M:
- jme_set_phyfifoa(jme);
+ jme_set_phyfifo_8level(jme);
break;
default:
break;
}
}
-
- jwrite32(jme, JME_GPREG1, gpreg1);
- jwrite32(jme, JME_GHC, ghc);
- jme->reg_ghc = ghc;
+ jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
"Full-Duplex, " :
@@ -613,10 +677,14 @@ jme_enable_tx_engine(struct jme_adapter *jme)
* Enable TX Engine
*/
wmb();
- jwrite32(jme, JME_TXCS, jme->reg_txcs |
+ jwrite32f(jme, JME_TXCS, jme->reg_txcs |
TXCS_SELECT_QUEUE0 |
TXCS_ENABLE);
+ /*
+ * Start clock for TX MAC Processor
+ */
+ jme_mac_txclk_on(jme);
}
static inline void
@@ -651,6 +719,11 @@ jme_disable_tx_engine(struct jme_adapter *jme)
if (!i)
pr_err("Disable TX engine timeout\n");
+
+ /*
+ * Stop clock for TX MAC Processor
+ */
+ jme_mac_txclk_off(jme);
}
static void
@@ -825,16 +898,22 @@ jme_enable_rx_engine(struct jme_adapter *jme)
/*
* Setup Unicast Filter
*/
+ jme_set_unicastaddr(jme->dev);
jme_set_multi(jme->dev);
/*
* Enable RX Engine
*/
wmb();
- jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+ jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
RXCS_QUEUESEL_Q0 |
RXCS_ENABLE |
RXCS_QST);
+
+ /*
+ * Start clock for RX MAC Processor
+ */
+ jme_mac_rxclk_on(jme);
}
static inline void
@@ -871,10 +950,40 @@ jme_disable_rx_engine(struct jme_adapter *jme)
if (!i)
pr_err("Disable RX engine timeout\n");
+ /*
+ * Stop clock for RX MAC Processor
+ */
+ jme_mac_rxclk_off(jme);
+}
+
+static u16
+jme_udpsum(struct sk_buff *skb)
+{
+ u16 csum = 0xFFFFu;
+
+ if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
+ return csum;
+ if (skb->protocol != htons(ETH_P_IP))
+ return csum;
+ skb_set_network_header(skb, ETH_HLEN);
+ if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
+ (skb->len < (ETH_HLEN +
+ (ip_hdr(skb)->ihl << 2) +
+ sizeof(struct udphdr)))) {
+ skb_reset_network_header(skb);
+ return csum;
+ }
+ skb_set_transport_header(skb,
+ ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+ csum = udp_hdr(skb)->check;
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+
+ return csum;
}
static int
-jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
+jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
{
if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
return false;
@@ -887,7 +996,7 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
}
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
- == RXWBFLAG_UDPON)) {
+ == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
if (flags & RXWBFLAG_IPV4)
netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
return false;
@@ -935,7 +1044,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
skb_put(skb, framesize);
skb->protocol = eth_type_trans(skb, jme->dev);
- if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
+ if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -1207,7 +1316,6 @@ jme_link_change_tasklet(unsigned long arg)
tasklet_disable(&jme->rxempty_task);
if (netif_carrier_ok(netdev)) {
- jme_reset_ghc_speed(jme);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
jme_reset_mac_processor(jme);
@@ -1577,6 +1685,38 @@ jme_free_irq(struct jme_adapter *jme)
}
static inline void
+jme_new_phy_on(struct jme_adapter *jme)
+{
+ u32 reg;
+
+ reg = jread32(jme, JME_PHY_PWR);
+ reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+ PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
+ jwrite32(jme, JME_PHY_PWR, reg);
+
+ pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+ reg &= ~PE1_GPREG0_PBG;
+ reg |= PE1_GPREG0_ENBG;
+ pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
+static inline void
+jme_new_phy_off(struct jme_adapter *jme)
+{
+ u32 reg;
+
+ reg = jread32(jme, JME_PHY_PWR);
+ reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+ PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
+ jwrite32(jme, JME_PHY_PWR, reg);
+
+ pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+ reg &= ~PE1_GPREG0_PBG;
+ reg |= PE1_GPREG0_PDD3COLD;
+ pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
+static inline void
jme_phy_on(struct jme_adapter *jme)
{
u32 bmcr;
@@ -1584,6 +1724,22 @@ jme_phy_on(struct jme_adapter *jme)
bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
bmcr &= ~BMCR_PDOWN;
jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+ if (new_phy_power_ctrl(jme->chip_main_rev))
+ jme_new_phy_on(jme);
+}
+
+static inline void
+jme_phy_off(struct jme_adapter *jme)
+{
+ u32 bmcr;
+
+ bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+ bmcr |= BMCR_PDOWN;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+ if (new_phy_power_ctrl(jme->chip_main_rev))
+ jme_new_phy_off(jme);
}
static int
@@ -1606,12 +1762,11 @@ jme_open(struct net_device *netdev)
jme_start_irq(jme);
- if (test_bit(JME_FLAG_SSET, &jme->flags)) {
- jme_phy_on(jme);
+ jme_phy_on(jme);
+ if (test_bit(JME_FLAG_SSET, &jme->flags))
jme_set_settings(netdev, &jme->old_ecmd);
- } else {
+ else
jme_reset_phy_processor(jme);
- }
jme_reset_link(jme);
@@ -1657,12 +1812,6 @@ jme_wait_link(struct jme_adapter *jme)
}
}
-static inline void
-jme_phy_off(struct jme_adapter *jme)
-{
- jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
-}
-
static void
jme_powersave_phy(struct jme_adapter *jme)
{
@@ -1696,7 +1845,6 @@ jme_close(struct net_device *netdev)
tasklet_disable(&jme->rxclean_task);
tasklet_disable(&jme->rxempty_task);
- jme_reset_ghc_speed(jme);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
jme_reset_mac_processor(jme);
@@ -1993,27 +2141,34 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static void
+jme_set_unicastaddr(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ u32 val;
+
+ val = (netdev->dev_addr[3] & 0xff) << 24 |
+ (netdev->dev_addr[2] & 0xff) << 16 |
+ (netdev->dev_addr[1] & 0xff) << 8 |
+ (netdev->dev_addr[0] & 0xff);
+ jwrite32(jme, JME_RXUMA_LO, val);
+ val = (netdev->dev_addr[5] & 0xff) << 8 |
+ (netdev->dev_addr[4] & 0xff);
+ jwrite32(jme, JME_RXUMA_HI, val);
+}
+
static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
struct jme_adapter *jme = netdev_priv(netdev);
struct sockaddr *addr = p;
- u32 val;
if (netif_running(netdev))
return -EBUSY;
spin_lock_bh(&jme->macaddr_lock);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- val = (addr->sa_data[3] & 0xff) << 24 |
- (addr->sa_data[2] & 0xff) << 16 |
- (addr->sa_data[1] & 0xff) << 8 |
- (addr->sa_data[0] & 0xff);
- jwrite32(jme, JME_RXUMA_LO, val);
- val = (addr->sa_data[5] & 0xff) << 8 |
- (addr->sa_data[4] & 0xff);
- jwrite32(jme, JME_RXUMA_HI, val);
+ jme_set_unicastaddr(netdev);
spin_unlock_bh(&jme->macaddr_lock);
return 0;
@@ -2076,12 +2231,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
}
if (new_mtu > 1900) {
- netdev->features &= ~(NETIF_F_HW_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6);
+ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6);
} else {
if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
- netdev->features |= NETIF_F_HW_CSUM;
+ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
if (test_bit(JME_FLAG_TSO, &jme->flags))
netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
}
@@ -2514,10 +2668,12 @@ jme_set_tx_csum(struct net_device *netdev, u32 on)
if (on) {
set_bit(JME_FLAG_TXCSUM, &jme->flags);
if (netdev->mtu <= 1900)
- netdev->features |= NETIF_F_HW_CSUM;
+ netdev->features |=
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
} else {
clear_bit(JME_FLAG_TXCSUM, &jme->flags);
- netdev->features &= ~NETIF_F_HW_CSUM;
+ netdev->features &=
+ ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
}
return 0;
@@ -2730,6 +2886,8 @@ jme_check_hw_ver(struct jme_adapter *jme)
jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
+ jme->chip_main_rev = jme->chiprev & 0xF;
+ jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
}
static const struct net_device_ops jme_netdev_ops = {
@@ -2797,7 +2955,8 @@ jme_init_one(struct pci_dev *pdev,
netdev->netdev_ops = &jme_netdev_ops;
netdev->ethtool_ops = &jme_ethtool_ops;
netdev->watchdog_timeo = TX_TIMEOUT;
- netdev->features = NETIF_F_HW_CSUM |
+ netdev->features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
NETIF_F_SG |
NETIF_F_TSO |
NETIF_F_TSO6 |
@@ -2878,6 +3037,7 @@ jme_init_one(struct pci_dev *pdev,
jme->reg_rxmcs = RXMCS_DEFAULT;
jme->reg_txpfc = 0;
jme->reg_pmcs = PMCS_MFEN;
+ jme->reg_gpreg1 = GPREG1_DEFAULT;
set_bit(JME_FLAG_TXCSUM, &jme->flags);
set_bit(JME_FLAG_TSO, &jme->flags);
@@ -2934,8 +3094,8 @@ jme_init_one(struct pci_dev *pdev,
jme->mii_if.mdio_write = jme_mdio_write;
jme_clear_pm(jme);
- jme_set_phyfifoa(jme);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
+ jme_set_phyfifo_5level(jme);
+ jme->pcirev = pdev->revision;
if (!jme->fpgaver)
jme_phy_init(jme);
jme_phy_off(jme);
@@ -2962,14 +3122,14 @@ jme_init_one(struct pci_dev *pdev,
goto err_out_unmap;
}
- netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
+ netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
(jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
"JMC250 Gigabit Ethernet" :
(jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
"JMC260 Fast Ethernet" : "Unknown",
(jme->fpgaver != 0) ? " (FPGA)" : "",
(jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
- jme->rev, netdev->dev_addr);
+ jme->pcirev, netdev->dev_addr);
return 0;
@@ -3033,7 +3193,6 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
jme_polling_mode(jme);
jme_stop_pcc_timer(jme);
- jme_reset_ghc_speed(jme);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
jme_reset_mac_processor(jme);
@@ -3064,12 +3223,11 @@ jme_resume(struct pci_dev *pdev)
jme_clear_pm(jme);
pci_restore_state(pdev);
- if (test_bit(JME_FLAG_SSET, &jme->flags)) {
- jme_phy_on(jme);
+ jme_phy_on(jme);
+ if (test_bit(JME_FLAG_SSET, &jme->flags))
jme_set_settings(netdev, &jme->old_ecmd);
- } else {
+ else
jme_reset_phy_processor(jme);
- }
jme_start_irq(jme);
netif_device_attach(netdev);
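The new jme_set_unicastaddr() above packs the six station address bytes into the two RXUMA registers; the byte ordering is easy to get wrong, so here is the same packing as a standalone, testable helper (hypothetical name, not part of the patch):

#include <stdint.h>

/* Bytes 0..3 go into the low word (byte 0 in bits 7:0) and bytes 4..5 into
 * the high word, matching the jwrite32(..., JME_RXUMA_LO/HI, ...) calls. */
static void jme_pack_ucast(const uint8_t addr[6], uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)addr[3] << 24 | (uint32_t)addr[2] << 16 |
	      (uint32_t)addr[1] << 8  | addr[0];
	*hi = (uint32_t)addr[5] << 8  | addr[4];
}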
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index eac09264bf2a..8bf30451e821 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -26,7 +26,7 @@
#define __JME_H_INCLUDED__
#define DRV_NAME "jme"
-#define DRV_VERSION "1.0.7"
+#define DRV_VERSION "1.0.8"
#define PFX DRV_NAME ": "
#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
@@ -103,6 +103,37 @@ enum jme_spi_op_bits {
#define HALF_US 500 /* 500 ns */
#define JMESPIIOCTL SIOCDEVPRIVATE
+#define PCI_PRIV_PE1 0xE4
+
+enum pci_priv_pe1_bit_masks {
+ PE1_ASPMSUPRT = 0x00000003, /*
+ * RW:
+ * Aspm_support[1:0]
+ * (R/W Port of 5C[11:10])
+ */
+ PE1_MULTIFUN = 0x00000004, /* RW: Multi_fun_bit */
+ PE1_RDYDMA = 0x00000008, /* RO: ~link.rdy_for_dma */
+ PE1_ASPMOPTL = 0x00000030, /* RW: link.rx10s_option[1:0] */
+ PE1_ASPMOPTH = 0x000000C0, /* RW: 10_req=[3]?HW:[2] */
+ PE1_GPREG0 = 0x0000FF00, /*
+ * SRW:
+ * Cfg_gp_reg0
+ * [7:6] phy_giga BG control
+ * [5] CREQ_N as CREQ_N1 (CPPE# as CREQ#)
+ * [4:0] Reserved
+ */
+ PE1_GPREG0_PBG = 0x0000C000, /* phy_giga BG control */
+ PE1_GPREG1 = 0x00FF0000, /* RW: Cfg_gp_reg1 */
+ PE1_REVID = 0xFF000000, /* RO: Rev ID */
+};
+
+enum pci_priv_pe1_values {
+ PE1_GPREG0_ENBG = 0x00000000, /* en BG */
+ PE1_GPREG0_PDD3COLD = 0x00004000, /* giga_PD + d3cold */
+ PE1_GPREG0_PDPCIESD = 0x00008000, /* giga_PD + pcie_shutdown */
+ PE1_GPREG0_PDPCIEIDDQ = 0x0000C000, /* giga_PD + pcie_iddq */
+};
+
/*
* Dynamic(adaptive)/Static PCC values
*/
@@ -403,6 +434,7 @@ struct jme_adapter {
u32 reg_rxmcs;
u32 reg_ghc;
u32 reg_pmcs;
+ u32 reg_gpreg1;
u32 phylink;
u32 tx_ring_size;
u32 tx_ring_mask;
@@ -411,8 +443,10 @@ struct jme_adapter {
u32 rx_ring_mask;
u8 mrrs;
unsigned int fpgaver;
- unsigned int chiprev;
- u8 rev;
+ u8 chiprev;
+ u8 chip_main_rev;
+ u8 chip_sub_rev;
+ u8 pcirev;
u32 msg_enable;
struct ethtool_cmd old_ecmd;
unsigned int old_mtu;
@@ -497,6 +531,7 @@ enum jme_iomap_regs {
JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */
+ JME_PHY_PWR = JME_PHY | 0x24, /* New PHY Power Ctrl Register */
JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */
JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */
JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */
@@ -624,6 +659,14 @@ enum jme_txtrhd_shifts {
TXTRHD_TXRL_SHIFT = 0,
};
+enum jme_txtrhd_values {
+ TXTRHD_FULLDUPLEX = 0x00000000,
+ TXTRHD_HALFDUPLEX = TXTRHD_TXPEN |
+ ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
+ TXTRHD_TXREN |
+ ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL),
+};
+
/*
* RX Control/Status Bits
*/
@@ -779,6 +822,8 @@ static inline u32 smi_phy_addr(int x)
*/
enum jme_ghc_bit_mask {
GHC_SWRST = 0x40000000,
+ GHC_TO_CLK_SRC = 0x00C00000,
+ GHC_TXMAC_CLK_SRC = 0x00300000,
GHC_DPX = 0x00000040,
GHC_SPEED = 0x00000030,
GHC_LINK_POLL = 0x00000001,
@@ -833,6 +878,21 @@ enum jme_pmcs_bit_masks {
};
/*
+ * New PHY Power Control Register
+ */
+enum jme_phy_pwr_bit_masks {
+ PHY_PWR_DWN1SEL = 0x01000000, /* Phy_giga.p_PWR_DOWN1_SEL */
+ PHY_PWR_DWN1SW = 0x02000000, /* Phy_giga.p_PWR_DOWN1_SW */
+ PHY_PWR_DWN2 = 0x04000000, /* Phy_giga.p_PWR_DOWN2 */
+ PHY_PWR_CLKSEL = 0x08000000, /*
+ * XTL_OUT Clock select
+ * (an internal free-running clock)
+ * 0: xtl_out = phy_giga.A_XTL25_O
+ * 1: xtl_out = phy_giga.PD_OSC
+ */
+};
+
+/*
* Giga PHY Status Registers
*/
enum jme_phy_link_bit_mask {
@@ -942,18 +1002,17 @@ enum jme_gpreg0_vals {
/*
* General Purpose REG-1
- * Note: All theses bits defined here are for
- * Chip mode revision 0x11 only
*/
-enum jme_gpreg1_masks {
+enum jme_gpreg1_bit_masks {
+ GPREG1_RXCLKOFF = 0x04000000,
+ GPREG1_PCREQN = 0x00020000,
+ GPREG1_HALFMODEPATCH = 0x00000040, /* For Chip revision 0x11 only */
+ GPREG1_RSSPATCH = 0x00000020, /* For Chip revision 0x11 only */
GPREG1_INTRDELAYUNIT = 0x00000018,
GPREG1_INTRDELAYENABLE = 0x00000007,
};
enum jme_gpreg1_vals {
- GPREG1_RSSPATCH = 0x00000040,
- GPREG1_HALFMODEPATCH = 0x00000020,
-
GPREG1_INTDLYUNIT_16NS = 0x00000000,
GPREG1_INTDLYUNIT_256NS = 0x00000008,
GPREG1_INTDLYUNIT_1US = 0x00000010,
@@ -967,7 +1026,7 @@ enum jme_gpreg1_vals {
GPREG1_INTDLYEN_6U = 0x00000006,
GPREG1_INTDLYEN_7U = 0x00000007,
- GPREG1_DEFAULT = 0x00000000,
+ GPREG1_DEFAULT = GPREG1_PCREQN,
};
/*
@@ -1184,16 +1243,22 @@ enum jme_phy_reg17_vals {
/*
* Workaround
*/
-static inline int is_buggy250(unsigned short device, unsigned int chiprev)
+static inline int is_buggy250(unsigned short device, u8 chiprev)
{
return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
}
+static inline int new_phy_power_ctrl(u8 chip_main_rev)
+{
+ return chip_main_rev >= 5;
+}
+
/*
* Function prototypes
*/
static int jme_set_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd);
+static void jme_set_unicastaddr(struct net_device *netdev);
static void jme_set_multi(struct net_device *netdev);
#endif
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 51919fcd50c2..0fa4a9887ba2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -1545,6 +1545,37 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
/* driver bus management functions */
+#ifdef CONFIG_PM
+static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
+{
+ struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+ struct net_device *dev = ks->netdev;
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ ks8851_net_stop(dev);
+ }
+
+ return 0;
+}
+
+static int ks8851_resume(struct spi_device *spi)
+{
+ struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+ struct net_device *dev = ks->netdev;
+
+ if (netif_running(dev)) {
+ ks8851_net_open(dev);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+#else
+#define ks8851_suspend NULL
+#define ks8851_resume NULL
+#endif
+
static int __devinit ks8851_probe(struct spi_device *spi)
{
struct net_device *ndev;
@@ -1679,6 +1710,8 @@ static struct spi_driver ks8851_driver = {
},
.probe = ks8851_probe,
.remove = __devexit_p(ks8851_remove),
+ .suspend = ks8851_suspend,
+ .resume = ks8851_resume,
};
static int __init ks8851_init(void)
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 37504a398906..540a8dcbcc46 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -3570,7 +3570,7 @@ static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
* This routine is used to program Wake-on-LAN pattern.
*/
static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
- u8 *mask, uint frame_size, u8 *pattern)
+ const u8 *mask, uint frame_size, const u8 *pattern)
{
int bits;
int from;
@@ -3626,9 +3626,9 @@ static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
*
* This routine is used to add ARP pattern for waking up the host.
*/
-static void hw_add_wol_arp(struct ksz_hw *hw, u8 *ip_addr)
+static void hw_add_wol_arp(struct ksz_hw *hw, const u8 *ip_addr)
{
- u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
+ static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
u8 pattern[42] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -3651,8 +3651,8 @@ static void hw_add_wol_arp(struct ksz_hw *hw, u8 *ip_addr)
*/
static void hw_add_wol_bcast(struct ksz_hw *hw)
{
- u8 mask[] = { 0x3F };
- u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ static const u8 mask[] = { 0x3F };
+ static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
}
@@ -3669,7 +3669,7 @@ static void hw_add_wol_bcast(struct ksz_hw *hw)
*/
static void hw_add_wol_mcast(struct ksz_hw *hw)
{
- u8 mask[] = { 0x3F };
+ static const u8 mask[] = { 0x3F };
u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };
memcpy(&pattern[3], &hw->override_addr[3], 3);
@@ -3687,7 +3687,7 @@ static void hw_add_wol_mcast(struct ksz_hw *hw)
*/
static void hw_add_wol_ucast(struct ksz_hw *hw)
{
- u8 mask[] = { 0x3F };
+ static const u8 mask[] = { 0x3F };
hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
}
@@ -3700,7 +3700,7 @@ static void hw_add_wol_ucast(struct ksz_hw *hw)
*
* This routine is used to enable Wake-on-LAN depending on driver settings.
*/
-static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, u8 *net_addr)
+static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, const u8 *net_addr)
{
hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
@@ -6208,7 +6208,7 @@ static int netdev_set_wol(struct net_device *dev,
struct dev_info *hw_priv = priv->adapter;
/* Need to find a way to retrieve the device IP address. */
- u8 net_addr[] = { 192, 168, 1, 1 };
+ static const u8 net_addr[] = { 192, 168, 1, 1 };
if (wol->wolopts & ~hw_priv->wol_support)
return -EINVAL;
@@ -6953,7 +6953,7 @@ static void read_other_addr(struct ksz_hw *hw)
#define PCI_VENDOR_ID_MICREL_KS 0x16c6
#endif
-static int __init pcidev_init(struct pci_dev *pdev,
+static int __devinit pcidev_init(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct net_device *dev;
@@ -7241,7 +7241,7 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
struct ksz_hw *hw = &hw_priv->hw;
/* Need to find a way to retrieve the device IP address. */
- u8 net_addr[] = { 192, 168, 1, 1 };
+ static const u8 net_addr[] = { 192, 168, 1, 1 };
for (i = 0; i < hw->dev_count; i++) {
if (info->netdev[i]) {
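The recurring `static const` change in the ksz884x hunks above has one rationale: without `static`, each call rebuilds the mask/pattern array on the stack, while `static const` gives a single read-only copy (normally placed in .rodata) that the new `const u8 *` parameters can accept directly. A minimal illustration, where use_mask() is a hypothetical consumer standing in for hw_set_wol_frame():

	static void before(void)
	{
		u8 mask[] = { 0x3F };		/* rebuilt on the stack every call */
		use_mask(mask);
	}

	static void after(void)
	{
		static const u8 mask[] = { 0x3F };	/* one shared read-only copy */
		use_mask(mask);				/* callee now takes const u8 * */
	}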
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index f06296bfe293..02336edce748 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -207,7 +207,7 @@ tx_full and tbusy flags.
#define LANCE_BUS_IF 0x16
#define LANCE_TOTAL_SIZE 0x18
-#define TX_TIMEOUT 20
+#define TX_TIMEOUT (HZ/5)
/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index c27f4291b350..9e042894479b 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -161,7 +161,7 @@ enum commands {
#define RX_SUSPEND 0x0030
#define RX_ABORT 0x0040
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
struct i596_reg {
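The two TX_TIMEOUT changes above (lance.c and lib82596.c) share one rationale: dev->watchdog_timeo is counted in jiffies, so a bare constant means a fixed number of timer ticks and its wall-clock length changes with CONFIG_HZ. Scaling by HZ pins the timeout to real time; a sketch of the arithmetic:

	/* watchdog_timeo is in jiffies; HZ is ticks per second.            */
	/* Old lance.c value: 20 jiffies = 200 ms at HZ=100, 20 ms at HZ=1000 */
	#define TX_TIMEOUT	(HZ / 5)	/* ~200 ms regardless of CONFIG_HZ */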
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index e7030ceb178b..da74db4a03d4 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -203,7 +203,7 @@ static void __NS8390_init(struct net_device *dev, int startp);
static int __ei_open(struct net_device *dev)
{
unsigned long flags;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (dev->watchdog_timeo <= 0)
dev->watchdog_timeo = TX_TIMEOUT;
@@ -231,7 +231,7 @@ static int __ei_open(struct net_device *dev)
*/
static int __ei_close(struct net_device *dev)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long flags;
/*
@@ -256,7 +256,7 @@ static int __ei_close(struct net_device *dev)
static void __ei_tx_timeout(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
@@ -303,7 +303,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int send_length = skb->len, output_page;
unsigned long flags;
char buf[ETH_ZLEN];
@@ -592,7 +592,7 @@ static void ei_tx_err(struct net_device *dev)
static void ei_tx_intr(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int status = ei_inb(e8390_base + EN0_TSR);
ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
@@ -675,7 +675,7 @@ static void ei_tx_intr(struct net_device *dev)
static void ei_receive(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned char rxing_page, this_frame, next_frame;
unsigned short current_offset;
int rx_pkt_count = 0;
@@ -879,7 +879,7 @@ static void ei_rx_overrun(struct net_device *dev)
static struct net_device_stats *__ei_get_stats(struct net_device *dev)
{
unsigned long ioaddr = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long flags;
/* If the card is stopped, just return the present stats. */
@@ -927,7 +927,7 @@ static void do_set_multicast_list(struct net_device *dev)
{
unsigned long e8390_base = dev->base_addr;
int i;
- struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
{
@@ -981,7 +981,7 @@ static void do_set_multicast_list(struct net_device *dev)
static void __ei_set_multicast_list(struct net_device *dev)
{
unsigned long flags;
- struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
spin_lock_irqsave(&ei_local->page_lock, flags);
do_set_multicast_list(dev);
@@ -998,7 +998,7 @@ static void __ei_set_multicast_list(struct net_device *dev)
static void ethdev_setup(struct net_device *dev)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (ei_debug > 1)
printk(version);
@@ -1036,7 +1036,7 @@ static struct net_device *____alloc_ei_netdev(int size)
static void __NS8390_init(struct net_device *dev, int startp)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int i;
int endcfg = ei_local->word16
? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
@@ -1099,7 +1099,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
int start_page)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
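Every lib8390.c change above is the same cleanup: netdev_priv() already returns void *, so the explicit (struct ei_device *) cast adds nothing. Sketch, with the prototype as assumed from include/linux/netdevice.h:

	/* assumed prototype: */
	void *netdev_priv(const struct net_device *dev);

	/* void * converts implicitly in C, so no cast is needed: */
	struct ei_device *ei_local = netdev_priv(dev);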
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 9f8e7027b0b3..b7948ccfcf7d 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -238,7 +238,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
goto out;
}
/* allocate the tx and rx ring buffer descriptors. */
- /* returns a virtual addres and a physical address. */
+ /* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p, GFP_KERNEL);
@@ -692,7 +692,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- unsigned int csum_start_off = skb_transport_offset(skb);
+ unsigned int csum_start_off = skb_checksum_start_offset(skb);
unsigned int csum_index_off = csum_start_off + skb->csum_offset;
cur_p->app0 |= 1; /* TX Checksum Enabled */
@@ -952,8 +952,7 @@ static const struct attribute_group temac_attr_group = {
.attrs = temac_device_attrs,
};
-static int __init
-temac_of_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit temac_of_probe(struct platform_device *op)
{
struct device_node *np;
struct temac_local *lp;
@@ -1123,7 +1122,7 @@ static struct of_device_id temac_of_match[] __devinitdata = {
};
MODULE_DEVICE_TABLE(of, temac_of_match);
-static struct of_platform_driver temac_of_driver = {
+static struct platform_driver temac_of_driver = {
.probe = temac_of_probe,
.remove = __devexit_p(temac_of_remove),
.driver = {
@@ -1135,13 +1134,13 @@ static struct of_platform_driver temac_of_driver = {
static int __init temac_init(void)
{
- return of_register_platform_driver(&temac_of_driver);
+ return platform_driver_register(&temac_of_driver);
}
module_init(temac_init);
static void __exit temac_exit(void)
{
- of_unregister_platform_driver(&temac_of_driver);
+ platform_driver_unregister(&temac_of_driver);
}
module_exit(temac_exit);
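The temac checksum hunk above is one of several in this series (macvtap.c and myri10ge.c below do the same) that switch to skb_checksum_start_offset(). The macvtap hunk shows the open-coded expression being replaced, so the helper is presumably equivalent to the sketch below; the point of the change is that skb_transport_offset() is not the right starting offset when the checksum start differs from the transport header.

	/* Presumed definition, matching the open-coded form removed in macvtap.c: */
	static inline unsigned int skb_checksum_start_offset(const struct sk_buff *skb)
	{
		return skb->csum_start - skb_headroom(skb);
	}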
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2d9663a1c54d..ea0dc451da9c 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -129,10 +129,6 @@ static u32 always_on(struct net_device *dev)
static const struct ethtool_ops loopback_ethtool_ops = {
.get_link = always_on,
- .set_tso = ethtool_op_set_tso,
- .get_tx_csum = always_on,
- .get_sg = always_on,
- .get_rx_csum = always_on,
};
static int loopback_dev_init(struct net_device *dev)
@@ -169,9 +165,12 @@ static void loopback_setup(struct net_device *dev)
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
- | NETIF_F_TSO
+ | NETIF_F_ALL_TSO
+ | NETIF_F_UFO
| NETIF_F_NO_CSUM
+ | NETIF_F_RXCSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
| NETIF_F_NETNS_LOCAL;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f69e73e2191e..79ccb54ab00c 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp)
for (i = 0; i < PHY_MAX_ADDR; i++)
bp->mii_bus->irq[i] = PHY_POLL;
- platform_set_drvdata(bp->dev, bp->mii_bus);
+ dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
if (mdiobus_register(bp->mii_bus))
goto err_out_free_mdio_irq;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0fc9dc7f20db..5b37d3c191e4 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -38,6 +38,7 @@ struct macvlan_port {
struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
struct list_head vlans;
struct rcu_head rcu;
+ bool passthru;
};
#define macvlan_port_get_rcu(dev) \
@@ -151,9 +152,10 @@ static void macvlan_broadcast(struct sk_buff *skb,
}
/* called under rcu_read_lock() from netif_receive_skb */
-static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
+static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
{
struct macvlan_port *port;
+ struct sk_buff *skb = *pskb;
const struct ethhdr *eth = eth_hdr(skb);
const struct macvlan_dev *vlan;
const struct macvlan_dev *src;
@@ -169,6 +171,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
macvlan_broadcast(skb, port, NULL,
MACVLAN_MODE_PRIVATE |
MACVLAN_MODE_VEPA |
+ MACVLAN_MODE_PASSTHRU|
MACVLAN_MODE_BRIDGE);
else if (src->mode == MACVLAN_MODE_VEPA)
/* flood to everyone except source */
@@ -182,17 +185,20 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
*/
macvlan_broadcast(skb, port, src->dev,
MACVLAN_MODE_VEPA);
- return skb;
+ return RX_HANDLER_PASS;
}
- vlan = macvlan_hash_lookup(port, eth->h_dest);
+ if (port->passthru)
+ vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
+ else
+ vlan = macvlan_hash_lookup(port, eth->h_dest);
if (vlan == NULL)
- return skb;
+ return RX_HANDLER_PASS;
dev = vlan->dev;
if (unlikely(!(dev->flags & IFF_UP))) {
kfree_skb(skb);
- return NULL;
+ return RX_HANDLER_CONSUMED;
}
len = skb->len + ETH_HLEN;
skb = skb_share_check(skb, GFP_ATOMIC);
@@ -206,7 +212,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
out:
macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
- return NULL;
+ return RX_HANDLER_CONSUMED;
}
static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -214,9 +220,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
const struct macvlan_dev *vlan = netdev_priv(dev);
const struct macvlan_port *port = vlan->port;
const struct macvlan_dev *dest;
+ __u8 ip_summed = skb->ip_summed;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
const struct ethhdr *eth = (void *)skb->data;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
/* send to other bridge ports directly */
if (is_multicast_ether_addr(eth->h_dest)) {
@@ -236,6 +244,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
}
xmit_world:
+ skb->ip_summed = ip_summed;
skb_set_dev(skb, vlan->lowerdev);
return dev_queue_xmit(skb);
}
@@ -243,18 +252,22 @@ xmit_world:
netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- int i = skb_get_queue_mapping(skb);
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
unsigned int len = skb->len;
int ret;
+ const struct macvlan_dev *vlan = netdev_priv(dev);
ret = macvlan_queue_xmit(skb, dev);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- txq->tx_packets++;
- txq->tx_bytes += len;
- } else
- txq->tx_dropped++;
+ struct macvlan_pcpu_stats *pcpu_stats;
+ pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(vlan->pcpu_stats->tx_dropped);
+ }
return ret;
}
EXPORT_SYMBOL_GPL(macvlan_start_xmit);
@@ -284,6 +297,11 @@ static int macvlan_open(struct net_device *dev)
struct net_device *lowerdev = vlan->lowerdev;
int err;
+ if (vlan->port->passthru) {
+ dev_set_promiscuity(lowerdev, 1);
+ goto hash_add;
+ }
+
err = -EBUSY;
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
@@ -296,6 +314,8 @@ static int macvlan_open(struct net_device *dev)
if (err < 0)
goto del_unicast;
}
+
+hash_add:
macvlan_hash_add(vlan);
return 0;
@@ -310,12 +330,18 @@ static int macvlan_stop(struct net_device *dev)
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
+ if (vlan->port->passthru) {
+ dev_set_promiscuity(lowerdev, -1);
+ goto hash_del;
+ }
+
dev_mc_unsync(lowerdev, dev);
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, -1);
dev_uc_del(lowerdev, dev->dev_addr);
+hash_del:
macvlan_hash_del(vlan);
return 0;
}
@@ -414,14 +440,15 @@ static int macvlan_init(struct net_device *dev)
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
dev->features = lowerdev->features & MACVLAN_FEATURES;
+ dev->features |= NETIF_F_LLTX;
dev->gso_max_size = lowerdev->gso_max_size;
dev->iflink = lowerdev->ifindex;
dev->hard_header_len = lowerdev->hard_header_len;
macvlan_set_lockdep_class(dev);
- vlan->rx_stats = alloc_percpu(struct macvlan_rx_stats);
- if (!vlan->rx_stats)
+ vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
+ if (!vlan->pcpu_stats)
return -ENOMEM;
return 0;
@@ -431,7 +458,7 @@ static void macvlan_uninit(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- free_percpu(vlan->rx_stats);
+ free_percpu(vlan->pcpu_stats);
}
static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
@@ -439,33 +466,38 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
{
struct macvlan_dev *vlan = netdev_priv(dev);
- dev_txq_stats_fold(dev, stats);
-
- if (vlan->rx_stats) {
- struct macvlan_rx_stats *p, accum = {0};
- u64 rx_packets, rx_bytes, rx_multicast;
+ if (vlan->pcpu_stats) {
+ struct macvlan_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
+ u32 rx_errors = 0, tx_dropped = 0;
unsigned int start;
int i;
for_each_possible_cpu(i) {
- p = per_cpu_ptr(vlan->rx_stats, i);
+ p = per_cpu_ptr(vlan->pcpu_stats, i);
do {
start = u64_stats_fetch_begin_bh(&p->syncp);
rx_packets = p->rx_packets;
rx_bytes = p->rx_bytes;
rx_multicast = p->rx_multicast;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
} while (u64_stats_fetch_retry_bh(&p->syncp, start));
- accum.rx_packets += rx_packets;
- accum.rx_bytes += rx_bytes;
- accum.rx_multicast += rx_multicast;
- /* rx_errors is an ulong, updated without syncp protection */
- accum.rx_errors += p->rx_errors;
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->multicast += rx_multicast;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /* rx_errors & tx_dropped are u32, updated
+ * without syncp protection.
+ */
+ rx_errors += p->rx_errors;
+ tx_dropped += p->tx_dropped;
}
- stats->rx_packets = accum.rx_packets;
- stats->rx_bytes = accum.rx_bytes;
- stats->rx_errors = accum.rx_errors;
- stats->rx_dropped = accum.rx_errors;
- stats->multicast = accum.rx_multicast;
+ stats->rx_errors = rx_errors;
+ stats->rx_dropped = rx_errors;
+ stats->tx_dropped = tx_dropped;
}
return stats;
}
@@ -549,6 +581,7 @@ static int macvlan_port_create(struct net_device *dev)
if (port == NULL)
return -ENOMEM;
+ port->passthru = false;
port->dev = dev;
INIT_LIST_HEAD(&port->vlans);
for (i = 0; i < MACVLAN_HASH_SIZE; i++)
@@ -593,6 +626,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
case MACVLAN_MODE_PRIVATE:
case MACVLAN_MODE_VEPA:
case MACVLAN_MODE_BRIDGE:
+ case MACVLAN_MODE_PASSTHRU:
break;
default:
return -EINVAL;
@@ -601,25 +635,6 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
-static int macvlan_get_tx_queues(struct net *net,
- struct nlattr *tb[],
- unsigned int *num_tx_queues,
- unsigned int *real_num_tx_queues)
-{
- struct net_device *real_dev;
-
- if (!tb[IFLA_LINK])
- return -EINVAL;
-
- real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
- if (!real_dev)
- return -ENODEV;
-
- *num_tx_queues = real_dev->num_tx_queues;
- *real_num_tx_queues = real_dev->real_num_tx_queues;
- return 0;
-}
-
int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
int (*receive)(struct sk_buff *skb),
@@ -661,6 +676,10 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
}
port = macvlan_port_get(lowerdev);
+ /* Only 1 macvlan device can be created in passthru mode */
+ if (port->passthru)
+ return -EINVAL;
+
vlan->lowerdev = lowerdev;
vlan->dev = dev;
vlan->port = port;
@@ -671,6 +690,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
if (data && data[IFLA_MACVLAN_MODE])
vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+ if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
+ if (!list_empty(&port->vlans))
+ return -EINVAL;
+ port->passthru = true;
+ memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
+ }
+
err = register_netdevice(dev);
if (err < 0)
goto destroy_port;
@@ -743,7 +769,6 @@ int macvlan_link_register(struct rtnl_link_ops *ops)
{
/* common fields */
ops->priv_size = sizeof(struct macvlan_dev);
- ops->get_tx_queues = macvlan_get_tx_queues;
ops->validate = macvlan_validate;
ops->maxtype = IFLA_MACVLAN_MAX;
ops->policy = macvlan_policy;
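The stats rework in the macvlan hunks above replaces per-txq counters with a per-CPU structure guarded by a u64_stats_sync. Its definition lives in a header not shown in this diff; from the fields accessed here it presumably looks roughly like:

	/* Presumed layout of struct macvlan_pcpu_stats, inferred from the
	 * accesses above; the authoritative definition is in if_macvlan.h.
	 */
	struct macvlan_pcpu_stats {
		u64			rx_packets;
		u64			rx_bytes;
		u64			rx_multicast;
		u64			tx_packets;
		u64			tx_bytes;
		struct u64_stats_sync	syncp;
		u32			rx_errors;	/* updated without syncp */
		u32			tx_dropped;	/* updated without syncp */
	};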
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 42567279843e..6696e56e6320 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -39,7 +39,7 @@ struct macvtap_queue {
struct socket sock;
struct socket_wq wq;
int vnet_hdr_sz;
- struct macvlan_dev *vlan;
+ struct macvlan_dev __rcu *vlan;
struct file *file;
unsigned int flags;
};
@@ -141,7 +141,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
struct macvlan_dev *vlan;
spin_lock(&macvtap_lock);
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_protected(q->vlan,
+ lockdep_is_held(&macvtap_lock));
if (vlan) {
int index = get_slot(vlan, q);
@@ -219,7 +220,8 @@ static void macvtap_del_queues(struct net_device *dev)
/* macvtap_put_queue can free some slots, so go through all slots */
spin_lock(&macvtap_lock);
for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
- q = rcu_dereference(vlan->taps[i]);
+ q = rcu_dereference_protected(vlan->taps[i],
+ lockdep_is_held(&macvtap_lock));
if (q) {
qlist[j++] = q;
rcu_assign_pointer(vlan->taps[i], NULL);
@@ -504,8 +506,7 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- vnet_hdr->csum_start = skb->csum_start -
- skb_headroom(skb);
+ vnet_hdr->csum_start = skb_checksum_start_offset(skb);
vnet_hdr->csum_offset = skb->csum_offset;
} /* else everything is zero */
@@ -529,8 +530,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
vnet_hdr_len = q->vnet_hdr_sz;
err = -EINVAL;
- if ((len -= vnet_hdr_len) < 0)
+ if (len < vnet_hdr_len)
goto err;
+ len -= vnet_hdr_len;
err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
sizeof(vnet_hdr));
@@ -570,7 +572,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
}
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
macvlan_start_xmit(skb, vlan->dev);
else
@@ -584,9 +586,9 @@ err_kfree:
err:
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
- netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
+ vlan->dev->stats.tx_dropped++;
rcu_read_unlock_bh();
return err;
@@ -632,7 +634,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
macvlan_count_rx(vlan, len, ret == 0, 0);
rcu_read_unlock_bh();
@@ -728,7 +730,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
case TUNGETIFF:
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
dev_hold(vlan->dev);
rcu_read_unlock_bh();
@@ -737,7 +739,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
return -ENOLINK;
ret = 0;
- if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
+ if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
put_user(q->flags, &ifr->ifr_flags))
ret = -EFAULT;
dev_put(vlan->dev);
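The macvtap changes above annotate q->vlan as __rcu and split its accessors by context: update-side code holding macvtap_lock uses rcu_dereference_protected() with a lockdep expression, while readers inside a BH RCU section use rcu_dereference_bh(). A condensed sketch of the two paths, with field and lock names as in the driver:

	/* update side: the spinlock, not RCU, makes this access safe */
	spin_lock(&macvtap_lock);
	vlan = rcu_dereference_protected(q->vlan,
					 lockdep_is_held(&macvtap_lock));
	spin_unlock(&macvtap_lock);

	/* read side: BH-disabled RCU read-side critical section */
	rcu_read_lock_bh();
	vlan = rcu_dereference_bh(q->vlan);
	rcu_read_unlock_bh();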
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 210b2b164b30..0a6c6a2e7550 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -354,7 +354,7 @@ unsigned int mii_check_media (struct mii_if_info *mii,
if (!new_carrier) {
netif_carrier_off(mii->dev);
if (ok_to_print)
- printk(KERN_INFO "%s: link down\n", mii->dev->name);
+ netdev_info(mii->dev, "link down\n");
return 0; /* duplex did not change */
}
@@ -381,12 +381,12 @@ unsigned int mii_check_media (struct mii_if_info *mii,
duplex = 1;
if (ok_to_print)
- printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n",
- mii->dev->name,
- lpa2 & (LPA_1000FULL | LPA_1000HALF) ? "1000" :
- media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10",
- duplex ? "full" : "half",
- lpa);
+ netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n",
+ lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
+ media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ?
+ 100 : 10,
+ duplex ? "full" : "half",
+ lpa);
if ((init_media) || (mii->full_duplex != duplex)) {
mii->full_duplex = duplex;
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8f4bf1f07c11..3a4277f6fac4 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -178,6 +178,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
} else {
int i;
+ buf->direct.buf = NULL;
buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
@@ -229,7 +230,7 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
buf->direct.map);
else {
- if (BITS_PER_LONG == 64)
+ if (BITS_PER_LONG == 64 && buf->direct.buf)
vunmap(buf->direct.buf);
for (i = 0; i < buf->nbufs; ++i)
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 68aaa42d0ced..32f947154c33 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -113,7 +113,7 @@ static void catas_reset(struct work_struct *work)
void mlx4_start_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- unsigned long addr;
+ phys_addr_t addr;
INIT_LIST_HEAD(&priv->catas_err.list);
init_timer(&priv->catas_err.timer);
@@ -124,8 +124,8 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
if (!priv->catas_err.map) {
- mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
- addr);
+ mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
+ (unsigned long long) addr);
return;
}
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index f6e0d40cd876..1ff6ca6466ed 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -202,7 +202,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
if (mlx4_uar_alloc(dev, &mdev->priv_uar))
goto err_pd;
- mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
+ PAGE_SIZE);
if (!mdev->uar_map)
goto err_uar;
spin_lock_init(&mdev->uar_lock);
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 6d6806b361e3..897f576b8b17 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -972,7 +972,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
int i;
int err;
- dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
+ dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
+ prof->tx_ring_num, prof->rx_ring_num);
if (dev == NULL) {
mlx4_err(mdev, "Net device allocation failed\n");
return -ENOMEM;
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7a7e18ba278a..5de1db897835 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,10 +289,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
dev_cap->bf_reg_size = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
- if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
- mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
+ if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
field = 3;
- }
dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 782f11d8fa71..2765a3ce9c24 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -829,7 +829,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_uar_table_free;
}
- priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
if (!priv->kar) {
mlx4_err(dev, "Couldn't map kernel access region, "
"aborting.\n");
@@ -1286,6 +1286,21 @@ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
{ PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
{ PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
+ { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
{ 0, }
};
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c4f88b7ef7b6..79cf42db2ea9 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -95,7 +95,8 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
* entry in hash chain and *mgm holds end of hash chain.
*/
static int find_mgm(struct mlx4_dev *dev,
- u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox,
+ u8 *gid, enum mlx4_protocol protocol,
+ struct mlx4_cmd_mailbox *mgm_mailbox,
u16 *hash, int *prev, int *index)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -134,7 +135,8 @@ static int find_mgm(struct mlx4_dev *dev,
return err;
}
- if (!memcmp(mgm->gid, gid, 16))
+ if (!memcmp(mgm->gid, gid, 16) &&
+ be32_to_cpu(mgm->members_count) >> 30 == protocol)
return err;
*prev = *index;
@@ -146,7 +148,7 @@ static int find_mgm(struct mlx4_dev *dev,
}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- int block_mcast_loopback)
+ int block_mcast_loopback, enum mlx4_protocol protocol)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
@@ -165,7 +167,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
mutex_lock(&priv->mcg_table.mutex);
- err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+ err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
if (err)
goto out;
@@ -187,7 +189,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
memcpy(mgm->gid, gid, 16);
}
- members_count = be32_to_cpu(mgm->members_count);
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
if (members_count == MLX4_QP_PER_MGM) {
mlx4_err(dev, "MGM at index %x is full.\n", index);
err = -ENOMEM;
@@ -207,7 +209,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
else
mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
- mgm->members_count = cpu_to_be32(members_count);
+ mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30);
err = mlx4_WRITE_MCG(dev, index, mailbox);
if (err)
@@ -242,7 +244,8 @@ out:
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ enum mlx4_protocol protocol)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
@@ -260,7 +263,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
mutex_lock(&priv->mcg_table.mutex);
- err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
+ err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
if (err)
goto out;
@@ -270,7 +273,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
goto out;
}
- members_count = be32_to_cpu(mgm->members_count);
+ members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
for (loc = -1, i = 0; i < members_count; ++i)
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
loc = i;
@@ -282,7 +285,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
}
- mgm->members_count = cpu_to_be32(--members_count);
+ mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30);
mgm->qp[loc] = mgm->qp[i - 1];
mgm->qp[i - 1] = 0;
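The mcg.c hunks above start encoding the protocol alongside the member count. The field layout implied by the masks and shifts (an inference from this patch, not from hardware documentation) would be:

	/* MGM members_count word, after be32_to_cpu():
	 *   bits  0..23   number of attached QPs
	 *   bits 30..31   enum mlx4_protocol of the entry
	 */
	u32 word     = be32_to_cpu(mgm->members_count);
	u32 count    = word & 0xffffff;
	u32 protocol = word >> 30;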
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index dd2b6a71c6d7..34425b94452f 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -35,6 +35,8 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
@@ -627,9 +629,8 @@ err:
if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
(RX_FIRST_DESC | RX_LAST_DESC)) {
if (net_ratelimit())
- dev_printk(KERN_ERR, &mp->dev->dev,
- "received packet spanning "
- "multiple descriptors\n");
+ netdev_err(mp->dev,
+ "received packet spanning multiple descriptors\n");
}
if (cmd_sts & ERROR_SUMMARY)
@@ -868,15 +869,14 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
txq->tx_dropped++;
- dev_printk(KERN_DEBUG, &dev->dev,
- "failed to linearize skb with tiny "
- "unaligned fragment\n");
+ netdev_printk(KERN_DEBUG, dev,
+ "failed to linearize skb with tiny unaligned fragment\n");
return NETDEV_TX_BUSY;
}
if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
if (net_ratelimit())
- dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
+ netdev_err(dev, "tx queue full?!\n");
kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -959,7 +959,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
skb = __skb_dequeue(&txq->tx_skb);
if (cmd_sts & ERROR_SUMMARY) {
- dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
+ netdev_info(mp->dev, "tx error\n");
mp->dev->stats.tx_errors++;
}
@@ -1122,20 +1122,20 @@ static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
int ret;
if (smi_wait_ready(msp)) {
- printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+ pr_warn("SMI bus busy timeout\n");
return -ETIMEDOUT;
}
writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
if (smi_wait_ready(msp)) {
- printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+ pr_warn("SMI bus busy timeout\n");
return -ETIMEDOUT;
}
ret = readl(smi_reg);
if (!(ret & SMI_READ_VALID)) {
- printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n");
+ pr_warn("SMI bus read not valid\n");
return -ENODEV;
}
@@ -1148,7 +1148,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
void __iomem *smi_reg = msp->base + SMI_REG;
if (smi_wait_ready(msp)) {
- printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+ pr_warn("SMI bus busy timeout\n");
return -ETIMEDOUT;
}
@@ -1156,7 +1156,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
(addr << 16) | (val & 0xffff), smi_reg);
if (smi_wait_ready(msp)) {
- printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
+ pr_warn("SMI bus busy timeout\n");
return -ETIMEDOUT;
}
@@ -1514,11 +1514,6 @@ static int mv643xx_eth_nway_reset(struct net_device *dev)
return genphy_restart_aneg(mp->phy);
}
-static u32 mv643xx_eth_get_link(struct net_device *dev)
-{
- return !!netif_carrier_ok(dev);
-}
-
static int
mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
@@ -1571,9 +1566,8 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
if (netif_running(dev)) {
mv643xx_eth_stop(dev);
if (mv643xx_eth_open(dev)) {
- dev_printk(KERN_ERR, &dev->dev,
- "fatal error on re-opening device after "
- "ring param change\n");
+ netdev_err(dev,
+ "fatal error on re-opening device after ring param change\n");
return -ENOMEM;
}
}
@@ -1658,7 +1652,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
.set_settings = mv643xx_eth_set_settings,
.get_drvinfo = mv643xx_eth_get_drvinfo,
.nway_reset = mv643xx_eth_nway_reset,
- .get_link = mv643xx_eth_get_link,
+ .get_link = ethtool_op_get_link,
.get_coalesce = mv643xx_eth_get_coalesce,
.set_coalesce = mv643xx_eth_set_coalesce,
.get_ringparam = mv643xx_eth_get_ringparam,
@@ -1879,7 +1873,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
}
if (rxq->rx_desc_area == NULL) {
- dev_printk(KERN_ERR, &mp->dev->dev,
+ netdev_err(mp->dev,
"can't allocate rx ring (%d bytes)\n", size);
goto out;
}
@@ -1889,8 +1883,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
GFP_KERNEL);
if (rxq->rx_skb == NULL) {
- dev_printk(KERN_ERR, &mp->dev->dev,
- "can't allocate rx skb ring\n");
+ netdev_err(mp->dev, "can't allocate rx skb ring\n");
goto out_free;
}
@@ -1949,8 +1942,7 @@ static void rxq_deinit(struct rx_queue *rxq)
}
if (rxq->rx_desc_count) {
- dev_printk(KERN_ERR, &mp->dev->dev,
- "error freeing rx ring -- %d skbs stuck\n",
+ netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
rxq->rx_desc_count);
}
@@ -1992,7 +1984,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
}
if (txq->tx_desc_area == NULL) {
- dev_printk(KERN_ERR, &mp->dev->dev,
+ netdev_err(mp->dev,
"can't allocate tx ring (%d bytes)\n", size);
return -ENOMEM;
}
@@ -2098,7 +2090,7 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
if (netif_carrier_ok(dev)) {
int i;
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
netif_carrier_off(dev);
@@ -2129,10 +2121,8 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
- printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
- "flow control %sabled\n", dev->name,
- speed, duplex ? "full" : "half",
- fc ? "en" : "dis");
+ netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
+ speed, duplex ? "full" : "half", fc ? "en" : "dis");
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
@@ -2342,7 +2332,7 @@ static int mv643xx_eth_open(struct net_device *dev)
err = request_irq(dev->irq, mv643xx_eth_irq,
IRQF_SHARED, dev->name, dev);
if (err) {
- dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+ netdev_err(dev, "can't assign irq\n");
return -EAGAIN;
}
@@ -2488,9 +2478,8 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
*/
mv643xx_eth_stop(dev);
if (mv643xx_eth_open(dev)) {
- dev_printk(KERN_ERR, &dev->dev,
- "fatal error on re-opening device after "
- "MTU change\n");
+ netdev_err(dev,
+ "fatal error on re-opening device after MTU change\n");
}
return 0;
@@ -2513,7 +2502,7 @@ static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");
+ netdev_info(dev, "tx timeout\n");
schedule_work(&mp->tx_timeout_task);
}
@@ -2608,8 +2597,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
int ret;
if (!mv643xx_eth_version_printed++)
- printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
- "driver version %s\n", mv643xx_eth_driver_version);
+ pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
+ mv643xx_eth_driver_version);
ret = -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2876,14 +2865,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
pd = pdev->dev.platform_data;
if (pd == NULL) {
- dev_printk(KERN_ERR, &pdev->dev,
- "no mv643xx_eth_platform_data\n");
+ dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
return -ENODEV;
}
if (pd->shared == NULL) {
- dev_printk(KERN_ERR, &pdev->dev,
- "no mv643xx_eth_platform_data->shared\n");
+ dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
return -ENODEV;
}
@@ -2962,11 +2949,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (err)
goto out;
- dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n",
- mp->port_num, dev->dev_addr);
+ netdev_notice(dev, "port %d with MAC address %pM\n",
+ mp->port_num, dev->dev_addr);
if (mp->tx_desc_sram_size > 0)
- dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");
+ netdev_notice(dev, "configured with sram\n");
return 0;
@@ -2983,7 +2970,7 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
unregister_netdev(mp->dev);
if (mp->phy != NULL)
phy_detach(mp->phy);
- flush_scheduled_work();
+ cancel_work_sync(&mp->tx_timeout_task);
free_netdev(mp->dev);
platform_set_drvdata(pdev, NULL);
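The mv643xx_eth logging conversion above leans on the pr_fmt() hook: because the define appears before the printk helpers are pulled in, every pr_warn()/pr_notice() in the file gains the module-name prefix automatically, which is why the literal "mv643xx_eth: " strings disappear from the call sites. Roughly:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	pr_warn("SMI bus busy timeout\n");
	/* expands to approximately: */
	printk(KERN_WARNING KBUILD_MODNAME ": " "SMI bus busy timeout\n");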
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 8524cc40ec57..a7f2eed9a08a 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -253,7 +253,7 @@ struct myri10ge_priv {
unsigned long serial_number;
int vendor_specific_offset;
int fw_multicast_support;
- unsigned long features;
+ u32 features;
u32 max_tso6;
u32 read_dma;
u32 write_dma;
@@ -1776,7 +1776,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
- unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
+ u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
if (tso_enabled)
netdev->features |= flags;
@@ -2736,7 +2736,7 @@ again:
odd_flag = 0;
flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
- cksum_offset = skb_transport_offset(skb);
+ cksum_offset = skb_checksum_start_offset(skb);
pseudo_hdr_offset = cksum_offset + skb->csum_offset;
/* If the headers are excessively large, then we must
* fall back to a software checksum */
@@ -3403,9 +3403,7 @@ static int myri10ge_resume(struct pci_dev *pdev)
return -EIO;
}
- status = pci_restore_state(pdev);
- if (status)
- return status;
+ pci_restore_state(pdev);
status = pci_enable_device(pdev);
if (status) {
@@ -4067,7 +4065,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
if (mgp == NULL)
return;
- flush_scheduled_work();
+ cancel_work_sync(&mgp->watchdog_work);
netdev = mgp->dev;
unregister_netdev(netdev);
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 4846e131a04e..a761076b69c3 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -926,7 +926,7 @@ static const struct net_device_ops myri_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __devinit myri_sbus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit myri_sbus_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
static unsigned version_printed;
@@ -1160,7 +1160,7 @@ static const struct of_device_id myri_sbus_match[] = {
MODULE_DEVICE_TABLE(of, myri_sbus_match);
-static struct of_platform_driver myri_sbus_driver = {
+static struct platform_driver myri_sbus_driver = {
.driver = {
.name = "myri",
.owner = THIS_MODULE,
@@ -1172,12 +1172,12 @@ static struct of_platform_driver myri_sbus_driver = {
static int __init myri_sbus_init(void)
{
- return of_register_platform_driver(&myri_sbus_driver);
+ return platform_driver_register(&myri_sbus_driver);
}
static void __exit myri_sbus_exit(void)
{
- of_unregister_platform_driver(&myri_sbus_driver);
+ platform_driver_unregister(&myri_sbus_driver);
}
module_init(myri_sbus_init);
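myri_sbus.c above and ll_temac_main.c earlier follow the same mechanical move away from of_platform_driver. As a summary of the two hunks (a reading of this patch, not an exhaustive recipe):

	/*
	 *  struct of_platform_driver        ->  struct platform_driver
	 *  of_register_platform_driver()    ->  platform_driver_register()
	 *  of_unregister_platform_driver()  ->  platform_driver_unregister()
	 *  probe(op, match)                 ->  probe(op)
	 *      (the matched of_device_id can still be recovered from
	 *       op->dev.of_node, e.g. via of_match_device(), when needed)
	 */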
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index e0b0ef11f110..30be8c634ebd 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -86,7 +86,7 @@ static u32 reg_offset[16];
static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int i;
unsigned char bus_width;
@@ -218,7 +218,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
int start_page, stop_page;
int reg0, ret;
static unsigned version_printed;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned char bus_width;
if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
@@ -371,7 +371,7 @@ static int ne_close(struct net_device *dev)
static void ne_reset_8390(struct net_device *dev)
{
unsigned long reset_start_time = jiffies;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (ei_debug > 1)
printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
@@ -397,7 +397,7 @@ static void ne_reset_8390(struct net_device *dev)
static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing)
@@ -437,7 +437,7 @@ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, i
static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
#ifdef NE_SANITY_CHECK
int xfer_count = count;
#endif
@@ -507,7 +507,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
static void ne_block_output(struct net_device *dev, int count,
const unsigned char *buf, const int start_page)
{
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long dma_start;
#ifdef NE_SANITY_CHECK
int retries = 0;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 94255f09093d..dfb67eb2a94b 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -664,6 +664,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
unsigned long flags;
struct netconsole_target *nt;
struct net_device *dev = ptr;
+ bool stopped = false;
if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
event == NETDEV_BONDING_DESLAVE || event == NETDEV_GOING_DOWN))
@@ -690,15 +691,16 @@ static int netconsole_netdev_event(struct notifier_block *this,
case NETDEV_GOING_DOWN:
case NETDEV_BONDING_DESLAVE:
nt->enabled = 0;
+ stopped = true;
break;
}
}
netconsole_target_put(nt);
}
spin_unlock_irqrestore(&target_list_lock, flags);
- if (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE)
- printk(KERN_INFO "netconsole: network logging stopped, "
- "interface %s %s\n", dev->name,
+ if (stopped && (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE))
+ printk(KERN_INFO "netconsole: network logging stopped on "
+ "interface %s as it %s\n", dev->name,
event == NETDEV_UNREGISTER ? "unregistered" : "released slaves");
done:
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 8e8a97839cb0..d7299f1a4940 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 74
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.74"
+#define _NETXEN_NIC_LINUX_SUBVERSION 75
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.75"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -739,7 +739,8 @@ struct netxen_recv_context {
#define NX_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
#define NX_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
#define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
-#define NX_CDRP_CMD_MAX 0x0000001f
+#define NX_CDRP_CMD_CONFIG_GBE_PORT 0x0000001f
+#define NX_CDRP_CMD_MAX 0x00000020
#define NX_RCODE_SUCCESS 0
#define NX_RCODE_NO_HOST_MEM 1
@@ -1054,6 +1055,7 @@ typedef struct {
#define NX_FW_CAPABILITY_BDG (1 << 8)
#define NX_FW_CAPABILITY_FVLANTX (1 << 9)
#define NX_FW_CAPABILITY_HW_LRO (1 << 10)
+#define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11)
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -1132,6 +1134,7 @@ typedef struct {
#define NETXEN_NIC_MSI_ENABLED 0x02
#define NETXEN_NIC_MSIX_ENABLED 0x04
#define NETXEN_NIC_LRO_ENABLED 0x08
+#define NETXEN_NIC_LRO_DISABLED 0x00
#define NETXEN_NIC_BRIDGE_ENABLED 0X10
#define NETXEN_NIC_DIAG_ENABLED 0x20
#define NETXEN_IS_MSI_FAMILY(adapter) \
@@ -1348,6 +1351,8 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *);
void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64);
+int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+ u32 speed, u32 duplex, u32 autoneg);
int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable);
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index f7d06cbc70ae..f16966afa64e 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -112,6 +112,21 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
return 0;
}
+int
+nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+ u32 speed, u32 duplex, u32 autoneg)
+{
+
+ return netxen_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ NXHAL_VERSION,
+ speed,
+ duplex,
+ autoneg,
+ NX_CDRP_CMD_CONFIG_GBE_PORT);
+
+}
+
static int
nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
{
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index b30de24f4a52..653d308e0f5d 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -214,7 +214,6 @@ skip:
check_sfp_module = netif_running(dev) &&
adapter->has_link_events;
} else {
- ecmd->autoneg = AUTONEG_ENABLE;
ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg);
ecmd->advertising |=
(ADVERTISED_TP | ADVERTISED_Autoneg);
@@ -252,53 +251,24 @@ static int
netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct netxen_adapter *adapter = netdev_priv(dev);
- __u32 status;
+ int ret;
- /* read which mode */
- if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- /* autonegotiation */
- if (adapter->phy_write &&
- adapter->phy_write(adapter,
- NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
- ecmd->autoneg) != 0)
- return -EIO;
- else
- adapter->link_autoneg = ecmd->autoneg;
+ if (adapter->ahw.port_type != NETXEN_NIC_GBE)
+ return -EOPNOTSUPP;
- if (adapter->phy_read &&
- adapter->phy_read(adapter,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
- &status) != 0)
- return -EIO;
+ if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG))
+ return -EOPNOTSUPP;
- /* speed */
- switch (ecmd->speed) {
- case SPEED_10:
- netxen_set_phy_speed(status, 0);
- break;
- case SPEED_100:
- netxen_set_phy_speed(status, 1);
- break;
- case SPEED_1000:
- netxen_set_phy_speed(status, 2);
- break;
- }
- /* set duplex mode */
- if (ecmd->duplex == DUPLEX_HALF)
- netxen_clear_phy_duplex(status);
- if (ecmd->duplex == DUPLEX_FULL)
- netxen_set_phy_duplex(status);
- if (adapter->phy_write &&
- adapter->phy_write(adapter,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
- *((int *)&status)) != 0)
- return -EIO;
- else {
- adapter->link_speed = ecmd->speed;
- adapter->link_duplex = ecmd->duplex;
- }
- } else
+ ret = nx_fw_cmd_set_gbe_port(adapter, ecmd->speed, ecmd->duplex,
+ ecmd->autoneg);
+ if (ret == NX_RCODE_NOT_SUPPORTED)
return -EOPNOTSUPP;
+ else if (ret)
+ return -EIO;
+
+ adapter->link_speed = ecmd->speed;
+ adapter->link_duplex = ecmd->duplex;
+ adapter->link_autoneg = ecmd->autoneg;
if (!netif_running(dev))
return 0;
@@ -720,7 +690,21 @@ static u32 netxen_nic_get_rx_csum(struct net_device *dev)
static int netxen_nic_set_rx_csum(struct net_device *dev, u32 data)
{
struct netxen_adapter *adapter = netdev_priv(dev);
- adapter->rx_csum = !!data;
+
+ if (data) {
+ adapter->rx_csum = data;
+ return 0;
+ }
+
+ if (dev->features & NETIF_F_LRO) {
+ if (netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_DISABLED))
+ return -EIO;
+
+ dev->features &= ~NETIF_F_LRO;
+ netxen_send_lro_cleanup(adapter);
+ netdev_info(dev, "disabling LRO as rx_csum is off\n");
+ }
+ adapter->rx_csum = data;
return 0;
}
@@ -893,11 +877,19 @@ static int netxen_nic_set_flags(struct net_device *netdev, u32 data)
if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO))
return -EINVAL;
+ if (!adapter->rx_csum) {
+ netdev_info(netdev, "rx csum is off, cannot toggle LRO\n");
+ return -EINVAL;
+ }
+
+ if (!!(data & ETH_FLAG_LRO) == !!(netdev->features & NETIF_F_LRO))
+ return 0;
+
if (data & ETH_FLAG_LRO) {
hw_lro = NETXEN_NIC_LRO_ENABLED;
netdev->features |= NETIF_F_LRO;
} else {
- hw_lro = 0;
+ hw_lro = NETXEN_NIC_LRO_DISABLED;
netdev->features &= ~NETIF_F_LRO;
}
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 37d3ebd65be8..5cef718fe35f 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -655,7 +655,7 @@ nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
}
static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
- u8 *addr, struct list_head *del_list)
+ const u8 *addr, struct list_head *del_list)
{
struct list_head *head;
nx_mac_list_t *cur;
@@ -686,7 +686,9 @@ static void netxen_p3_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
struct netdev_hw_addr *ha;
- u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
u32 mode = VPORT_MISS_MODE_DROP;
LIST_HEAD(del_list);
struct list_head *head;
@@ -807,9 +809,6 @@ int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable)
u64 word;
int rv = 0;
- if ((adapter->flags & NETXEN_NIC_LRO_ENABLED) == enable)
- return 0;
-
memset(&req, 0, sizeof(nx_nic_req_t));
req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
@@ -825,8 +824,6 @@ int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable)
"configure hw lro request\n");
}
- adapter->flags ^= NETXEN_NIC_LRO_ENABLED;
-
return rv;
}
@@ -869,9 +866,11 @@ int netxen_config_rss(struct netxen_adapter *adapter, int enable)
u64 word;
int i, rv;
- u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
- 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
- 0x255b0ec26d5a56daULL };
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
memset(&req, 0, sizeof(nx_nic_req_t));
@@ -895,7 +894,7 @@ int netxen_config_rss(struct netxen_adapter *adapter, int enable)
((u64)(enable & 0x1) << 8) |
((0x7ULL) << 48);
req.words[0] = cpu_to_le64(word);
- for (i = 0; i < 5; i++)
+ for (i = 0; i < ARRAY_SIZE(key); i++)
req.words[i+1] = cpu_to_le64(key[i]);
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 95fe552aa279..731077d8d962 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -214,13 +214,12 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, 0);
- cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
if (cmd_buf_arr == NULL) {
dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
netdev->name);
goto err_out;
}
- memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
recv_ctx = &adapter->recv_ctx;
@@ -279,8 +278,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
break;
}
- rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
- vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
if (rds_ring->rx_buf_arr == NULL) {
printk(KERN_ERR "%s: Failed to allocate "
"rx buffer ring %d\n",
@@ -288,7 +286,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
/* free whatever was already allocated */
goto err_out;
}
- memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
INIT_LIST_HEAD(&rds_ring->free_list);
/*
* Now go through all of them, set reference handles
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index e1d30d7f2071..83348dc4b184 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -38,7 +38,7 @@
#include <linux/sysfs.h>
#include <linux/aer.h>
-MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
+MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
@@ -762,8 +762,6 @@ netxen_check_options(struct netxen_adapter *adapter)
if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
- adapter->flags &= ~NETXEN_NIC_LRO_ENABLED;
-
if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
@@ -990,7 +988,7 @@ __netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
netxen_config_intr_coalesce(adapter);
- if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
+ if (netdev->features & NETIF_F_LRO)
netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED);
netxen_napi_enable(adapter);
@@ -1034,6 +1032,9 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
+ netxen_linkevent_request(adapter, 0);
+
if (adapter->stop_port)
adapter->stop_port(adapter);
@@ -1277,6 +1278,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int i = 0, err;
int pci_func_id = PCI_FUNC(pdev->devfn);
uint8_t revision_id;
+ u32 val;
if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
pr_warning("%s: chip revisions between 0x%x-0x%x "
@@ -1352,8 +1354,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- if (reset_devices) {
- if (adapter->portnum == 0) {
+ if (adapter->portnum == 0) {
+ val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ if (val != 0xffffffff && val != 0) {
NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
adapter->need_fw_reset = 1;
}
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 33618edc61f9..d973fc6c6b88 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -388,9 +388,9 @@ static long memend; /* e.g 0xd4000 */
struct net_device * __init ni52_probe(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct priv));
- static int ports[] = {0x300, 0x280, 0x360 , 0x320 , 0x340, 0};
+ static const int ports[] = {0x300, 0x280, 0x360, 0x320, 0x340, 0};
+ const int *port;
struct priv *p;
- int *port;
int err = 0;
if (!dev)
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index da228a0dd6cd..c75ae85eb918 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -361,8 +361,8 @@ static int dma;
struct net_device * __init ni65_probe(int unit)
{
struct net_device *dev = alloc_etherdev(0);
- static int ports[] = {0x360,0x300,0x320,0x340, 0};
- int *port;
+ static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
+ const int *port;
int err = 0;
if (!dev)
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 781e368329f9..40fa59e2fd5c 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -4489,6 +4489,9 @@ static int niu_alloc_channels(struct niu *np)
{
struct niu_parent *parent = np->parent;
int first_rx_channel, first_tx_channel;
+ int num_rx_rings, num_tx_rings;
+ struct rx_ring_info *rx_rings;
+ struct tx_ring_info *tx_rings;
int i, port, err;
port = np->port;
@@ -4498,18 +4501,21 @@ static int niu_alloc_channels(struct niu *np)
first_tx_channel += parent->txchan_per_port[i];
}
- np->num_rx_rings = parent->rxchan_per_port[port];
- np->num_tx_rings = parent->txchan_per_port[port];
+ num_rx_rings = parent->rxchan_per_port[port];
+ num_tx_rings = parent->txchan_per_port[port];
- netif_set_real_num_rx_queues(np->dev, np->num_rx_rings);
- netif_set_real_num_tx_queues(np->dev, np->num_tx_rings);
-
- np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
- GFP_KERNEL);
+ rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
+ GFP_KERNEL);
err = -ENOMEM;
- if (!np->rx_rings)
+ if (!rx_rings)
goto out_err;
+ np->num_rx_rings = num_rx_rings;
+ smp_wmb();
+ np->rx_rings = rx_rings;
+
+ netif_set_real_num_rx_queues(np->dev, num_rx_rings);
+
for (i = 0; i < np->num_rx_rings; i++) {
struct rx_ring_info *rp = &np->rx_rings[i];
@@ -4538,12 +4544,18 @@ static int niu_alloc_channels(struct niu *np)
return err;
}
- np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info),
- GFP_KERNEL);
+ tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
+ GFP_KERNEL);
err = -ENOMEM;
- if (!np->tx_rings)
+ if (!tx_rings)
goto out_err;
+ np->num_tx_rings = num_tx_rings;
+ smp_wmb();
+ np->tx_rings = tx_rings;
+
+ netif_set_real_num_tx_queues(np->dev, num_tx_rings);
+
for (i = 0; i < np->num_tx_rings; i++) {
struct tx_ring_info *rp = &np->tx_rings[i];
@@ -6246,11 +6258,17 @@ static void niu_sync_mac_stats(struct niu *np)
static void niu_get_rx_stats(struct niu *np)
{
unsigned long pkts, dropped, errors, bytes;
+ struct rx_ring_info *rx_rings;
int i;
pkts = dropped = errors = bytes = 0;
+
+ rx_rings = ACCESS_ONCE(np->rx_rings);
+ if (!rx_rings)
+ goto no_rings;
+
for (i = 0; i < np->num_rx_rings; i++) {
- struct rx_ring_info *rp = &np->rx_rings[i];
+ struct rx_ring_info *rp = &rx_rings[i];
niu_sync_rx_discard_stats(np, rp, 0);
@@ -6259,6 +6277,8 @@ static void niu_get_rx_stats(struct niu *np)
dropped += rp->rx_dropped;
errors += rp->rx_errors;
}
+
+no_rings:
np->dev->stats.rx_packets = pkts;
np->dev->stats.rx_bytes = bytes;
np->dev->stats.rx_dropped = dropped;
@@ -6268,16 +6288,24 @@ static void niu_get_rx_stats(struct niu *np)
static void niu_get_tx_stats(struct niu *np)
{
unsigned long pkts, errors, bytes;
+ struct tx_ring_info *tx_rings;
int i;
pkts = errors = bytes = 0;
+
+ tx_rings = ACCESS_ONCE(np->tx_rings);
+ if (!tx_rings)
+ goto no_rings;
+
for (i = 0; i < np->num_tx_rings; i++) {
- struct tx_ring_info *rp = &np->tx_rings[i];
+ struct tx_ring_info *rp = &tx_rings[i];
pkts += rp->tx_packets;
bytes += rp->tx_bytes;
errors += rp->tx_errors;
}
+
+no_rings:
np->dev->stats.tx_packets = pkts;
np->dev->stats.tx_bytes = bytes;
np->dev->stats.tx_errors = errors;
@@ -6287,9 +6315,10 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev)
{
struct niu *np = netdev_priv(dev);
- niu_get_rx_stats(np);
- niu_get_tx_stats(np);
-
+ if (netif_running(dev)) {
+ niu_get_rx_stats(np);
+ niu_get_tx_stats(np);
+ }
return &dev->stats;
}
@@ -6589,7 +6618,7 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
(ip_proto == IPPROTO_UDP ?
TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
- start = skb_transport_offset(skb) -
+ start = skb_checksum_start_offset(skb) -
(pad_bytes + sizeof(struct tx_pkt_hdr));
stuff = start + skb->csum_offset;
@@ -9917,7 +9946,7 @@ static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
if (!netif_running(dev))
return 0;
- flush_scheduled_work();
+ flush_work_sync(&np->reset_task);
niu_netif_stop(np);
del_timer_sync(&np->timer);
@@ -10033,8 +10062,7 @@ static const struct niu_ops niu_phys_ops = {
.unmap_single = niu_phys_unmap_single,
};
-static int __devinit niu_of_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit niu_of_probe(struct platform_device *op)
{
union niu_parent_id parent_id;
struct net_device *dev;
@@ -10194,7 +10222,7 @@ static const struct of_device_id niu_match[] = {
};
MODULE_DEVICE_TABLE(of, niu_match);
-static struct of_platform_driver niu_of_driver = {
+static struct platform_driver niu_of_driver = {
.driver = {
.name = "niu",
.owner = THIS_MODULE,
@@ -10215,14 +10243,14 @@ static int __init niu_init(void)
niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
#ifdef CONFIG_SPARC64
- err = of_register_platform_driver(&niu_of_driver);
+ err = platform_driver_register(&niu_of_driver);
#endif
if (!err) {
err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
if (err)
- of_unregister_platform_driver(&niu_of_driver);
+ platform_driver_unregister(&niu_of_driver);
#endif
}
@@ -10233,7 +10261,7 @@ static void __exit niu_exit(void)
{
pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
- of_unregister_platform_driver(&niu_of_driver);
+ platform_driver_unregister(&niu_of_driver);
#endif
}
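
The niu_alloc_channels() and statistics hunks work as a pair: on the write side the ring count is stored and smp_wmb() orders it before the ring-array pointer is published, and on the read side the stats code takes one ACCESS_ONCE() snapshot of that pointer and bails out while it is still NULL, so niu_get_stats() can run safely while the channels are being (re)allocated. A rough sketch of the idea, with illustrative names:

	/* writer: count first, then publish the pointer */
	np->num_rx_rings = num_rx_rings;
	smp_wmb();
	np->rx_rings = rx_rings;

	/* reader: snapshot once; NULL means "not set up yet" */
	rings = ACCESS_ONCE(np->rx_rings);
	if (!rings)
		return;
	for (i = 0; i < np->num_rx_rings; i++)
		example_collect(&rings[i]);	/* example_collect() is hypothetical */
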
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 84134c766f3a..a41b2cf4d917 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1988,12 +1988,11 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
}
ndev = alloc_etherdev(sizeof(struct ns83820));
- dev = PRIV(ndev);
-
err = -ENOMEM;
- if (!dev)
+ if (!ndev)
goto out;
+ dev = PRIV(ndev);
dev->ndev = ndev;
spin_lock_init(&dev->rx_info.lock);
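
The ns83820_init_one() fix above is an ordering problem: PRIV(ndev) was evaluated before alloc_etherdev() had been checked, and the NULL test was then applied to the derived private pointer rather than to the allocation itself. The safe shape, sketched with generic names:

	ndev = alloc_etherdev(sizeof(struct example_priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);	/* only meaningful once ndev is known good */
	priv->ndev = ndev;
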
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index a0c26a99520f..e1e33c80fb25 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -73,7 +73,7 @@ struct pch_gbe_regs {
struct pch_gbe_regs_mac_adr mac_adr[16];
u32 ADDR_MASK;
u32 MIIM;
- u32 reserve2;
+ u32 MAC_ADDR_LOAD;
u32 RGMII_ST;
u32 RGMII_CTRL;
u32 reserve3[3];
diff --git a/drivers/net/pch_gbe/pch_gbe_ethtool.c b/drivers/net/pch_gbe/pch_gbe_ethtool.c
index c8cc32c0edc9..c8c873b31a89 100644
--- a/drivers/net/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/pch_gbe/pch_gbe_ethtool.c
@@ -469,18 +469,6 @@ static int pch_gbe_set_rx_csum(struct net_device *netdev, u32 data)
}
/**
- * pch_gbe_get_tx_csum - Report whether transmit checksums are turned on or off
- * @netdev: Network interface device structure
- * Returns
- * true(1): Checksum On
- * false(0): Checksum Off
- */
-static u32 pch_gbe_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_HW_CSUM) != 0;
-}
-
-/**
* pch_gbe_set_tx_csum - Turn transmit checksums on or off
* @netdev: Network interface device structure
* @data: Checksum on[true] or off[false]
@@ -493,11 +481,7 @@ static int pch_gbe_set_tx_csum(struct net_device *netdev, u32 data)
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
adapter->tx_csum = data;
- if (data)
- netdev->features |= NETIF_F_HW_CSUM;
- else
- netdev->features &= ~NETIF_F_HW_CSUM;
- return 0;
+ return ethtool_op_set_tx_ipv6_csum(netdev, data);
}
/**
@@ -572,7 +556,6 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
.set_pauseparam = pch_gbe_set_pauseparam,
.get_rx_csum = pch_gbe_get_rx_csum,
.set_rx_csum = pch_gbe_set_rx_csum,
- .get_tx_csum = pch_gbe_get_tx_csum,
.set_tx_csum = pch_gbe_set_tx_csum,
.get_strings = pch_gbe_get_strings,
.get_ethtool_stats = pch_gbe_get_ethtool_stats,
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 03a1d280105f..8c66e22c3a0a 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_SHORT_PKT 64
#define DSC_INIT16 0xC000
#define PCH_GBE_DMA_ALIGN 0
+#define PCH_GBE_DMA_PADDING 2
#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+ iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
/**
* pch_gbe_mac_read_mac_addr - Read MAC address
* @hw: Pointer to the HW structure
@@ -519,7 +526,9 @@ static void pch_gbe_reset_task(struct work_struct *work)
struct pch_gbe_adapter *adapter;
adapter = container_of(work, struct pch_gbe_adapter, reset_task);
+ rtnl_lock();
pch_gbe_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -528,14 +537,8 @@ static void pch_gbe_reset_task(struct work_struct *work)
*/
void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
-
- rtnl_lock();
- if (netif_running(netdev)) {
- pch_gbe_down(adapter);
- pch_gbe_up(adapter);
- }
- rtnl_unlock();
+ pch_gbe_down(adapter);
+ pch_gbe_up(adapter);
}
/**
@@ -1369,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
struct pch_gbe_buffer *buffer_info;
struct pch_gbe_rx_desc *rx_desc;
u32 length;
- unsigned char tmp_packet[ETH_HLEN];
unsigned int i;
unsigned int cleaned_count = 0;
bool cleaned = false;
- struct sk_buff *skb;
+ struct sk_buff *skb, *new_skb;
u8 dma_status;
u16 gbec_status;
u32 tcp_ip_status;
- u8 skb_copy_flag = 0;
- u8 skb_padding_flag = 0;
i = rx_ring->next_to_clean;
@@ -1422,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
pr_err("Receive CRC Error\n");
} else {
/* get receive length */
- /* length convert[-3], padding[-2] */
- length = (rx_desc->rx_words_eob) - 3 - 2;
+ /* length convert[-3] */
+ length = (rx_desc->rx_words_eob) - 3;
/* Decide the data conversion method */
if (!adapter->rx_csum) {
/* [Header:14][payload] */
- skb_padding_flag = 0;
- skb_copy_flag = 1;
+ if (NET_IP_ALIGN) {
+ /* Because alignment differs,
+ * the new_skb is newly allocated,
+ * and data is copied to new_skb.*/
+ new_skb = netdev_alloc_skb(netdev,
+ length + NET_IP_ALIGN);
+ if (!new_skb) {
+ /* dorrop error */
+ pr_err("New skb allocation "
+ "Error\n");
+ goto dorrop;
+ }
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ memcpy(new_skb->data, skb->data,
+ length);
+ skb = new_skb;
+ } else {
+ /* DMA buffer is used as SKB as it is.*/
+ buffer_info->skb = NULL;
+ }
} else {
/* [Header:14][padding:2][payload] */
- skb_padding_flag = 1;
- if (length < copybreak)
- skb_copy_flag = 1;
- else
- skb_copy_flag = 0;
- }
-
- /* Data conversion */
- if (skb_copy_flag) { /* recycle skb */
- struct sk_buff *new_skb;
- new_skb =
- netdev_alloc_skb(netdev,
- length + NET_IP_ALIGN);
- if (new_skb) {
- if (!skb_padding_flag) {
- skb_reserve(new_skb,
- NET_IP_ALIGN);
+ /* The length includes padding length */
+ length = length - PCH_GBE_DMA_PADDING;
+ if ((length < copybreak) ||
+ (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
+ /* Because alignment differs,
+ * the new_skb is newly allocated,
+ * and data is copied to new_skb.
+ * Padding data is deleted
+ * at the time of a copy.*/
+ new_skb = netdev_alloc_skb(netdev,
+ length + NET_IP_ALIGN);
+ if (!new_skb) {
+ /* dorrop error */
+ pr_err("New skb allocation "
+ "Error\n");
+ goto dorrop;
}
+ skb_reserve(new_skb, NET_IP_ALIGN);
memcpy(new_skb->data, skb->data,
- length);
- /* save the skb
- * in buffer_info as good */
+ ETH_HLEN);
+ memcpy(&new_skb->data[ETH_HLEN],
+ &skb->data[ETH_HLEN +
+ PCH_GBE_DMA_PADDING],
+ length - ETH_HLEN);
skb = new_skb;
- } else if (!skb_padding_flag) {
- /* dorrop error */
- pr_err("New skb allocation Error\n");
- goto dorrop;
+ } else {
+ /* Padding data is deleted
+ * by moving header data.*/
+ memmove(&skb->data[PCH_GBE_DMA_PADDING],
+ &skb->data[0], ETH_HLEN);
+ skb_reserve(skb, NET_IP_ALIGN);
+ buffer_info->skb = NULL;
}
- } else {
- buffer_info->skb = NULL;
- }
- if (skb_padding_flag) {
- memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
- memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
- ETH_HLEN);
- skb_reserve(skb, NET_IP_ALIGN);
-
}
-
+ /* The length includes FCS length */
+ length = length - ETH_FCS_LEN;
/* update status of driver */
adapter->stats.rx_bytes += length;
adapter->stats.rx_packets++;
@@ -1523,12 +1538,11 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
int desNo;
size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
- tx_ring->buffer_info = vmalloc(size);
+ tx_ring->buffer_info = vzalloc(size);
if (!tx_ring->buffer_info) {
pr_err("Unable to allocate memory for the buffer infomation\n");
return -ENOMEM;
}
- memset(tx_ring->buffer_info, 0, size);
tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
@@ -1573,12 +1587,11 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
int desNo;
size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
- rx_ring->buffer_info = vmalloc(size);
+ rx_ring->buffer_info = vzalloc(size);
if (!rx_ring->buffer_info) {
pr_err("Unable to allocate memory for the receive descriptor ring\n");
return -ENOMEM;
}
- memset(rx_ring->buffer_info, 0, size);
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -2249,7 +2262,7 @@ static void pch_gbe_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- flush_scheduled_work();
+ cancel_work_sync(&adapter->reset_task);
unregister_netdev(netdev);
pch_gbe_hal_phy_hw_reset(&adapter->hw);
@@ -2321,9 +2334,10 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
netif_napi_add(netdev, &adapter->napi,
pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
- netdev->features = NETIF_F_HW_CSUM | NETIF_F_GRO;
+ netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
pch_gbe_set_ethtool_ops(netdev);
+ pch_gbe_mac_load_mac_addr(&adapter->hw);
pch_gbe_mac_reset_hw(&adapter->hw);
/* setup the private structure */
@@ -2360,9 +2374,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
pch_gbe_check_options(adapter);
if (adapter->tx_csum)
- netdev->features |= NETIF_F_HW_CSUM;
+ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
else
- netdev->features &= ~NETIF_F_HW_CSUM;
+ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
/* initialize the wol settings based on the eeprom settings */
adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
@@ -2432,7 +2446,7 @@ static struct pci_driver pch_gbe_pcidev = {
.id_table = pch_gbe_pcidev_id,
.probe = pch_gbe_probe,
.remove = pch_gbe_remove,
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
.driver.pm = &pch_gbe_pm_ops,
#endif
.shutdown = pch_gbe_shutdown,
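
Moving rtnl_lock()/rtnl_unlock() from pch_gbe_reinit_locked() into the reset worker keeps the locking at the outermost caller, so paths that already hold the RTNL (for example the ethtool handlers) can call the reinit helper directly without deadlocking. A minimal sketch of a reset worker written this way; the struct and helper names are illustrative:

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/netdevice.h>

static void example_reset_task(struct work_struct *work)
{
	struct example_adapter *adapter =
		container_of(work, struct example_adapter, reset_task);

	rtnl_lock();			/* serialize with ndo_open/ndo_stop */
	if (netif_running(adapter->netdev)) {
		example_down(adapter);
		example_up(adapter);
	}
	rtnl_unlock();
}
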
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 8a4d19e5de06..d3cb77205863 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -690,6 +690,7 @@ static void block_output(struct net_device *dev, int count,
static struct pcmcia_device_id axnet_ids[] = {
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081),
PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301),
+ PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0301),
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0303),
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
@@ -875,7 +876,7 @@ static void do_set_multicast_list(struct net_device *dev);
static int ax_open(struct net_device *dev)
{
unsigned long flags;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
/*
* Grab the page lock so we own the register set, then call
@@ -926,7 +927,7 @@ static int ax_close(struct net_device *dev)
static void axnet_tx_timeout(struct net_device *dev)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
unsigned long flags;
@@ -973,7 +974,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int length, send_length, output_page;
unsigned long flags;
u8 packet[ETH_ZLEN];
@@ -1270,7 +1271,7 @@ static void ei_tx_err(struct net_device *dev)
static void ei_tx_intr(struct net_device *dev)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int status = inb(e8390_base + EN0_TSR);
/*
@@ -1354,7 +1355,7 @@ static void ei_tx_intr(struct net_device *dev)
static void ei_receive(struct net_device *dev)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned char rxing_page, this_frame, next_frame;
unsigned short current_offset;
int rx_pkt_count = 0;
@@ -1487,12 +1488,10 @@ static void ei_rx_overrun(struct net_device *dev)
/*
* Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
- * Early datasheets said to poll the reset bit, but now they say that
- * it "is not a reliable indicator and subsequently should be ignored."
- * We wait at least 10ms.
+ * We wait at least 2ms.
*/
- mdelay(10);
+ mdelay(2);
/*
* Reset RBCR[01] back to zero as per magic incantation.
@@ -1539,7 +1538,7 @@ static void ei_rx_overrun(struct net_device *dev)
static struct net_device_stats *get_stats(struct net_device *dev)
{
long ioaddr = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
unsigned long flags;
/* If the card is stopped, just return the present stats. */
@@ -1588,7 +1587,7 @@ static void do_set_multicast_list(struct net_device *dev)
{
long e8390_base = dev->base_addr;
int i;
- struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
memset(ei_local->mcfilter, 0, 8);
@@ -1646,7 +1645,7 @@ static void AX88190_init(struct net_device *dev, int startp)
{
axnet_dev_t *info = PRIV(dev);
long e8390_base = dev->base_addr;
- struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local = netdev_priv(dev);
int i;
int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
@@ -1712,7 +1711,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
int start_page)
{
long e8390_base = dev->base_addr;
- struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
+ struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
if (inb_p(e8390_base) & E8390_TRANS)
{
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 9226cda4d054..530ab5a10bd3 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
PCMCIA_DEVICE_NULL,
};
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 0a2b0f9cdf33..76683d97d83b 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1291,7 +1291,7 @@ updateCRC
static void updateCRC(int *CRC, int bit)
{
- int poly[]={
+ static const int poly[]={
1,1,1,0, 1,1,0,1,
1,0,1,1, 1,0,0,0,
1,0,0,0, 0,0,1,1,
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index d05c44692f08..e953793a33ff 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1493,7 +1493,6 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530),
PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab),
PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110),
- PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041),
PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452),
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300),
@@ -1537,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cb3d13e4e074..392a6c4b72e5 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -64,7 +64,7 @@ config BCM63XX_PHY
config ICPLUS_PHY
tristate "Drivers for ICPlus PHYs"
---help---
- Currently supports the IP175C PHY.
+ Currently supports the IP175C and IP1001 PHYs.
config REALTEK_PHY
tristate "Drivers for Realtek PHYs"
@@ -77,7 +77,6 @@ config NATIONAL_PHY
Currently supports the DP83865 PHY.
config STE10XP
- depends on PHYLIB
tristate "Driver for STMicroelectronics STe10Xp PHYs"
---help---
This is the driver for the STe100p and STe101p PHYs.
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index c1d2d251fe8b..9a09e24c30bc 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -30,7 +30,7 @@
#include <asm/irq.h>
#include <asm/uaccess.h>
-MODULE_DESCRIPTION("ICPlus IP175C PHY driver");
+MODULE_DESCRIPTION("ICPlus IP175C/IC1001 PHY drivers");
MODULE_AUTHOR("Michael Barkowski");
MODULE_LICENSE("GPL");
@@ -89,6 +89,33 @@ static int ip175c_config_init(struct phy_device *phydev)
return 0;
}
+static int ip1001_config_init(struct phy_device *phydev)
+{
+ int err, value;
+
+ /* Software Reset PHY */
+ value = phy_read(phydev, MII_BMCR);
+ value |= BMCR_RESET;
+ err = phy_write(phydev, MII_BMCR, value);
+ if (err < 0)
+ return err;
+
+ do {
+ value = phy_read(phydev, MII_BMCR);
+ } while (value & BMCR_RESET);
+
+ /* Additional delay (2ns) used to adjust RX clock phase
+ * at GMII/ RGMII interface */
+ value = phy_read(phydev, 16);
+ value |= 0x3;
+
+ err = phy_write(phydev, 16, value);
+ if (err < 0)
+ return err;
+
+ return err;
+}
+
static int ip175c_read_status(struct phy_device *phydev)
{
if (phydev->addr == 4) /* WAN port */
@@ -121,21 +148,43 @@ static struct phy_driver ip175c_driver = {
.driver = { .owner = THIS_MODULE,},
};
-static int __init ip175c_init(void)
+static struct phy_driver ip1001_driver = {
+ .phy_id = 0x02430d90,
+ .name = "ICPlus IP1001",
+ .phy_id_mask = 0x0ffffff0,
+ .features = PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause,
+ .config_init = &ip1001_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init icplus_init(void)
{
+ int ret = 0;
+
+ ret = phy_driver_register(&ip1001_driver);
+ if (ret < 0)
+ return -ENODEV;
+
return phy_driver_register(&ip175c_driver);
}
-static void __exit ip175c_exit(void)
+static void __exit icplus_exit(void)
{
+ phy_driver_unregister(&ip1001_driver);
phy_driver_unregister(&ip175c_driver);
}
-module_init(ip175c_init);
-module_exit(ip175c_exit);
+module_init(icplus_init);
+module_exit(icplus_exit);
static struct mdio_device_id __maybe_unused icplus_tbl[] = {
{ 0x02430d80, 0x0ffffff0 },
+ { 0x02430d90, 0x0ffffff0 },
{ }
};
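
With the IP1001 entry added, the module init/exit pair above now has two phy_driver structures to register and drop. A common shape for such a pair, sketched with cleanup on partial failure (purely illustrative, not a claim about what the ICPlus driver itself does):

static int __init example_phy_init(void)
{
	int ret;

	ret = phy_driver_register(&example_drv_a);
	if (ret)
		return ret;

	ret = phy_driver_register(&example_drv_b);
	if (ret)
		phy_driver_unregister(&example_drv_a);	/* undo the partial init */

	return ret;
}

static void __exit example_phy_exit(void)
{
	phy_driver_unregister(&example_drv_b);
	phy_driver_unregister(&example_drv_a);
}
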
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index f62c7b717bc8..47c8339a0359 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -188,8 +188,7 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev)
#ifdef CONFIG_OF_GPIO
-static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev)
{
struct mdio_gpio_platform_data *pdata;
struct mii_bus *new_bus;
@@ -240,7 +239,7 @@ static struct of_device_id mdio_ofgpio_match[] = {
};
MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
-static struct of_platform_driver mdio_ofgpio_driver = {
+static struct platform_driver mdio_ofgpio_driver = {
.driver = {
.name = "mdio-gpio",
.owner = THIS_MODULE,
@@ -252,12 +251,12 @@ static struct of_platform_driver mdio_ofgpio_driver = {
static inline int __init mdio_ofgpio_init(void)
{
- return of_register_platform_driver(&mdio_ofgpio_driver);
+ return platform_driver_register(&mdio_ofgpio_driver);
}
static inline void __exit mdio_ofgpio_exit(void)
{
- of_unregister_platform_driver(&mdio_ofgpio_driver);
+ platform_driver_unregister(&mdio_ofgpio_driver);
}
#else
static inline int __init mdio_ofgpio_init(void) { return 0; }
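
The mdio-gpio and niu hunks in this series follow the same conversion: the of_platform_driver is retired in favour of a plain platform_driver, the probe callback loses its of_device_id argument, and registration goes through platform_driver_register()/platform_driver_unregister(). A compact sketch of the converted form; all names are placeholders:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example", },
	{},
};
MODULE_DEVICE_TABLE(of, example_of_match);

static int example_probe(struct platform_device *pdev)	/* no of_device_id arg */
{
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.driver = {
		.name		= "example",
		.owner		= THIS_MODULE,
		.of_match_table	= example_of_match,	/* OF matching still works */
	},
	.probe	= example_probe,
	.remove	= example_remove,
};

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_exit);
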
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0fd1678bc5a9..590f902deb6b 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -19,13 +19,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
-
-#define PHY_ID_KSZ9021 0x00221611
-#define PHY_ID_KS8737 0x00221720
-#define PHY_ID_KS8041 0x00221510
-#define PHY_ID_KS8051 0x00221550
-/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
-#define PHY_ID_KS8001 0x0022161A
+#include <linux/micrel_phy.h>
/* general Interrupt control/status reg in vendor specific block. */
#define MII_KSZPHY_INTCS 0x1B
@@ -46,6 +40,7 @@
#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9)
#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14)
#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
+#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
static int kszphy_ack_interrupt(struct phy_device *phydev)
{
@@ -106,6 +101,19 @@ static int kszphy_config_init(struct phy_device *phydev)
return 0;
}
+static int ks8051_config_init(struct phy_device *phydev)
+{
+ int regval;
+
+ if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+ regval = phy_read(phydev, MII_KSZPHY_CTRL);
+ regval |= KSZ8051_RMII_50MHZ_CLK;
+ phy_write(phydev, MII_KSZPHY_CTRL, regval);
+ }
+
+ return 0;
+}
+
static struct phy_driver ks8737_driver = {
.phy_id = PHY_ID_KS8737,
.phy_id_mask = 0x00fffff0,
@@ -142,7 +150,7 @@ static struct phy_driver ks8051_driver = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = ks8051_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7670aac0e93f..f7670330f988 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -47,11 +47,11 @@ void phy_print_status(struct phy_device *phydev)
pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
phydev->link ? "Up" : "Down");
if (phydev->link)
- printk(" - %d/%s", phydev->speed,
+ printk(KERN_CONT " - %d/%s", phydev->speed,
DUPLEX_FULL == phydev->duplex ?
"Full" : "Half");
- printk("\n");
+ printk(KERN_CONT "\n");
}
EXPORT_SYMBOL(phy_print_status);
@@ -319,7 +319,8 @@ int phy_mii_ioctl(struct phy_device *phydev,
/* fall through */
case SIOCGMIIREG:
- mii_data->val_out = phy_read(phydev, mii_data->reg_num);
+ mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
+ mii_data->reg_num);
break;
case SIOCSMIIREG:
@@ -350,8 +351,9 @@ int phy_mii_ioctl(struct phy_device *phydev,
}
}
- phy_write(phydev, mii_data->reg_num, val);
-
+ mdiobus_write(phydev->bus, mii_data->phy_id,
+ mii_data->reg_num, val);
+
if (mii_data->reg_num == MII_BMCR &&
val & BMCR_RESET &&
phydev->drv->config_init) {
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 78d70a6481bf..a1b82c9c67d2 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -32,6 +32,7 @@
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <asm/uaccess.h>
#include <asm/string.h>
@@ -542,7 +543,7 @@ ppp_async_encode(struct asyncppp *ap)
data = ap->tpkt->data;
count = ap->tpkt->len;
fcs = ap->tfcs;
- proto = (data[0] << 8) + data[1];
+ proto = get_unaligned_be16(data);
/*
* LCP packets with code values between 1 (configure-reqest)
@@ -963,7 +964,7 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
code = data[0];
if (code != CONFACK && code != CONFREQ)
return;
- dlen = (data[2] << 8) + data[3];
+ dlen = get_unaligned_be16(data + 2);
if (len < dlen)
return; /* packet got truncated or length is bogus */
@@ -997,15 +998,14 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
switch (data[0]) {
case LCP_MRU:
- val = (data[2] << 8) + data[3];
+ val = get_unaligned_be16(data + 2);
if (inbound)
ap->mru = val;
else
ap->chan.mtu = val;
break;
case LCP_ASYNCMAP:
- val = (data[2] << 24) + (data[3] << 16)
- + (data[4] << 8) + data[5];
+ val = get_unaligned_be32(data + 2);
if (inbound)
ap->raccm = val;
else
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 695bc83e0cfd..43583309a65d 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -41,6 +41,7 @@
#include <linux/ppp-comp.h>
#include <linux/zlib.h>
+#include <asm/unaligned.h>
/*
* State for a Deflate (de)compressor.
@@ -232,11 +233,9 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
*/
wptr[0] = PPP_ADDRESS(rptr);
wptr[1] = PPP_CONTROL(rptr);
- wptr[2] = PPP_COMP >> 8;
- wptr[3] = PPP_COMP;
+ put_unaligned_be16(PPP_COMP, wptr + 2);
wptr += PPP_HDRLEN;
- wptr[0] = state->seqno >> 8;
- wptr[1] = state->seqno;
+ put_unaligned_be16(state->seqno, wptr);
wptr += DEFLATE_OVHD;
olen = PPP_HDRLEN + DEFLATE_OVHD;
state->strm.next_out = wptr;
@@ -451,7 +450,7 @@ static int z_decompress(void *arg, unsigned char *ibuf, int isize,
}
/* Check the sequence number. */
- seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1];
+ seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
if (seq != (state->seqno & 0xffff)) {
if (state->debug)
printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 39659976a1ac..9f6d670748d1 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -46,6 +46,7 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <net/slhc_vj.h>
#include <asm/atomic.h>
@@ -210,7 +211,7 @@ struct ppp_net {
};
/* Get the PPP protocol number from a skb */
-#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1])
+#define PPP_PROTO(skb) get_unaligned_be16((skb)->data)
/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN 32
@@ -591,8 +592,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ppp_release(NULL, file);
err = 0;
} else
- printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
- atomic_long_read(&file->f_count));
+ pr_warn("PPPIOCDETACH file->f_count=%ld\n",
+ atomic_long_read(&file->f_count));
mutex_unlock(&ppp_mutex);
return err;
}
@@ -629,7 +630,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (pf->kind != INTERFACE) {
/* can't happen */
- printk(KERN_ERR "PPP: not interface or channel??\n");
+ pr_err("PPP: not interface or channel??\n");
return -EINVAL;
}
@@ -703,7 +704,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
vj = slhc_init(val2+1, val+1);
if (!vj) {
- printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
+ netdev_err(ppp->dev,
+ "PPP: no memory (VJ compressor)\n");
err = -ENOMEM;
break;
}
@@ -897,17 +899,17 @@ static int __init ppp_init(void)
{
int err;
- printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
+ pr_info("PPP generic driver version " PPP_VERSION "\n");
err = register_pernet_device(&ppp_net_ops);
if (err) {
- printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
+ pr_err("failed to register PPP pernet device (%d)\n", err);
goto out;
}
err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
if (err) {
- printk(KERN_ERR "failed to register PPP device (%d)\n", err);
+ pr_err("failed to register PPP device (%d)\n", err);
goto out_net;
}
@@ -964,8 +966,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
pp = skb_push(skb, 2);
proto = npindex_to_proto[npi];
- pp[0] = proto >> 8;
- pp[1] = proto;
+ put_unaligned_be16(proto, pp);
netif_stop_queue(dev);
skb_queue_tail(&ppp->file.xq, skb);
@@ -1078,7 +1079,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
if (!new_skb) {
if (net_ratelimit())
- printk(KERN_ERR "PPP: no memory (comp pkt)\n");
+ netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
return NULL;
}
if (ppp->dev->hard_header_len > PPP_HDRLEN)
@@ -1108,7 +1109,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
* the same number.
*/
if (net_ratelimit())
- printk(KERN_ERR "ppp: compressor dropped pkt\n");
+ netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
kfree_skb(skb);
kfree_skb(new_skb);
new_skb = NULL;
@@ -1136,17 +1137,17 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
a four-byte PPP header on each packet */
*skb_push(skb, 2) = 1;
if (ppp->pass_filter &&
- sk_run_filter(skb, ppp->pass_filter,
- ppp->pass_len) == 0) {
+ sk_run_filter(skb, ppp->pass_filter) == 0) {
if (ppp->debug & 1)
- printk(KERN_DEBUG "PPP: outbound frame not passed\n");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: outbound frame "
+ "not passed\n");
kfree_skb(skb);
return;
}
/* if this packet passes the active filter, record the time */
if (!(ppp->active_filter &&
- sk_run_filter(skb, ppp->active_filter,
- ppp->active_len) == 0))
+ sk_run_filter(skb, ppp->active_filter) == 0))
ppp->last_xmit = jiffies;
skb_pull(skb, 2);
#else
@@ -1166,7 +1167,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
GFP_ATOMIC);
if (!new_skb) {
- printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
+ netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
goto drop;
}
skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
@@ -1204,7 +1205,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
proto != PPP_LCP && proto != PPP_CCP) {
if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
if (net_ratelimit())
- printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
+ netdev_err(ppp->dev,
+ "ppp: compression required but "
+ "down - pkt dropped.\n");
goto drop;
}
skb = pad_compress_skb(ppp, skb);
@@ -1285,6 +1288,11 @@ ppp_push(struct ppp *ppp)
}
#ifdef CONFIG_PPP_MULTILINK
+static bool mp_protocol_compress __read_mostly = true;
+module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mp_protocol_compress,
+ "compress protocol id in multilink fragments");
+
/*
* Divide a packet to be transmitted into fragments and
* send them out the individual links.
@@ -1347,10 +1355,10 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
if (nfree == 0 || nfree < navail / 2)
return 0; /* can't take now, leave it in xmit_pending */
- /* Do protocol field compression (XXX this should be optional) */
+ /* Do protocol field compression */
p = skb->data;
len = skb->len;
- if (*p == 0) {
+ if (*p == 0 && mp_protocol_compress) {
++p;
--len;
}
@@ -1470,8 +1478,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
q = skb_put(frag, flen + hdrlen);
/* make the MP header */
- q[0] = PPP_MP >> 8;
- q[1] = PPP_MP;
+ put_unaligned_be16(PPP_MP, q);
if (ppp->flags & SC_MP_XSHORTSEQ) {
q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
q[3] = ppp->nxseq;
@@ -1503,7 +1510,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
noskb:
spin_unlock_bh(&pch->downl);
if (ppp->debug & 1)
- printk(KERN_ERR "PPP: no memory (fragment)\n");
+ netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
++ppp->dev->stats.tx_errors;
++ppp->nxseq;
return 1; /* abandon the frame */
@@ -1684,7 +1691,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/* copy to a new sk_buff with more tailroom */
ns = dev_alloc_skb(skb->len + 128);
if (!ns) {
- printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
+ netdev_err(ppp->dev, "PPP: no memory "
+ "(VJ decomp)\n");
goto err;
}
skb_reserve(ns, 2);
@@ -1697,7 +1705,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
if (len <= 0) {
- printk(KERN_DEBUG "PPP: VJ decompression error\n");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: VJ decompression error\n");
goto err;
}
len += 2;
@@ -1719,7 +1728,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
goto err;
if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
- printk(KERN_ERR "PPP: VJ uncompressed error\n");
+ netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
goto err;
}
proto = PPP_IP;
@@ -1758,17 +1767,16 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
*skb_push(skb, 2) = 0;
if (ppp->pass_filter &&
- sk_run_filter(skb, ppp->pass_filter,
- ppp->pass_len) == 0) {
+ sk_run_filter(skb, ppp->pass_filter) == 0) {
if (ppp->debug & 1)
- printk(KERN_DEBUG "PPP: inbound frame "
- "not passed\n");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: inbound frame "
+ "not passed\n");
kfree_skb(skb);
return;
}
if (!(ppp->active_filter &&
- sk_run_filter(skb, ppp->active_filter,
- ppp->active_len) == 0))
+ sk_run_filter(skb, ppp->active_filter) == 0))
ppp->last_recv = jiffies;
__skb_pull(skb, 2);
} else
@@ -1821,7 +1829,8 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
ns = dev_alloc_skb(obuff_size);
if (!ns) {
- printk(KERN_ERR "ppp_decompress_frame: no memory\n");
+ netdev_err(ppp->dev, "ppp_decompress_frame: "
+ "no memory\n");
goto err;
}
/* the decompressor still expects the A/C bytes in the hdr */
@@ -1989,7 +1998,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
u32 seq = ppp->nextseq;
u32 minseq = ppp->minseq;
struct sk_buff_head *list = &ppp->mrq;
- struct sk_buff *p, *next;
+ struct sk_buff *p, *tmp;
struct sk_buff *head, *tail;
struct sk_buff *skb = NULL;
int lost = 0, len = 0;
@@ -1998,13 +2007,15 @@ ppp_mp_reconstruct(struct ppp *ppp)
return NULL;
head = list->next;
tail = NULL;
- for (p = head; p != (struct sk_buff *) list; p = next) {
- next = p->next;
+ skb_queue_walk_safe(list, p, tmp) {
+ again:
if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
/* this can't happen, anyway ignore the skb */
- printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
- PPP_MP_CB(p)->sequence, seq);
- head = next;
+ netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
+ "seq %u < %u\n",
+ PPP_MP_CB(p)->sequence, seq);
+ __skb_unlink(p, list);
+ kfree_skb(p);
continue;
}
if (PPP_MP_CB(p)->sequence != seq) {
@@ -2016,8 +2027,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
lost = 1;
seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
minseq + 1: PPP_MP_CB(p)->sequence;
- next = p;
- continue;
+ goto again;
}
/*
@@ -2042,17 +2052,9 @@ ppp_mp_reconstruct(struct ppp *ppp)
(PPP_MP_CB(head)->BEbits & B)) {
if (len > ppp->mrru + 2) {
++ppp->dev->stats.rx_length_errors;
- printk(KERN_DEBUG "PPP: reconstructed packet"
- " is too long (%d)\n", len);
- } else if (p == head) {
- /* fragment is complete packet - reuse skb */
- tail = p;
- skb = skb_get(p);
- break;
- } else if ((skb = dev_alloc_skb(len)) == NULL) {
- ++ppp->dev->stats.rx_missed_errors;
- printk(KERN_DEBUG "PPP: no memory for "
- "reconstructed packet");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: reconstructed packet"
+ " is too long (%d)\n", len);
} else {
tail = p;
break;
@@ -2065,9 +2067,17 @@ ppp_mp_reconstruct(struct ppp *ppp)
* and we haven't found a complete valid packet yet,
* we can discard up to and including this fragment.
*/
- if (PPP_MP_CB(p)->BEbits & E)
- head = next;
+ if (PPP_MP_CB(p)->BEbits & E) {
+ struct sk_buff *tmp2;
+ skb_queue_reverse_walk_from_safe(list, p, tmp2) {
+ __skb_unlink(p, list);
+ kfree_skb(p);
+ }
+ head = skb_peek(list);
+ if (!head)
+ break;
+ }
++seq;
}
@@ -2077,26 +2087,37 @@ ppp_mp_reconstruct(struct ppp *ppp)
signal a receive error. */
if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
if (ppp->debug & 1)
- printk(KERN_DEBUG " missed pkts %u..%u\n",
- ppp->nextseq,
- PPP_MP_CB(head)->sequence-1);
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ " missed pkts %u..%u\n",
+ ppp->nextseq,
+ PPP_MP_CB(head)->sequence-1);
++ppp->dev->stats.rx_dropped;
ppp_receive_error(ppp);
}
- if (head != tail)
- /* copy to a single skb */
- for (p = head; p != tail->next; p = p->next)
- skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
- ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
- head = tail->next;
- }
+ skb = head;
+ if (head != tail) {
+ struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
+ p = skb_queue_next(list, head);
+ __skb_unlink(skb, list);
+ skb_queue_walk_from_safe(list, p, tmp) {
+ __skb_unlink(p, list);
+ *fragpp = p;
+ p->next = NULL;
+ fragpp = &p->next;
+
+ skb->len += p->len;
+ skb->data_len += p->len;
+ skb->truesize += p->len;
+
+ if (p == tail)
+ break;
+ }
+ } else {
+ __skb_unlink(skb, list);
+ }
- /* Discard all the skbuffs that we have copied the data out of
- or that we can't use. */
- while ((p = list->next) != head) {
- __skb_unlink(p, list);
- kfree_skb(p);
+ ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
}
return skb;
@@ -2617,8 +2638,8 @@ ppp_create_interface(struct net *net, int unit, int *retp)
ret = register_netdev(dev);
if (ret != 0) {
unit_put(&pn->units_idr, unit);
- printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
- dev->name, ret);
+ netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
+ dev->name, ret);
goto out2;
}
@@ -2690,9 +2711,9 @@ static void ppp_destroy_interface(struct ppp *ppp)
if (!ppp->file.dead || ppp->n_channels) {
/* "can't happen" */
- printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d "
- "n_channels=%d !\n", ppp, ppp->file.dead,
- ppp->n_channels);
+ netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
+ "but dead=%d n_channels=%d !\n",
+ ppp, ppp->file.dead, ppp->n_channels);
return;
}
@@ -2834,8 +2855,7 @@ static void ppp_destroy_channel(struct channel *pch)
if (!pch->file.dead) {
/* "can't happen" */
- printk(KERN_ERR "ppp: destroying undead channel %p !\n",
- pch);
+ pr_err("ppp: destroying undead channel %p !\n", pch);
return;
}
skb_queue_purge(&pch->file.xq);
@@ -2847,7 +2867,7 @@ static void __exit ppp_cleanup(void)
{
/* should never happen */
if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
- printk(KERN_ERR "PPP: removing module but units remain!\n");
+ pr_err("PPP: removing module but units remain!\n");
unregister_chrdev(PPP_MAJOR, "ppp");
device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
class_destroy(ppp_class);
@@ -2865,7 +2885,7 @@ static int __unit_alloc(struct idr *p, void *ptr, int n)
again:
if (!idr_pre_get(p, GFP_KERNEL)) {
- printk(KERN_ERR "PPP: No free memory for idr\n");
+ pr_err("PPP: No free memory for idr\n");
return -ENOMEM;
}
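
Several of the PPP hunks replace open-coded byte shifting with the <asm/unaligned.h> helpers: get_unaligned_be16()/get_unaligned_be32() read a big-endian field at any alignment and put_unaligned_be16() writes one, which states the byte order explicitly and avoids unaligned-access faults on strict architectures. A small sketch of the calls as used above:

#include <asm/unaligned.h>

	/* parse: the first two bytes of a PPP frame are the protocol, big endian */
	proto = get_unaligned_be16(skb->data);

	/* parse: a 32-bit option value at an odd offset, e.g. the LCP async-map */
	val = get_unaligned_be32(data + 2);

	/* build: write a 16-bit protocol id into a header being constructed */
	put_unaligned_be16(PPP_MP, q);
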
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 6d1a1b80cc3e..9a1849a83e2a 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -55,6 +55,7 @@
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
#include "ppp_mppe.h"
@@ -395,16 +396,14 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
*/
obuf[0] = PPP_ADDRESS(ibuf);
obuf[1] = PPP_CONTROL(ibuf);
- obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */
- obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */
+ put_unaligned_be16(PPP_COMP, obuf + 2);
obuf += PPP_HDRLEN;
state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
if (state->debug >= 7)
printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
state->ccount);
- obuf[0] = state->ccount >> 8;
- obuf[1] = state->ccount & 0xff;
+ put_unaligned_be16(state->ccount, obuf);
if (!state->stateful || /* stateless mode */
((state->ccount & 0xff) == 0xff) || /* "flag" packet */
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 4c95ec3fb8d4..4e6b72f57de8 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -45,6 +45,7 @@
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <asm/uaccess.h>
#define PPP_VERSION "2.4.2"
@@ -563,7 +564,7 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
int islcp;
data = skb->data;
- proto = (data[0] << 8) + data[1];
+ proto = get_unaligned_be16(data);
/* LCP packets with codes between 1 (configure-request)
* and 7 (code-reject) must be sent as though no options
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index d72fb0519a2a..78c0e3c9b2b5 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -948,7 +948,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
abort:
kfree_skb(skb);
- return 0;
+ return 1;
}
/************************************************************************
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index ccbc91326bfa..51dfcf8023c7 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -175,7 +175,6 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
struct pptp_opt *opt = &po->proto.pptp;
struct pptp_gre_header *hdr;
unsigned int header_len = sizeof(*hdr);
- int err = 0;
int islcp;
int len;
unsigned char *data;
@@ -190,18 +189,14 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
goto tx_error;
- {
- struct flowi fl = { .oif = 0,
- .nl_u = {
- .ip4_u = {
- .daddr = opt->dst_addr.sin_addr.s_addr,
- .saddr = opt->src_addr.sin_addr.s_addr,
- .tos = RT_TOS(0) } },
- .proto = IPPROTO_GRE };
- err = ip_route_output_key(&init_net, &rt, &fl);
- if (err)
- goto tx_error;
- }
+ rt = ip_route_output_ports(&init_net, NULL,
+ opt->dst_addr.sin_addr.s_addr,
+ opt->src_addr.sin_addr.s_addr,
+ 0, 0, IPPROTO_GRE,
+ RT_TOS(0), 0);
+ if (IS_ERR(rt))
+ goto tx_error;
+
tdev = rt->dst.dev;
max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;
@@ -277,7 +272,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
iph->tos = 0;
iph->daddr = rt->rt_dst;
iph->saddr = rt->rt_src;
- iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
+ iph->ttl = ip4_dst_hoplimit(&rt->dst);
iph->tot_len = htons(skb->len);
skb_dst_drop(skb);
@@ -468,21 +463,17 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.private = sk;
po->chan.ops = &pptp_chan_ops;
- {
- struct flowi fl = {
- .nl_u = {
- .ip4_u = {
- .daddr = opt->dst_addr.sin_addr.s_addr,
- .saddr = opt->src_addr.sin_addr.s_addr,
- .tos = RT_CONN_FLAGS(sk) } },
- .proto = IPPROTO_GRE };
- security_sk_classify_flow(sk, &fl);
- if (ip_route_output_key(&init_net, &rt, &fl)) {
- error = -EHOSTUNREACH;
- goto end;
- }
- sk_setup_caps(sk, &rt->dst);
+ rt = ip_route_output_ports(&init_net, sk,
+ opt->dst_addr.sin_addr.s_addr,
+ opt->src_addr.sin_addr.s_addr,
+ 0, 0,
+ IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
+ if (IS_ERR(rt)) {
+ error = -EHOSTUNREACH;
+ goto end;
}
+ sk_setup_caps(sk, &rt->dst);
+
po->chan.mtu = dst_mtu(&rt->dst);
if (!po->chan.mtu)
po->chan.mtu = PPP_MTU;
@@ -673,8 +664,7 @@ static int __init pptp_init_module(void)
int err = 0;
pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
- callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *),
- GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+ callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
if (!callid_sock) {
pr_err("PPTP: cann't allocate memory\n");
return -ENOMEM;
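
The pptp conversion also adopts the pointer-or-error convention: the new route lookup returns either a valid struct rtable or an ERR_PTR()-encoded errno, so failure is detected with IS_ERR() instead of a separate status code and output parameter. A generic sketch of consuming such a return; example_route_lookup() is hypothetical:

#include <linux/err.h>

	rt = example_route_lookup(net, daddr, saddr);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);	/* e.g. -EHOSTUNREACH */
		goto tx_error;
	}
	/* rt is a valid route from here on */
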
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 18c0297743f1..1b63c8aef121 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -1450,16 +1450,11 @@ static void pxa168_get_drvinfo(struct net_device *dev,
strncpy(info->bus_info, "N/A", 32);
}
-static u32 pxa168_get_link(struct net_device *dev)
-{
- return !!netif_carrier_ok(dev);
-}
-
static const struct ethtool_ops pxa168_ethtool_ops = {
.get_settings = pxa168_get_settings,
.set_settings = pxa168_set_settings,
.get_drvinfo = pxa168_get_drvinfo,
- .get_link = pxa168_get_link,
+ .get_link = ethtool_op_get_link,
};
static const struct net_device_ops pxa168_eth_netdev_ops = {
@@ -1607,7 +1602,7 @@ static int pxa168_eth_remove(struct platform_device *pdev)
mdiobus_unregister(pep->smi_bus);
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
- flush_scheduled_work();
+ cancel_work_sync(&pep->tx_timeout_task);
free_netdev(dev);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 7496ed2c34ab..348b4f1367c9 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -379,7 +379,7 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
{
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
- u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -398,7 +398,7 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
u32 previousBit;
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
- u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
/* Clock in a zero, then do the start bit */
ql_write_nvram_reg(qdev, spir,
@@ -467,7 +467,7 @@ static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
- u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -483,7 +483,7 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
u32 dataBit;
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
- u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
/* Read the data bits */
/* The first bit is a dummy. Clock right over it. */
@@ -2460,14 +2460,14 @@ map_error:
* The 3032 supports sglists by using the 3 addr/len pairs (ALP)
* in the IOCB plus a chain of outbound address lists (OAL) that
* each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
- * will used to point to an OAL when more ALP entries are required.
+ * will be used to point to an OAL when more ALP entries are required.
* The IOCB is always the top of the chain followed by one or more
* OALs (when necessary).
*/
static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
struct net_device *ndev)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
struct ql_tx_buf_cb *tx_cb;
@@ -3011,7 +3011,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
u32 value;
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
- u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
struct ql3xxx_host_memory_registers __iomem *hmem_regs =
(void __iomem *)port_regs;
u32 delay = 10;
@@ -3390,7 +3390,7 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
static void ql_display_dev_info(struct net_device *ndev)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
struct pci_dev *pdev = qdev->pdev;
netdev_info(ndev,
@@ -3573,7 +3573,7 @@ static int ql3xxx_open(struct net_device *ndev)
static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
struct ql3xxx_port_registers __iomem *port_regs =
qdev->mem_map_registers;
struct sockaddr *addr = p;
@@ -3608,7 +3608,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
static void ql3xxx_tx_timeout(struct net_device *ndev)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
netdev_err(ndev, "Resetting...\n");
/*
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 8ecc170c9b74..dc44564ef6f9 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#ifndef _QLCNIC_H_
@@ -51,8 +34,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 11
-#define QLCNIC_LINUX_VERSIONID "5.0.11"
+#define _QLCNIC_LINUX_SUBVERSION 15
+#define QLCNIC_LINUX_VERSIONID "5.0.15"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -306,6 +289,26 @@ struct uni_data_desc{
u32 reserved[5];
};
+/* Flash Defines and Structures */
+#define QLCNIC_FLT_LOCATION 0x3F1000
+#define QLCNIC_FW_IMAGE_REGION 0x74
+struct qlcnic_flt_header {
+ u16 version;
+ u16 len;
+ u16 checksum;
+ u16 reserved;
+};
+
+struct qlcnic_flt_entry {
+ u8 region;
+ u8 reserved0;
+ u8 attrib;
+ u8 reserved1;
+ u32 size;
+ u32 start_addr;
+ u32 end_add;
+};
+
/* Magic number to let user know flash is programmed */
#define QLCNIC_BDINFO_MAGIC 0x12345678
@@ -798,7 +801,6 @@ struct qlcnic_nic_intr_coalesce {
#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
-#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 19
#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
#define QLCNIC_C2C_OPCODE 22
@@ -865,7 +867,6 @@ struct qlcnic_nic_intr_coalesce {
#define LINKEVENT_LINKSPEED_MBPS 0
#define LINKEVENT_LINKSPEED_ENCODED 1
-#define AUTO_FW_RESET_ENABLED 0x01
/* firmware response header:
* 63:58 - message type
* 57:56 - owner
@@ -923,6 +924,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_MACSPOOF 0x200
#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
#define QLCNIC_PROMISC_DISABLED 0x800
+#define QLCNIC_NEED_FLR 0x1000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -942,6 +944,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
+#define QLCNIC_LED_TEST 3
#define QLCNIC_FILTER_AGE 80
#define QLCNIC_READD_AGE 20
@@ -1126,18 +1129,13 @@ struct qlcnic_eswitch {
/* Return codes for Error handling */
#define QL_STATUS_INVALID_PARAM -1
-#define MAX_BW 100
-#define MIN_BW 1
+#define MAX_BW 100 /* % of link speed */
#define MAX_VLAN_ID 4095
#define MIN_VLAN_ID 2
-#define MAX_TX_QUEUES 1
-#define MAX_RX_QUEUES 4
#define DEFAULT_MAC_LEARN 1
#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
-#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
-#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
-#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
+#define IS_VALID_BW(bw) (bw <= MAX_BW)
struct qlcnic_pci_func_cfg {
u16 func_type;
@@ -1314,21 +1312,15 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring);
-void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
-int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
/* Functions from qlcnic_main.c */
-int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter);
-void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter);
int qlcnic_reset_context(struct qlcnic_adapter *);
u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
-int qlcnic_check_loopback_buff(unsigned char *data);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
/* Management functions */
int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
@@ -1377,6 +1369,8 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
"3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
{0x1077, 0x8020, 0x103c, 0x3733,
"NC523SFP 10Gb 2-port Server Adapter"},
+ {0x1077, 0x8020, 0x103c, 0x3346,
+ "CN1000Q Dual Port Converged Network Adapter"},
{0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
};
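The flash layout table (FLT) definitions added above describe a short header followed by fixed-size region descriptors stored at QLCNIC_FLT_LOCATION; qlcnic_check_flash_fw_ver() later scans them for QLCNIC_FW_IMAGE_REGION to find where the firmware image, and hence its version word, lives in flash. A minimal sketch of that lookup, assuming the header and the entries have already been read out of flash into memory:

	static int find_flt_region(const struct qlcnic_flt_header *hdr,
				   const struct qlcnic_flt_entry *entries,
				   u8 region, struct qlcnic_flt_entry *out)
	{
		/* hdr->len covers the header plus all entries */
		u32 i, count = (hdr->len - sizeof(*hdr)) / sizeof(*entries);

		for (i = 0; i < count; i++) {
			if (entries[i].region == region) {
				*out = entries[i];
				return 0;
			}
		}
		return -EINVAL;	/* region id not present in the table */
	}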
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 1cdc05dade6b..27631f23b3fd 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#include "qlcnic.h"
@@ -480,6 +463,11 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
{
int err;
+ if (adapter->flags & QLCNIC_NEED_FLR) {
+ pci_reset_function(adapter->pdev);
+ adapter->flags &= ~QLCNIC_NEED_FLR;
+ }
+
err = qlcnic_fw_cmd_create_rx_ctx(adapter);
if (err)
return err;
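The FLR hunk above pairs with the probe change later in this patch, which sets QLCNIC_NEED_FLR before the firmware is started: the function level reset is issued exactly once, on the first firmware-context creation after probe, and the flag is cleared so later context rebuilds skip it. Condensed, the one-shot pattern is (pci_reset_function() is the PCI core helper used above):

	/* probe: ask for a one-time function level reset */
	adapter->flags |= QLCNIC_NEED_FLR;

	/* first qlcnic_fw_create_ctx() afterwards: honour it once */
	if (adapter->flags & QLCNIC_NEED_FLR) {
		pci_reset_function(adapter->pdev);
		adapter->flags &= ~QLCNIC_NEED_FLR;
	}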
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index ec21d24015c4..4c14510e2a87 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/types.h>
@@ -101,8 +84,7 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
"Register_Test_on_offline",
"Link_Test_on_offline",
- "Interrupt_Test_offline",
- "Loopback_Test_offline"
+ "Interrupt_Test_offline"
};
#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
@@ -643,104 +625,6 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
}
}
-#define QLC_ILB_PKT_SIZE 64
-#define QLC_NUM_ILB_PKT 16
-#define QLC_ILB_MAX_RCV_LOOP 10
-
-static void qlcnic_create_loopback_buff(unsigned char *data)
-{
- unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
- memset(data, 0x4e, QLC_ILB_PKT_SIZE);
- memset(data, 0xff, 12);
- memcpy(data + 12, random_data, sizeof(random_data));
-}
-
-int qlcnic_check_loopback_buff(unsigned char *data)
-{
- unsigned char buff[QLC_ILB_PKT_SIZE];
- qlcnic_create_loopback_buff(buff);
- return memcmp(data, buff, QLC_ILB_PKT_SIZE);
-}
-
-static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
- struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
- struct sk_buff *skb;
- int i, loop, cnt = 0;
-
- for (i = 0; i < QLC_NUM_ILB_PKT; i++) {
- skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
- qlcnic_create_loopback_buff(skb->data);
- skb_put(skb, QLC_ILB_PKT_SIZE);
-
- adapter->diag_cnt = 0;
- qlcnic_xmit_frame(skb, adapter->netdev);
-
- loop = 0;
- do {
- msleep(1);
- qlcnic_process_rcv_ring_diag(sds_ring);
- } while (loop++ < QLC_ILB_MAX_RCV_LOOP &&
- !adapter->diag_cnt);
-
- dev_kfree_skb_any(skb);
-
- if (!adapter->diag_cnt)
- dev_warn(&adapter->pdev->dev, "ILB Test: %dth packet"
- " not recevied\n", i + 1);
- else
- cnt++;
- }
- if (cnt != i) {
- dev_warn(&adapter->pdev->dev, "ILB Test failed\n");
- return -1;
- }
- return 0;
-}
-
-static int qlcnic_loopback_test(struct net_device *netdev)
-{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int max_sds_rings = adapter->max_sds_rings;
- int ret;
-
- if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
- dev_warn(&adapter->pdev->dev, "Loopback test not supported"
- "for non privilege function\n");
- return 0;
- }
-
- if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
- return -EIO;
-
- if (qlcnic_request_quiscent_mode(adapter)) {
- clear_bit(__QLCNIC_RESETTING, &adapter->state);
- return -EIO;
- }
-
- ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
- if (ret)
- goto clear_it;
-
- ret = qlcnic_set_ilb_mode(adapter);
- if (ret)
- goto done;
-
- ret = qlcnic_do_ilb_test(adapter);
-
- qlcnic_clear_ilb_mode(adapter);
-
-done:
- qlcnic_diag_free_res(netdev, max_sds_rings);
-
-clear_it:
- qlcnic_clear_quiscent_mode(adapter);
- adapter->max_sds_rings = max_sds_rings;
- clear_bit(__QLCNIC_RESETTING, &adapter->state);
- return ret;
-}
-
static int qlcnic_irq_test(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -788,14 +672,11 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
if (data[1])
eth_test->flags |= ETH_TEST_FL_FAILED;
- if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
data[2] = qlcnic_irq_test(dev);
if (data[2])
eth_test->flags |= ETH_TEST_FL_FAILED;
- data[3] = qlcnic_loopback_test(dev);
- if (data[3])
- eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
@@ -925,9 +806,10 @@ static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
dev->features &= ~NETIF_F_LRO;
qlcnic_send_lro_cleanup(adapter);
+ dev_info(&adapter->pdev->dev,
+ "disabling LRO as rx_csum is off\n");
}
adapter->rx_csum = !!data;
- dev_info(&adapter->pdev->dev, "disabling LRO as rx_csum is off\n");
return 0;
}
@@ -952,16 +834,27 @@ static int qlcnic_set_tso(struct net_device *dev, u32 data)
static int qlcnic_blink_led(struct net_device *dev, u32 val)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int max_sds_rings = adapter->max_sds_rings;
+ int dev_down = 0;
int ret;
- if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
- return -EIO;
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ dev_down = 1;
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST);
+ if (ret) {
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+ }
+ }
ret = adapter->nic_ops->config_led(adapter, 1, 0xf);
if (ret) {
dev_err(&adapter->pdev->dev,
"Failed to set LED blink state.\n");
- return ret;
+ goto done;
}
msleep_interruptible(val * 1000);
@@ -970,10 +863,16 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val)
if (ret) {
dev_err(&adapter->pdev->dev,
"Failed to reset LED blink state.\n");
- return ret;
+ goto done;
}
- return 0;
+done:
+ if (dev_down) {
+ qlcnic_diag_free_res(dev, max_sds_rings);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ }
+ return ret;
+
}
static void
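With the loopback test removed, qlcnic_blink_led() above gains its own way to run while the interface is down: it takes the RESETTING bit, brings up diagnostic resources in the new QLCNIC_LED_TEST mode, blinks, and undoes both in the done path. Stripped of error handling, the bracket is roughly (all names are the driver's own):

	int dev_down = !test_bit(__QLCNIC_DEV_UP, &adapter->state);

	if (dev_down) {
		if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
			return -EIO;	/* another reset already in flight */
		ret = qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST);
	}

	/* config_led(adapter, 1, 0xf), sleep, config_led(adapter, 0, 0xf) */

	if (dev_down) {
		qlcnic_diag_free_res(dev, max_sds_rings);
		clear_bit(__QLCNIC_RESETTING, &adapter->state);
	}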
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 4290b80cde1a..726ef555b6bc 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#ifndef __QLCNIC_HDR_H_
@@ -638,7 +621,7 @@ enum {
#define PCIX_INT_MASK (0x10104)
#define PCIX_OCM_WINDOW (0x10800)
-#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
+#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x4 * (func))
#define PCIX_TARGET_STATUS (0x10118)
#define PCIX_TARGET_STATUS_F1 (0x10160)
@@ -722,7 +705,7 @@ enum {
#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
-#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4)))
+#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4)))
#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
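The QLC_DEV_CHECK_ACTIVE change above is more than cosmetic: the old macro used `&=`, so merely testing whether a function was active rewrote VAL and discarded every other function's bit. The new form is a pure test. A tiny illustration:

	u32 val = 0x00000011;	/* functions 0 and 1 marked active */

	/* old: ((VAL) &= (1 << (FN * 4))) -- after checking function 0,
	 * val collapses to 0x00000001 and function 1's bit is lost    */

	/* new: ((VAL) & (1 << (FN * 4)))  -- val is left untouched    */
	if (QLC_DEV_CHECK_ACTIVE(val, 0))
		pr_debug("function 0 active, val still %08x\n", val);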
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index 7a47a2a7ee27..616940f0a8d0 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#include "qlcnic.h"
@@ -398,7 +381,7 @@ qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
-static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
+static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
struct list_head *head;
struct qlcnic_mac_list_s *cur;
@@ -432,7 +415,9 @@ void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct netdev_hw_addr *ha;
- u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
u32 mode = VPORT_MISS_MODE_DROP;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
@@ -638,10 +623,11 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
u64 word;
int i, rv;
- const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
- 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
- 0x255b0ec26d5a56daULL };
-
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
memset(&req, 0, sizeof(struct qlcnic_nic_req));
req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
@@ -1234,56 +1220,3 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
return rv;
}
-
-static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u32 flag)
-{
- struct qlcnic_nic_req req;
- int rv;
- u64 word;
-
- memset(&req, 0, sizeof(struct qlcnic_nic_req));
- req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
-
- word = QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
- ((u64)adapter->portnum << 16);
- req.req_hdr = cpu_to_le64(word);
- req.words[0] = cpu_to_le64(flag);
-
- rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
- if (rv)
- dev_err(&adapter->pdev->dev,
- "%sting loopback mode failed.\n",
- flag ? "Set" : "Reset");
- return rv;
-}
-
-int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter)
-{
- if (qlcnic_set_fw_loopback(adapter, 1))
- return -EIO;
-
- if (qlcnic_nic_set_promisc(adapter,
- VPORT_MISS_MODE_ACCEPT_ALL)) {
- qlcnic_set_fw_loopback(adapter, 0);
- return -EIO;
- }
-
- msleep(1000);
- return 0;
-}
-
-void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
-{
- int mode = VPORT_MISS_MODE_DROP;
- struct net_device *netdev = adapter->netdev;
-
- qlcnic_set_fw_loopback(adapter, 0);
-
- if (netdev->flags & IFF_PROMISC)
- mode = VPORT_MISS_MODE_ACCEPT_ALL;
- else if (netdev->flags & IFF_ALLMULTI)
- mode = VPORT_MISS_MODE_ACCEPT_MULTI;
-
- qlcnic_nic_set_promisc(adapter, mode);
- msleep(1000);
-}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 0d180c6e41fe..a7f1d5b7e811 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/netdevice.h>
@@ -236,12 +219,11 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, 0);
- cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
if (cmd_buf_arr == NULL) {
dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
goto err_out;
}
- memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
recv_ctx = &adapter->recv_ctx;
@@ -275,14 +257,12 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
rds_ring->dma_size + NET_IP_ALIGN;
break;
}
- rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
- vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
if (rds_ring->rx_buf_arr == NULL) {
dev_err(&netdev->dev, "Failed to allocate "
"rx buffer ring %d\n", ring);
goto err_out;
}
- memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
INIT_LIST_HEAD(&rds_ring->free_list);
/*
* Now go through all of them, set reference handles
@@ -647,12 +627,73 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
return 0;
}
+static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
+ struct qlcnic_flt_entry *region_entry)
+{
+ struct qlcnic_flt_header flt_hdr;
+ struct qlcnic_flt_entry *flt_entry;
+ int i = 0, ret;
+ u32 entry_size;
+
+ memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
+ ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
+ (u8 *)&flt_hdr,
+ sizeof(struct qlcnic_flt_header));
+ if (ret) {
+ dev_warn(&adapter->pdev->dev,
+ "error reading flash layout header\n");
+ return -EIO;
+ }
+
+ entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
+ flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
+ if (flt_entry == NULL) {
+ dev_warn(&adapter->pdev->dev, "error allocating memory\n");
+ return -EIO;
+ }
+
+ ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
+ sizeof(struct qlcnic_flt_header),
+ (u8 *)flt_entry, entry_size);
+ if (ret) {
+ dev_warn(&adapter->pdev->dev,
+ "error reading flash layout entries\n");
+ goto err_out;
+ }
+
+ while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
+ if (flt_entry[i].region == region)
+ break;
+ i++;
+ }
+ if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
+ dev_warn(&adapter->pdev->dev,
+ "region=%x not found in %d regions\n", region, i);
+ ret = -EIO;
+ goto err_out;
+ }
+ memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
+
+err_out:
+ vfree(flt_entry);
+ return ret;
+}
+
int
qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_flt_entry fw_entry;
u32 ver = -1, min_ver;
+ int ret;
- qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver);
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry);
+ if (!ret)
+		/* bytes 0-3: signature, bytes 4-7: firmware version */
+ qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
+ (int *)&ver);
+ else
+ qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
+ (int *)&ver);
ver = QLCNIC_DECODE_VERSION(ver);
min_ver = QLCNIC_MIN_FW_VERSION;
@@ -1693,99 +1734,6 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
spin_unlock(&rds_ring->lock);
}
-static void dump_skb(struct sk_buff *skb)
-{
- int i;
- unsigned char *data = skb->data;
-
- for (i = 0; i < skb->len; i++) {
- printk("%02x ", data[i]);
- if ((i & 0x0f) == 8)
- printk("\n");
- }
-}
-
-static struct qlcnic_rx_buffer *
-qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
- struct qlcnic_host_sds_ring *sds_ring,
- int ring, u64 sts_data0)
-{
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
- struct qlcnic_rx_buffer *buffer;
- struct sk_buff *skb;
- struct qlcnic_host_rds_ring *rds_ring;
- int index, length, cksum, pkt_offset;
-
- if (unlikely(ring >= adapter->max_rds_rings))
- return NULL;
-
- rds_ring = &recv_ctx->rds_rings[ring];
-
- index = qlcnic_get_sts_refhandle(sts_data0);
- if (unlikely(index >= rds_ring->num_desc))
- return NULL;
-
- buffer = &rds_ring->rx_buf_arr[index];
-
- length = qlcnic_get_sts_totallength(sts_data0);
- cksum = qlcnic_get_sts_status(sts_data0);
- pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
-
- skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
- if (!skb)
- return buffer;
-
- if (length > rds_ring->skb_size)
- skb_put(skb, rds_ring->skb_size);
- else
- skb_put(skb, length);
-
- if (pkt_offset)
- skb_pull(skb, pkt_offset);
-
- if (!qlcnic_check_loopback_buff(skb->data))
- adapter->diag_cnt++;
- else
- dump_skb(skb);
-
- dev_kfree_skb_any(skb);
- adapter->stats.rx_pkts++;
- adapter->stats.rxbytes += length;
-
- return buffer;
-}
-
-void
-qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
-{
- struct qlcnic_adapter *adapter = sds_ring->adapter;
- struct status_desc *desc;
- struct qlcnic_rx_buffer *rxbuf;
- u64 sts_data0;
-
- int opcode, ring, desc_cnt;
- u32 consumer = sds_ring->consumer;
-
- desc = &sds_ring->desc_head[consumer];
- sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
-
- if (!(sts_data0 & STATUS_OWNER_HOST))
- return;
-
- desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
- opcode = qlcnic_get_sts_opcode(sts_data0);
-
- ring = qlcnic_get_sts_type(sts_data0);
- rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
- ring, sts_data0);
-
- desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
- consumer = get_next_index(consumer, sds_ring->num_desc);
-
- sds_ring->consumer = consumer;
- writel(consumer, sds_ring->crb_sts_consumer);
-}
-
void
qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
u8 alt_mac, u8 *mac)
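The allocation changes in qlcnic_alloc_sw_resources() above are behavior-preserving: vzalloc(size) returns size bytes of vmalloc'ed, zero-filled memory, so the separate memset() (and the cast on the vmalloc() return value) simply disappears. The shape, in general:

	/* before */
	buf = vmalloc(len);
	if (!buf)
		goto err_out;
	memset(buf, 0, len);

	/* after: same result, one call */
	buf = vzalloc(len);
	if (!buf)
		goto err_out;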
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index a3dcd04be22f..cd88c7e1bfa9 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1,25 +1,8 @@
/*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2010 QLogic Corporation
*
+ * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/slab.h>
@@ -48,27 +31,27 @@ static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
-module_param(qlcnic_mac_learn, int, 0644);
+module_param(qlcnic_mac_learn, int, 0444);
MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
static int use_msi = 1;
-module_param(use_msi, int, 0644);
+module_param(use_msi, int, 0444);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
static int use_msi_x = 1;
-module_param(use_msi_x, int, 0644);
+module_param(use_msi_x, int, 0444);
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
-static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+static int auto_fw_reset = 1;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
static int load_fw_file;
-module_param(load_fw_file, int, 0644);
+module_param(load_fw_file, int, 0444);
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
static int qlcnic_config_npars;
-module_param(qlcnic_config_npars, int, 0644);
+module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
static int __devinit qlcnic_probe(struct pci_dev *pdev,
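The module-parameter hunk above drops write permission (0644 becomes 0444) on parameters that are apparently only consulted while the driver attaches, such as use_msi, use_msi_x, load_fw_file and qlcnic_config_npars; leaving them writable in sysfs would suggest a runtime effect the driver does not implement. auto_fw_reset stays at 0644, presumably because qlcnic_check_health() re-reads it on every pass. The resulting declarations look like:

	static int use_msi = 1;
	module_param(use_msi, int, 0444);	/* consulted at probe time only */

	static int auto_fw_reset = 1;
	module_param(auto_fw_reset, int, 0644);	/* re-read by the health check */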
@@ -1546,6 +1529,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_out_iounmap;
+ adapter->flags |= QLCNIC_NEED_FLR;
+
err = adapter->nic_ops->start_firmware(adapter);
if (err) {
dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
@@ -2854,61 +2839,6 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
qlcnic_api_unlock(adapter);
}
-/* Caller should held RESETTING bit.
- * This should be call in sync with qlcnic_request_quiscent_mode.
- */
-void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter)
-{
- qlcnic_clr_drv_state(adapter);
- qlcnic_api_lock(adapter);
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
- qlcnic_api_unlock(adapter);
-}
-
-/* Caller should held RESETTING bit.
- */
-int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter)
-{
- u8 timeo = adapter->dev_init_timeo / 2;
- u32 state;
-
- if (qlcnic_api_lock(adapter))
- return -EIO;
-
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state != QLCNIC_DEV_READY)
- return -EIO;
-
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_QUISCENT);
- qlcnic_api_unlock(adapter);
- QLCDB(adapter, DRV, "NEED QUISCENT state set\n");
- qlcnic_idc_debug_info(adapter, 0);
-
- qlcnic_set_drv_state(adapter, QLCNIC_DEV_NEED_QUISCENT);
-
- do {
- msleep(2000);
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_QUISCENT)
- return 0;
- if (!qlcnic_check_drv_state(adapter)) {
- if (qlcnic_api_lock(adapter))
- return -EIO;
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
- QLCNIC_DEV_QUISCENT);
- qlcnic_api_unlock(adapter);
- QLCDB(adapter, DRV, "QUISCENT mode set\n");
- return 0;
- }
- } while (--timeo);
-
- dev_err(&adapter->pdev->dev, "Failed to quiesce device, DRV_STATE=%08x"
- " DRV_ACTIVE=%08x\n", QLCRD32(adapter, QLCNIC_CRB_DRV_STATE),
- QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE));
- qlcnic_clear_quiscent_mode(adapter);
- return -EIO;
-}
-
/*Transit to RESET state from READY state only */
static void
qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
@@ -3029,8 +2959,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
if (adapter->need_fw_reset)
goto detach;
- if (adapter->reset_context &&
- auto_fw_reset == AUTO_FW_RESET_ENABLED) {
+ if (adapter->reset_context && auto_fw_reset) {
qlcnic_reset_hw_context(adapter);
adapter->netdev->trans_start = jiffies;
}
@@ -3043,7 +2972,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
qlcnic_dev_request_reset(adapter);
- if ((auto_fw_reset == AUTO_FW_RESET_ENABLED))
+ if (auto_fw_reset)
clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
dev_info(&netdev->dev, "firmware hang detected\n");
@@ -3052,7 +2981,7 @@ detach:
adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
QLCNIC_DEV_NEED_RESET;
- if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+ if (auto_fw_reset &&
!test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
@@ -3587,9 +3516,12 @@ validate_esw_config(struct qlcnic_adapter *adapter,
case QLCNIC_PORT_DEFAULTS:
if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
QLCNIC_NON_PRIV_FUNC) {
- esw_cfg[i].mac_anti_spoof = 0;
- esw_cfg[i].mac_override = 1;
- esw_cfg[i].promisc_mode = 1;
+ if (esw_cfg[i].mac_anti_spoof != 0)
+ return QL_STATUS_INVALID_PARAM;
+ if (esw_cfg[i].mac_override != 1)
+ return QL_STATUS_INVALID_PARAM;
+ if (esw_cfg[i].promisc_mode != 1)
+ return QL_STATUS_INVALID_PARAM;
}
break;
case QLCNIC_ADD_VLAN:
@@ -3721,10 +3653,8 @@ validate_npar_config(struct qlcnic_adapter *adapter,
if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
return QL_STATUS_INVALID_PARAM;
- if (!IS_VALID_BW(np_cfg[i].min_bw)
- || !IS_VALID_BW(np_cfg[i].max_bw)
- || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
- || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
+ if (!IS_VALID_BW(np_cfg[i].min_bw) ||
+ !IS_VALID_BW(np_cfg[i].max_bw))
return QL_STATUS_INVALID_PARAM;
}
return 0;
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 22821398fc63..4757c59a07a2 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.25.00.00-01"
+#define DRV_VERSION "v1.00.00.27.00.00-01"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -2083,6 +2083,7 @@ struct ql_adapter {
u32 mailbox_in;
u32 mailbox_out;
struct mbox_params idc_mbc;
+ struct mutex mpi_mutex;
int tx_ring_size;
int rx_ring_size;
@@ -2221,6 +2222,7 @@ int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
int ql_unpause_mpi_risc(struct ql_adapter *qdev);
int ql_pause_mpi_risc(struct ql_adapter *qdev);
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
u32 ram_addr, int word_count);
int ql_core_dump(struct ql_adapter *qdev,
@@ -2236,6 +2238,7 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
+void ql_get_dump(struct ql_adapter *qdev, void *buff);
void ql_gen_reg_dump(struct ql_adapter *qdev,
struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 4747492935ef..fca804f36d61 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1317,9 +1317,28 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
if (status)
return;
+}
+
+void ql_get_dump(struct ql_adapter *qdev, void *buff)
+{
+ /*
+	 * If force dump (QL_FRC_COREDUMP) is set, the core dump has
+	 * already been captured in the driver's internal buffer, so this
+	 * only starts the spool of that dump to the log file and copies a
+	 * snapshot of the general registers into the user's buffer.
+	 * Otherwise a complete core dump is taken directly into the
+	 * user's buffer.
+ */
- if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
+ if (!ql_core_dump(qdev, buff))
+ ql_soft_reset_mpi_risc(qdev);
+ else
+ netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
+ } else {
+ ql_gen_reg_dump(qdev, buff);
ql_get_core_dump(qdev);
+ }
}
/* Coredump to messages log file using separate worker thread */
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 4892d64f4e05..8149cc9de4ca 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -375,7 +375,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
- drvinfo->regdump_len = 0;
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
+ else
+ drvinfo->regdump_len = sizeof(struct ql_reg_dump);
drvinfo->eedump_len = 0;
}
@@ -547,7 +550,12 @@ static void ql_self_test(struct net_device *ndev,
static int ql_get_regs_len(struct net_device *ndev)
{
- return sizeof(struct ql_reg_dump);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ return sizeof(struct ql_mpi_coredump);
+ else
+ return sizeof(struct ql_reg_dump);
}
static void ql_get_regs(struct net_device *ndev,
@@ -555,7 +563,12 @@ static void ql_get_regs(struct net_device *ndev,
{
struct ql_adapter *qdev = netdev_priv(ndev);
- ql_gen_reg_dump(qdev, p);
+ ql_get_dump(qdev, p);
+ qdev->core_is_dumped = 0;
+ if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ regs->len = sizeof(struct ql_mpi_coredump);
+ else
+ regs->len = sizeof(struct ql_reg_dump);
}
static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
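The qlge ethtool changes above make three callbacks agree on the size of the register dump: get_drvinfo()'s regdump_len, get_regs_len(), and the regs->len reported back from get_regs() all switch between the full MPI coredump and the smaller register dump depending on QL_FRC_COREDUMP. They effectively open-code the same decision, sketched here with a hypothetical helper name:

	static int ql_dump_size(struct ql_adapter *qdev)
	{
		/* Forced-coredump mode spools the core dump to the log, so
		 * only the general registers go to the user's buffer. */
		return test_bit(QL_FRC_COREDUMP, &qdev->flags) ?
			sizeof(struct ql_reg_dump) :
			sizeof(struct ql_mpi_coredump);
	}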
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 528eaef5308f..49bfa5813068 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3548,12 +3548,13 @@ err_irq:
static int ql_start_rss(struct ql_adapter *qdev)
{
- u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
- 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
- 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
- 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
- 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
- 0xbe, 0xac, 0x01, 0xfa};
+ static const u8 init_hash_seed[] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+ };
struct ricb *ricb = &qdev->ricb;
int status = 0;
int i;
@@ -3844,7 +3845,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
static void ql_display_dev_info(struct net_device *ndev)
{
- struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
netif_info(qdev, probe, qdev->ndev,
"Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
@@ -4264,7 +4265,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
static void qlge_set_multicast_list(struct net_device *ndev)
{
- struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
struct netdev_hw_addr *ha;
int i, status;
@@ -4354,7 +4355,7 @@ exit:
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
- struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
struct sockaddr *addr = p;
int status;
@@ -4377,7 +4378,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
static void qlge_tx_timeout(struct net_device *ndev)
{
- struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
ql_queue_asic_error(qdev);
}
@@ -4629,6 +4630,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
init_completion(&qdev->ide_completion);
+ mutex_init(&qdev->mpi_mutex);
if (!cards_found) {
dev_info(&pdev->dev, "%s\n", DRV_STRING);
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 0e7c7c7ee164..ff2bf8a4e247 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@ exit:
return status;
}
-static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
{
int status;
status = ql_write_mpi_reg(qdev, 0x00001010, 1);
@@ -534,6 +534,7 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
int status;
unsigned long count;
+ mutex_lock(&qdev->mpi_mutex);
/* Begin polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -603,6 +604,7 @@ done:
end:
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+ mutex_unlock(&qdev->mpi_mutex);
return status;
}
@@ -1099,9 +1101,7 @@ int ql_wait_fifo_empty(struct ql_adapter *qdev)
static int ql_set_port_cfg(struct ql_adapter *qdev)
{
int status;
- rtnl_lock();
status = ql_mb_set_port_cfg(qdev);
- rtnl_unlock();
if (status)
return status;
status = ql_idc_wait(qdev);
@@ -1122,9 +1122,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
int status;
- rtnl_lock();
status = ql_mb_get_port_cfg(qdev);
- rtnl_unlock();
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Bug: Failed to get port config data.\n");
@@ -1167,7 +1165,6 @@ void ql_mpi_idc_work(struct work_struct *work)
u32 aen;
int timeout;
- rtnl_lock();
aen = mbcp->mbox_out[1] >> 16;
timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
@@ -1231,7 +1228,6 @@ void ql_mpi_idc_work(struct work_struct *work)
}
break;
}
- rtnl_unlock();
}
void ql_mpi_work(struct work_struct *work)
@@ -1242,7 +1238,7 @@ void ql_mpi_work(struct work_struct *work)
struct mbox_params *mbcp = &mbc;
int err = 0;
- rtnl_lock();
+ mutex_lock(&qdev->mpi_mutex);
/* Begin polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -1259,7 +1255,7 @@ void ql_mpi_work(struct work_struct *work)
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
- rtnl_unlock();
+ mutex_unlock(&qdev->mpi_mutex);
ql_enable_completion_interrupt(qdev, 0);
}
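The qlge_mpi.c hunks replace rtnl_lock() with a driver-private mutex: mailbox commands and ql_mpi_work() both poll the MPI processor with its interrupt masked, so they must be serialized against each other, but holding the global RTNL lock for that was unnecessary (and the port-config and IDC work paths drop it entirely). Minimally, the new locking is:

	mutex_init(&qdev->mpi_mutex);		/* once, in ql_init_device() */

	mutex_lock(&qdev->mpi_mutex);
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));	/* polled mode */
	/* ... issue the mailbox command and wait for its completion ... */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
	mutex_unlock(&qdev->mpi_mutex);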
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 0b014c894686..e3ebd90ae651 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
-#define DRV_VERSION "0.26"
-#define DRV_RELDATE "30May2010"
+#define DRV_VERSION "0.27"
+#define DRV_RELDATE "23Feb2011"
/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
@@ -69,6 +69,8 @@
/* MAC registers */
#define MCR0 0x00 /* Control register 0 */
+#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
+#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
#define MCR1 0x04 /* Control register 1 */
#define MAC_RST 0x0001 /* Reset the MAC */
#define MBCR 0x08 /* Bus control */
@@ -851,77 +853,92 @@ static void r6040_multicast_list(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- u16 *adrp;
- u16 reg;
unsigned long flags;
struct netdev_hw_addr *ha;
int i;
+ u16 *adrp;
+ u16 hash_table[4] = { 0 };
+
+ spin_lock_irqsave(&lp->lock, flags);
- /* MAC Address */
+ /* Keep our MAC Address */
adrp = (u16 *)dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
- /* Promiscous Mode */
- spin_lock_irqsave(&lp->lock, flags);
-
/* Clear AMCP & PROM bits */
- reg = ioread16(ioaddr) & ~0x0120;
- if (dev->flags & IFF_PROMISC) {
- reg |= 0x0020;
- lp->mcr0 |= 0x0020;
- }
- /* Too many multicast addresses
- * accept all traffic */
- else if ((netdev_mc_count(dev) > MCAST_MAX) ||
- (dev->flags & IFF_ALLMULTI))
- reg |= 0x0020;
+ lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
- iowrite16(reg, ioaddr);
- spin_unlock_irqrestore(&lp->lock, flags);
+ /* Promiscuous mode */
+ if (dev->flags & IFF_PROMISC)
+ lp->mcr0 |= MCR0_PROMISC;
- /* Build the hash table */
- if (netdev_mc_count(dev) > MCAST_MAX) {
- u16 hash_table[4];
- u32 crc;
+ /* Enable multicast hash table function to
+ * receive all multicast packets. */
+ else if (dev->flags & IFF_ALLMULTI) {
+ lp->mcr0 |= MCR0_HASH_EN;
- for (i = 0; i < 4; i++)
- hash_table[i] = 0;
+ for (i = 0; i < MCAST_MAX ; i++) {
+ iowrite16(0, ioaddr + MID_1L + 8 * i);
+ iowrite16(0, ioaddr + MID_1M + 8 * i);
+ iowrite16(0, ioaddr + MID_1H + 8 * i);
+ }
+ for (i = 0; i < 4; i++)
+ hash_table[i] = 0xffff;
+ }
+ /* Use internal multicast address registers if the number of
+ * multicast addresses is not greater than MCAST_MAX. */
+ else if (netdev_mc_count(dev) <= MCAST_MAX) {
+ i = 0;
netdev_for_each_mc_addr(ha, dev) {
- char *addrs = ha->addr;
+ u16 *adrp = (u16 *) ha->addr;
+ iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
+ iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
+ iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
+ i++;
+ }
+ while (i < MCAST_MAX) {
+ iowrite16(0, ioaddr + MID_1L + 8 * i);
+ iowrite16(0, ioaddr + MID_1M + 8 * i);
+ iowrite16(0, ioaddr + MID_1H + 8 * i);
+ i++;
+ }
+ }
+ /* Otherwise, Enable multicast hash table function. */
+ else {
+ u32 crc;
+
+ lp->mcr0 |= MCR0_HASH_EN;
+
+ for (i = 0; i < MCAST_MAX ; i++) {
+ iowrite16(0, ioaddr + MID_1L + 8 * i);
+ iowrite16(0, ioaddr + MID_1M + 8 * i);
+ iowrite16(0, ioaddr + MID_1H + 8 * i);
+ }
- if (!(*addrs & 1))
- continue;
+ /* Build multicast hash table */
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *addrs = ha->addr;
- crc = ether_crc_le(6, addrs);
+ crc = ether_crc(ETH_ALEN, addrs);
crc >>= 26;
- hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
}
- /* Fill the MAC hash tables with their values */
+ }
+
+ iowrite16(lp->mcr0, ioaddr + MCR0);
+
+ /* Fill the MAC hash tables with their values */
+	if (lp->mcr0 & MCR0_HASH_EN) {
iowrite16(hash_table[0], ioaddr + MAR0);
iowrite16(hash_table[1], ioaddr + MAR1);
iowrite16(hash_table[2], ioaddr + MAR2);
iowrite16(hash_table[3], ioaddr + MAR3);
}
- /* Multicast Address 1~4 case */
- i = 0;
- netdev_for_each_mc_addr(ha, dev) {
- if (i >= MCAST_MAX)
- break;
- adrp = (u16 *) ha->addr;
- iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
- iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
- iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
- i++;
- }
- while (i < MCAST_MAX) {
- iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
- iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
- iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
- i++;
- }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
}
static void netdev_get_drvinfo(struct net_device *dev,
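The reworked r6040_multicast_list() above ends up with three cases: promiscuous, hash filtering (IFF_ALLMULTI or more addresses than MCAST_MAX), and exact matching through the MID_1x registers. The hash case maps each multicast MAC to one of 64 filter bits using the top six bits of the Ethernet CRC: bits 5:4 pick one of the four 16-bit MAR registers, bits 3:0 the bit inside it. As a small helper-style sketch (ether_crc() is the kernel's big-endian Ethernet CRC):

	#include <linux/crc32.h>	/* ether_crc() */
	#include <linux/if_ether.h>	/* ETH_ALEN */

	static void r6040_hash_set(u16 hash_table[4], const u8 *addr)
	{
		u32 crc = ether_crc(ETH_ALEN, addr) >> 26;	/* top 6 bits */

		hash_table[crc >> 4] |= 1 << (crc & 0xf);
	}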
@@ -1153,6 +1170,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
lp->mii_bus = mdiobus_alloc();
if (!lp->mii_bus) {
dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
+ err = -ENOMEM;
goto err_out_unmap;
}
@@ -1165,6 +1183,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (!lp->mii_bus->irq) {
dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
+ err = -ENOMEM;
goto err_out_mdio;
}
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 7d33ef4bcb4a..493b0de3848b 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -24,6 +24,8 @@
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
+#include <linux/firmware.h>
+#include <linux/pci-aspm.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -33,6 +35,10 @@
#define MODULENAME "r8169"
#define PFX MODULENAME ": "
+#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
+#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
+#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
+
#ifdef RTL8169_DEBUG
#define assert(expr) \
if (!(expr)) { \
@@ -63,7 +69,6 @@ static const int multicast_filter_limit = 32;
#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
-#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -118,7 +123,10 @@ enum mac_version {
RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP
RTL_GIGA_MAC_VER_25 = 0x19, // 8168D
RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
- RTL_GIGA_MAC_VER_27 = 0x1b // 8168DP
+ RTL_GIGA_MAC_VER_27 = 0x1b, // 8168DP
+ RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP
+ RTL_GIGA_MAC_VER_29 = 0x1d, // 8105E
+ RTL_GIGA_MAC_VER_30 = 0x1e, // 8105E
};
#define _R(NAME,MAC,MASK) \
@@ -155,7 +163,10 @@ static const struct {
_R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E
_R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
_R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
- _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880) // PCI-E
+ _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E
+ _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880), // PCI-E
+ _R("RTL8105e", RTL_GIGA_MAC_VER_29, 0xff7e1880), // PCI-E
+ _R("RTL8105e", RTL_GIGA_MAC_VER_30, 0xff7e1880) // PCI-E
};
#undef _R
@@ -227,7 +238,14 @@ enum rtl_registers {
IntrMitigate = 0xe2,
RxDescAddrLow = 0xe4,
RxDescAddrHigh = 0xe8,
- EarlyTxThres = 0xec,
+ EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
+
+#define NoEarlyTx 0x3f /* Max value : no early transmit. */
+
+ MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
+
+#define TxPacketMax (8064 >> 7)
+
FuncEvent = 0xf0,
FuncEventMask = 0xf4,
FuncPresetState = 0xf8,
@@ -248,16 +266,22 @@ enum rtl8168_8101_registers {
#define CSIAR_BYTE_ENABLE 0x0f
#define CSIAR_BYTE_ENABLE_SHIFT 12
#define CSIAR_ADDR_MASK 0x0fff
-
+ PMCH = 0x6f,
EPHYAR = 0x80,
#define EPHYAR_FLAG 0x80000000
#define EPHYAR_WRITE_CMD 0x80000000
#define EPHYAR_REG_MASK 0x1f
#define EPHYAR_REG_SHIFT 16
#define EPHYAR_DATA_MASK 0xffff
+ DLLPR = 0xd0,
+#define PM_SWITCH (1 << 6)
DBG_REG = 0xd1,
#define FIX_NAK_1 (1 << 4)
#define FIX_NAK_2 (1 << 3)
+ TWSI = 0xd2,
+ MCU = 0xd3,
+#define EN_NDP (1 << 3)
+#define EN_OOB_RESET (1 << 2)
EFUSEAR = 0xdc,
#define EFUSEAR_FLAG 0x80000000
#define EFUSEAR_WRITE_CMD 0x80000000
@@ -267,6 +291,33 @@ enum rtl8168_8101_registers {
#define EFUSEAR_DATA_MASK 0xff
};
+enum rtl8168_registers {
+ ERIDR = 0x70,
+ ERIAR = 0x74,
+#define ERIAR_FLAG 0x80000000
+#define ERIAR_WRITE_CMD 0x80000000
+#define ERIAR_READ_CMD 0x00000000
+#define ERIAR_ADDR_BYTE_ALIGN 4
+#define ERIAR_EXGMAC 0
+#define ERIAR_MSIX 1
+#define ERIAR_ASF 2
+#define ERIAR_TYPE_SHIFT 16
+#define ERIAR_BYTEEN 0x0f
+#define ERIAR_BYTEEN_SHIFT 12
+ EPHY_RXER_NUM = 0x7c,
+ OCPDR = 0xb0, /* OCP GPHY access */
+#define OCPDR_WRITE_CMD 0x80000000
+#define OCPDR_READ_CMD 0x00000000
+#define OCPDR_REG_MASK 0x7f
+#define OCPDR_GPHY_REG_SHIFT 16
+#define OCPDR_DATA_MASK 0xffff
+ OCPAR = 0xb4,
+#define OCPAR_FLAG 0x80000000
+#define OCPAR_GPHY_WRITE_CMD 0x8000f060
+#define OCPAR_GPHY_READ_CMD 0x0000f060
+ RDSAR1 = 0xd0 /* 8168c only. Undocumented on 8168dp */
+};
+
enum rtl_register_content {
/* InterruptStatusBits */
SYSErr = 0x8000,
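The new rtl8168_registers block defines indirect access ports: an address/command register (ERIAR, OCPAR) carrying a FLAG bit and a companion data register (ERIDR, OCPDR). Every accessor added further down follows the same shape: program the data and address, then poll the flag with a short bounded delay loop. The write side, roughly:

	RTL_W32(OCPDR, data);
	RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	for (i = 0; i < 20; i++) {
		udelay(100);
		if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
			break;		/* hardware has taken the write */
	}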
@@ -487,14 +538,22 @@ struct rtl8169_private {
u16 napi_event;
u16 intr_mask;
int phy_1000_ctrl_reg;
-#ifdef CONFIG_R8169_VLAN
- struct vlan_group *vlgrp;
-#endif
- int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
+
+ struct mdio_ops {
+ void (*write)(void __iomem *, int, int);
+ int (*read)(void __iomem *, int);
+ } mdio_ops;
+
+ struct pll_power_ops {
+ void (*down)(struct rtl8169_private *);
+ void (*up)(struct rtl8169_private *);
+ } pll_power_ops;
+
+ int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
int (*get_settings)(struct net_device *, struct ethtool_cmd *);
- void (*phy_reset_enable)(void __iomem *);
+ void (*phy_reset_enable)(struct rtl8169_private *tp);
void (*hw_start)(struct net_device *);
- unsigned int (*phy_reset_pending)(void __iomem *);
+ unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
unsigned int (*link_ok)(void __iomem *);
int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
int pcie_cap;
@@ -504,6 +563,8 @@ struct rtl8169_private {
struct mii_if_info mii;
struct rtl8169_counters counters;
u32 saved_wolopts;
+
+ const struct firmware *fw;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -514,6 +575,9 @@ module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_LICENSE("GPL");
MODULE_VERSION(RTL8169_VERSION);
+MODULE_FIRMWARE(FIRMWARE_8168D_1);
+MODULE_FIRMWARE(FIRMWARE_8168D_2);
+MODULE_FIRMWARE(FIRMWARE_8105E_1);
static int rtl8169_open(struct net_device *dev);
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
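The MODULE_FIRMWARE() lines above advertise the external PHY firmware blobs so that module tooling (modinfo, initramfs generators) can discover them; the code that actually loads them into the new tp->fw field sits in later hunks of the full patch, outside this excerpt. A hedged sketch of the usual request/release pairing (rtl_request_fw is an illustrative name, not the driver's):

	#include <linux/firmware.h>

	static void rtl_request_fw(struct rtl8169_private *tp, const char *name)
	{
		/* Best effort: keep running without the patch firmware. */
		if (request_firmware(&tp->fw, name, &tp->pci_dev->dev) < 0)
			tp->fw = NULL;
	}

	/* teardown: release_firmware() is NULL-safe */
	release_firmware(tp->fw);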
@@ -535,7 +599,83 @@ static int rtl8169_poll(struct napi_struct *napi, int budget);
static const unsigned int rtl8169_rx_config =
(RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
-static void mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
+ for (i = 0; i < 20; i++) {
+ udelay(100);
+ if (RTL_R32(OCPAR) & OCPAR_FLAG)
+ break;
+ }
+ return RTL_R32(OCPDR);
+}
+
+static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ RTL_W32(OCPDR, data);
+ RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
+ for (i = 0; i < 20; i++) {
+ udelay(100);
+ if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
+ break;
+ }
+}
+
+static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ RTL_W8(ERIDR, cmd);
+ RTL_W32(ERIAR, 0x800010e8);
+ msleep(2);
+ for (i = 0; i < 5; i++) {
+ udelay(100);
+ if (!(RTL_R32(ERIDR) & ERIAR_FLAG))
+ break;
+ }
+
+ ocp_write(tp, 0x1, 0x30, 0x00000001);
+}
+
+#define OOB_CMD_RESET 0x00
+#define OOB_CMD_DRIVER_START 0x05
+#define OOB_CMD_DRIVER_STOP 0x06
+
+static void rtl8168_driver_start(struct rtl8169_private *tp)
+{
+ int i;
+
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
+
+ for (i = 0; i < 10; i++) {
+ msleep(10);
+ if (ocp_read(tp, 0x0f, 0x0010) & 0x00000800)
+ break;
+ }
+}
+
+static void rtl8168_driver_stop(struct rtl8169_private *tp)
+{
+ int i;
+
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+
+ for (i = 0; i < 10; i++) {
+ msleep(10);
+ if ((ocp_read(tp, 0x0f, 0x0010) & 0x00000800) == 0)
+ break;
+ }
+}
+
+
+static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
{
int i;
@@ -557,7 +697,7 @@ static void mdio_write(void __iomem *ioaddr, int reg_addr, int value)
udelay(20);
}
-static int mdio_read(void __iomem *ioaddr, int reg_addr)
+static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
{
int i, value = -1;
@@ -583,34 +723,117 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
return value;
}
-static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value)
+static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
{
- mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value);
+ int i;
+
+ RTL_W32(OCPDR, data |
+ ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
+ RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
+ RTL_W32(EPHY_RXER_NUM, 0);
+
+ for (i = 0; i < 100; i++) {
+ mdelay(1);
+ if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
+ break;
+ }
}
-static void mdio_plus_minus(void __iomem *ioaddr, int reg_addr, int p, int m)
+static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+{
+ r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
+ (value & OCPDR_DATA_MASK));
+}
+
+static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
+{
+ int i;
+
+ r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);
+
+ mdelay(1);
+ RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
+ RTL_W32(EPHY_RXER_NUM, 0);
+
+ for (i = 0; i < 100; i++) {
+ mdelay(1);
+ if (RTL_R32(OCPAR) & OCPAR_FLAG)
+ break;
+ }
+
+ return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
+}
+
+#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
+
+static void r8168dp_2_mdio_start(void __iomem *ioaddr)
+{
+ RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
+}
+
+static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
+{
+ RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
+}
+
+static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+{
+ r8168dp_2_mdio_start(ioaddr);
+
+ r8169_mdio_write(ioaddr, reg_addr, value);
+
+ r8168dp_2_mdio_stop(ioaddr);
+}
+
+static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
+{
+ int value;
+
+ r8168dp_2_mdio_start(ioaddr);
+
+ value = r8169_mdio_read(ioaddr, reg_addr);
+
+ r8168dp_2_mdio_stop(ioaddr);
+
+ return value;
+}
+
+static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
+{
+ tp->mdio_ops.write(tp->mmio_addr, location, val);
+}
+
+static int rtl_readphy(struct rtl8169_private *tp, int location)
+{
+ return tp->mdio_ops.read(tp->mmio_addr, location);
+}
+
+static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
+{
+ rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
+}
+
+static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
int val;
- val = mdio_read(ioaddr, reg_addr);
- mdio_write(ioaddr, reg_addr, (val | p) & ~m);
+ val = rtl_readphy(tp, reg_addr);
+ rtl_writephy(tp, reg_addr, (val | p) & ~m);
}
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
int val)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- mdio_write(ioaddr, location, val);
+ rtl_writephy(tp, location, val);
}
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- return mdio_read(ioaddr, location);
+ return rtl_readphy(tp, location);
}
static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
@@ -711,14 +934,16 @@ static void rtl8169_asic_down(void __iomem *ioaddr)
RTL_R16(CPlusCmd);
}
-static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr)
+static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
+ void __iomem *ioaddr = tp->mmio_addr;
+
return RTL_R32(TBICSR) & TBIReset;
}
-static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
+static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
{
- return mdio_read(ioaddr, MII_BMCR) & BMCR_RESET;
+ return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
}
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
@@ -731,39 +956,52 @@ static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
return RTL_R8(PHYstatus) & LinkStatus;
}
-static void rtl8169_tbi_reset_enable(void __iomem *ioaddr)
+static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
{
+ void __iomem *ioaddr = tp->mmio_addr;
+
RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
-static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
+static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
{
unsigned int val;
- val = mdio_read(ioaddr, MII_BMCR) | BMCR_RESET;
- mdio_write(ioaddr, MII_BMCR, val & 0xffff);
+ val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
+ rtl_writephy(tp, MII_BMCR, val & 0xffff);
}
-static void rtl8169_check_link_status(struct net_device *dev,
+static void __rtl8169_check_link_status(struct net_device *dev,
struct rtl8169_private *tp,
- void __iomem *ioaddr)
+ void __iomem *ioaddr,
+ bool pm)
{
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
/* This is to cancel a scheduled suspend if there's one. */
- pm_request_resume(&tp->pci_dev->dev);
+ if (pm)
+ pm_request_resume(&tp->pci_dev->dev);
netif_carrier_on(dev);
- netif_info(tp, ifup, dev, "link up\n");
+ if (net_ratelimit())
+ netif_info(tp, ifup, dev, "link up\n");
} else {
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
- pm_schedule_suspend(&tp->pci_dev->dev, 100);
+ if (pm)
+ pm_schedule_suspend(&tp->pci_dev->dev, 100);
}
spin_unlock_irqrestore(&tp->lock, flags);
}
+static void rtl8169_check_link_status(struct net_device *dev,
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr)
+{
+ __rtl8169_check_link_status(dev, tp, ioaddr, false);
+}
+
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -869,7 +1107,7 @@ static int rtl8169_get_regs_len(struct net_device *dev)
}
static int rtl8169_set_speed_tbi(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex)
+ u8 autoneg, u16 speed, u8 duplex, u32 ignored)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
@@ -892,21 +1130,33 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
}
static int rtl8169_set_speed_xmii(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex)
+ u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
int giga_ctrl, bmcr;
+ int rc = -EINVAL;
+
+ rtl_writephy(tp, 0x1f, 0x0000);
if (autoneg == AUTONEG_ENABLE) {
int auto_nego;
- auto_nego = mdio_read(ioaddr, MII_ADVERTISE);
- auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
- ADVERTISE_100HALF | ADVERTISE_100FULL);
+ auto_nego = rtl_readphy(tp, MII_ADVERTISE);
+ auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+
+ if (adv & ADVERTISED_10baseT_Half)
+ auto_nego |= ADVERTISE_10HALF;
+ if (adv & ADVERTISED_10baseT_Full)
+ auto_nego |= ADVERTISE_10FULL;
+ if (adv & ADVERTISED_100baseT_Half)
+ auto_nego |= ADVERTISE_100HALF;
+ if (adv & ADVERTISED_100baseT_Full)
+ auto_nego |= ADVERTISE_100FULL;
+
auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
- giga_ctrl = mdio_read(ioaddr, MII_CTRL1000);
+ giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
/* The 8100e/8101e/8102e do Fast Ethernet only. */
@@ -917,29 +1167,24 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
(tp->mac_version != RTL_GIGA_MAC_VER_13) &&
(tp->mac_version != RTL_GIGA_MAC_VER_14) &&
(tp->mac_version != RTL_GIGA_MAC_VER_15) &&
- (tp->mac_version != RTL_GIGA_MAC_VER_16)) {
- giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
- } else {
+ (tp->mac_version != RTL_GIGA_MAC_VER_16) &&
+ (tp->mac_version != RTL_GIGA_MAC_VER_29) &&
+ (tp->mac_version != RTL_GIGA_MAC_VER_30)) {
+ if (adv & ADVERTISED_1000baseT_Half)
+ giga_ctrl |= ADVERTISE_1000HALF;
+ if (adv & ADVERTISED_1000baseT_Full)
+ giga_ctrl |= ADVERTISE_1000FULL;
+ } else if (adv & (ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full)) {
netif_info(tp, link, dev,
"PHY does not support 1000Mbps\n");
+ goto out;
}
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
- if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
- (tp->mac_version >= RTL_GIGA_MAC_VER_17)) {
- /*
- * Wake up the PHY.
- * Vendor specific (0x1f) and reserved (0x0e) MII
- * registers.
- */
- mdio_write(ioaddr, 0x1f, 0x0000);
- mdio_write(ioaddr, 0x0e, 0x0000);
- }
-
- mdio_write(ioaddr, MII_ADVERTISE, auto_nego);
- mdio_write(ioaddr, MII_CTRL1000, giga_ctrl);
+ rtl_writephy(tp, MII_ADVERTISE, auto_nego);
+ rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
} else {
giga_ctrl = 0;
@@ -948,39 +1193,39 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
else if (speed == SPEED_100)
bmcr = BMCR_SPEED100;
else
- return -EINVAL;
+ goto out;
if (duplex == DUPLEX_FULL)
bmcr |= BMCR_FULLDPLX;
-
- mdio_write(ioaddr, 0x1f, 0x0000);
}
tp->phy_1000_ctrl_reg = giga_ctrl;
- mdio_write(ioaddr, MII_BMCR, bmcr);
+ rtl_writephy(tp, MII_BMCR, bmcr);
if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
(tp->mac_version == RTL_GIGA_MAC_VER_03)) {
if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
- mdio_write(ioaddr, 0x17, 0x2138);
- mdio_write(ioaddr, 0x0e, 0x0260);
+ rtl_writephy(tp, 0x17, 0x2138);
+ rtl_writephy(tp, 0x0e, 0x0260);
} else {
- mdio_write(ioaddr, 0x17, 0x2108);
- mdio_write(ioaddr, 0x0e, 0x0000);
+ rtl_writephy(tp, 0x17, 0x2108);
+ rtl_writephy(tp, 0x0e, 0x0000);
}
}
- return 0;
+ rc = 0;
+out:
+ return rc;
}
static int rtl8169_set_speed(struct net_device *dev,
- u8 autoneg, u16 speed, u8 duplex)
+ u8 autoneg, u16 speed, u8 duplex, u32 advertising)
{
struct rtl8169_private *tp = netdev_priv(dev);
int ret;
- ret = tp->set_speed(dev, autoneg, speed, duplex);
+ ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
@@ -995,7 +1240,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
int ret;
spin_lock_irqsave(&tp->lock, flags);
- ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex);
+ ret = rtl8169_set_speed(dev,
+ cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
spin_unlock_irqrestore(&tp->lock, flags);
return ret;
@@ -1029,8 +1275,6 @@ static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
return 0;
}
-#ifdef CONFIG_R8169_VLAN
-
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
struct sk_buff *skb)
{
@@ -1038,64 +1282,37 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}
-static void rtl8169_vlan_rx_register(struct net_device *dev,
- struct vlan_group *grp)
+#define NETIF_F_HW_VLAN_TX_RX (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)
+
+static void rtl8169_vlan_mode(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
- tp->vlgrp = grp;
- /*
- * Do not disable RxVlan on 8110SCd.
- */
- if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
+ if (dev->features & NETIF_F_HW_VLAN_RX)
tp->cp_cmd |= RxVlan;
else
tp->cp_cmd &= ~RxVlan;
RTL_W16(CPlusCmd, tp->cp_cmd);
+ /* PCI commit */
RTL_R16(CPlusCmd);
spin_unlock_irqrestore(&tp->lock, flags);
+
+ dev->vlan_features = dev->features & ~NETIF_F_HW_VLAN_TX_RX;
}
-static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
- struct sk_buff *skb, int polling)
+static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
u32 opts2 = le32_to_cpu(desc->opts2);
- struct vlan_group *vlgrp = tp->vlgrp;
- int ret;
- if (vlgrp && (opts2 & RxVlanTag)) {
- u16 vtag = swab16(opts2 & 0xffff);
+ if (opts2 & RxVlanTag)
+ __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
- if (likely(polling))
- vlan_gro_receive(&tp->napi, vlgrp, vtag, skb);
- else
- __vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
- ret = 0;
- } else
- ret = -1;
desc->opts2 = 0;
- return ret;
}
-#else /* !CONFIG_R8169_VLAN */
-
-static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
- struct sk_buff *skb)
-{
- return 0;
-}
-
-static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
- struct sk_buff *skb, int polling)
-{
- return -1;
-}
-
-#endif
-
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -1266,6 +1483,28 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
+static int rtl8169_set_flags(struct net_device *dev, u32 data)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ unsigned long old_feat = dev->features;
+ int rc;
+
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_05) &&
+ !(data & ETH_FLAG_RXVLAN)) {
+ netif_info(tp, drv, dev, "8110SCd requires hardware Rx VLAN\n");
+ return -EINVAL;
+ }
+
+ rc = ethtool_op_set_flags(dev, data, ETH_FLAG_TXVLAN | ETH_FLAG_RXVLAN);
+ if (rc)
+ return rc;
+
+ if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX)
+ rtl8169_vlan_mode(dev);
+
+ return 0;
+}
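The new .set_flags handler refuses to clear hardware Rx VLAN stripping on the 8110SCd (which needs RxVlan to keep interrupts flowing, per the probe-time workaround further down) and otherwise reprograms CPlusCmd through rtl8169_vlan_mode() only when the RX bit actually changes. As a hedged userspace sketch of the ETHTOOL_SFLAGS path it services (the interface name, error handling and control socket are illustrative assumptions):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* Toggle ETH_FLAG_RXVLAN on "eth0" via SIOCETHTOOL.
	 * fd: any AF_INET SOCK_DGRAM socket, used only as an ioctl handle. */
	static int set_rx_vlan(int fd, int on)
	{
		struct ethtool_value eval = { .cmd = ETHTOOL_GFLAGS };
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&eval;

		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)	/* read current flags */
			return -1;

		eval.cmd = ETHTOOL_SFLAGS;
		if (on)
			eval.data |= ETH_FLAG_RXVLAN;
		else
			eval.data &= ~ETH_FLAG_RXVLAN;

		return ioctl(fd, SIOCETHTOOL, &ifr);	/* lands in rtl8169_set_flags() */
	}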
+
static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
@@ -1285,6 +1524,8 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_strings = rtl8169_get_strings,
.get_sset_count = rtl8169_get_sset_count,
.get_ethtool_stats = rtl8169_get_ethtool_stats,
+ .set_flags = rtl8169_set_flags,
+ .get_flags = ethtool_op_get_flags,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -1309,9 +1550,12 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
/* 8168D family. */
{ 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
{ 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
- { 0x7c800000, 0x28800000, RTL_GIGA_MAC_VER_27 },
{ 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
+ /* 8168DP family. */
+ { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
+ { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
+
/* 8168C family. */
{ 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
{ 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
@@ -1330,6 +1574,9 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
{ 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
/* 8101 family. */
+ { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
+ { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
+ { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
{ 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
{ 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
{ 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
@@ -1375,15 +1622,195 @@ struct phy_reg {
u16 val;
};
-static void rtl_phy_write(void __iomem *ioaddr, const struct phy_reg *regs, int len)
+static void rtl_writephy_batch(struct rtl8169_private *tp,
+ const struct phy_reg *regs, int len)
{
while (len-- > 0) {
- mdio_write(ioaddr, regs->reg, regs->val);
+ rtl_writephy(tp, regs->reg, regs->val);
regs++;
}
}
-static void rtl8169s_hw_phy_config(void __iomem *ioaddr)
+#define PHY_READ 0x00000000
+#define PHY_DATA_OR 0x10000000
+#define PHY_DATA_AND 0x20000000
+#define PHY_BJMPN 0x30000000
+#define PHY_READ_EFUSE 0x40000000
+#define PHY_READ_MAC_BYTE 0x50000000
+#define PHY_WRITE_MAC_BYTE 0x60000000
+#define PHY_CLEAR_READCOUNT 0x70000000
+#define PHY_WRITE 0x80000000
+#define PHY_READCOUNT_EQ_SKIP 0x90000000
+#define PHY_COMP_EQ_SKIPN 0xa0000000
+#define PHY_COMP_NEQ_SKIPN 0xb0000000
+#define PHY_WRITE_PREVIOUS 0xc0000000
+#define PHY_SKIPN 0xd0000000
+#define PHY_DELAY_MS 0xe0000000
+#define PHY_WRITE_ERI_WORD 0xf0000000
+
+static void
+rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
+{
+ __le32 *phytable = (__le32 *)fw->data;
+ struct net_device *dev = tp->dev;
+ size_t index, fw_size = fw->size / sizeof(*phytable);
+ u32 predata, count;
+
+ if (fw->size % sizeof(*phytable)) {
+ netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size);
+ return;
+ }
+
+ for (index = 0; index < fw_size; index++) {
+ u32 action = le32_to_cpu(phytable[index]);
+ u32 regno = (action & 0x0fff0000) >> 16;
+
+ switch (action & 0xf0000000) {
+ case PHY_READ:
+ case PHY_DATA_OR:
+ case PHY_DATA_AND:
+ case PHY_READ_EFUSE:
+ case PHY_CLEAR_READCOUNT:
+ case PHY_WRITE:
+ case PHY_WRITE_PREVIOUS:
+ case PHY_DELAY_MS:
+ break;
+
+ case PHY_BJMPN:
+ if (regno > index) {
+ netif_err(tp, probe, tp->dev,
+ "Out of range of firmware\n");
+ return;
+ }
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ if (index + 2 >= fw_size) {
+ netif_err(tp, probe, tp->dev,
+ "Out of range of firmware\n");
+ return;
+ }
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ case PHY_COMP_NEQ_SKIPN:
+ case PHY_SKIPN:
+ if (index + 1 + regno >= fw_size) {
+ netif_err(tp, probe, tp->dev,
+ "Out of range of firmware\n");
+ return;
+ }
+ break;
+
+ case PHY_READ_MAC_BYTE:
+ case PHY_WRITE_MAC_BYTE:
+ case PHY_WRITE_ERI_WORD:
+ default:
+ netif_err(tp, probe, tp->dev,
+ "Invalid action 0x%08x\n", action);
+ return;
+ }
+ }
+
+ predata = 0;
+ count = 0;
+
+ for (index = 0; index < fw_size; ) {
+ u32 action = le32_to_cpu(phytable[index]);
+ u32 data = action & 0x0000ffff;
+ u32 regno = (action & 0x0fff0000) >> 16;
+
+ if (!action)
+ break;
+
+ switch (action & 0xf0000000) {
+ case PHY_READ:
+ predata = rtl_readphy(tp, regno);
+ count++;
+ index++;
+ break;
+ case PHY_DATA_OR:
+ predata |= data;
+ index++;
+ break;
+ case PHY_DATA_AND:
+ predata &= data;
+ index++;
+ break;
+ case PHY_BJMPN:
+ index -= regno;
+ break;
+ case PHY_READ_EFUSE:
+ predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
+ index++;
+ break;
+ case PHY_CLEAR_READCOUNT:
+ count = 0;
+ index++;
+ break;
+ case PHY_WRITE:
+ rtl_writephy(tp, regno, data);
+ index++;
+ break;
+ case PHY_READCOUNT_EQ_SKIP:
+ if (count == data)
+ index += 2;
+ else
+ index += 1;
+ break;
+ case PHY_COMP_EQ_SKIPN:
+ if (predata == data)
+ index += regno;
+ index++;
+ break;
+ case PHY_COMP_NEQ_SKIPN:
+ if (predata != data)
+ index += regno;
+ index++;
+ break;
+ case PHY_WRITE_PREVIOUS:
+ rtl_writephy(tp, regno, predata);
+ index++;
+ break;
+ case PHY_SKIPN:
+ index += regno + 1;
+ break;
+ case PHY_DELAY_MS:
+ mdelay(data);
+ index++;
+ break;
+
+ case PHY_READ_MAC_BYTE:
+ case PHY_WRITE_MAC_BYTE:
+ case PHY_WRITE_ERI_WORD:
+ default:
+ BUG();
+ }
+ }
+}
+
+static void rtl_release_firmware(struct rtl8169_private *tp)
+{
+ release_firmware(tp->fw);
+ tp->fw = NULL;
+}
+
+static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name)
+{
+ const struct firmware **fw = &tp->fw;
+ int rc = !*fw;
+
+ if (rc) {
+ rc = request_firmware(fw, fw_name, &tp->pci_dev->dev);
+ if (rc < 0)
+ goto out;
+ }
+
+ /* TODO: release firmware once rtl_phy_write_fw signals failures. */
+ rtl_phy_write_fw(tp, *fw);
+out:
+ return rc;
+}
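The firmware image consumed above is a flat array of little-endian 32-bit action words; as the validation pass in rtl_phy_write_fw() shows, bits 31-28 carry the opcode, bits 27-16 a register number (or a relative jump/skip count, depending on the opcode) and bits 15-0 the immediate data. A small decoding sketch, purely for reference and not part of the patch:

	/* Sketch of the action-word layout used by rtl_phy_write_fw(). */
	static inline u32 fw_opcode(u32 action) { return action & 0xf0000000; }
	static inline u32 fw_regno(u32 action)  { return (action & 0x0fff0000) >> 16; }
	static inline u32 fw_data(u32 action)   { return action & 0x0000ffff; }

	/* Example: PHY_WRITE | (0x1f << 16) | 0x0005 is interpreted as
	 * rtl_writephy(tp, 0x1f, 0x0005), i.e. a PHY page select. */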
+
+static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0001 },
@@ -1447,10 +1874,10 @@ static void rtl8169s_hw_phy_config(void __iomem *ioaddr)
{ 0x00, 0x9200 }
};
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
-static void rtl8169sb_hw_phy_config(void __iomem *ioaddr)
+static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
@@ -1458,11 +1885,10 @@ static void rtl8169sb_hw_phy_config(void __iomem *ioaddr)
{ 0x1f, 0x0000 }
};
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
-static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp,
- void __iomem *ioaddr)
+static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
u16 vendor_id, device_id;
@@ -1473,13 +1899,12 @@ static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp,
if ((vendor_id != PCI_VENDOR_ID_GIGABYTE) || (device_id != 0xe000))
return;
- mdio_write(ioaddr, 0x1f, 0x0001);
- mdio_write(ioaddr, 0x10, 0xf01b);
- mdio_write(ioaddr, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0001);
+ rtl_writephy(tp, 0x10, 0xf01b);
+ rtl_writephy(tp, 0x1f, 0x0000);
}
-static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp,
- void __iomem *ioaddr)
+static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0001 },
@@ -1521,12 +1946,12 @@ static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp,
{ 0x1f, 0x0000 }
};
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
- rtl8169scd_hw_phy_config_quirk(tp, ioaddr);
+ rtl8169scd_hw_phy_config_quirk(tp);
}
-static void rtl8169sce_hw_phy_config(void __iomem *ioaddr)
+static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0001 },
@@ -1576,23 +2001,23 @@ static void rtl8169sce_hw_phy_config(void __iomem *ioaddr)
{ 0x1f, 0x0000 }
};
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
-static void rtl8168bb_hw_phy_config(void __iomem *ioaddr)
+static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
{ 0x10, 0xf41b },
{ 0x1f, 0x0000 }
};
- mdio_write(ioaddr, 0x1f, 0x0001);
- mdio_patch(ioaddr, 0x16, 1 << 0);
+ rtl_writephy(tp, 0x1f, 0x0001);
+ rtl_patchphy(tp, 0x16, 1 << 0);
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
-static void rtl8168bef_hw_phy_config(void __iomem *ioaddr)
+static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0001 },
@@ -1600,10 +2025,10 @@ static void rtl8168bef_hw_phy_config(void __iomem *ioaddr)
{ 0x1f, 0x0000 }
};
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
-static void rtl8168cp_1_hw_phy_config(void __iomem *ioaddr)
+static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0000 },
@@ -1613,10 +2038,10 @@ static void rtl8168cp_1_hw_phy_config(void __iomem *ioaddr)
{ 0x1f, 0x0000 }
};
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
-static void rtl8168cp_2_hw_phy_config(void __iomem *ioaddr)
+static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0001 },
@@ -1624,14 +2049,14 @@ static void rtl8168cp_2_hw_phy_config(void __iomem *ioaddr)
{ 0x1f, 0x0000 }
};
- mdio_write(ioaddr, 0x1f, 0x0000);
- mdio_patch(ioaddr, 0x14, 1 << 5);
- mdio_patch(ioaddr, 0x0d, 1 << 5);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_patchphy(tp, 0x14, 1 << 5);
+ rtl_patchphy(tp, 0x0d, 1 << 5);
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
-static void rtl8168c_1_hw_phy_config(void __iomem *ioaddr)
+static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0001 },
@@ -1653,14 +2078,14 @@ static void rtl8168c_1_hw_phy_config(void __iomem *ioaddr)
{ 0x09, 0x0000 }
};
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
- mdio_patch(ioaddr, 0x14, 1 << 5);
- mdio_patch(ioaddr, 0x0d, 1 << 5);
- mdio_write(ioaddr, 0x1f, 0x0000);
+ rtl_patchphy(tp, 0x14, 1 << 5);
+ rtl_patchphy(tp, 0x0d, 1 << 5);
+ rtl_writephy(tp, 0x1f, 0x0000);
}
-static void rtl8168c_2_hw_phy_config(void __iomem *ioaddr)
+static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0001 },
@@ -1680,15 +2105,15 @@ static void rtl8168c_2_hw_phy_config(void __iomem *ioaddr)
{ 0x1f, 0x0000 }
};
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
- mdio_patch(ioaddr, 0x16, 1 << 0);
- mdio_patch(ioaddr, 0x14, 1 << 5);
- mdio_patch(ioaddr, 0x0d, 1 << 5);
- mdio_write(ioaddr, 0x1f, 0x0000);
+ rtl_patchphy(tp, 0x16, 1 << 0);
+ rtl_patchphy(tp, 0x14, 1 << 5);
+ rtl_patchphy(tp, 0x0d, 1 << 5);
+ rtl_writephy(tp, 0x1f, 0x0000);
}
-static void rtl8168c_3_hw_phy_config(void __iomem *ioaddr)
+static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0001 },
@@ -1702,22 +2127,23 @@ static void rtl8168c_3_hw_phy_config(void __iomem *ioaddr)
{ 0x1f, 0x0000 }
};
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
- mdio_patch(ioaddr, 0x16, 1 << 0);
- mdio_patch(ioaddr, 0x14, 1 << 5);
- mdio_patch(ioaddr, 0x0d, 1 << 5);
- mdio_write(ioaddr, 0x1f, 0x0000);
+ rtl_patchphy(tp, 0x16, 1 << 0);
+ rtl_patchphy(tp, 0x14, 1 << 5);
+ rtl_patchphy(tp, 0x0d, 1 << 5);
+ rtl_writephy(tp, 0x1f, 0x0000);
}
-static void rtl8168c_4_hw_phy_config(void __iomem *ioaddr)
+static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
- rtl8168c_3_hw_phy_config(ioaddr);
+ rtl8168c_3_hw_phy_config(tp);
}
-static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
+static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init_0[] = {
+ /* Channel Estimation */
{ 0x1f, 0x0001 },
{ 0x06, 0x4064 },
{ 0x07, 0x2863 },
@@ -1734,378 +2160,39 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
{ 0x12, 0xf49f },
{ 0x13, 0x070b },
{ 0x1a, 0x05ad },
- { 0x14, 0x94c0 }
- };
- static const struct phy_reg phy_reg_init_1[] = {
+ { 0x14, 0x94c0 },
+
+ /*
+ * Tx Error Issue
+ * enhance line driver power
+ */
{ 0x1f, 0x0002 },
{ 0x06, 0x5561 },
{ 0x1f, 0x0005 },
{ 0x05, 0x8332 },
- { 0x06, 0x5561 }
- };
- static const struct phy_reg phy_reg_init_2[] = {
- { 0x1f, 0x0005 },
- { 0x05, 0xffc2 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8000 },
- { 0x06, 0xf8f9 },
- { 0x06, 0xfaef },
- { 0x06, 0x59ee },
- { 0x06, 0xf8ea },
- { 0x06, 0x00ee },
- { 0x06, 0xf8eb },
- { 0x06, 0x00e0 },
- { 0x06, 0xf87c },
- { 0x06, 0xe1f8 },
- { 0x06, 0x7d59 },
- { 0x06, 0x0fef },
- { 0x06, 0x0139 },
- { 0x06, 0x029e },
- { 0x06, 0x06ef },
- { 0x06, 0x1039 },
- { 0x06, 0x089f },
- { 0x06, 0x2aee },
- { 0x06, 0xf8ea },
- { 0x06, 0x00ee },
- { 0x06, 0xf8eb },
- { 0x06, 0x01e0 },
- { 0x06, 0xf87c },
- { 0x06, 0xe1f8 },
- { 0x06, 0x7d58 },
- { 0x06, 0x409e },
- { 0x06, 0x0f39 },
- { 0x06, 0x46aa },
- { 0x06, 0x0bbf },
- { 0x06, 0x8290 },
- { 0x06, 0xd682 },
- { 0x06, 0x9802 },
- { 0x06, 0x014f },
- { 0x06, 0xae09 },
- { 0x06, 0xbf82 },
- { 0x06, 0x98d6 },
- { 0x06, 0x82a0 },
- { 0x06, 0x0201 },
- { 0x06, 0x4fef },
- { 0x06, 0x95fe },
- { 0x06, 0xfdfc },
- { 0x06, 0x05f8 },
- { 0x06, 0xf9fa },
- { 0x06, 0xeef8 },
- { 0x06, 0xea00 },
- { 0x06, 0xeef8 },
- { 0x06, 0xeb00 },
- { 0x06, 0xe2f8 },
- { 0x06, 0x7ce3 },
- { 0x06, 0xf87d },
- { 0x06, 0xa511 },
- { 0x06, 0x1112 },
- { 0x06, 0xd240 },
- { 0x06, 0xd644 },
- { 0x06, 0x4402 },
- { 0x06, 0x8217 },
- { 0x06, 0xd2a0 },
- { 0x06, 0xd6aa },
- { 0x06, 0xaa02 },
- { 0x06, 0x8217 },
- { 0x06, 0xae0f },
- { 0x06, 0xa544 },
- { 0x06, 0x4402 },
- { 0x06, 0xae4d },
- { 0x06, 0xa5aa },
- { 0x06, 0xaa02 },
- { 0x06, 0xae47 },
- { 0x06, 0xaf82 },
- { 0x06, 0x13ee },
- { 0x06, 0x834e },
- { 0x06, 0x00ee },
- { 0x06, 0x834d },
- { 0x06, 0x0fee },
- { 0x06, 0x834c },
- { 0x06, 0x0fee },
- { 0x06, 0x834f },
- { 0x06, 0x00ee },
- { 0x06, 0x8351 },
- { 0x06, 0x00ee },
- { 0x06, 0x834a },
- { 0x06, 0xffee },
- { 0x06, 0x834b },
- { 0x06, 0xffe0 },
- { 0x06, 0x8330 },
- { 0x06, 0xe183 },
- { 0x06, 0x3158 },
- { 0x06, 0xfee4 },
- { 0x06, 0xf88a },
- { 0x06, 0xe5f8 },
- { 0x06, 0x8be0 },
- { 0x06, 0x8332 },
- { 0x06, 0xe183 },
- { 0x06, 0x3359 },
- { 0x06, 0x0fe2 },
- { 0x06, 0x834d },
- { 0x06, 0x0c24 },
- { 0x06, 0x5af0 },
- { 0x06, 0x1e12 },
- { 0x06, 0xe4f8 },
- { 0x06, 0x8ce5 },
- { 0x06, 0xf88d },
- { 0x06, 0xaf82 },
- { 0x06, 0x13e0 },
- { 0x06, 0x834f },
- { 0x06, 0x10e4 },
- { 0x06, 0x834f },
- { 0x06, 0xe083 },
- { 0x06, 0x4e78 },
- { 0x06, 0x009f },
- { 0x06, 0x0ae0 },
- { 0x06, 0x834f },
- { 0x06, 0xa010 },
- { 0x06, 0xa5ee },
- { 0x06, 0x834e },
- { 0x06, 0x01e0 },
- { 0x06, 0x834e },
- { 0x06, 0x7805 },
- { 0x06, 0x9e9a },
- { 0x06, 0xe083 },
- { 0x06, 0x4e78 },
- { 0x06, 0x049e },
- { 0x06, 0x10e0 },
- { 0x06, 0x834e },
- { 0x06, 0x7803 },
- { 0x06, 0x9e0f },
- { 0x06, 0xe083 },
- { 0x06, 0x4e78 },
- { 0x06, 0x019e },
- { 0x06, 0x05ae },
- { 0x06, 0x0caf },
- { 0x06, 0x81f8 },
- { 0x06, 0xaf81 },
- { 0x06, 0xa3af },
- { 0x06, 0x81dc },
- { 0x06, 0xaf82 },
- { 0x06, 0x13ee },
- { 0x06, 0x8348 },
- { 0x06, 0x00ee },
- { 0x06, 0x8349 },
- { 0x06, 0x00e0 },
- { 0x06, 0x8351 },
- { 0x06, 0x10e4 },
- { 0x06, 0x8351 },
- { 0x06, 0x5801 },
- { 0x06, 0x9fea },
- { 0x06, 0xd000 },
- { 0x06, 0xd180 },
- { 0x06, 0x1f66 },
- { 0x06, 0xe2f8 },
- { 0x06, 0xeae3 },
- { 0x06, 0xf8eb },
- { 0x06, 0x5af8 },
- { 0x06, 0x1e20 },
- { 0x06, 0xe6f8 },
- { 0x06, 0xeae5 },
- { 0x06, 0xf8eb },
- { 0x06, 0xd302 },
- { 0x06, 0xb3fe },
- { 0x06, 0xe2f8 },
- { 0x06, 0x7cef },
- { 0x06, 0x325b },
- { 0x06, 0x80e3 },
- { 0x06, 0xf87d },
- { 0x06, 0x9e03 },
- { 0x06, 0x7dff },
- { 0x06, 0xff0d },
- { 0x06, 0x581c },
- { 0x06, 0x551a },
- { 0x06, 0x6511 },
- { 0x06, 0xa190 },
- { 0x06, 0xd3e2 },
- { 0x06, 0x8348 },
- { 0x06, 0xe383 },
- { 0x06, 0x491b },
- { 0x06, 0x56ab },
- { 0x06, 0x08ef },
- { 0x06, 0x56e6 },
- { 0x06, 0x8348 },
- { 0x06, 0xe783 },
- { 0x06, 0x4910 },
- { 0x06, 0xd180 },
- { 0x06, 0x1f66 },
- { 0x06, 0xa004 },
- { 0x06, 0xb9e2 },
- { 0x06, 0x8348 },
- { 0x06, 0xe383 },
- { 0x06, 0x49ef },
- { 0x06, 0x65e2 },
- { 0x06, 0x834a },
- { 0x06, 0xe383 },
- { 0x06, 0x4b1b },
- { 0x06, 0x56aa },
- { 0x06, 0x0eef },
- { 0x06, 0x56e6 },
- { 0x06, 0x834a },
- { 0x06, 0xe783 },
- { 0x06, 0x4be2 },
- { 0x06, 0x834d },
- { 0x06, 0xe683 },
- { 0x06, 0x4ce0 },
- { 0x06, 0x834d },
- { 0x06, 0xa000 },
- { 0x06, 0x0caf },
- { 0x06, 0x81dc },
- { 0x06, 0xe083 },
- { 0x06, 0x4d10 },
- { 0x06, 0xe483 },
- { 0x06, 0x4dae },
- { 0x06, 0x0480 },
- { 0x06, 0xe483 },
- { 0x06, 0x4de0 },
- { 0x06, 0x834e },
- { 0x06, 0x7803 },
- { 0x06, 0x9e0b },
- { 0x06, 0xe083 },
- { 0x06, 0x4e78 },
- { 0x06, 0x049e },
- { 0x06, 0x04ee },
- { 0x06, 0x834e },
- { 0x06, 0x02e0 },
- { 0x06, 0x8332 },
- { 0x06, 0xe183 },
- { 0x06, 0x3359 },
- { 0x06, 0x0fe2 },
- { 0x06, 0x834d },
- { 0x06, 0x0c24 },
- { 0x06, 0x5af0 },
- { 0x06, 0x1e12 },
- { 0x06, 0xe4f8 },
- { 0x06, 0x8ce5 },
- { 0x06, 0xf88d },
- { 0x06, 0xe083 },
- { 0x06, 0x30e1 },
- { 0x06, 0x8331 },
- { 0x06, 0x6801 },
- { 0x06, 0xe4f8 },
- { 0x06, 0x8ae5 },
- { 0x06, 0xf88b },
- { 0x06, 0xae37 },
- { 0x06, 0xee83 },
- { 0x06, 0x4e03 },
- { 0x06, 0xe083 },
- { 0x06, 0x4ce1 },
- { 0x06, 0x834d },
- { 0x06, 0x1b01 },
- { 0x06, 0x9e04 },
- { 0x06, 0xaaa1 },
- { 0x06, 0xaea8 },
- { 0x06, 0xee83 },
- { 0x06, 0x4e04 },
- { 0x06, 0xee83 },
- { 0x06, 0x4f00 },
- { 0x06, 0xaeab },
- { 0x06, 0xe083 },
- { 0x06, 0x4f78 },
- { 0x06, 0x039f },
- { 0x06, 0x14ee },
- { 0x06, 0x834e },
- { 0x06, 0x05d2 },
- { 0x06, 0x40d6 },
- { 0x06, 0x5554 },
- { 0x06, 0x0282 },
- { 0x06, 0x17d2 },
- { 0x06, 0xa0d6 },
- { 0x06, 0xba00 },
- { 0x06, 0x0282 },
- { 0x06, 0x17fe },
- { 0x06, 0xfdfc },
- { 0x06, 0x05f8 },
- { 0x06, 0xe0f8 },
- { 0x06, 0x60e1 },
- { 0x06, 0xf861 },
- { 0x06, 0x6802 },
- { 0x06, 0xe4f8 },
- { 0x06, 0x60e5 },
- { 0x06, 0xf861 },
- { 0x06, 0xe0f8 },
- { 0x06, 0x48e1 },
- { 0x06, 0xf849 },
- { 0x06, 0x580f },
- { 0x06, 0x1e02 },
- { 0x06, 0xe4f8 },
- { 0x06, 0x48e5 },
- { 0x06, 0xf849 },
- { 0x06, 0xd000 },
- { 0x06, 0x0282 },
- { 0x06, 0x5bbf },
- { 0x06, 0x8350 },
- { 0x06, 0xef46 },
- { 0x06, 0xdc19 },
- { 0x06, 0xddd0 },
- { 0x06, 0x0102 },
- { 0x06, 0x825b },
- { 0x06, 0x0282 },
- { 0x06, 0x77e0 },
- { 0x06, 0xf860 },
- { 0x06, 0xe1f8 },
- { 0x06, 0x6158 },
- { 0x06, 0xfde4 },
- { 0x06, 0xf860 },
- { 0x06, 0xe5f8 },
- { 0x06, 0x61fc },
- { 0x06, 0x04f9 },
- { 0x06, 0xfafb },
- { 0x06, 0xc6bf },
- { 0x06, 0xf840 },
- { 0x06, 0xbe83 },
- { 0x06, 0x50a0 },
- { 0x06, 0x0101 },
- { 0x06, 0x071b },
- { 0x06, 0x89cf },
- { 0x06, 0xd208 },
- { 0x06, 0xebdb },
- { 0x06, 0x19b2 },
- { 0x06, 0xfbff },
- { 0x06, 0xfefd },
- { 0x06, 0x04f8 },
- { 0x06, 0xe0f8 },
- { 0x06, 0x48e1 },
- { 0x06, 0xf849 },
- { 0x06, 0x6808 },
- { 0x06, 0xe4f8 },
- { 0x06, 0x48e5 },
- { 0x06, 0xf849 },
- { 0x06, 0x58f7 },
- { 0x06, 0xe4f8 },
- { 0x06, 0x48e5 },
- { 0x06, 0xf849 },
- { 0x06, 0xfc04 },
- { 0x06, 0x4d20 },
- { 0x06, 0x0002 },
- { 0x06, 0x4e22 },
- { 0x06, 0x0002 },
- { 0x06, 0x4ddf },
- { 0x06, 0xff01 },
- { 0x06, 0x4edd },
- { 0x06, 0xff01 },
- { 0x05, 0x83d4 },
- { 0x06, 0x8000 },
- { 0x05, 0x83d8 },
- { 0x06, 0x8051 },
- { 0x02, 0x6010 },
- { 0x03, 0xdc00 },
- { 0x05, 0xfff6 },
- { 0x06, 0x00fc },
- { 0x1f, 0x0000 },
+ { 0x06, 0x5561 },
+
+ /*
+ * Cannot link at 1Gbps with a bad cable
+ * Decrease SNR threshold from 21.07dB to 19.04dB
+ */
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
{ 0x1f, 0x0000 },
- { 0x0d, 0xf880 },
- { 0x1f, 0x0000 }
+ { 0x0d, 0xf880 }
};
+ void __iomem *ioaddr = tp->mmio_addr;
- rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
-
- mdio_write(ioaddr, 0x1f, 0x0002);
- mdio_plus_minus(ioaddr, 0x0b, 0x0010, 0x00ef);
- mdio_plus_minus(ioaddr, 0x0c, 0xa200, 0x5d00);
+ rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
- rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));
+ /*
+ * Rx Error Issue
+ * Fine Tune Switching regulator parameter
+ */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
+ rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
static const struct phy_reg phy_reg_init[] = {
@@ -2118,9 +2205,9 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
};
int val;
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
- val = mdio_read(ioaddr, 0x0d);
+ val = rtl_readphy(tp, 0x0d);
if ((val & 0x00ff) != 0x006c) {
static const u32 set[] = {
@@ -2129,11 +2216,11 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
};
int i;
- mdio_write(ioaddr, 0x1f, 0x0002);
+ rtl_writephy(tp, 0x1f, 0x0002);
val &= 0xff00;
for (i = 0; i < ARRAY_SIZE(set); i++)
- mdio_write(ioaddr, 0x0d, val | set[i]);
+ rtl_writephy(tp, 0x0d, val | set[i]);
}
} else {
static const struct phy_reg phy_reg_init[] = {
@@ -2144,23 +2231,33 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
{ 0x06, 0x6662 }
};
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
- mdio_write(ioaddr, 0x1f, 0x0002);
- mdio_patch(ioaddr, 0x0d, 0x0300);
- mdio_patch(ioaddr, 0x0f, 0x0010);
-
- mdio_write(ioaddr, 0x1f, 0x0002);
- mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600);
- mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000);
+ /* RSET couple improve */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_patchphy(tp, 0x0d, 0x0300);
+ rtl_patchphy(tp, 0x0f, 0x0010);
+
+ /* Fine tune PLL performance */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
+ rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x001b);
+ if ((rtl_readphy(tp, 0x06) != 0xbf00) ||
+ (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) {
+ netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
+ }
- rtl_phy_write(ioaddr, phy_reg_init_2, ARRAY_SIZE(phy_reg_init_2));
+ rtl_writephy(tp, 0x1f, 0x0000);
}
-static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
+static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init_0[] = {
+ /* Channel Estimation */
{ 0x1f, 0x0001 },
{ 0x06, 0x4064 },
{ 0x07, 0x2863 },
@@ -2179,326 +2276,29 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
{ 0x1a, 0x05ad },
{ 0x14, 0x94c0 },
+ /*
+ * Tx Error Issue
+ * enhance line driver power
+ */
{ 0x1f, 0x0002 },
{ 0x06, 0x5561 },
{ 0x1f, 0x0005 },
{ 0x05, 0x8332 },
- { 0x06, 0x5561 }
- };
- static const struct phy_reg phy_reg_init_1[] = {
- { 0x1f, 0x0005 },
- { 0x05, 0xffc2 },
- { 0x1f, 0x0005 },
- { 0x05, 0x8000 },
- { 0x06, 0xf8f9 },
- { 0x06, 0xfaee },
- { 0x06, 0xf8ea },
- { 0x06, 0x00ee },
- { 0x06, 0xf8eb },
- { 0x06, 0x00e2 },
- { 0x06, 0xf87c },
- { 0x06, 0xe3f8 },
- { 0x06, 0x7da5 },
- { 0x06, 0x1111 },
- { 0x06, 0x12d2 },
- { 0x06, 0x40d6 },
- { 0x06, 0x4444 },
- { 0x06, 0x0281 },
- { 0x06, 0xc6d2 },
- { 0x06, 0xa0d6 },
- { 0x06, 0xaaaa },
- { 0x06, 0x0281 },
- { 0x06, 0xc6ae },
- { 0x06, 0x0fa5 },
- { 0x06, 0x4444 },
- { 0x06, 0x02ae },
- { 0x06, 0x4da5 },
- { 0x06, 0xaaaa },
- { 0x06, 0x02ae },
- { 0x06, 0x47af },
- { 0x06, 0x81c2 },
- { 0x06, 0xee83 },
- { 0x06, 0x4e00 },
- { 0x06, 0xee83 },
- { 0x06, 0x4d0f },
- { 0x06, 0xee83 },
- { 0x06, 0x4c0f },
- { 0x06, 0xee83 },
- { 0x06, 0x4f00 },
- { 0x06, 0xee83 },
- { 0x06, 0x5100 },
- { 0x06, 0xee83 },
- { 0x06, 0x4aff },
- { 0x06, 0xee83 },
- { 0x06, 0x4bff },
- { 0x06, 0xe083 },
- { 0x06, 0x30e1 },
- { 0x06, 0x8331 },
- { 0x06, 0x58fe },
- { 0x06, 0xe4f8 },
- { 0x06, 0x8ae5 },
- { 0x06, 0xf88b },
- { 0x06, 0xe083 },
- { 0x06, 0x32e1 },
- { 0x06, 0x8333 },
- { 0x06, 0x590f },
- { 0x06, 0xe283 },
- { 0x06, 0x4d0c },
- { 0x06, 0x245a },
- { 0x06, 0xf01e },
- { 0x06, 0x12e4 },
- { 0x06, 0xf88c },
- { 0x06, 0xe5f8 },
- { 0x06, 0x8daf },
- { 0x06, 0x81c2 },
- { 0x06, 0xe083 },
- { 0x06, 0x4f10 },
- { 0x06, 0xe483 },
- { 0x06, 0x4fe0 },
- { 0x06, 0x834e },
- { 0x06, 0x7800 },
- { 0x06, 0x9f0a },
- { 0x06, 0xe083 },
- { 0x06, 0x4fa0 },
- { 0x06, 0x10a5 },
- { 0x06, 0xee83 },
- { 0x06, 0x4e01 },
- { 0x06, 0xe083 },
- { 0x06, 0x4e78 },
- { 0x06, 0x059e },
- { 0x06, 0x9ae0 },
- { 0x06, 0x834e },
- { 0x06, 0x7804 },
- { 0x06, 0x9e10 },
- { 0x06, 0xe083 },
- { 0x06, 0x4e78 },
- { 0x06, 0x039e },
- { 0x06, 0x0fe0 },
- { 0x06, 0x834e },
- { 0x06, 0x7801 },
- { 0x06, 0x9e05 },
- { 0x06, 0xae0c },
- { 0x06, 0xaf81 },
- { 0x06, 0xa7af },
- { 0x06, 0x8152 },
- { 0x06, 0xaf81 },
- { 0x06, 0x8baf },
- { 0x06, 0x81c2 },
- { 0x06, 0xee83 },
- { 0x06, 0x4800 },
- { 0x06, 0xee83 },
- { 0x06, 0x4900 },
- { 0x06, 0xe083 },
- { 0x06, 0x5110 },
- { 0x06, 0xe483 },
- { 0x06, 0x5158 },
- { 0x06, 0x019f },
- { 0x06, 0xead0 },
- { 0x06, 0x00d1 },
- { 0x06, 0x801f },
- { 0x06, 0x66e2 },
- { 0x06, 0xf8ea },
- { 0x06, 0xe3f8 },
- { 0x06, 0xeb5a },
- { 0x06, 0xf81e },
- { 0x06, 0x20e6 },
- { 0x06, 0xf8ea },
- { 0x06, 0xe5f8 },
- { 0x06, 0xebd3 },
- { 0x06, 0x02b3 },
- { 0x06, 0xfee2 },
- { 0x06, 0xf87c },
- { 0x06, 0xef32 },
- { 0x06, 0x5b80 },
- { 0x06, 0xe3f8 },
- { 0x06, 0x7d9e },
- { 0x06, 0x037d },
- { 0x06, 0xffff },
- { 0x06, 0x0d58 },
- { 0x06, 0x1c55 },
- { 0x06, 0x1a65 },
- { 0x06, 0x11a1 },
- { 0x06, 0x90d3 },
- { 0x06, 0xe283 },
- { 0x06, 0x48e3 },
- { 0x06, 0x8349 },
- { 0x06, 0x1b56 },
- { 0x06, 0xab08 },
- { 0x06, 0xef56 },
- { 0x06, 0xe683 },
- { 0x06, 0x48e7 },
- { 0x06, 0x8349 },
- { 0x06, 0x10d1 },
- { 0x06, 0x801f },
- { 0x06, 0x66a0 },
- { 0x06, 0x04b9 },
- { 0x06, 0xe283 },
- { 0x06, 0x48e3 },
- { 0x06, 0x8349 },
- { 0x06, 0xef65 },
- { 0x06, 0xe283 },
- { 0x06, 0x4ae3 },
- { 0x06, 0x834b },
- { 0x06, 0x1b56 },
- { 0x06, 0xaa0e },
- { 0x06, 0xef56 },
- { 0x06, 0xe683 },
- { 0x06, 0x4ae7 },
- { 0x06, 0x834b },
- { 0x06, 0xe283 },
- { 0x06, 0x4de6 },
- { 0x06, 0x834c },
- { 0x06, 0xe083 },
- { 0x06, 0x4da0 },
- { 0x06, 0x000c },
- { 0x06, 0xaf81 },
- { 0x06, 0x8be0 },
- { 0x06, 0x834d },
- { 0x06, 0x10e4 },
- { 0x06, 0x834d },
- { 0x06, 0xae04 },
- { 0x06, 0x80e4 },
- { 0x06, 0x834d },
- { 0x06, 0xe083 },
- { 0x06, 0x4e78 },
- { 0x06, 0x039e },
- { 0x06, 0x0be0 },
- { 0x06, 0x834e },
- { 0x06, 0x7804 },
- { 0x06, 0x9e04 },
- { 0x06, 0xee83 },
- { 0x06, 0x4e02 },
- { 0x06, 0xe083 },
- { 0x06, 0x32e1 },
- { 0x06, 0x8333 },
- { 0x06, 0x590f },
- { 0x06, 0xe283 },
- { 0x06, 0x4d0c },
- { 0x06, 0x245a },
- { 0x06, 0xf01e },
- { 0x06, 0x12e4 },
- { 0x06, 0xf88c },
- { 0x06, 0xe5f8 },
- { 0x06, 0x8de0 },
- { 0x06, 0x8330 },
- { 0x06, 0xe183 },
- { 0x06, 0x3168 },
- { 0x06, 0x01e4 },
- { 0x06, 0xf88a },
- { 0x06, 0xe5f8 },
- { 0x06, 0x8bae },
- { 0x06, 0x37ee },
- { 0x06, 0x834e },
- { 0x06, 0x03e0 },
- { 0x06, 0x834c },
- { 0x06, 0xe183 },
- { 0x06, 0x4d1b },
- { 0x06, 0x019e },
- { 0x06, 0x04aa },
- { 0x06, 0xa1ae },
- { 0x06, 0xa8ee },
- { 0x06, 0x834e },
- { 0x06, 0x04ee },
- { 0x06, 0x834f },
- { 0x06, 0x00ae },
- { 0x06, 0xabe0 },
- { 0x06, 0x834f },
- { 0x06, 0x7803 },
- { 0x06, 0x9f14 },
- { 0x06, 0xee83 },
- { 0x06, 0x4e05 },
- { 0x06, 0xd240 },
- { 0x06, 0xd655 },
- { 0x06, 0x5402 },
- { 0x06, 0x81c6 },
- { 0x06, 0xd2a0 },
- { 0x06, 0xd6ba },
- { 0x06, 0x0002 },
- { 0x06, 0x81c6 },
- { 0x06, 0xfefd },
- { 0x06, 0xfc05 },
- { 0x06, 0xf8e0 },
- { 0x06, 0xf860 },
- { 0x06, 0xe1f8 },
- { 0x06, 0x6168 },
- { 0x06, 0x02e4 },
- { 0x06, 0xf860 },
- { 0x06, 0xe5f8 },
- { 0x06, 0x61e0 },
- { 0x06, 0xf848 },
- { 0x06, 0xe1f8 },
- { 0x06, 0x4958 },
- { 0x06, 0x0f1e },
- { 0x06, 0x02e4 },
- { 0x06, 0xf848 },
- { 0x06, 0xe5f8 },
- { 0x06, 0x49d0 },
- { 0x06, 0x0002 },
- { 0x06, 0x820a },
- { 0x06, 0xbf83 },
- { 0x06, 0x50ef },
- { 0x06, 0x46dc },
- { 0x06, 0x19dd },
- { 0x06, 0xd001 },
- { 0x06, 0x0282 },
- { 0x06, 0x0a02 },
- { 0x06, 0x8226 },
- { 0x06, 0xe0f8 },
- { 0x06, 0x60e1 },
- { 0x06, 0xf861 },
- { 0x06, 0x58fd },
- { 0x06, 0xe4f8 },
- { 0x06, 0x60e5 },
- { 0x06, 0xf861 },
- { 0x06, 0xfc04 },
- { 0x06, 0xf9fa },
- { 0x06, 0xfbc6 },
- { 0x06, 0xbff8 },
- { 0x06, 0x40be },
- { 0x06, 0x8350 },
- { 0x06, 0xa001 },
- { 0x06, 0x0107 },
- { 0x06, 0x1b89 },
- { 0x06, 0xcfd2 },
- { 0x06, 0x08eb },
- { 0x06, 0xdb19 },
- { 0x06, 0xb2fb },
- { 0x06, 0xfffe },
- { 0x06, 0xfd04 },
- { 0x06, 0xf8e0 },
- { 0x06, 0xf848 },
- { 0x06, 0xe1f8 },
- { 0x06, 0x4968 },
- { 0x06, 0x08e4 },
- { 0x06, 0xf848 },
- { 0x06, 0xe5f8 },
- { 0x06, 0x4958 },
- { 0x06, 0xf7e4 },
- { 0x06, 0xf848 },
- { 0x06, 0xe5f8 },
- { 0x06, 0x49fc },
- { 0x06, 0x044d },
- { 0x06, 0x2000 },
- { 0x06, 0x024e },
- { 0x06, 0x2200 },
- { 0x06, 0x024d },
- { 0x06, 0xdfff },
- { 0x06, 0x014e },
- { 0x06, 0xddff },
- { 0x06, 0x0100 },
- { 0x05, 0x83d8 },
- { 0x06, 0x8000 },
- { 0x03, 0xdc00 },
- { 0x05, 0xfff6 },
- { 0x06, 0x00fc },
- { 0x1f, 0x0000 },
+ { 0x06, 0x5561 },
+
+ /*
+ * Cannot link at 1Gbps with a bad cable
+ * Decrease SNR threshold from 21.07dB to 19.04dB
+ */
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
{ 0x1f, 0x0000 },
- { 0x0d, 0xf880 },
- { 0x1f, 0x0000 }
+ { 0x0d, 0xf880 }
};
+ void __iomem *ioaddr = tp->mmio_addr;
- rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
+ rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
static const struct phy_reg phy_reg_init[] = {
@@ -2512,21 +2312,21 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
};
int val;
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
- val = mdio_read(ioaddr, 0x0d);
+ val = rtl_readphy(tp, 0x0d);
if ((val & 0x00ff) != 0x006c) {
- u32 set[] = {
+ static const u32 set[] = {
0x0065, 0x0066, 0x0067, 0x0068,
0x0069, 0x006a, 0x006b, 0x006c
};
int i;
- mdio_write(ioaddr, 0x1f, 0x0002);
+ rtl_writephy(tp, 0x1f, 0x0002);
val &= 0xff00;
for (i = 0; i < ARRAY_SIZE(set); i++)
- mdio_write(ioaddr, 0x0d, val | set[i]);
+ rtl_writephy(tp, 0x0d, val | set[i]);
}
} else {
static const struct phy_reg phy_reg_init[] = {
@@ -2537,23 +2337,29 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
{ 0x06, 0x2642 }
};
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
- mdio_write(ioaddr, 0x1f, 0x0002);
- mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600);
- mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000);
+ /* Fine tune PLL performance */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
+ rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
- mdio_write(ioaddr, 0x1f, 0x0001);
- mdio_write(ioaddr, 0x17, 0x0cc0);
+ /* Switching regulator Slew rate */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_patchphy(tp, 0x0f, 0x0017);
- mdio_write(ioaddr, 0x1f, 0x0002);
- mdio_patch(ioaddr, 0x0f, 0x0017);
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x001b);
+ if ((rtl_readphy(tp, 0x06) != 0xb300) ||
+ (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) {
+ netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
+ }
- rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));
+ rtl_writephy(tp, 0x1f, 0x0000);
}
-static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr)
+static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
@@ -2611,10 +2417,26 @@ static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr)
{ 0x1f, 0x0000 }
};
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x002d },
+ { 0x18, 0x0040 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_patchphy(tp, 0x0d, 1 << 5);
}
-static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
+static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0003 },
@@ -2623,18 +2445,44 @@ static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
{ 0x1f, 0x0000 }
};
- mdio_write(ioaddr, 0x1f, 0x0000);
- mdio_patch(ioaddr, 0x11, 1 << 12);
- mdio_patch(ioaddr, 0x19, 1 << 13);
- mdio_patch(ioaddr, 0x10, 1 << 15);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_patchphy(tp, 0x11, 1 << 12);
+ rtl_patchphy(tp, 0x19, 1 << 13);
+ rtl_patchphy(tp, 0x10, 1 << 15);
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+}
+
+static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0005 },
+ { 0x1a, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0004 },
+ { 0x1c, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0001 },
+ { 0x15, 0x7701 },
+ { 0x1f, 0x0000 }
+ };
+
+ /* Disable ALDPS before ram code */
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
+
+ if (rtl_apply_firmware(tp, FIRMWARE_8105E_1) < 0)
+ netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
static void rtl_hw_phy_config(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
rtl8169_print_mac_version(tp);
@@ -2643,58 +2491,65 @@ static void rtl_hw_phy_config(struct net_device *dev)
break;
case RTL_GIGA_MAC_VER_02:
case RTL_GIGA_MAC_VER_03:
- rtl8169s_hw_phy_config(ioaddr);
+ rtl8169s_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_04:
- rtl8169sb_hw_phy_config(ioaddr);
+ rtl8169sb_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_05:
- rtl8169scd_hw_phy_config(tp, ioaddr);
+ rtl8169scd_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_06:
- rtl8169sce_hw_phy_config(ioaddr);
+ rtl8169sce_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_07:
case RTL_GIGA_MAC_VER_08:
case RTL_GIGA_MAC_VER_09:
- rtl8102e_hw_phy_config(ioaddr);
+ rtl8102e_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_11:
- rtl8168bb_hw_phy_config(ioaddr);
+ rtl8168bb_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_12:
- rtl8168bef_hw_phy_config(ioaddr);
+ rtl8168bef_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_17:
- rtl8168bef_hw_phy_config(ioaddr);
+ rtl8168bef_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_18:
- rtl8168cp_1_hw_phy_config(ioaddr);
+ rtl8168cp_1_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_19:
- rtl8168c_1_hw_phy_config(ioaddr);
+ rtl8168c_1_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_20:
- rtl8168c_2_hw_phy_config(ioaddr);
+ rtl8168c_2_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_21:
- rtl8168c_3_hw_phy_config(ioaddr);
+ rtl8168c_3_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_22:
- rtl8168c_4_hw_phy_config(ioaddr);
+ rtl8168c_4_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_23:
case RTL_GIGA_MAC_VER_24:
- rtl8168cp_2_hw_phy_config(ioaddr);
+ rtl8168cp_2_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_25:
- rtl8168d_1_hw_phy_config(ioaddr);
+ rtl8168d_1_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_26:
- rtl8168d_2_hw_phy_config(ioaddr);
+ rtl8168d_2_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_27:
- rtl8168d_3_hw_phy_config(ioaddr);
+ rtl8168d_3_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_28:
+ rtl8168d_4_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_29:
+ case RTL_GIGA_MAC_VER_30:
+ rtl8105e_hw_phy_config(tp);
break;
default:
@@ -2717,7 +2572,7 @@ static void rtl8169_phy_timer(unsigned long __opaque)
spin_lock_irq(&tp->lock);
- if (tp->phy_reset_pending(ioaddr)) {
+ if (tp->phy_reset_pending(tp)) {
/*
* A busy loop could burn quite a few cycles on today's CPUs.
* Let's delay the execution of the timer for a few ticks.
@@ -2731,7 +2586,7 @@ static void rtl8169_phy_timer(unsigned long __opaque)
netif_warn(tp, link, dev, "PHY reset until link up\n");
- tp->phy_reset_enable(ioaddr);
+ tp->phy_reset_enable(tp);
out_mod_timer:
mod_timer(timer, jiffies + timeout);
@@ -2791,12 +2646,11 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
static void rtl8169_phy_reset(struct net_device *dev,
struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
unsigned int i;
- tp->phy_reset_enable(ioaddr);
+ tp->phy_reset_enable(tp);
for (i = 0; i < 100; i++) {
- if (!tp->phy_reset_pending(ioaddr))
+ if (!tp->phy_reset_pending(tp))
return;
msleep(1);
}
@@ -2823,16 +2677,17 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
RTL_W8(0x82, 0x01);
dprintk("Set PHY Reg 0x0bh = 0x00h\n");
- mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
+ rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
}
rtl8169_phy_reset(dev, tp);
- /*
- * rtl8169_set_speed_xmii takes good care of the Fast Ethernet
- * only 8101. Don't panic.
- */
- rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL);
+ rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
+ ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+ (tp->mii.supports_gmii ?
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full : 0));
if (RTL_R8(PHYstatus) & TBI_Enable)
netif_info(tp, link, dev, "TBI auto-negotiating\n");
@@ -2893,11 +2748,11 @@ static int rtl_xmii_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *dat
return 0;
case SIOCGMIIREG:
- data->val_out = mdio_read(tp->mmio_addr, data->reg_num & 0x1f);
+ data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
return 0;
case SIOCSMIIREG:
- mdio_write(tp->mmio_addr, data->reg_num & 0x1f, data->val_in);
+ rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
return 0;
}
return -EOPNOTSUPP;
@@ -2988,15 +2843,191 @@ static const struct net_device_ops rtl8169_netdev_ops = {
.ndo_set_mac_address = rtl_set_mac_address,
.ndo_do_ioctl = rtl8169_ioctl,
.ndo_set_multicast_list = rtl_set_rx_mode,
-#ifdef CONFIG_R8169_VLAN
- .ndo_vlan_rx_register = rtl8169_vlan_rx_register,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rtl8169_netpoll,
#endif
};
+static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
+{
+ struct mdio_ops *ops = &tp->mdio_ops;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_27:
+ ops->write = r8168dp_1_mdio_write;
+ ops->read = r8168dp_1_mdio_read;
+ break;
+ case RTL_GIGA_MAC_VER_28:
+ ops->write = r8168dp_2_mdio_write;
+ ops->read = r8168dp_2_mdio_read;
+ break;
+ default:
+ ops->write = r8169_mdio_write;
+ ops->read = r8169_mdio_read;
+ break;
+ }
+}
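rtl_writephy()/rtl_readphy() above dispatch through these hooks, which keeps the 8168DP oddities (register remapping for VER_27, the access bracketing for VER_28) out of the common PHY paths. The hook table's exact definition lives with struct rtl8169_private elsewhere in the file; inferred from the call sites, its shape is presumably:

	/* Assumed layout, inferred from the tp->mdio_ops.write/read call sites. */
	struct mdio_ops {
		void (*write)(void __iomem *ioaddr, int reg_addr, int value);
		int (*read)(void __iomem *ioaddr, int reg_addr);
	};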
+
+static void r810x_phy_power_down(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
+}
+
+static void r810x_phy_power_up(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
+}
+
+static void r810x_pll_power_down(struct rtl8169_private *tp)
+{
+ if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, 0x0000);
+ return;
+ }
+
+ r810x_phy_power_down(tp);
+}
+
+static void r810x_pll_power_up(struct rtl8169_private *tp)
+{
+ r810x_phy_power_up(tp);
+}
+
+static void r8168_phy_power_up(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x0e, 0x0000);
+ rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
+}
+
+static void r8168_phy_power_down(struct rtl8169_private *tp)
+{
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x0e, 0x0200);
+ rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
+}
+
+static void r8168_pll_power_down(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+ (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
+ return;
+ }
+
+ if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
+ (RTL_R16(CPlusCmd) & ASF)) {
+ return;
+ }
+
+ if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, 0x0000);
+
+ RTL_W32(RxConfig, RTL_R32(RxConfig) |
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
+ return;
+ }
+
+ r8168_phy_power_down(tp);
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
+ break;
+ }
+}
+
+static void r8168_pll_power_up(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+ (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
+ return;
+ }
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
+ break;
+ }
+
+ r8168_phy_power_up(tp);
+}
+
+static void rtl_pll_power_op(struct rtl8169_private *tp,
+ void (*op)(struct rtl8169_private *))
+{
+ if (op)
+ op(tp);
+}
+
+static void rtl_pll_power_down(struct rtl8169_private *tp)
+{
+ rtl_pll_power_op(tp, tp->pll_power_ops.down);
+}
+
+static void rtl_pll_power_up(struct rtl8169_private *tp)
+{
+ rtl_pll_power_op(tp, tp->pll_power_ops.up);
+}
+
+static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
+{
+ struct pll_power_ops *ops = &tp->pll_power_ops;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_07:
+ case RTL_GIGA_MAC_VER_08:
+ case RTL_GIGA_MAC_VER_09:
+ case RTL_GIGA_MAC_VER_10:
+ case RTL_GIGA_MAC_VER_16:
+ case RTL_GIGA_MAC_VER_29:
+ case RTL_GIGA_MAC_VER_30:
+ ops->down = r810x_pll_power_down;
+ ops->up = r810x_pll_power_up;
+ break;
+
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_18:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ ops->down = r8168_pll_power_down;
+ ops->up = r8168_pll_power_up;
+ break;
+
+ default:
+ ops->down = NULL;
+ ops->up = NULL;
+ break;
+ }
+}
+
static int __devinit
rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -3037,6 +3068,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
mii->reg_num_mask = 0x1f;
mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+ /* Disable ASPM completely: it causes devices to randomly stop working
+ * and full system hangs for some users of PCIe devices. */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pci_enable_device(pdev);
if (rc < 0) {
@@ -3070,7 +3106,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_mwi_2;
}
- tp->cp_cmd = PCIMulRW | RxChkSum;
+ tp->cp_cmd = RxChkSum;
if ((sizeof(dma_addr_t) > 4) &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -3115,6 +3151,16 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
rtl8169_get_mac_version(tp, ioaddr);
+ /*
+ * Pretend we are using VLANs; this bypasses a nasty bug where
+ * interrupts stop flowing under high load on 8110SCd controllers.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ tp->cp_cmd |= RxVlan;
+
+ rtl_init_mdio_ops(tp);
+ rtl_init_pll_power_ops(tp);
+
/* Use appropriate default if unknown */
if (tp->mac_version == RTL_GIGA_MAC_NONE) {
netif_notice(tp, probe, dev,
@@ -3180,10 +3226,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
-#ifdef CONFIG_R8169_VLAN
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-#endif
- dev->features |= NETIF_F_GRO;
+ dev->features |= NETIF_F_HW_VLAN_TX_RX | NETIF_F_GRO;
tp->intr_mask = 0xffff;
tp->hw_start = cfg->hw_start;
@@ -3205,20 +3248,18 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->base_addr, dev->dev_addr,
(u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
- rtl8169_init_phy(dev, tp);
-
- /*
- * Pretend we are using VLANs; This bypasses a nasty bug where
- * Interrupts stop flowing on high load on 8110SCd controllers.
- */
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_28)) {
+ rtl8168_driver_start(tp);
+ }
device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);
+ netif_carrier_off(dev);
+
out:
return rc;
@@ -3240,7 +3281,14 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- flush_scheduled_work();
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_28)) {
+ rtl8168_driver_stop(tp);
+ }
+
+ cancel_delayed_work_sync(&tp->task);
+
+ rtl_release_firmware(tp);
unregister_netdev(dev);
@@ -3258,6 +3306,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
static int rtl8169_open(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
int retval = -ENOMEM;
@@ -3293,6 +3342,12 @@ static int rtl8169_open(struct net_device *dev)
napi_enable(&tp->napi);
+ rtl8169_init_phy(dev, tp);
+
+ rtl8169_vlan_mode(dev);
+
+ rtl_pll_power_up(tp);
+
rtl_hw_start(dev);
rtl8169_request_timer(dev);
@@ -3300,7 +3355,7 @@ static int rtl8169_open(struct net_device *dev)
tp->saved_wolopts = 0;
pm_runtime_put_noidle(&pdev->dev);
- rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+ rtl8169_check_link_status(dev, tp, ioaddr);
out:
return retval;
@@ -3319,11 +3374,20 @@ err_pm_runtime_put:
goto out;
}
-static void rtl8169_hw_reset(void __iomem *ioaddr)
+static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
+ void __iomem *ioaddr = tp->mmio_addr;
+
/* Disable interrupts */
rtl8169_irq_mask_and_ack(ioaddr);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28) {
+ while (RTL_R8(TxPoll) & NPQ)
+ udelay(20);
+ }
+
/* Reset the chipset */
RTL_W8(ChipCmd, CmdReset);
@@ -3437,7 +3501,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
(tp->mac_version == RTL_GIGA_MAC_VER_04))
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
- RTL_W8(EarlyTxThres, EarlyTxThld);
+ RTL_W8(EarlyTxThres, NoEarlyTx);
rtl_set_rx_max_size(ioaddr, rx_buf_sz);
@@ -3507,12 +3571,22 @@ static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
}
}
-static void rtl_csi_access_enable(void __iomem *ioaddr)
+static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
{
u32 csi;
csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
- rtl_csi_write(ioaddr, 0x070c, csi | 0x27000000);
+ rtl_csi_write(ioaddr, 0x070c, csi | bits);
+}
+
+static void rtl_csi_access_enable_1(void __iomem *ioaddr)
+{
+ rtl_csi_access_enable(ioaddr, 0x17000000);
+}
+
+static void rtl_csi_access_enable_2(void __iomem *ioaddr)
+{
+ rtl_csi_access_enable(ioaddr, 0x27000000);
}
struct ephy_info {
@@ -3547,6 +3621,21 @@ static void rtl_disable_clock_request(struct pci_dev *pdev)
}
}
+static void rtl_enable_clock_request(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int cap = tp->pcie_cap;
+
+ if (cap) {
+ u16 ctl;
+
+ pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
+ ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
+ pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
+ }
+}
+
#define R8168_CPCMD_QUIRK_MASK (\
EnableBist | \
Mac_dbgo_oe | \
@@ -3572,7 +3661,7 @@ static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
{
rtl_hw_start_8168bb(ioaddr, pdev);
- RTL_W8(EarlyTxThres, EarlyTxThld);
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
@@ -3600,7 +3689,7 @@ static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
{ 0x07, 0, 0x2000 }
};
- rtl_csi_access_enable(ioaddr);
+ rtl_csi_access_enable_2(ioaddr);
rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
@@ -3609,7 +3698,7 @@ static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
- rtl_csi_access_enable(ioaddr);
+ rtl_csi_access_enable_2(ioaddr);
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
@@ -3620,14 +3709,14 @@ static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
{
- rtl_csi_access_enable(ioaddr);
+ rtl_csi_access_enable_2(ioaddr);
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* Magic. */
RTL_W8(DBG_REG, 0x20);
- RTL_W8(EarlyTxThres, EarlyTxThld);
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
@@ -3642,7 +3731,7 @@ static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
{ 0x06, 0x0080, 0x0000 }
};
- rtl_csi_access_enable(ioaddr);
+ rtl_csi_access_enable_2(ioaddr);
RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
@@ -3658,7 +3747,7 @@ static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
{ 0x03, 0x0400, 0x0220 }
};
- rtl_csi_access_enable(ioaddr);
+ rtl_csi_access_enable_2(ioaddr);
rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
@@ -3672,24 +3761,50 @@ static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
{
- rtl_csi_access_enable(ioaddr);
+ rtl_csi_access_enable_2(ioaddr);
__rtl_hw_start_8168cp(ioaddr, pdev);
}
static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
{
- rtl_csi_access_enable(ioaddr);
+ rtl_csi_access_enable_2(ioaddr);
rtl_disable_clock_request(pdev);
- RTL_W8(EarlyTxThres, EarlyTxThld);
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
+static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168d_4[] = {
+ { 0x0b, ~0, 0x48 },
+ { 0x19, 0x20, 0x50 },
+ { 0x0c, ~0, 0x20 }
+ };
+ int i;
+
+ rtl_csi_access_enable_1(ioaddr);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
+ const struct ephy_info *e = e_info_8168d_4 + i;
+ u16 w;
+
+ w = rtl_ephy_read(ioaddr, e->offset);
+ rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
+ }
+
+ rtl_enable_clock_request(pdev);
+}
+
static void rtl_hw_start_8168(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -3698,7 +3813,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
RTL_W8(Cfg9346, Cfg9346_Unlock);
- RTL_W8(EarlyTxThres, EarlyTxThld);
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
rtl_set_rx_max_size(ioaddr, rx_buf_sz);
@@ -3709,7 +3824,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
RTL_W16(IntrMitigate, 0x5151);
/* Work around for RxFIFO overflow. */
- if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_22) {
tp->intr_event |= RxFIFOOver | PCSTimeout;
tp->intr_event &= ~RxOverflow;
}
@@ -3767,6 +3883,10 @@ static void rtl_hw_start_8168(struct net_device *dev)
rtl_hw_start_8168d(ioaddr, pdev);
break;
+ case RTL_GIGA_MAC_VER_28:
+ rtl_hw_start_8168d_4(ioaddr, pdev);
+ break;
+
default:
printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
dev->name, tp->mac_version);
@@ -3791,8 +3911,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
Cxpl_dbg_sel | \
ASF | \
PktCntrDisable | \
- PCIDAC | \
- PCIMulRW)
+ Mac_dbgo_sel)
static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
@@ -3808,7 +3927,7 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
};
u8 cfg1;
- rtl_csi_access_enable(ioaddr);
+ rtl_csi_access_enable_2(ioaddr);
RTL_W8(DBG_REG, FIX_NAK_1);
@@ -3822,21 +3941,17 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
RTL_W8(Config1, cfg1 & ~LEDS0);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
-
rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
{
- rtl_csi_access_enable(ioaddr);
+ rtl_csi_access_enable_2(ioaddr);
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
-
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
}
static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
@@ -3846,6 +3961,37 @@ static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
}
+static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8105e_1[] = {
+ { 0x07, 0, 0x4000 },
+ { 0x19, 0, 0x0200 },
+ { 0x19, 0, 0x0020 },
+ { 0x1e, 0, 0x2000 },
+ { 0x03, 0, 0x0001 },
+ { 0x19, 0, 0x0100 },
+ { 0x19, 0, 0x0004 },
+ { 0x0a, 0, 0x0020 }
+ };
+
+ /* Force LAN exit from ASPM if Rx/Tx are not idle */
+ RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+
+ /* disable Early Tally Counter */
+ RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
+
+ RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
+ RTL_W8(DLLPR, RTL_R8(DLLPR) | PM_SWITCH);
+
+ rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+}
+
+static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_hw_start_8105e_1(ioaddr, pdev);
+ rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
+}
+
static void rtl_hw_start_8101(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -3862,6 +4008,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
}
}
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_07:
rtl_hw_start_8102e_1(ioaddr, pdev);
@@ -3874,16 +4022,22 @@ static void rtl_hw_start_8101(struct net_device *dev)
case RTL_GIGA_MAC_VER_09:
rtl_hw_start_8102e_2(ioaddr, pdev);
break;
+
+ case RTL_GIGA_MAC_VER_29:
+ rtl_hw_start_8105e_1(ioaddr, pdev);
+ break;
+ case RTL_GIGA_MAC_VER_30:
+ rtl_hw_start_8105e_2(ioaddr, pdev);
+ break;
}
- RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
- RTL_W8(EarlyTxThres, EarlyTxThld);
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
rtl_set_rx_max_size(ioaddr, rx_buf_sz);
- tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
-
+ tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
RTL_W16(CPlusCmd, tp->cp_cmd);
RTL_W16(IntrMitigate, 0x0000);
@@ -3893,14 +4047,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_set_rx_tx_config_registers(tp);
- RTL_W8(Cfg9346, Cfg9346_Lock);
-
RTL_R8(IntrMask);
rtl_set_rx_mode(dev);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-
RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
RTL_W16(IntrMask, tp->intr_event);
@@ -4179,7 +4329,7 @@ static void rtl8169_tx_timeout(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl8169_hw_reset(tp->mmio_addr);
+ rtl8169_hw_reset(tp);
/* Let's wait a bit while any (async) irq lands on */
rtl8169_schedule_work(dev, rtl8169_reset_task);
@@ -4337,7 +4487,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
- void __iomem *ioaddr = tp->mmio_addr;
u16 pci_status, pci_cmd;
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
@@ -4368,13 +4517,15 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
/* The infamous DAC f*ckup only happens at boot time */
if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
+ void __iomem *ioaddr = tp->mmio_addr;
+
netif_info(tp, intr, dev, "disabling PCI DAC\n");
tp->cp_cmd &= ~PCIDAC;
RTL_W16(CPlusCmd, tp->cp_cmd);
dev->features &= ~NETIF_F_HIGHDMA;
}
- rtl8169_hw_reset(ioaddr);
+ rtl8169_hw_reset(tp);
rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
@@ -4536,12 +4687,12 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
skb_put(skb, pkt_size);
skb->protocol = eth_type_trans(skb, dev);
- if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
- if (likely(polling))
- napi_gro_receive(&tp->napi, skb);
- else
- netif_rx(skb);
- }
+ rtl8169_rx_vlan_tag(desc, skb);
+
+ if (likely(polling))
+ napi_gro_receive(&tp->napi, skb);
+ else
+ netif_rx(skb);
dev->stats.rx_bytes += pkt_size;
dev->stats.rx_packets++;
@@ -4586,12 +4737,33 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
break;
}
- /* Work around for rx fifo overflow */
- if (unlikely(status & RxFIFOOver) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
- netif_stop_queue(dev);
- rtl8169_tx_timeout(dev);
- break;
+ if (unlikely(status & RxFIFOOver)) {
+ switch (tp->mac_version) {
+ /* Work around for rx fifo overflow */
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_26:
+ netif_stop_queue(dev);
+ rtl8169_tx_timeout(dev);
+ goto done;
+ /* Testers needed. */
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ /* Experimental science. Pktgen proof. */
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_25:
+ if (status == RxFIFOOver)
+ goto done;
+ break;
+ default:
+ break;
+ }
}
if (unlikely(status & SYSErr)) {
@@ -4600,7 +4772,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
}
if (status & LinkChg)
- rtl8169_check_link_status(dev, tp, ioaddr);
+ __rtl8169_check_link_status(dev, tp, ioaddr, true);
/* We need to see the latest version of tp->intr_mask to
* avoid ignoring an MSI interrupt and having to wait for
@@ -4627,7 +4799,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
(status & RxFIFOOver) ? (status | RxOverflow) : status);
status = RTL_R16(IntrStatus);
}
-
+done:
return IRQ_RETVAL(handled);
}
@@ -4701,6 +4873,8 @@ static void rtl8169_down(struct net_device *dev)
rtl8169_tx_clear(tp);
rtl8169_rx_clear(tp);
+
+ rtl_pll_power_down(tp);
}
static int rtl8169_close(struct net_device *dev)
@@ -4805,9 +4979,13 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
static void rtl8169_net_suspend(struct net_device *dev)
{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
if (!netif_running(dev))
return;
+ rtl_pll_power_down(tp);
+
netif_device_detach(dev);
netif_stop_queue(dev);
}
@@ -4826,7 +5004,12 @@ static int rtl8169_suspend(struct device *device)
static void __rtl8169_resume(struct net_device *dev)
{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
netif_device_attach(dev);
+
+ rtl_pll_power_up(tp);
+
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
@@ -4890,11 +5073,7 @@ static int rtl8169_runtime_idle(struct device *device)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if (!tp->TxDescArray)
- return 0;
-
- rtl8169_check_link_status(dev, tp, tp->mmio_addr);
- return -EBUSY;
+ return tp->TxDescArray ? -EBUSY : 0;
}
static const struct dev_pm_ops rtl8169_pm_ops = {
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ecc25aab896a..2ad6364103ea 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -88,14 +88,14 @@
#include "s2io.h"
#include "s2io-regs.h"
-#define DRV_VERSION "2.0.26.27"
+#define DRV_VERSION "2.0.26.28"
/* S2io Driver name & version. */
-static char s2io_driver_name[] = "Neterion";
-static char s2io_driver_version[] = DRV_VERSION;
+static const char s2io_driver_name[] = "Neterion";
+static const char s2io_driver_version[] = DRV_VERSION;
-static int rxd_size[2] = {32, 48};
-static int rxd_count[2] = {127, 85};
+static const int rxd_size[2] = {32, 48};
+static const int rxd_count[2] = {127, 85};
static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
@@ -3598,10 +3598,12 @@ static int s2io_set_swapper(struct s2io_nic *sp)
val64 = readq(&bar0->pif_rd_swapper_fb);
if (val64 != 0x0123456789ABCDEFULL) {
int i = 0;
- u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
- 0x8100008181000081ULL, /* FE=1, SE=0 */
- 0x4200004242000042ULL, /* FE=0, SE=1 */
- 0}; /* FE=0, SE=0 */
+ static const u64 value[] = {
+ 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
+ 0x8100008181000081ULL, /* FE=1, SE=0 */
+ 0x4200004242000042ULL, /* FE=0, SE=1 */
+ 0 /* FE=0, SE=0 */
+ };
while (i < 4) {
writeq(value[i], &bar0->swapper_ctrl);
@@ -3627,10 +3629,12 @@ static int s2io_set_swapper(struct s2io_nic *sp)
if (val64 != valt) {
int i = 0;
- u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
- 0x0081810000818100ULL, /* FE=1, SE=0 */
- 0x0042420000424200ULL, /* FE=0, SE=1 */
- 0}; /* FE=0, SE=0 */
+ static const u64 value[] = {
+ 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
+ 0x0081810000818100ULL, /* FE=1, SE=0 */
+ 0x0042420000424200ULL, /* FE=0, SE=1 */
+ 0 /* FE=0, SE=0 */
+ };
while (i < 4) {
writeq((value[i] | valr), &bar0->swapper_ctrl);
@@ -5568,30 +5572,27 @@ static void s2io_ethtool_gringparam(struct net_device *dev,
struct s2io_nic *sp = netdev_priv(dev);
int i, tx_desc_count = 0, rx_desc_count = 0;
- if (sp->rxd_mode == RXD_MODE_1)
+ if (sp->rxd_mode == RXD_MODE_1) {
ering->rx_max_pending = MAX_RX_DESC_1;
- else if (sp->rxd_mode == RXD_MODE_3B)
+ ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
+ } else {
ering->rx_max_pending = MAX_RX_DESC_2;
+ ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
+ }
+ ering->rx_mini_max_pending = 0;
ering->tx_max_pending = MAX_TX_DESC;
- for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
- tx_desc_count += sp->config.tx_cfg[i].fifo_len;
- DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
- ering->tx_pending = tx_desc_count;
- rx_desc_count = 0;
- for (i = 0 ; i < sp->config.rx_ring_num ; i++)
+ for (i = 0; i < sp->config.rx_ring_num; i++)
rx_desc_count += sp->config.rx_cfg[i].num_rxd;
-
ering->rx_pending = rx_desc_count;
-
- ering->rx_mini_max_pending = 0;
- ering->rx_mini_pending = 0;
- if (sp->rxd_mode == RXD_MODE_1)
- ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
- else if (sp->rxd_mode == RXD_MODE_3B)
- ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
ering->rx_jumbo_pending = rx_desc_count;
+ ering->rx_mini_pending = 0;
+
+ for (i = 0; i < sp->config.tx_fifo_num; i++)
+ tx_desc_count += sp->config.tx_cfg[i].fifo_len;
+ ering->tx_pending = tx_desc_count;
+ DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
}
/**
@@ -7555,7 +7556,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
*/
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (ring_data->lro) {
- u32 tcp_len;
+ u32 tcp_len = 0;
u8 *tcp;
int ret = 0;
@@ -7692,6 +7693,8 @@ static void s2io_init_pci(struct s2io_nic *sp)
static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
u8 *dev_multiq)
{
+ int i;
+
if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
"(%d) not supported\n", tx_fifo_num);
@@ -7750,6 +7753,15 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
rx_ring_mode = 1;
}
+
+ for (i = 0; i < MAX_RX_RINGS; i++)
+ if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
+ DBG_PRINT(ERR_DBG, "Requested rx ring size not "
+ "supported\nDefaulting to %d\n",
+ MAX_RX_BLOCKS_PER_RING);
+ rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
+ }
+
return SUCCESS;
}
@@ -8321,8 +8333,7 @@ mem_alloc_failed:
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
- struct net_device *dev =
- (struct net_device *)pci_get_drvdata(pdev);
+ struct net_device *dev = pci_get_drvdata(pdev);
struct s2io_nic *sp;
if (dev == NULL) {
@@ -8330,9 +8341,11 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
return;
}
- flush_scheduled_work();
-
sp = netdev_priv(dev);
+
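+ /* Cancel this adapter's outstanding work items rather than flushing the global workqueue. */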
+ cancel_work_sync(&sp->rst_timer_task);
+ cancel_work_sync(&sp->set_link_task);
+
unregister_netdev(dev);
free_shared_mem(sp);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 00b8614efe48..7d160306b651 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -355,13 +355,12 @@ struct stat_block {
#define FIFO_OTHER_MAX_NUM 1
-#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 127 )
-#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 85 )
-#define MAX_RX_DESC_3 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 85 )
+#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 128)
+#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 86)
#define MAX_TX_DESC (MAX_AVAILABLE_TXDS)
/* FIFO mappings for all possible number of fifos configured */
-static int fifo_map[][MAX_TX_FIFOS] = {
+static const int fifo_map[][MAX_TX_FIFOS] = {
{0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 1, 1, 1, 1},
{0, 0, 0, 1, 1, 1, 2, 2},
@@ -372,7 +371,7 @@ static int fifo_map[][MAX_TX_FIFOS] = {
{0, 1, 2, 3, 4, 5, 6, 7},
};
-static u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
+static const u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
/* Maintains Per FIFO related information. */
struct tx_fifo_config {
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 417adf372828..76290a8c3c14 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1449,7 +1449,8 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
dev->irq = pdev->irq;
/* faked with skb_copy_and_csum_dev */
- dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
+ dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->netdev_ops = &sc92031_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 05df20e47976..b8bd936374f2 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,9 +21,9 @@
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
+#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "efx.h"
-#include "mdio_10g.h"
#include "nic.h"
#include "mcdi.h"
@@ -197,7 +197,9 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
static void efx_remove_channels(struct efx_nic *efx);
static void efx_remove_port(struct efx_nic *efx);
+static void efx_init_napi(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
+static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);
@@ -306,6 +308,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
channel->irq_mod_score = 0;
}
+ efx_filter_rfs_expire(channel);
+
/* There is no race here; although napi_disable() will
* only wait for napi_complete(), this isn't a problem
* since efx_channel_processed() will have no effect if
@@ -335,8 +339,10 @@ void efx_process_channel_now(struct efx_channel *channel)
/* Disable interrupts and wait for ISRs to complete */
efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq)
+ if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
+ efx->legacy_irq_enabled = false;
+ }
if (channel->irq)
synchronize_irq(channel->irq);
@@ -351,6 +357,8 @@ void efx_process_channel_now(struct efx_channel *channel)
efx_channel_processed(channel);
napi_enable(&channel->napi_str);
+ if (efx->legacy_irq)
+ efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
}
@@ -426,6 +434,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
*channel = *old_channel;
+ channel->napi_dev = NULL;
memset(&channel->eventq, 0, sizeof(channel->eventq));
rx_queue = &channel->rx_queue;
@@ -455,9 +464,6 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
}
}
- spin_lock_init(&channel->tx_stop_lock);
- atomic_set(&channel->tx_stop_count, 1);
-
rx_queue = &channel->rx_queue;
rx_queue->efx = efx;
setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
@@ -670,7 +676,7 @@ static void efx_fini_channels(struct efx_nic *efx)
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
- efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_fini_tx_queue(tx_queue);
efx_fini_eventq(channel);
}
@@ -686,7 +692,7 @@ static void efx_remove_channel(struct efx_channel *channel)
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_remove_rx_queue(rx_queue);
- efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue);
efx_remove_eventq(channel);
}
@@ -736,9 +742,13 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
if (rc)
goto rollback;
+ efx_init_napi(efx);
+
/* Destroy old channels */
- for (i = 0; i < efx->n_channels; i++)
+ for (i = 0; i < efx->n_channels; i++) {
+ efx_fini_napi_channel(other_channel[i]);
efx_remove_channel(other_channel[i]);
+ }
out:
/* Free unused channel structures */
for (i = 0; i < efx->n_channels; i++)
@@ -910,6 +920,7 @@ static void efx_mac_work(struct work_struct *data)
static int efx_probe_port(struct efx_nic *efx)
{
+ unsigned char *perm_addr;
int rc;
netif_dbg(efx, probe, efx->net_dev, "create port\n");
@@ -923,11 +934,12 @@ static int efx_probe_port(struct efx_nic *efx)
return rc;
/* Sanity check MAC address */
- if (is_valid_ether_addr(efx->mac_address)) {
- memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
+ perm_addr = efx->net_dev->perm_addr;
+ if (is_valid_ether_addr(perm_addr)) {
+ memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
} else {
netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
- efx->mac_address);
+ perm_addr);
if (!allow_bad_hwaddr) {
rc = -EINVAL;
goto err;
@@ -1092,8 +1104,8 @@ static int efx_init_io(struct efx_nic *efx)
rc = -EIO;
goto fail3;
}
- efx->membase = ioremap_nocache(efx->membase_phys,
- efx->type->mem_map_size);
+ efx->membase = ioremap_wc(efx->membase_phys,
+ efx->type->mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
@@ -1144,6 +1156,9 @@ static int efx_wanted_channels(void)
int count;
int cpu;
+ if (rss_cpus)
+ return rss_cpus;
+
if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
printk(KERN_WARNING
"sfc: RSS disabled due to allocation failure\n");
@@ -1163,10 +1178,32 @@ static int efx_wanted_channels(void)
return count;
}
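+/* Build the reverse IRQ-to-CPU map consumed by accelerated RFS, one entry per RX channel's MSI-X vector. */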
+static int
+efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
+{
+#ifdef CONFIG_RFS_ACCEL
+ int i, rc;
+
+ efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
+ if (!efx->net_dev->rx_cpu_rmap)
+ return -ENOMEM;
+ for (i = 0; i < efx->n_rx_channels; i++) {
+ rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+ xentries[i].vector);
+ if (rc) {
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+ return rc;
+ }
+ }
+#endif
+ return 0;
+}
+
/* Probe the number and type of interrupts we are able to obtain, and
* the resulting numbers of channels and RX queues.
*/
-static void efx_probe_interrupts(struct efx_nic *efx)
+static int efx_probe_interrupts(struct efx_nic *efx)
{
int max_channels =
min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
@@ -1208,6 +1245,11 @@ static void efx_probe_interrupts(struct efx_nic *efx)
efx->n_tx_channels = efx->n_channels;
efx->n_rx_channels = efx->n_channels;
}
+ rc = efx_init_rx_cpu_rmap(efx, xentries);
+ if (rc) {
+ pci_disable_msix(efx->pci_dev);
+ return rc;
+ }
for (i = 0; i < n_channels; i++)
efx_get_channel(efx, i)->irq =
xentries[i].vector;
@@ -1241,6 +1283,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
efx->n_tx_channels = 1;
efx->legacy_irq = efx->pci_dev->irq;
}
+
+ return 0;
}
static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1257,32 +1301,10 @@ static void efx_remove_interrupts(struct efx_nic *efx)
efx->legacy_irq = 0;
}
-struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
-{
- unsigned tx_channel_offset =
- separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
- EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
- type >= EFX_TXQ_TYPES);
- return &efx->channel[tx_channel_offset + index]->tx_queue[type];
-}
-
static void efx_set_channels(struct efx_nic *efx)
{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
- unsigned tx_channel_offset =
+ efx->tx_channel_offset =
separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-
- /* Channel pointers were set in efx_init_struct() but we now
- * need to clear them for TX queues in any RX-only channels. */
- efx_for_each_channel(channel, efx) {
- if (channel->channel - tx_channel_offset >=
- efx->n_tx_channels) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- tx_queue->channel = NULL;
- }
- }
}
static int efx_probe_nic(struct efx_nic *efx)
@@ -1299,7 +1321,9 @@ static int efx_probe_nic(struct efx_nic *efx)
/* Determine the number of channels and queues by trying to hook
* in MSI-X interrupts. */
- efx_probe_interrupts(efx);
+ rc = efx_probe_interrupts(efx);
+ if (rc)
+ goto fail;
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1314,6 +1338,10 @@ static int efx_probe_nic(struct efx_nic *efx)
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
return 0;
+
+fail:
+ efx->type->remove(efx);
+ return rc;
}
static void efx_remove_nic(struct efx_nic *efx)
@@ -1394,12 +1422,14 @@ static void efx_start_all(struct efx_nic *efx)
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
- efx_for_each_channel(channel, efx) {
- if (efx_dev_registered(efx))
- efx_wake_queue(channel);
+ if (efx_dev_registered(efx))
+ netif_tx_wake_all_queues(efx->net_dev);
+
+ efx_for_each_channel(channel, efx)
efx_start_channel(channel);
- }
+ if (efx->legacy_irq)
+ efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
/* Switch to event based MCDI completions after enabling interrupts.
@@ -1460,8 +1490,10 @@ static void efx_stop_all(struct efx_nic *efx)
/* Disable interrupts and wait for ISR to complete */
efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq)
+ if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
+ efx->legacy_irq_enabled = false;
+ }
efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
@@ -1482,9 +1514,7 @@ static void efx_stop_all(struct efx_nic *efx)
/* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */
if (efx_dev_registered(efx)) {
- struct efx_channel *channel;
- efx_for_each_channel(channel, efx)
- efx_stop_queue(channel);
+ netif_tx_stop_all_queues(efx->net_dev);
netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
@@ -1526,9 +1556,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
efx->irq_rx_adaptive = rx_adaptive;
efx->irq_rx_moderation = rx_ticks;
efx_for_each_channel(channel, efx) {
- if (efx_channel_get_rx_queue(channel))
+ if (efx_channel_has_rx_queue(channel))
channel->irq_moderation = rx_ticks;
- else if (efx_channel_get_tx_queue(channel, 0))
+ else if (efx_channel_has_tx_queues(channel))
channel->irq_moderation = tx_ticks;
}
}
@@ -1593,7 +1623,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
*
**************************************************************************/
-static int efx_init_napi(struct efx_nic *efx)
+static void efx_init_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
@@ -1602,18 +1632,21 @@ static int efx_init_napi(struct efx_nic *efx)
netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight);
}
- return 0;
+}
+
+static void efx_fini_napi_channel(struct efx_channel *channel)
+{
+ if (channel->napi_dev)
+ netif_napi_del(&channel->napi_str);
+ channel->napi_dev = NULL;
}
static void efx_fini_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
- efx_for_each_channel(channel, efx) {
- if (channel->napi_dev)
- netif_napi_del(&channel->napi_str);
- channel->napi_dev = NULL;
- }
+ efx_for_each_channel(channel, efx)
+ efx_fini_napi_channel(channel);
}
/**************************************************************************
@@ -1841,6 +1874,10 @@ static const struct net_device_ops efx_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
#endif
+ .ndo_setup_tc = efx_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = efx_filter_rfs,
+#endif
};
static void efx_update_name(struct efx_nic *efx)
@@ -1877,6 +1914,7 @@ static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
+ struct efx_channel *channel;
int rc;
net_dev->watchdog_timeo = 5 * HZ;
@@ -1899,6 +1937,12 @@ static int efx_register_netdev(struct efx_nic *efx)
if (rc)
goto fail_locked;
+ efx_for_each_channel(channel, efx) {
+ struct efx_tx_queue *tx_queue;
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_init_tx_queue_core_txq(tx_queue);
+ }
+
/* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(efx->net_dev);
@@ -1962,7 +2006,6 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
- mutex_lock(&efx->spi_lock);
efx_fini_channels(efx);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2004,7 +2047,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
efx_init_channels(efx);
efx_restore_filters(efx);
- mutex_unlock(&efx->spi_lock);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
@@ -2014,7 +2056,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
fail:
efx->port_initialized = false;
- mutex_unlock(&efx->spi_lock);
mutex_unlock(&efx->mac_lock);
return rc;
@@ -2202,8 +2243,6 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
/* Initialise common structures */
memset(efx, 0, sizeof(*efx));
spin_lock_init(&efx->biu_lock);
- mutex_init(&efx->mdio_lock);
- mutex_init(&efx->spi_lock);
#ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list);
#endif
@@ -2276,6 +2315,10 @@ static void efx_fini_struct(struct efx_nic *efx)
*/
static void efx_pci_remove_main(struct efx_nic *efx)
{
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+#endif
efx_nic_fini_interrupt(efx);
efx_fini_channels(efx);
efx_fini_port(efx);
@@ -2335,9 +2378,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
if (rc)
goto fail1;
- rc = efx_init_napi(efx);
- if (rc)
- goto fail2;
+ efx_init_napi(efx);
rc = efx->type->init(efx);
if (rc) {
@@ -2368,7 +2409,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
efx->type->fini(efx);
fail3:
efx_fini_napi(efx);
- fail2:
efx_remove_all(efx);
fail1:
return rc;
@@ -2392,7 +2432,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
int i, rc;
/* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+ net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+ EFX_MAX_RX_QUEUES);
if (!net_dev)
return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 10a1bf40da96..3d83a1f74fef 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,7 @@
extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
extern netdev_tx_t
@@ -36,8 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern void efx_stop_queue(struct efx_channel *channel);
-extern void efx_wake_queue(struct efx_channel *channel);
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
/* RX */
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -74,9 +74,23 @@ extern int efx_filter_insert_filter(struct efx_nic *efx,
bool replace);
extern int efx_filter_remove_filter(struct efx_nic *efx,
struct efx_filter_spec *spec);
-extern void efx_filter_table_clear(struct efx_nic *efx,
- enum efx_filter_table_id table_id,
- enum efx_filter_priority priority);
+extern void efx_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+#ifdef CONFIG_RFS_ACCEL
+extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
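+/* Opportunistically expire old accelerated-RFS filters: scan a quota of entries once this channel has accumulated enough new filters. */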
+static inline void efx_filter_rfs_expire(struct efx_channel *channel)
+{
+ if (channel->rfs_filters_added >= 60 &&
+ __efx_filter_rfs_expire(channel->efx, 100))
+ channel->rfs_filters_added -= 60;
+}
+#define efx_filter_rfs_enabled() 1
+#else
+static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
+#define efx_filter_rfs_enabled() 0
+#endif
/* Channels */
extern void efx_process_channel_now(struct efx_channel *channel);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index edb9d16b8b47..807178ef65ad 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -11,14 +11,13 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
+#include <linux/in.h>
#include "net_driver.h"
#include "workarounds.h"
#include "selftest.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
-#include "spi.h"
-#include "mdio_10g.h"
struct ethtool_string {
char name[ETH_GSTRING_LEN];
@@ -29,7 +28,8 @@ struct efx_ethtool_stat {
enum {
EFX_ETHTOOL_STAT_SOURCE_mac_stats,
EFX_ETHTOOL_STAT_SOURCE_nic,
- EFX_ETHTOOL_STAT_SOURCE_channel
+ EFX_ETHTOOL_STAT_SOURCE_channel,
+ EFX_ETHTOOL_STAT_SOURCE_tx_queue
} source;
unsigned offset;
u64(*get_stat) (void *field); /* Reader function */
@@ -87,6 +87,10 @@ static u64 efx_get_atomic_stat(void *field)
EFX_ETHTOOL_STAT(field, channel, n_##field, \
unsigned int, efx_get_uint_stat)
+#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
+ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
+ unsigned int, efx_get_uint_stat)
+
static struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
@@ -117,6 +121,10 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
@@ -238,8 +246,8 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- siena_print_fwver(efx, info->fw_version,
- sizeof(info->fw_version));
+ efx_mcdi_print_fwver(efx, info->fw_version,
+ sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
@@ -471,6 +479,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
struct efx_mac_stats *mac_stats = &efx->mac_stats;
struct efx_ethtool_stat *stat;
struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
struct rtnl_link_stats64 temp;
int i;
@@ -496,6 +505,15 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
data[i] += stat->get_stat((void *)channel +
stat->offset);
break;
+ case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ data[i] +=
+ stat->get_stat((void *)tx_queue
+ + stat->offset);
+ }
+ break;
}
}
}
@@ -503,7 +521,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
{
struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
- unsigned long features;
+ u32 features;
features = NETIF_F_TSO;
if (efx->type->offload_features & NETIF_F_V6_CSUM)
@@ -520,7 +538,7 @@ static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
{
struct efx_nic *efx = netdev_priv(net_dev);
- unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM;
+ u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
if (enable)
net_dev->features |= features;
@@ -560,12 +578,8 @@ static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
if (rc)
return rc;
- if (!(data & ETH_FLAG_NTUPLE)) {
- efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP,
- EFX_FILTER_PRI_MANUAL);
- efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_PRI_MANUAL);
- }
+ if (!(data & ETH_FLAG_NTUPLE))
+ efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
return 0;
}
@@ -574,9 +588,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_self_tests efx_tests;
+ struct efx_self_tests *efx_tests;
int already_up;
- int rc;
+ int rc = -ENOMEM;
+
+ efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+ if (!efx_tests)
+ goto fail;
+
ASSERT_RTNL();
if (efx->state != STATE_RUNNING) {
@@ -584,6 +603,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
goto fail1;
}
+ netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
/* We need rx buffers and interrupts. */
already_up = (efx->net_dev->flags & IFF_UP);
if (!already_up) {
@@ -591,25 +613,24 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
- goto fail2;
+ goto fail1;
}
}
- memset(&efx_tests, 0, sizeof(efx_tests));
-
- rc = efx_selftest(efx, &efx_tests, test->flags);
+ rc = efx_selftest(efx, efx_tests, test->flags);
if (!already_up)
dev_close(efx->net_dev);
- netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n",
- rc == 0 ? "passed" : "failed",
- (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+ netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
+ rc == 0 ? "passed" : "failed",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
- fail2:
- fail1:
+fail1:
/* Fill ethtool results structures */
- efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
+ efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+ kfree(efx_tests);
+fail:
if (rc)
test->flags |= ETH_TEST_FL_FAILED;
}
@@ -622,68 +643,6 @@ static int efx_ethtool_nway_reset(struct net_device *net_dev)
return mdio45_nway_restart(&efx->mdio);
}
-static u32 efx_ethtool_get_link(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- return efx->link_state.up;
-}
-
-static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_spi_device *spi = efx->spi_eeprom;
-
- if (!spi)
- return 0;
- return min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
- min(spi->size, EFX_EEPROM_BOOTCONFIG_START);
-}
-
-static int efx_ethtool_get_eeprom(struct net_device *net_dev,
- struct ethtool_eeprom *eeprom, u8 *buf)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_spi_device *spi = efx->spi_eeprom;
- size_t len;
- int rc;
-
- rc = mutex_lock_interruptible(&efx->spi_lock);
- if (rc)
- return rc;
- rc = falcon_spi_read(efx, spi,
- eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
- eeprom->len, &len, buf);
- mutex_unlock(&efx->spi_lock);
-
- eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC;
- eeprom->len = len;
- return rc;
-}
-
-static int efx_ethtool_set_eeprom(struct net_device *net_dev,
- struct ethtool_eeprom *eeprom, u8 *buf)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_spi_device *spi = efx->spi_eeprom;
- size_t len;
- int rc;
-
- if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC)
- return -EINVAL;
-
- rc = mutex_lock_interruptible(&efx->spi_lock);
- if (rc)
- return rc;
- rc = falcon_spi_write(efx, spi,
- eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
- eeprom->len, &len, buf);
- mutex_unlock(&efx->spi_lock);
-
- eeprom->len = len;
- return rc;
-}
-
static int efx_ethtool_get_coalesce(struct net_device *net_dev,
struct ethtool_coalesce *coalesce)
{
@@ -695,7 +654,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
/* Find lowest IRQ moderation across all used TX queues */
coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
efx_for_each_channel(channel, efx) {
- if (!efx_channel_get_tx_queue(channel, 0))
+ if (!efx_channel_has_tx_queues(channel))
continue;
if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
if (channel->channel < efx->n_rx_channels)
@@ -740,8 +699,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
/* If the channel is shared only allow RX parameters to be set */
efx_for_each_channel(channel, efx) {
- if (efx_channel_get_rx_queue(channel) &&
- efx_channel_get_tx_queue(channel, 0) &&
+ if (efx_channel_has_rx_queue(channel) &&
+ efx_channel_has_tx_queues(channel) &&
tx_usecs) {
netif_err(efx, drv, efx->net_dev, "Channel is shared. "
"Only RX coalescing may be set\n");
@@ -978,6 +937,7 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
struct efx_filter_spec filter;
+ int rc;
/* Range-check action */
if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
@@ -987,9 +947,16 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
if (~ntuple->fs.data_mask)
return -EINVAL;
+ efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
+ (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
+ 0xfff : ntuple->fs.action);
+
switch (ntuple->fs.flow_type) {
case TCP_V4_FLOW:
- case UDP_V4_FLOW:
+ case UDP_V4_FLOW: {
+ u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
+ IPPROTO_TCP : IPPROTO_UDP);
+
/* Must match all of destination, */
if (ip_mask->ip4dst | ip_mask->pdst)
return -EINVAL;
@@ -1001,7 +968,22 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
/* and nothing else */
if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
return -EINVAL;
+
+ if (!ip_mask->ip4src)
+ rc = efx_filter_set_ipv4_full(&filter, proto,
+ ip_entry->ip4dst,
+ ip_entry->pdst,
+ ip_entry->ip4src,
+ ip_entry->psrc);
+ else
+ rc = efx_filter_set_ipv4_local(&filter, proto,
+ ip_entry->ip4dst,
+ ip_entry->pdst);
+ if (rc)
+ return rc;
break;
+ }
+
case ETHER_FLOW:
/* Must match all of destination, */
if (!is_zero_ether_addr(mac_mask->h_dest))
@@ -1014,58 +996,24 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
if (!is_broadcast_ether_addr(mac_mask->h_source) ||
mac_mask->h_proto != htons(0xffff))
return -EINVAL;
+
+ rc = efx_filter_set_eth_local(
+ &filter,
+ (ntuple->fs.vlan_tag_mask == 0xf000) ?
+ ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
+ mac_entry->h_dest);
+ if (rc)
+ return rc;
break;
+
default:
return -EINVAL;
}
- filter.priority = EFX_FILTER_PRI_MANUAL;
- filter.flags = 0;
-
- switch (ntuple->fs.flow_type) {
- case TCP_V4_FLOW:
- if (!ip_mask->ip4src)
- efx_filter_set_rx_tcp_full(&filter,
- htonl(ip_entry->ip4src),
- htons(ip_entry->psrc),
- htonl(ip_entry->ip4dst),
- htons(ip_entry->pdst));
- else
- efx_filter_set_rx_tcp_wild(&filter,
- htonl(ip_entry->ip4dst),
- htons(ip_entry->pdst));
- break;
- case UDP_V4_FLOW:
- if (!ip_mask->ip4src)
- efx_filter_set_rx_udp_full(&filter,
- htonl(ip_entry->ip4src),
- htons(ip_entry->psrc),
- htonl(ip_entry->ip4dst),
- htons(ip_entry->pdst));
- else
- efx_filter_set_rx_udp_wild(&filter,
- htonl(ip_entry->ip4dst),
- htons(ip_entry->pdst));
- break;
- case ETHER_FLOW:
- if (ntuple->fs.vlan_tag_mask == 0xf000)
- efx_filter_set_rx_mac_full(&filter,
- ntuple->fs.vlan_tag & 0xfff,
- mac_entry->h_dest);
- else
- efx_filter_set_rx_mac_wild(&filter, mac_entry->h_dest);
- break;
- }
-
- if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) {
+ if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
return efx_filter_remove_filter(efx, &filter);
- } else {
- if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
- filter.dmaq_id = 0xfff;
- else
- filter.dmaq_id = ntuple->fs.action;
+ else
return efx_filter_insert_filter(efx, &filter, true);
- }
}
static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
@@ -1115,10 +1063,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_msglevel = efx_ethtool_get_msglevel,
.set_msglevel = efx_ethtool_set_msglevel,
.nway_reset = efx_ethtool_nway_reset,
- .get_link = efx_ethtool_get_link,
- .get_eeprom_len = efx_ethtool_get_eeprom_len,
- .get_eeprom = efx_ethtool_get_eeprom,
- .set_eeprom = efx_ethtool_set_eeprom,
+ .get_link = ethtool_op_get_link,
.get_coalesce = efx_ethtool_get_coalesce,
.set_coalesce = efx_ethtool_set_coalesce,
.get_ringparam = efx_ethtool_get_ringparam,
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 267019bb2b15..734fcfb52e85 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -24,7 +24,6 @@
#include "nic.h"
#include "regs.h"
#include "io.h"
-#include "mdio_10g.h"
#include "phy.h"
#include "workarounds.h"
@@ -255,7 +254,6 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
/* Input validation */
if (len > FALCON_SPI_MAX_LEN)
return -EINVAL;
- BUG_ON(!mutex_is_locked(&efx->spi_lock));
/* Check that previous command is not still running */
rc = falcon_spi_poll(efx);
@@ -719,6 +717,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
int prtad, int devad, u16 addr, u16 value)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t reg;
int rc;
@@ -726,7 +725,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
"writing MDIO %d register %d.%d with 0x%04x\n",
prtad, devad, addr, value);
- mutex_lock(&efx->mdio_lock);
+ mutex_lock(&nic_data->mdio_lock);
/* Check MDIO not currently being accessed */
rc = falcon_gmii_wait(efx);
@@ -762,7 +761,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
}
out:
- mutex_unlock(&efx->mdio_lock);
+ mutex_unlock(&nic_data->mdio_lock);
return rc;
}
@@ -771,10 +770,11 @@ static int falcon_mdio_read(struct net_device *net_dev,
int prtad, int devad, u16 addr)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t reg;
int rc;
- mutex_lock(&efx->mdio_lock);
+ mutex_lock(&nic_data->mdio_lock);
/* Check MDIO not currently being accessed */
rc = falcon_gmii_wait(efx);
@@ -813,7 +813,7 @@ static int falcon_mdio_read(struct net_device *net_dev,
}
out:
- mutex_unlock(&efx->mdio_lock);
+ mutex_unlock(&nic_data->mdio_lock);
return rc;
}
@@ -841,6 +841,7 @@ static int falcon_probe_port(struct efx_nic *efx)
}
/* Fill out MDIO structure and loopback modes */
+ mutex_init(&nic_data->mdio_lock);
efx->mdio.mdio_read = falcon_mdio_read;
efx->mdio.mdio_write = falcon_mdio_write;
rc = efx->phy_op->probe(efx);
@@ -880,6 +881,41 @@ static void falcon_remove_port(struct efx_nic *efx)
efx_nic_free_buffer(efx, &efx->stats_buffer);
}
+/* Global events are basically PHY events */
+static bool
+falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
+ EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
+ EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
+ /* Ignored */
+ return true;
+
+ if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
+ EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
+ nic_data->xmac_poll_required = true;
+ return true;
+ }
+
+ if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
+ EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
+ EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen global RX_RESET event. Resetting.\n",
+ channel->channel);
+
+ atomic_inc(&efx->rx_reset);
+ efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
+ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ return true;
+ }
+
+ return false;
+}
+
/**************************************************************************
*
* Falcon test code
@@ -889,6 +925,7 @@ static void falcon_remove_port(struct efx_nic *efx)
static int
falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_nvconfig *nvconfig;
struct efx_spi_device *spi;
void *region;
@@ -896,8 +933,11 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
__le16 *word, *limit;
u32 csum;
- spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
- if (!spi)
+ if (efx_spi_present(&nic_data->spi_flash))
+ spi = &nic_data->spi_flash;
+ else if (efx_spi_present(&nic_data->spi_eeprom))
+ spi = &nic_data->spi_eeprom;
+ else
return -EINVAL;
region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
@@ -905,12 +945,13 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
return -ENOMEM;
nvconfig = region + FALCON_NVCONFIG_OFFSET;
- mutex_lock(&efx->spi_lock);
+ mutex_lock(&nic_data->spi_lock);
rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
- mutex_unlock(&efx->spi_lock);
+ mutex_unlock(&nic_data->spi_lock);
if (rc) {
netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
- efx->spi_flash ? "flash" : "EEPROM");
+ efx_spi_present(&nic_data->spi_flash) ?
+ "flash" : "EEPROM");
rc = -EIO;
goto out;
}
@@ -1012,7 +1053,7 @@ static int falcon_b0_test_registers(struct efx_nic *efx)
/* Resets NIC to known state. This routine must be called in process
* context and is allowed to sleep. */
-static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t glb_ctl_reg_ker;
@@ -1066,22 +1107,9 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
/* Restore PCI configuration if needed */
if (method == RESET_TYPE_WORLD) {
- if (efx_nic_is_dual_func(efx)) {
- rc = pci_restore_state(nic_data->pci_dev2);
- if (rc) {
- netif_err(efx, drv, efx->net_dev,
- "failed to restore PCI config for "
- "the secondary function\n");
- goto fail3;
- }
- }
- rc = pci_restore_state(efx->pci_dev);
- if (rc) {
- netif_err(efx, drv, efx->net_dev,
- "failed to restore PCI config for the "
- "primary function\n");
- goto fail4;
- }
+ if (efx_nic_is_dual_func(efx))
+ pci_restore_state(nic_data->pci_dev2);
+ pci_restore_state(efx->pci_dev);
netif_dbg(efx, drv, efx->net_dev,
"successfully restored PCI config\n");
}
@@ -1092,7 +1120,7 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
rc = -ETIMEDOUT;
netif_err(efx, hw, efx->net_dev,
"timed out waiting for hardware reset\n");
- goto fail5;
+ goto fail3;
}
netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
@@ -1100,11 +1128,21 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
-fail3:
pci_restore_state(efx->pci_dev);
fail1:
-fail4:
-fail5:
+fail3:
+ return rc;
+}
+
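+/* Serialise the reset against SPI accesses by taking the SPI lock around the low-level reset. */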
+static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ mutex_lock(&nic_data->spi_lock);
+ rc = __falcon_reset_hw(efx, method);
+ mutex_unlock(&nic_data->spi_lock);
+
return rc;
}
@@ -1189,16 +1227,11 @@ static int falcon_reset_sram(struct efx_nic *efx)
return -ETIMEDOUT;
}
-static int falcon_spi_device_init(struct efx_nic *efx,
- struct efx_spi_device **spi_device_ret,
+static void falcon_spi_device_init(struct efx_nic *efx,
+ struct efx_spi_device *spi_device,
unsigned int device_id, u32 device_type)
{
- struct efx_spi_device *spi_device;
-
if (device_type != 0) {
- spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
- if (!spi_device)
- return -ENOMEM;
spi_device->device_id = device_id;
spi_device->size =
1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
@@ -1215,27 +1248,15 @@ static int falcon_spi_device_init(struct efx_nic *efx,
1 << SPI_DEV_TYPE_FIELD(device_type,
SPI_DEV_TYPE_BLOCK_SIZE);
} else {
- spi_device = NULL;
+ spi_device->size = 0;
}
-
- kfree(*spi_device_ret);
- *spi_device_ret = spi_device;
- return 0;
-}
-
-static void falcon_remove_spi_devices(struct efx_nic *efx)
-{
- kfree(efx->spi_eeprom);
- efx->spi_eeprom = NULL;
- kfree(efx->spi_flash);
- efx->spi_flash = NULL;
}
/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_nvconfig *nvconfig;
- int board_rev;
int rc;
nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
@@ -1243,55 +1264,32 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
return -ENOMEM;
rc = falcon_read_nvram(efx, nvconfig);
- if (rc == -EINVAL) {
- netif_err(efx, probe, efx->net_dev,
- "NVRAM is invalid therefore using defaults\n");
- efx->phy_type = PHY_TYPE_NONE;
- efx->mdio.prtad = MDIO_PRTAD_NONE;
- board_rev = 0;
- rc = 0;
- } else if (rc) {
- goto fail1;
- } else {
- struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
- struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
-
- efx->phy_type = v2->port0_phy_type;
- efx->mdio.prtad = v2->port0_phy_addr;
- board_rev = le16_to_cpu(v2->board_revision);
-
- if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
- rc = falcon_spi_device_init(
- efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
- le32_to_cpu(v3->spi_device_type
- [FFE_AB_SPI_DEVICE_FLASH]));
- if (rc)
- goto fail2;
- rc = falcon_spi_device_init(
- efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
- le32_to_cpu(v3->spi_device_type
- [FFE_AB_SPI_DEVICE_EEPROM]));
- if (rc)
- goto fail2;
- }
+ if (rc)
+ goto out;
+
+ efx->phy_type = nvconfig->board_v2.port0_phy_type;
+ efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;
+
+ if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
+ falcon_spi_device_init(
+ efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
+ le32_to_cpu(nvconfig->board_v3
+ .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
+ falcon_spi_device_init(
+ efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
+ le32_to_cpu(nvconfig->board_v3
+ .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
}
/* Read the MAC addresses */
- memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
+ memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
efx->phy_type, efx->mdio.prtad);
- rc = falcon_probe_board(efx, board_rev);
- if (rc)
- goto fail2;
-
- kfree(nvconfig);
- return 0;
-
- fail2:
- falcon_remove_spi_devices(efx);
- fail1:
+ rc = falcon_probe_board(efx,
+ le16_to_cpu(nvconfig->board_v2.board_revision));
+out:
kfree(nvconfig);
return rc;
}
@@ -1299,6 +1297,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
/* Probe all SPI devices on the NIC */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
int boot_dev;
@@ -1327,12 +1326,14 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
}
+ mutex_init(&nic_data->spi_lock);
+
if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
- falcon_spi_device_init(efx, &efx->spi_flash,
+ falcon_spi_device_init(efx, &nic_data->spi_flash,
FFE_AB_SPI_DEVICE_FLASH,
default_flash_type);
if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
- falcon_spi_device_init(efx, &efx->spi_eeprom,
+ falcon_spi_device_init(efx, &nic_data->spi_eeprom,
FFE_AB_SPI_DEVICE_EEPROM,
large_eeprom_type);
}
@@ -1397,7 +1398,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
}
/* Now we can reset the NIC */
- rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
+ rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
goto fail3;
@@ -1419,8 +1420,11 @@ static int falcon_probe_nic(struct efx_nic *efx)
/* Read in the non-volatile configuration */
rc = falcon_probe_nvconfig(efx);
- if (rc)
+ if (rc) {
+ if (rc == -EINVAL)
+ netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
goto fail5;
+ }
/* Initialise I2C adapter */
board = falcon_board(efx);
@@ -1452,7 +1456,6 @@ static int falcon_probe_nic(struct efx_nic *efx)
BUG_ON(i2c_del_adapter(&board->i2c_adap));
memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
fail5:
- falcon_remove_spi_devices(efx);
efx_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
@@ -1475,36 +1478,26 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
/* RX control FIFO thresholds (32 entries) */
const unsigned ctrl_xon_thr = 20;
const unsigned ctrl_xoff_thr = 25;
- /* RX data FIFO thresholds (256-byte units; size varies) */
- int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
- int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
efx_oword_t reg;
efx_reado(efx, &reg, FR_AZ_RX_CFG);
if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
/* Data FIFO size is 5.5K */
- if (data_xon_thr < 0)
- data_xon_thr = 512 >> 8;
- if (data_xoff_thr < 0)
- data_xoff_thr = 2048 >> 8;
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
huge_buf_size);
- EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
- EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
} else {
/* Data FIFO size is 80K; register fields moved */
- if (data_xon_thr < 0)
- data_xon_thr = 27648 >> 8; /* ~3*max MTU */
- if (data_xoff_thr < 0)
- data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
huge_buf_size);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
+ /* Send XON and XOFF at ~3 * max MTU away from empty/full */
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
@@ -1606,10 +1599,9 @@ static void falcon_remove_nic(struct efx_nic *efx)
BUG_ON(rc);
memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
- falcon_remove_spi_devices(efx);
efx_nic_free_buffer(efx, &efx->irq_status);
- falcon_reset_hw(efx, RESET_TYPE_ALL);
+ __falcon_reset_hw(efx, RESET_TYPE_ALL);
/* Release the second function after the reset */
if (nic_data->pci_dev2) {
@@ -1720,6 +1712,7 @@ struct efx_nic_type falcon_a1_nic_type = {
.reset = falcon_reset_hw,
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
.prepare_flush = falcon_prepare_flush,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
@@ -1760,6 +1753,7 @@ struct efx_nic_type falcon_b0_nic_type = {
.reset = falcon_reset_hw,
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
.prepare_flush = falcon_prepare_flush,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index cfc6a5b5a477..b9cc846811d6 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -13,8 +13,6 @@
#include "phy.h"
#include "efx.h"
#include "nic.h"
-#include "regs.h"
-#include "io.h"
#include "workarounds.h"
/* Macros for unpacking the board revision */
@@ -30,17 +28,28 @@
#define FALCON_BOARD_SFN4112F 0x52
/* Board temperature is about 15°C above ambient when air flow is
- * limited. */
+ * limited. The maximum acceptable ambient temperature varies
+ * depending on the PHY specifications but the critical temperature
+ * above which we should shut down to avoid damage is 80°C. */
#define FALCON_BOARD_TEMP_BIAS 15
+#define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS)
/* SFC4000 datasheet says: 'The maximum permitted junction temperature
* is 125°C; the thermal design of the environment for the SFC4000
* should aim to keep this well below 100°C.' */
+#define FALCON_JUNC_TEMP_MIN 0
#define FALCON_JUNC_TEMP_MAX 90
+#define FALCON_JUNC_TEMP_CRIT 125
/*****************************************************************************
* Support for LM87 sensor chip used on several boards
*/
+#define LM87_REG_TEMP_HW_INT_LOCK 0x13
+#define LM87_REG_TEMP_HW_EXT_LOCK 0x14
+#define LM87_REG_TEMP_HW_INT 0x17
+#define LM87_REG_TEMP_HW_EXT 0x18
+#define LM87_REG_TEMP_EXT1 0x26
+#define LM87_REG_TEMP_INT 0x27
#define LM87_REG_ALARMS1 0x41
#define LM87_REG_ALARMS2 0x42
#define LM87_IN_LIMITS(nr, _min, _max) \
@@ -57,6 +66,27 @@
#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
+static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
+{
+ while (*reg_values) {
+ u8 reg = *reg_values++;
+ u8 value = *reg_values++;
+ int rc = i2c_smbus_write_byte_data(client, reg, value);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static const u8 falcon_lm87_common_regs[] = {
+ LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT,
+ LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT,
+ LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX),
+ LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT,
+ LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT,
+ 0
+};
+
static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
const u8 *reg_values)
{
@@ -67,13 +97,16 @@ static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
if (!client)
return -EIO;
- while (*reg_values) {
- u8 reg = *reg_values++;
- u8 value = *reg_values++;
- rc = i2c_smbus_write_byte_data(client, reg, value);
- if (rc)
- goto err;
- }
+ /* Read-to-clear alarm/interrupt status */
+ i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
+ i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
+
+ rc = efx_poke_lm87(client, reg_values);
+ if (rc)
+ goto err;
+ rc = efx_poke_lm87(client, falcon_lm87_common_regs);
+ if (rc)
+ goto err;
board->hwmon_client = client;
return 0;
@@ -91,36 +124,56 @@ static void efx_fini_lm87(struct efx_nic *efx)
static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
{
struct i2c_client *client = falcon_board(efx)->hwmon_client;
- s32 alarms1, alarms2;
+ bool temp_crit, elec_fault, is_failure;
+ u16 alarms;
+ s32 reg;
/* If link is up then do not monitor temperature */
if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
return 0;
- alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
- alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
- if (alarms1 < 0)
- return alarms1;
- if (alarms2 < 0)
- return alarms2;
- alarms1 &= mask;
- alarms2 &= mask >> 8;
- if (alarms1 || alarms2) {
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
+ if (reg < 0)
+ return reg;
+ alarms = reg;
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
+ if (reg < 0)
+ return reg;
+ alarms |= reg << 8;
+ alarms &= mask;
+
+ temp_crit = false;
+ if (alarms & LM87_ALARM_TEMP_INT) {
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT);
+ if (reg < 0)
+ return reg;
+ if (reg > FALCON_BOARD_TEMP_CRIT)
+ temp_crit = true;
+ }
+ if (alarms & LM87_ALARM_TEMP_EXT1) {
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1);
+ if (reg < 0)
+ return reg;
+ if (reg > FALCON_JUNC_TEMP_CRIT)
+ temp_crit = true;
+ }
+ elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1);
+ is_failure = temp_crit || elec_fault;
+
+ if (alarms)
netif_err(efx, hw, efx->net_dev,
- "LM87 detected a hardware failure (status %02x:%02x)"
- "%s%s%s\n",
- alarms1, alarms2,
- (alarms1 & LM87_ALARM_TEMP_INT) ?
+ "LM87 detected a hardware %s (status %02x:%02x)"
+ "%s%s%s%s\n",
+ is_failure ? "failure" : "problem",
+ alarms & 0xff, alarms >> 8,
+ (alarms & LM87_ALARM_TEMP_INT) ?
"; board is overheating" : "",
- (alarms1 & LM87_ALARM_TEMP_EXT1) ?
+ (alarms & LM87_ALARM_TEMP_EXT1) ?
"; controller is overheating" : "",
- (alarms1 & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1)
- || alarms2) ?
- "; electrical fault" : "");
- return -ERANGE;
- }
+ temp_crit ? "; reached critical temperature" : "",
+ elec_fault ? "; electrical fault" : "");
- return 0;
+ return is_failure ? -ERANGE : 0;
}
#else /* !CONFIG_SENSORS_LM87 */
@@ -325,7 +378,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
new_mode = old_mode & ~PHY_MODE_SPECIAL;
else
new_mode = PHY_MODE_SPECIAL;
- if (old_mode == new_mode) {
+ if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
err = 0;
} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
err = -EBUSY;
@@ -362,10 +415,11 @@ static void sfe4001_fini(struct efx_nic *efx)
static int sfe4001_check_hw(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
s32 status;
/* If XAUI link is up then do not monitor */
- if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required)
+ if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
return 0;
/* Check the powered status of the PHY. Lack of power implies that
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index b31f595ebb5b..2c9ee5db3bf7 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -16,7 +16,6 @@
#include "io.h"
#include "mac.h"
#include "mdio_10g.h"
-#include "phy.h"
#include "workarounds.h"
/**************************************************************************
@@ -88,6 +87,7 @@ int falcon_reset_xaui(struct efx_nic *efx)
static void falcon_ack_status_intr(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t reg;
if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
@@ -99,7 +99,7 @@ static void falcon_ack_status_intr(struct efx_nic *efx)
/* We can only use this interrupt to signal the negative edge of
* xaui_align [we have to poll the positive edge]. */
- if (efx->xmac_poll_required)
+ if (nic_data->xmac_poll_required)
return;
efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
@@ -277,12 +277,14 @@ static bool falcon_xmac_check_fault(struct efx_nic *efx)
static int falcon_reconfigure_xmac(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
falcon_reconfigure_xgxs_core(efx);
falcon_reconfigure_xmac_core(efx);
falcon_reconfigure_mac_wrapper(efx);
- efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
falcon_ack_status_intr(efx);
return 0;
@@ -350,11 +352,13 @@ static void falcon_update_stats_xmac(struct efx_nic *efx)
void falcon_poll_xmac(struct efx_nic *efx)
{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
- !efx->xmac_poll_required)
+ !nic_data->xmac_poll_required)
return;
- efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
falcon_ack_status_intr(efx);
}
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index 52cb6082b910..95a980fd63d5 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -7,6 +7,8 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
+#include <linux/in.h>
+#include <net/ip.h>
#include "efx.h"
#include "filter.h"
#include "io.h"
@@ -26,19 +28,34 @@
*/
#define FILTER_CTL_SRCH_MAX 200
+/* Don't try very hard to find space for performance hints, as this is
+ * counter-productive. */
+#define FILTER_CTL_SRCH_HINT_MAX 5
+
+enum efx_filter_table_id {
+ EFX_FILTER_TABLE_RX_IP = 0,
+ EFX_FILTER_TABLE_RX_MAC,
+ EFX_FILTER_TABLE_COUNT,
+};
+
struct efx_filter_table {
+ enum efx_filter_table_id id;
u32 offset; /* address of table relative to BAR */
unsigned size; /* number of entries */
unsigned step; /* step between entries */
unsigned used; /* number currently used */
unsigned long *used_bitmap;
struct efx_filter_spec *spec;
+ unsigned search_depth[EFX_FILTER_TYPE_COUNT];
};
struct efx_filter_state {
spinlock_t lock;
struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
- unsigned search_depth[EFX_FILTER_TYPE_COUNT];
+#ifdef CONFIG_RFS_ACCEL
+ u32 *rps_flow_id;
+ unsigned rps_expire_index;
+#endif
};
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
@@ -65,68 +82,203 @@ static u16 efx_filter_increment(u32 key)
}
static enum efx_filter_table_id
-efx_filter_type_table_id(enum efx_filter_type type)
+efx_filter_spec_table_id(const struct efx_filter_spec *spec)
+{
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
+ BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
+ EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
+ return spec->type >> 2;
+}
+
+static struct efx_filter_table *
+efx_filter_spec_table(struct efx_filter_state *state,
+ const struct efx_filter_spec *spec)
{
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_WILD >> 2));
- return type >> 2;
+ if (spec->type == EFX_FILTER_UNSPEC)
+ return NULL;
+ else
+ return &state->table[efx_filter_spec_table_id(spec)];
}
-static void
-efx_filter_table_reset_search_depth(struct efx_filter_state *state,
- enum efx_filter_table_id table_id)
+static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
{
- memset(state->search_depth + (table_id << 2), 0,
- sizeof(state->search_depth[0]) << 2);
+ memset(table->search_depth, 0, sizeof(table->search_depth));
}
static void efx_filter_push_rx_limits(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_table *table;
efx_oword_t filter_ctl;
efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+ table = &state->table[EFX_FILTER_TABLE_RX_IP];
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
- state->search_depth[EFX_FILTER_RX_TCP_FULL] +
+ table->search_depth[EFX_FILTER_TCP_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
- state->search_depth[EFX_FILTER_RX_TCP_WILD] +
+ table->search_depth[EFX_FILTER_TCP_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
- state->search_depth[EFX_FILTER_RX_UDP_FULL] +
+ table->search_depth[EFX_FILTER_UDP_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
- state->search_depth[EFX_FILTER_RX_UDP_WILD] +
+ table->search_depth[EFX_FILTER_UDP_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
- if (state->table[EFX_FILTER_TABLE_RX_MAC].size) {
+ table = &state->table[EFX_FILTER_TABLE_RX_MAC];
+ if (table->size) {
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
- state->search_depth[EFX_FILTER_RX_MAC_FULL] +
+ table->search_depth[EFX_FILTER_MAC_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
- state->search_depth[EFX_FILTER_RX_MAC_WILD] +
+ table->search_depth[EFX_FILTER_MAC_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
}
efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
+static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
+ __be32 host1, __be16 port1,
+ __be32 host2, __be16 port2)
+{
+ spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
+ spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
+ spec->data[2] = ntohl(host2);
+}
+
+/**
+ * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port)
+{
+ __be32 host1;
+ __be16 port1;
+
+ EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
+
+ /* This cannot currently be combined with other filtering */
+ if (spec->type != EFX_FILTER_UNSPEC)
+ return -EPROTONOSUPPORT;
+
+ if (port == 0)
+ return -EINVAL;
+
+ switch (proto) {
+ case IPPROTO_TCP:
+ spec->type = EFX_FILTER_TCP_WILD;
+ break;
+ case IPPROTO_UDP:
+ spec->type = EFX_FILTER_UDP_WILD;
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ /* Filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * (= zero for wildcard) addresses.
+ */
+ host1 = 0;
+ if (proto != IPPROTO_UDP) {
+ port1 = 0;
+ } else {
+ port1 = port;
+ port = 0;
+ }
+
+ __efx_filter_set_ipv4(spec, host1, port1, host, port);
+ return 0;
+}
+
+/**
+ * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ * @rhost: Remote host address (network byte order)
+ * @rport: Remote port (network byte order)
+ */
+int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port,
+ __be32 rhost, __be16 rport)
+{
+ EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
+
+ /* This cannot currently be combined with other filtering */
+ if (spec->type != EFX_FILTER_UNSPEC)
+ return -EPROTONOSUPPORT;
+
+ if (port == 0 || rport == 0)
+ return -EINVAL;
+
+ switch (proto) {
+ case IPPROTO_TCP:
+ spec->type = EFX_FILTER_TCP_FULL;
+ break;
+ case IPPROTO_UDP:
+ spec->type = EFX_FILTER_UDP_FULL;
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ __efx_filter_set_ipv4(spec, rhost, rport, host, port);
+ return 0;
+}
+
+/**
+ * efx_filter_set_eth_local - specify local Ethernet address and optional VID
+ * @spec: Specification to initialise
+ * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
+ * @addr: Local Ethernet MAC address
+ */
+int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+ u16 vid, const u8 *addr)
+{
+ EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
+
+ /* This cannot currently be combined with other filtering */
+ if (spec->type != EFX_FILTER_UNSPEC)
+ return -EPROTONOSUPPORT;
+
+ if (vid == EFX_FILTER_VID_UNSPEC) {
+ spec->type = EFX_FILTER_MAC_WILD;
+ spec->data[0] = 0;
+ } else {
+ spec->type = EFX_FILTER_MAC_FULL;
+ spec->data[0] = vid;
+ }
+
+ spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
+ spec->data[2] = addr[0] << 8 | addr[1];
+ return 0;
+}
+
/* Build a filter entry and return its n-tuple key. */
static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
{
u32 data3;
- switch (efx_filter_type_table_id(spec->type)) {
+ switch (efx_filter_spec_table_id(spec)) {
case EFX_FILTER_TABLE_RX_IP: {
- bool is_udp = (spec->type == EFX_FILTER_RX_UDP_FULL ||
- spec->type == EFX_FILTER_RX_UDP_WILD);
+ bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
+ spec->type == EFX_FILTER_UDP_WILD);
EFX_POPULATE_OWORD_7(
*filter,
FRF_BZ_RSS_EN,
@@ -143,7 +295,7 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
}
case EFX_FILTER_TABLE_RX_MAC: {
- bool is_wild = spec->type == EFX_FILTER_RX_MAC_WILD;
+ bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_8(
*filter,
FRF_CZ_RMFT_RSS_EN,
@@ -182,15 +334,16 @@ static int efx_filter_search(struct efx_filter_table *table,
struct efx_filter_spec *spec, u32 key,
bool for_insert, int *depth_required)
{
- unsigned hash, incr, filter_idx, depth;
+ unsigned hash, incr, filter_idx, depth, depth_max;
struct efx_filter_spec *cmp;
hash = efx_filter_hash(key);
incr = efx_filter_increment(key);
+ depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
+ FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
for (depth = 1, filter_idx = hash & (table->size - 1);
- depth <= FILTER_CTL_SRCH_MAX &&
- test_bit(filter_idx, table->used_bitmap);
+ depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
++depth) {
cmp = &table->spec[filter_idx];
if (efx_filter_equal(spec, cmp))
@@ -199,13 +352,21 @@ static int efx_filter_search(struct efx_filter_table *table,
}
if (!for_insert)
return -ENOENT;
- if (depth > FILTER_CTL_SRCH_MAX)
+ if (depth > depth_max)
return -EBUSY;
found:
*depth_required = depth;
return filter_idx;
}
+/* Construct/deconstruct external filter IDs */
+
+static inline int
+efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
+{
+ return table_id << 16 | index;
+}
+
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
@@ -213,30 +374,28 @@ found:
* @replace: Flag for whether the specified filter may replace a filter
* with an identical match expression and equal or lower priority
*
- * On success, return the filter index within its table.
+ * On success, return the filter ID.
* On failure, return a negative error code.
*/
int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
bool replace)
{
struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id =
- efx_filter_type_table_id(spec->type);
- struct efx_filter_table *table = &state->table[table_id];
+ struct efx_filter_table *table = efx_filter_spec_table(state, spec);
struct efx_filter_spec *saved_spec;
efx_oword_t filter;
int filter_idx, depth;
u32 key;
int rc;
- if (table->size == 0)
+ if (!table || table->size == 0)
return -EINVAL;
key = efx_filter_build(&filter, spec);
netif_vdbg(efx, hw, efx->net_dev,
"%s: type %d search_depth=%d", __func__, spec->type,
- state->search_depth[spec->type]);
+ table->search_depth[spec->type]);
spin_lock_bh(&state->lock);
@@ -263,8 +422,8 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
}
*saved_spec = *spec;
- if (state->search_depth[spec->type] < depth) {
- state->search_depth[spec->type] = depth;
+ if (table->search_depth[spec->type] < depth) {
+ table->search_depth[spec->type] = depth;
efx_filter_push_rx_limits(efx);
}
@@ -273,6 +432,7 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set",
__func__, spec->type, filter_idx, spec->dmaq_id);
+ rc = efx_filter_make_id(table->id, filter_idx);
out:
spin_unlock_bh(&state->lock);
@@ -306,15 +466,16 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx,
int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
{
struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id =
- efx_filter_type_table_id(spec->type);
- struct efx_filter_table *table = &state->table[table_id];
+ struct efx_filter_table *table = efx_filter_spec_table(state, spec);
struct efx_filter_spec *saved_spec;
efx_oword_t filter;
int filter_idx, depth;
u32 key;
int rc;
+ if (!table)
+ return -EINVAL;
+
key = efx_filter_build(&filter, spec);
spin_lock_bh(&state->lock);
@@ -332,7 +493,7 @@ int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
efx_filter_table_clear_entry(efx, table, filter_idx);
if (table->used == 0)
- efx_filter_table_reset_search_depth(state, table_id);
+ efx_filter_table_reset_search_depth(table);
rc = 0;
out:
@@ -340,15 +501,9 @@ out:
return rc;
}
-/**
- * efx_filter_table_clear - remove filters from a table by priority
- * @efx: NIC from which to remove the filters
- * @table_id: Table from which to remove the filters
- * @priority: Maximum priority to remove
- */
-void efx_filter_table_clear(struct efx_nic *efx,
- enum efx_filter_table_id table_id,
- enum efx_filter_priority priority)
+static void efx_filter_table_clear(struct efx_nic *efx,
+ enum efx_filter_table_id table_id,
+ enum efx_filter_priority priority)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[table_id];
@@ -360,11 +515,22 @@ void efx_filter_table_clear(struct efx_nic *efx,
if (table->spec[filter_idx].priority <= priority)
efx_filter_table_clear_entry(efx, table, filter_idx);
if (table->used == 0)
- efx_filter_table_reset_search_depth(state, table_id);
+ efx_filter_table_reset_search_depth(table);
spin_unlock_bh(&state->lock);
}
+/**
+ * efx_filter_clear_rx - remove RX filters by priority
+ * @efx: NIC from which to remove the filters
+ * @priority: Maximum priority to remove
+ */
+void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
+{
+ efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
+ efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
+}
+
/* Restore filter state after reset */
void efx_restore_filters(struct efx_nic *efx)
{
@@ -406,7 +572,15 @@ int efx_probe_filters(struct efx_nic *efx)
spin_lock_init(&state->lock);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+#ifdef CONFIG_RFS_ACCEL
+ state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
+ sizeof(*state->rps_flow_id),
+ GFP_KERNEL);
+ if (!state->rps_flow_id)
+ goto fail;
+#endif
table = &state->table[EFX_FILTER_TABLE_RX_IP];
+ table->id = EFX_FILTER_TABLE_RX_IP;
table->offset = FR_BZ_RX_FILTER_TBL0;
table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
table->step = FR_BZ_RX_FILTER_TBL0_STEP;
@@ -414,6 +588,7 @@ int efx_probe_filters(struct efx_nic *efx)
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
table = &state->table[EFX_FILTER_TABLE_RX_MAC];
+ table->id = EFX_FILTER_TABLE_RX_MAC;
table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
@@ -428,10 +603,9 @@ int efx_probe_filters(struct efx_nic *efx)
GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
- table->spec = vmalloc(table->size * sizeof(*table->spec));
+ table->spec = vzalloc(table->size * sizeof(*table->spec));
if (!table->spec)
goto fail;
- memset(table->spec, 0, table->size * sizeof(*table->spec));
}
return 0;
@@ -450,5 +624,97 @@ void efx_remove_filters(struct efx_nic *efx)
kfree(state->table[table_id].used_bitmap);
vfree(state->table[table_id].spec);
}
+#ifdef CONFIG_RFS_ACCEL
+ kfree(state->rps_flow_id);
+#endif
kfree(state);
}
+
+#ifdef CONFIG_RFS_ACCEL
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_spec spec;
+ const struct iphdr *ip;
+ const __be16 *ports;
+ int nhoff;
+ int rc;
+
+ nhoff = skb_network_offset(skb);
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+
+ /* RFS must validate the IP header length before calling us */
+ EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip)));
+ ip = (const struct iphdr *)(skb->data + nhoff);
+ if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+ return -EPROTONOSUPPORT;
+ EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4));
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
+ rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
+ ip->daddr, ports[1], ip->saddr, ports[0]);
+ if (rc)
+ return rc;
+
+ rc = efx_filter_insert_filter(efx, &spec, true);
+ if (rc < 0)
+ return rc;
+
+ /* Remember this so we can check whether to expire the filter later */
+ state->rps_flow_id[rc] = flow_id;
+ channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ ++channel->rfs_filters_added;
+
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
+ &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
+ rxq_index, flow_id, rc);
+
+ return rc;
+}
+
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
+ unsigned mask = table->size - 1;
+ unsigned index;
+ unsigned stop;
+
+ if (!spin_trylock_bh(&state->lock))
+ return false;
+
+ index = state->rps_expire_index;
+ stop = (index + quota) & mask;
+
+ while (index != stop) {
+ if (test_bit(index, table->used_bitmap) &&
+ table->spec[index].priority == EFX_FILTER_PRI_HINT &&
+ rps_may_expire_flow(efx->net_dev,
+ table->spec[index].dmaq_id,
+ state->rps_flow_id[index], index)) {
+ netif_info(efx, rx_status, efx->net_dev,
+ "expiring filter %d [flow %u]\n",
+ index, state->rps_flow_id[index]);
+ efx_filter_table_clear_entry(efx, table, index);
+ }
+ index = (index + 1) & mask;
+ }
+
+ state->rps_expire_index = stop;
+ if (table->used == 0)
+ efx_filter_table_reset_search_depth(table);
+
+ spin_unlock_bh(&state->lock);
+ return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
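
With the filter rework above, efx_filter_insert_filter() now returns an opaque ID (table << 16 | index) built by efx_filter_make_id() rather than a bare table index. A minimal usage sketch of the new helpers, assuming a valid efx pointer, an example local address, and EFX_FILTER_PRI_MANUAL as the priority level (error handling abbreviated):

	struct efx_filter_spec spec;
	__be32 local_ip = htonl(0xc0a80001);	/* 192.168.0.1, example only */
	int id;

	/* Steer local TCP port 80 traffic to RX queue 0 */
	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
	id = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, local_ip, htons(80));
	if (id == 0)
		id = efx_filter_insert_filter(efx, &spec, false);
	/* id is now EFX_FILTER_TABLE_RX_IP << 16 | filter_idx, or a -errno */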
diff --git a/drivers/net/sfc/filter.h b/drivers/net/sfc/filter.h
index a53319ded79c..872f2132a496 100644
--- a/drivers/net/sfc/filter.h
+++ b/drivers/net/sfc/filter.h
@@ -12,31 +12,27 @@
#include <linux/types.h>
-enum efx_filter_table_id {
- EFX_FILTER_TABLE_RX_IP = 0,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_COUNT,
-};
-
/**
* enum efx_filter_type - type of hardware filter
- * @EFX_FILTER_RX_TCP_FULL: RX, matching TCP/IPv4 4-tuple
- * @EFX_FILTER_RX_TCP_WILD: RX, matching TCP/IPv4 destination (host, port)
- * @EFX_FILTER_RX_UDP_FULL: RX, matching UDP/IPv4 4-tuple
- * @EFX_FILTER_RX_UDP_WILD: RX, matching UDP/IPv4 destination (host, port)
- * @EFX_FILTER_RX_MAC_FULL: RX, matching Ethernet destination MAC address, VID
- * @EFX_FILTER_RX_MAC_WILD: RX, matching Ethernet destination MAC address
+ * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
+ * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
+ * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
+ * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
+ * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
+ * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
+ * @EFX_FILTER_UNSPEC: Match type is unspecified
*
- * Falcon NICs only support the RX TCP/IPv4 and UDP/IPv4 filter types.
+ * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
*/
enum efx_filter_type {
- EFX_FILTER_RX_TCP_FULL = 0,
- EFX_FILTER_RX_TCP_WILD,
- EFX_FILTER_RX_UDP_FULL,
- EFX_FILTER_RX_UDP_WILD,
- EFX_FILTER_RX_MAC_FULL = 4,
- EFX_FILTER_RX_MAC_WILD,
- EFX_FILTER_TYPE_COUNT,
+ EFX_FILTER_TCP_FULL = 0,
+ EFX_FILTER_TCP_WILD,
+ EFX_FILTER_UDP_FULL,
+ EFX_FILTER_UDP_WILD,
+ EFX_FILTER_MAC_FULL = 4,
+ EFX_FILTER_MAC_WILD,
+ EFX_FILTER_TYPE_COUNT, /* number of specific types */
+ EFX_FILTER_UNSPEC = 0xf,
};
/**
@@ -63,13 +59,13 @@ enum efx_filter_priority {
* @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
* any IP filter that matches the same packet. By default, IP
* filters take precedence.
- *
- * Currently, no flags are defined for TX filters.
+ * @EFX_FILTER_FLAG_RX: Filter is for RX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
+ EFX_FILTER_FLAG_RX = 0x08,
};
/**
@@ -91,99 +87,26 @@ struct efx_filter_spec {
u32 data[3];
};
-/**
- * efx_filter_set_rx_tcp_full - specify RX filter with TCP/IPv4 full match
- * @spec: Specification to initialise
- * @shost: Source host address (host byte order)
- * @sport: Source port (host byte order)
- * @dhost: Destination host address (host byte order)
- * @dport: Destination port (host byte order)
- */
-static inline void
-efx_filter_set_rx_tcp_full(struct efx_filter_spec *spec,
- u32 shost, u16 sport, u32 dhost, u16 dport)
-{
- spec->type = EFX_FILTER_RX_TCP_FULL;
- spec->data[0] = sport | shost << 16;
- spec->data[1] = dport << 16 | shost >> 16;
- spec->data[2] = dhost;
-}
-
-/**
- * efx_filter_set_rx_tcp_wild - specify RX filter with TCP/IPv4 wildcard match
- * @spec: Specification to initialise
- * @dhost: Destination host address (host byte order)
- * @dport: Destination port (host byte order)
- */
-static inline void
-efx_filter_set_rx_tcp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
-{
- spec->type = EFX_FILTER_RX_TCP_WILD;
- spec->data[0] = 0;
- spec->data[1] = dport << 16;
- spec->data[2] = dhost;
-}
-
-/**
- * efx_filter_set_rx_udp_full - specify RX filter with UDP/IPv4 full match
- * @spec: Specification to initialise
- * @shost: Source host address (host byte order)
- * @sport: Source port (host byte order)
- * @dhost: Destination host address (host byte order)
- * @dport: Destination port (host byte order)
- */
-static inline void
-efx_filter_set_rx_udp_full(struct efx_filter_spec *spec,
- u32 shost, u16 sport, u32 dhost, u16 dport)
-{
- spec->type = EFX_FILTER_RX_UDP_FULL;
- spec->data[0] = sport | shost << 16;
- spec->data[1] = dport << 16 | shost >> 16;
- spec->data[2] = dhost;
-}
-
-/**
- * efx_filter_set_rx_udp_wild - specify RX filter with UDP/IPv4 wildcard match
- * @spec: Specification to initialise
- * @dhost: Destination host address (host byte order)
- * @dport: Destination port (host byte order)
- */
-static inline void
-efx_filter_set_rx_udp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
+static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
+ enum efx_filter_priority priority,
+ enum efx_filter_flags flags,
+ unsigned rxq_id)
{
- spec->type = EFX_FILTER_RX_UDP_WILD;
- spec->data[0] = dport;
- spec->data[1] = 0;
- spec->data[2] = dhost;
+ spec->type = EFX_FILTER_UNSPEC;
+ spec->priority = priority;
+ spec->flags = EFX_FILTER_FLAG_RX | flags;
+ spec->dmaq_id = rxq_id;
}
-/**
- * efx_filter_set_rx_mac_full - specify RX filter with MAC full match
- * @spec: Specification to initialise
- * @vid: VLAN ID
- * @addr: Destination MAC address
- */
-static inline void efx_filter_set_rx_mac_full(struct efx_filter_spec *spec,
- u16 vid, const u8 *addr)
-{
- spec->type = EFX_FILTER_RX_MAC_FULL;
- spec->data[0] = vid;
- spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
- spec->data[2] = addr[0] << 8 | addr[1];
-}
-
-/**
- * efx_filter_set_rx_mac_full - specify RX filter with MAC wildcard match
- * @spec: Specification to initialise
- * @addr: Destination MAC address
- */
-static inline void efx_filter_set_rx_mac_wild(struct efx_filter_spec *spec,
- const u8 *addr)
-{
- spec->type = EFX_FILTER_RX_MAC_WILD;
- spec->data[0] = 0;
- spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
- spec->data[2] = addr[0] << 8 | addr[1];
-}
+extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port);
+extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port,
+ __be32 rhost, __be16 rport);
+extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+ u16 vid, const u8 *addr);
+enum {
+ EFX_FILTER_VID_UNSPEC = 0xffff,
+};
#endif /* EFX_FILTER_H */
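
The MAC-address helpers follow the same pattern as the IPv4 ones: a VID-agnostic match is requested with EFX_FILTER_VID_UNSPEC, and efx_filter_clear_rx() replaces the previously exported per-table clear. A hedged fragment (the MAC address and queue number are illustrative only):

	struct efx_filter_spec spec;
	static const u8 addr[ETH_ALEN] = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 };

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, EFX_FILTER_FLAG_RX_RSS, 1);
	efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, addr);
	efx_filter_insert_filter(efx, &spec, true);

	/* Later: remove everything at or below manual priority from both RX tables */
	efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);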
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index 85a99fe87437..d9d8c2ef1074 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -22,28 +22,39 @@
*
* Notes on locking strategy:
*
- * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes
- * which necessitates locking.
- * Under normal operation few writes to NIC registers are made and these
- * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
- * cased to allow 4-byte (hence lockless) accesses.
+ * Most CSRs are 128-bit (oword) and therefore cannot be read or
+ * written atomically. Access from the host is buffered by the Bus
+ * Interface Unit (BIU). Whenever the host reads from the lowest
+ * address of such a register, or from the address of a different such
+ * register, the BIU latches the register's value. Subsequent reads
+ * from higher addresses of the same register will read the latched
+ * value. Whenever the host writes part of such a register, the BIU
+ * collects the written value and does not write to the underlying
+ * register until all 4 dwords have been written. A similar buffering
+ * scheme applies to host access to the NIC's 64-bit SRAM.
*
- * It *is* safe to write to these 4-byte registers in the middle of an
- * access to an 8-byte or 16-byte register. We therefore use a
- * spinlock to protect accesses to the larger registers, but no locks
- * for the 4-byte registers.
+ * Access to different CSRs and 64-bit SRAM words must be serialised,
+ * since interleaved access can result in lost writes or lost
+ * information from read-to-clear fields. We use efx_nic::biu_lock
+ * for this. (We could use separate locks for read and write, but
+ * this is not normally a performance bottleneck.)
*
- * A write barrier is needed to ensure that DW3 is written after DW0/1/2
- * due to the way the 16byte registers are "collected" in the BIU.
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * 128-bit but are special-cased in the BIU to avoid the need for
+ * locking in the host:
*
- * We also lock when carrying out reads, to ensure consistency of the
- * data (made possible since the BIU reads all 128 bits into a cache).
- * Reads are very rare, so this isn't a significant performance
- * impact. (Most data transferred from NIC to host is DMAed directly
- * into host memory).
- *
- * I/O BAR access uses locks for both reads and writes (but is only provided
- * for testing purposes).
+ * - They are write-only.
+ * - The semantics of writing to these registers are such that
+ * replacing the low 96 bits with zero does not affect functionality.
+ * - If the host writes to the last dword address of such a register
+ * (i.e. the high 32 bits) the underlying register will always be
+ * written. If the collector and the current write together do not
+ * provide values for all 128 bits of the register, the low 96 bits
+ * will be written as zero.
+ * - If the host writes to the address of any other part of such a
+ * register while the collector already holds values for some other
+ * register, the write is discarded and the collector maintains its
+ * current state.
*/
#if BITS_PER_LONG == 64
@@ -72,7 +83,7 @@ static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
return (__force __le32)__raw_readl(efx->membase + reg);
}
-/* Writes to a normal 16-byte Efx register, locking as appropriate. */
+/* Write a normal 128-bit CSR, locking as appropriate. */
static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg)
{
@@ -85,21 +96,19 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
_efx_writeq(efx, value->u64[0], reg + 0);
- wmb();
_efx_writeq(efx, value->u64[1], reg + 8);
#else
_efx_writed(efx, value->u32[0], reg + 0);
_efx_writed(efx, value->u32[1], reg + 4);
_efx_writed(efx, value->u32[2], reg + 8);
- wmb();
_efx_writed(efx, value->u32[3], reg + 12);
#endif
+ wmb();
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
-/* Write an 8-byte NIC SRAM entry through the supplied mapping,
- * locking as appropriate. */
+/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
efx_qword_t *value, unsigned int index)
{
@@ -115,36 +124,27 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
__raw_writeq((__force u64)value->u64[0], membase + addr);
#else
__raw_writel((__force u32)value->u32[0], membase + addr);
- wmb();
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
+ wmb();
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
-/* Write dword to NIC register that allows partial writes
- *
- * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
- * TX_DESC_UPD_REG) can be written to as a single dword. This allows
- * for lockless writes.
- */
+/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg)
{
netif_vdbg(efx, hw, efx->net_dev,
- "writing partial register %x with "EFX_DWORD_FMT"\n",
+ "writing register %x with "EFX_DWORD_FMT"\n",
reg, EFX_DWORD_VAL(*value));
/* No lock required */
_efx_writed(efx, value->u32[0], reg);
+ wmb();
}
-/* Read from a NIC register
- *
- * This reads an entire 16-byte register in one go, locking as
- * appropriate. It is essential to read the first dword first, as this
- * prompts the NIC to load the current value into the shadow register.
- */
+/* Read a 128-bit CSR, locking as appropriate. */
static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg)
{
@@ -152,7 +152,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _efx_readd(efx, reg + 0);
- rmb();
value->u32[1] = _efx_readd(efx, reg + 4);
value->u32[2] = _efx_readd(efx, reg + 8);
value->u32[3] = _efx_readd(efx, reg + 12);
@@ -163,8 +162,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
EFX_OWORD_VAL(*value));
}
-/* Read an 8-byte SRAM entry through supplied mapping,
- * locking as appropriate. */
+/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
efx_qword_t *value, unsigned int index)
{
@@ -176,7 +174,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
- rmb();
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -186,7 +183,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
addr, EFX_QWORD_VAL(*value));
}
-/* Read dword from register that allows partial writes (sic) */
+/* Read a 32-bit CSR or SRAM */
static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg)
{
@@ -196,28 +193,28 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
reg, EFX_DWORD_VAL(*value));
}
-/* Write to a register forming part of a table */
+/* Write a 128-bit CSR forming part of a table */
static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg, unsigned int index)
{
efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Read to a register forming part of a table */
+/* Read a 128-bit CSR forming part of a table */
static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
unsigned int reg, unsigned int index)
{
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Write to a dword register forming part of a table */
+/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */
static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg, unsigned int index)
{
efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Read from a dword register forming part of a table */
+/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */
static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg, unsigned int index)
{
@@ -231,29 +228,56 @@ static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
#define EFX_PAGED_REG(page, reg) \
((page) * EFX_PAGE_BLOCK_SIZE + (reg))
-/* As for efx_writeo(), but for a page-mapped register. */
-static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
- unsigned int reg, unsigned int page)
+/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
+static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg, unsigned int page)
{
- efx_writeo(efx, value, EFX_PAGED_REG(page, reg));
-}
+ reg = EFX_PAGED_REG(page, reg);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
-/* As for efx_writed(), but for a page-mapped register. */
-static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
- unsigned int reg, unsigned int page)
+#ifdef EFX_USE_QWORD_IO
+ _efx_writeq(efx, value->u64[0], reg + 0);
+ _efx_writeq(efx, value->u64[1], reg + 8);
+#else
+ _efx_writed(efx, value->u32[0], reg + 0);
+ _efx_writed(efx, value->u32[1], reg + 4);
+ _efx_writed(efx, value->u32[2], reg + 8);
+ _efx_writed(efx, value->u32[3], reg + 12);
+#endif
+ wmb();
+}
+#define efx_writeo_page(efx, value, reg, page) \
+ _efx_writeo_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
+ page)
+
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
+ * RX_DESC_UPD or TX_DESC_UPD)
+ */
+static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg, unsigned int page)
{
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
}
-
-/* Write dword to page-mapped register with an extra lock.
- *
- * As for efx_writed_page(), but for a register that suffers from
- * SFC bug 3181. Take out a lock so the BIU collector cannot be
- * confused. */
-static inline void efx_writed_page_locked(struct efx_nic *efx,
- efx_dword_t *value,
- unsigned int reg,
- unsigned int page)
+#define efx_writed_page(efx, value, reg, page) \
+ _efx_writed_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
+ && (reg) != 0xa1c), \
+ page)
+
+/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
+ * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
+ * collector register.
+ */
+static inline void _efx_writed_page_locked(struct efx_nic *efx,
+ efx_dword_t *value,
+ unsigned int reg,
+ unsigned int page)
{
unsigned long flags __attribute__ ((unused));
@@ -265,5 +289,9 @@ static inline void efx_writed_page_locked(struct efx_nic *efx,
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
}
}
+#define efx_writed_page_locked(efx, value, reg, page) \
+ _efx_writed_page_locked(efx, value, \
+ reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
+ page)
#endif /* EFX_IO_H */
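
The new page-mapped write macros fold a compile-time whitelist into the register argument: BUILD_BUG_ON_ZERO() (in linux/kernel.h at this point) evaluates to 0 when the offset is one of the few registers the BIU special-cases, and otherwise expands to a negative-width bitfield that breaks the build. A hedged illustration; the register names are assumed from the driver's regs.h:

	efx_dword_t reg;

	/* Compiles: FR_AZ_EVQ_RPTR (0x400) is in efx_writed_page()'s list */
	efx_writed_page(efx, &reg, FR_AZ_EVQ_RPTR, channel->channel);

	/* Would not compile: TIMER_COMMAND (0x420) is only permitted through
	 * the locked variant, so the plain macro rejects it at build time:
	 * efx_writed_page(efx, &reg, FR_BZ_TIMER_COMMAND_P0, channel->channel);
	 */
	efx_writed_page_locked(efx, &reg, FR_BZ_TIMER_COMMAND_P0, channel->channel);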
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 12cf910c2ce7..5e118f0d2479 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2009 Solarflare Communications Inc.
+ * Copyright 2008-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -94,14 +94,15 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
efx_writed(efx, &hdr, pdu);
- for (i = 0; i < inlen; i += 4)
+ for (i = 0; i < inlen; i += 4) {
_efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
-
- /* Ensure the payload is written out before the header */
- wmb();
+ /* use wmb() within loop to inhibit write combining */
+ wmb();
+ }
/* ring the doorbell with a distinctive value */
_efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+ wmb();
}
static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
@@ -381,7 +382,7 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
-rc);
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
} else
- netif_err(efx, hw, efx->net_dev,
+ netif_dbg(efx, hw, efx->net_dev,
"MC command 0x%x inlen %d failed rc=%d\n",
cmd, (int)inlen, -rc);
}
@@ -463,6 +464,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
if (mcdi->mode == MCDI_MODE_EVENTS) {
mcdi->resprc = rc;
mcdi->resplen = 0;
+ ++mcdi->credits;
}
} else
/* Nobody was waiting for an MCDI request, so trigger a reset */
@@ -601,7 +603,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
**************************************************************************
*/
-int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
size_t outlength;
@@ -615,29 +617,20 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
if (rc)
goto fail;
- if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) {
- *version = 0;
- *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
- return 0;
- }
-
if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
rc = -EIO;
goto fail;
}
ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
- *version = (((u64)le16_to_cpu(ver_words[0]) << 48) |
- ((u64)le16_to_cpu(ver_words[1]) << 32) |
- ((u64)le16_to_cpu(ver_words[2]) << 16) |
- le16_to_cpu(ver_words[3]));
- *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
-
- return 0;
+ snprintf(buf, len, "%u.%u.%u.%u",
+ le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
+ le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+ return;
fail:
netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
+ buf[0] = 0;
}
int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
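
efx_mcdi_fwver() is replaced above by efx_mcdi_print_fwver(), which formats the four version words straight into a caller-supplied buffer and leaves an empty string on failure. A hedged sketch of the expected caller, along the lines of an ethtool get_drvinfo handler:

	static void example_get_drvinfo(struct net_device *net_dev,
					struct ethtool_drvinfo *info)
	{
		struct efx_nic *efx = netdev_priv(net_dev);

		/* Fills "a.b.c.d", or "" if the GET_VERSION MCDI call failed */
		efx_mcdi_print_fwver(efx, info->fw_version,
				     sizeof(info->fw_version));
	}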
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index c792f1d65e48..aced2a7856fc 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2009 Solarflare Communications Inc.
+ * Copyright 2008-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -93,7 +93,7 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
-extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build);
+extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached_out);
extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index f88f4bf986ff..33f7294edb47 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009 Solarflare Communications Inc.
+ * Copyright 2009-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 90359e644006..b86a15f221ad 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009 Solarflare Communications Inc.
+ * Copyright 2009-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index c992742446b1..ec3f740f5465 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009 Solarflare Communications Inc.
+ * Copyright 2009-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -16,7 +16,6 @@
#include "phy.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
-#include "mdio_10g.h"
#include "nic.h"
#include "selftest.h"
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 98d946020429..19e68c26d103 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -15,7 +15,6 @@
#include "net_driver.h"
#include "mdio_10g.h"
#include "workarounds.h"
-#include "nic.h"
unsigned efx_mdio_id_oui(u32 id)
{
@@ -52,13 +51,10 @@ int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
return spins ? spins : -ETIMEDOUT;
}
-static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
+static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
{
int status;
- if (LOOPBACK_INTERNAL(efx))
- return 0;
-
if (mmd != MDIO_MMD_AN) {
/* Read MMD STATUS2 to check it is responding. */
status = efx_mdio_read(efx, mmd, MDIO_STAT2);
@@ -69,20 +65,6 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
}
}
- /* Read MMD STATUS 1 to check for fault. */
- status = efx_mdio_read(efx, mmd, MDIO_STAT1);
- if (status & MDIO_STAT1_FAULT) {
- if (fault_fatal) {
- netif_err(efx, hw, efx->net_dev,
- "PHY MMD %d reporting fatal"
- " fault: status %x\n", mmd, status);
- return -EIO;
- } else {
- netif_dbg(efx, hw, efx->net_dev,
- "PHY MMD %d reporting status"
- " %x (expected)\n", mmd, status);
- }
- }
return 0;
}
@@ -131,8 +113,7 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
return rc;
}
-int efx_mdio_check_mmds(struct efx_nic *efx,
- unsigned int mmd_mask, unsigned int fatal_mask)
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
{
int mmd = 0, probe_mmd, devs1, devs2;
u32 devices;
@@ -162,13 +143,9 @@ int efx_mdio_check_mmds(struct efx_nic *efx,
/* Check all required MMDs are responding and happy. */
while (mmd_mask) {
- if (mmd_mask & 1) {
- int fault_fatal = fatal_mask & 1;
- if (efx_mdio_check_mmd(efx, mmd, fault_fatal))
- return -EIO;
- }
+ if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
+ return -EIO;
mmd_mask = mmd_mask >> 1;
- fatal_mask = fatal_mask >> 1;
mmd++;
}
@@ -338,7 +315,7 @@ int efx_mdio_test_alive(struct efx_nic *efx)
"no MDIO PHY present with ID %d\n", efx->mdio.prtad);
rc = -EINVAL;
} else {
- rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
+ rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
}
mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 75791d3d4963..df0703940c83 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -68,8 +68,7 @@ extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
int spins, int spintime);
/* As efx_mdio_check_mmd but for multiple MMDs */
-int efx_mdio_check_mmds(struct efx_nic *efx,
- unsigned int mmd_mask, unsigned int fatal_mask);
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
/* Check the link status of specified mmds in bit mask */
extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 02e54b4f701f..e646bfce2d84 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -321,14 +321,15 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
struct efx_mtd *efx_mtd = mtd->priv;
const struct efx_spi_device *spi = efx_mtd->spi;
struct efx_nic *efx = efx_mtd->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
- rc = mutex_lock_interruptible(&efx->spi_lock);
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = falcon_spi_read(efx, spi, part->offset + start, len,
retlen, buffer);
- mutex_unlock(&efx->spi_lock);
+ mutex_unlock(&nic_data->spi_lock);
return rc;
}
@@ -337,13 +338,14 @@ static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
struct efx_nic *efx = efx_mtd->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
- rc = mutex_lock_interruptible(&efx->spi_lock);
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = efx_spi_erase(part, part->offset + start, len);
- mutex_unlock(&efx->spi_lock);
+ mutex_unlock(&nic_data->spi_lock);
return rc;
}
@@ -354,14 +356,15 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
struct efx_mtd *efx_mtd = mtd->priv;
const struct efx_spi_device *spi = efx_mtd->spi;
struct efx_nic *efx = efx_mtd->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
- rc = mutex_lock_interruptible(&efx->spi_lock);
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
if (rc)
return rc;
rc = falcon_spi_write(efx, spi, part->offset + start, len,
retlen, buffer);
- mutex_unlock(&efx->spi_lock);
+ mutex_unlock(&nic_data->spi_lock);
return rc;
}
@@ -370,11 +373,12 @@ static int falcon_mtd_sync(struct mtd_info *mtd)
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
struct efx_nic *efx = efx_mtd->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
int rc;
- mutex_lock(&efx->spi_lock);
+ mutex_lock(&nic_data->spi_lock);
rc = efx_spi_slow_wait(part, true);
- mutex_unlock(&efx->spi_lock);
+ mutex_unlock(&nic_data->spi_lock);
return rc;
}
@@ -387,35 +391,67 @@ static struct efx_mtd_ops falcon_mtd_ops = {
static int falcon_mtd_probe(struct efx_nic *efx)
{
- struct efx_spi_device *spi = efx->spi_flash;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct efx_spi_device *spi;
struct efx_mtd *efx_mtd;
- int rc;
+ int rc = -ENODEV;
ASSERT_RTNL();
- if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START)
- return -ENODEV;
-
- efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->spi = spi;
- efx_mtd->name = "flash";
- efx_mtd->ops = &falcon_mtd_ops;
+ spi = &nic_data->spi_flash;
+ if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
+ efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
+ GFP_KERNEL);
+ if (!efx_mtd)
+ return -ENOMEM;
+
+ efx_mtd->spi = spi;
+ efx_mtd->name = "flash";
+ efx_mtd->ops = &falcon_mtd_ops;
+
+ efx_mtd->n_parts = 1;
+ efx_mtd->part[0].mtd.type = MTD_NORFLASH;
+ efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
+ efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
+ efx_mtd->part[0].mtd.erasesize = spi->erase_size;
+ efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
+ efx_mtd->part[0].type_name = "sfc_flash_bootrom";
+
+ rc = efx_mtd_probe_device(efx, efx_mtd);
+ if (rc) {
+ kfree(efx_mtd);
+ return rc;
+ }
+ }
- efx_mtd->n_parts = 1;
- efx_mtd->part[0].mtd.type = MTD_NORFLASH;
- efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
- efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
- efx_mtd->part[0].mtd.erasesize = spi->erase_size;
- efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
- efx_mtd->part[0].type_name = "sfc_flash_bootrom";
+ spi = &nic_data->spi_eeprom;
+ if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
+ efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
+ GFP_KERNEL);
+ if (!efx_mtd)
+ return -ENOMEM;
+
+ efx_mtd->spi = spi;
+ efx_mtd->name = "EEPROM";
+ efx_mtd->ops = &falcon_mtd_ops;
+
+ efx_mtd->n_parts = 1;
+ efx_mtd->part[0].mtd.type = MTD_RAM;
+ efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
+ efx_mtd->part[0].mtd.size =
+ min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
+ EFX_EEPROM_BOOTCONFIG_START;
+ efx_mtd->part[0].mtd.erasesize = spi->erase_size;
+ efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
+ efx_mtd->part[0].type_name = "sfc_bootconfig";
+
+ rc = efx_mtd_probe_device(efx, efx_mtd);
+ if (rc) {
+ kfree(efx_mtd);
+ return rc;
+ }
+ }
- rc = efx_mtd_probe_device(efx, efx_mtd);
- if (rc)
- kfree(efx_mtd);
return rc;
}
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 0a7e26d73b52..215d5c51bfa0 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -41,7 +41,7 @@
*
**************************************************************************/
-#define EFX_DRIVER_VERSION "3.0"
+#define EFX_DRIVER_VERSION "3.1"
#ifdef EFX_ENABLE_DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -63,10 +63,12 @@
/* Checksum generation is a per-queue option in hardware, so each
* queue visible to the networking core is backed by two hardware TX
* queues. */
-#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS
-#define EFX_TXQ_TYPE_OFFLOAD 1
-#define EFX_TXQ_TYPES 2
-#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
+#define EFX_MAX_TX_TC 2
+#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
+#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
+#define EFX_TXQ_TYPES 4
+#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
/**
* struct efx_special_buffer - An Efx special buffer
@@ -136,14 +138,20 @@ struct efx_tx_buffer {
* @efx: The associated Efx NIC
* @queue: DMA queue number
* @channel: The associated channel
+ * @core_txq: The networking core TX queue structure
* @buffer: The software buffer ring
* @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
* @flushed: Used when handling queue flushing
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
- * @stopped: Stopped count.
- * Set if this TX queue is currently stopping its port.
+ * @old_write_count: The value of @write_count when last checked.
+ * This is here for performance reasons. The xmit path will
+ * only get the up-to-date value of @write_count if this
+ * variable indicates that the queue is empty. This is to
+ * avoid cache-line ping-pong between the xmit path and the
+ * completion path.
* @insert_count: Current insert pointer
* This is the number of buffers that have been added to the
* software ring.
@@ -163,21 +171,26 @@ struct efx_tx_buffer {
* @tso_long_headers: Number of packets with headers too long for standard
* blocks
* @tso_packets: Number of packets via the TSO xmit path
+ * @pushes: Number of times the TX push feature has been used
+ * @empty_read_count: If the completion path has seen the queue as empty
+ * and the transmission path has not yet checked this, the value of
+ * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
*/
struct efx_tx_queue {
/* Members which don't change on the fast path */
struct efx_nic *efx ____cacheline_aligned_in_smp;
unsigned queue;
struct efx_channel *channel;
- struct efx_nic *nic;
+ struct netdev_queue *core_txq;
struct efx_tx_buffer *buffer;
struct efx_special_buffer txd;
unsigned int ptr_mask;
+ bool initialised;
enum efx_flush_state flushed;
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
- int stopped;
+ unsigned int old_write_count;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -187,6 +200,11 @@ struct efx_tx_queue {
unsigned int tso_bursts;
unsigned int tso_long_headers;
unsigned int tso_packets;
+ unsigned int pushes;
+
+ /* Members shared between paths and sometimes updated */
+ unsigned int empty_read_count ____cacheline_aligned_in_smp;
+#define EFX_EMPTY_COUNT_VALID 0x80000000
};
/**
@@ -196,15 +214,17 @@ struct efx_tx_queue {
* If both this and page are %NULL, the buffer slot is currently free.
* @page: The associated page buffer, if any.
* If both this and skb are %NULL, the buffer slot is currently free.
- * @data: Pointer to ethernet header
* @len: Buffer length, in bytes.
+ * @is_page: Indicates if @page is valid. If false, @skb is valid.
*/
struct efx_rx_buffer {
dma_addr_t dma_addr;
- struct sk_buff *skb;
- struct page *page;
- char *data;
+ union {
+ struct sk_buff *skb;
+ struct page *page;
+ } u;
unsigned int len;
+ bool is_page;
};
/**
@@ -305,7 +325,6 @@ enum efx_rx_alloc_method {
* @irq_moderation: IRQ moderation value (in hardware ticks)
* @napi_dev: Net device used with NAPI
* @napi_str: NAPI control structure
- * @reset_work: Scheduled reset work thread
* @work_pending: Is work pending via NAPI?
* @eventq: Event queue buffer
* @eventq_mask: Event queue pointer mask
@@ -326,8 +345,6 @@ enum efx_rx_alloc_method {
* @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
* @rx_queue: RX queue for this channel
- * @tx_stop_count: Core TX queue stop count
- * @tx_stop_lock: Core TX queue stop lock
* @tx_queue: TX queues for this channel
*/
struct efx_channel {
@@ -347,6 +364,9 @@ struct efx_channel {
unsigned int irq_count;
unsigned int irq_mod_score;
+#ifdef CONFIG_RFS_ACCEL
+ unsigned int rfs_filters_added;
+#endif
int rx_alloc_level;
int rx_alloc_push_pages;
@@ -366,11 +386,7 @@ struct efx_channel {
bool rx_pkt_csummed;
struct efx_rx_queue rx_queue;
-
- atomic_t tx_stop_count;
- spinlock_t tx_stop_lock;
-
- struct efx_tx_queue tx_queue[2];
+ struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
};
enum efx_led_mode {
@@ -621,14 +637,13 @@ struct efx_filter_state;
* @pci_dev: The PCI device
* @type: Controller type attributes
* @legacy_irq: IRQ number
+ * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
* @workqueue: Workqueue for port reconfigures and the HW monitor.
* Work items do not hold and must not acquire RTNL.
* @workqueue_name: Name of workqueue
* @reset_work: Scheduled reset workitem
- * @monitor_work: Hardware monitor workitem
* @membase_phys: Memory BAR value as physical address
* @membase: Memory BAR value
- * @biu_lock: BIU (bus interface unit) lock
* @interrupt_mode: Interrupt mode
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
* @irq_rx_moderation: IRQ moderation time for RX event queues
@@ -647,23 +662,14 @@ struct efx_filter_state;
* @n_tx_channels: Number of channels used for TX
* @rx_buffer_len: RX buffer length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
* @irq_status: Interrupt status buffer
- * @last_irq_cpu: Last CPU to handle interrupt.
- * This register is written with the SMP processor ID whenever an
- * interrupt is handled. It is used by efx_nic_test_interrupt()
- * to verify that an interrupt has occurred.
* @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
* @fatal_irq_level: IRQ level (bit number) used for serious errors
- * @spi_flash: SPI flash device
- * This field will be %NULL if no flash device is present (or for Siena).
- * @spi_eeprom: SPI EEPROM device
- * This field will be %NULL if no EEPROM device is present (or for Siena).
- * @spi_lock: SPI bus lock
* @mtd_list: List of MTDs attached to the NIC
- * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
* @nic_data: Hardware dependant state
* @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
* @port_inhibited, efx_monitor() and efx_reconfigure_port()
@@ -676,21 +682,14 @@ struct efx_filter_state;
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
* @rx_checksum_enabled: RX checksumming enabled
- * @mac_stats: MAC statistics. These include all statistics the MACs
- * can provide. Generic code converts these into a standard
- * &struct net_device_stats.
* @stats_buffer: DMA buffer for statistics
- * @stats_lock: Statistics update lock. Serialises statistics fetches
* @mac_op: MAC interface
- * @mac_address: Permanent MAC address
* @phy_type: PHY type
- * @mdio_lock: MDIO lock
* @phy_op: PHY interface
* @phy_data: PHY private data (including PHY-specific stats)
* @mdio: PHY MDIO interface
* @mdio_bus: PHY MDIO bus ID (only used by Siena)
* @phy_mode: PHY operating mode. Serialised by @mac_lock.
- * @xmac_poll_required: XMAC link state needs polling
* @link_advertising: Autonegotiation advertising flags
* @link_state: Current state of the link
* @n_link_state_changes: Number of times the link has changed state
@@ -701,21 +700,34 @@ struct efx_filter_state;
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
+ * @monitor_work: Hardware monitor workitem
+ * @biu_lock: BIU (bus interface unit) lock
+ * @last_irq_cpu: Last CPU to handle interrupt.
+ * This register is written with the SMP processor ID whenever an
+ * interrupt is handled. It is used by efx_nic_test_interrupt()
+ * to verify that an interrupt has occurred.
+ * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
+ * @mac_stats: MAC statistics. These include all statistics the MACs
+ * can provide. Generic code converts these into a standard
+ * &struct net_device_stats.
+ * @stats_lock: Statistics update lock. Serialises statistics fetches
*
* This is stored in the private area of the &struct net_device.
*/
struct efx_nic {
+ /* The following fields should be written very rarely */
+
char name[IFNAMSIZ];
struct pci_dev *pci_dev;
const struct efx_nic_type *type;
int legacy_irq;
+ bool legacy_irq_enabled;
struct workqueue_struct *workqueue;
char workqueue_name[16];
struct work_struct reset_work;
- struct delayed_work monitor_work;
resource_size_t membase_phys;
void __iomem *membase;
- spinlock_t biu_lock;
+
enum efx_int_mode interrupt_mode;
bool irq_rx_adaptive;
unsigned int irq_rx_moderation;
@@ -732,6 +744,7 @@ struct efx_nic {
unsigned next_buffer_table;
unsigned n_channels;
unsigned n_rx_channels;
+ unsigned tx_channel_offset;
unsigned n_tx_channels;
unsigned int rx_buffer_len;
unsigned int rx_buffer_order;
@@ -742,19 +755,13 @@ struct efx_nic {
unsigned long int_error_expire;
struct efx_buffer irq_status;
- volatile signed int last_irq_cpu;
unsigned irq_zero_count;
unsigned fatal_irq_level;
- struct efx_spi_device *spi_flash;
- struct efx_spi_device *spi_eeprom;
- struct mutex spi_lock;
#ifdef CONFIG_SFC_MTD
struct list_head mtd_list;
#endif
- unsigned n_rx_nodesc_drop_cnt;
-
void *nic_data;
struct mutex mac_lock;
@@ -766,22 +773,17 @@ struct efx_nic {
struct net_device *net_dev;
bool rx_checksum_enabled;
- struct efx_mac_stats mac_stats;
struct efx_buffer stats_buffer;
- spinlock_t stats_lock;
struct efx_mac_operations *mac_op;
- unsigned char mac_address[ETH_ALEN];
unsigned int phy_type;
- struct mutex mdio_lock;
struct efx_phy_operations *phy_op;
void *phy_data;
struct mdio_if_info mdio;
unsigned int mdio_bus;
enum efx_phy_mode phy_mode;
- bool xmac_poll_required;
u32 link_advertising;
struct efx_link_state link_state;
unsigned int n_link_state_changes;
@@ -797,6 +799,15 @@ struct efx_nic {
void *loopback_selftest;
struct efx_filter_state *filter_state;
+
+ /* The following fields may be written more often */
+
+ struct delayed_work monitor_work ____cacheline_aligned_in_smp;
+ spinlock_t biu_lock;
+ volatile signed int last_irq_cpu;
+ unsigned n_rx_nodesc_drop_cnt;
+ struct efx_mac_stats mac_stats;
+ spinlock_t stats_lock;
};
static inline int efx_dev_registered(struct efx_nic *efx)
@@ -829,6 +840,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* be called while the controller is uninitialised.
* @probe_port: Probe the MAC and PHY
* @remove_port: Free resources allocated by probe_port()
+ * @handle_global_event: Handle a "global" event (may be %NULL)
* @prepare_flush: Prepare the hardware for flushing the DMA queues
* @update_stats: Update statistics not provided by event handling
* @start_stats: Start the regular fetching of statistics
@@ -873,6 +885,7 @@ struct efx_nic_type {
int (*reset)(struct efx_nic *efx, enum reset_type method);
int (*probe_port)(struct efx_nic *efx);
void (*remove_port)(struct efx_nic *efx);
+ bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
void (*prepare_flush)(struct efx_nic *efx);
void (*update_stats)(struct efx_nic *efx);
void (*start_stats)(struct efx_nic *efx);
@@ -902,7 +915,7 @@ struct efx_nic_type {
unsigned int phys_addr_channels;
unsigned int tx_dc_base;
unsigned int rx_dc_base;
- unsigned long offload_features;
+ u32 offload_features;
u32 reset_world_flags;
};
@@ -926,21 +939,48 @@ efx_get_channel(struct efx_nic *efx, unsigned index)
_channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
(_efx)->channel[_channel->channel + 1] : NULL)
-extern struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type);
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
+{
+ EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+ type >= EFX_TXQ_TYPES);
+ return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
+}
+
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
{
- struct efx_tx_queue *tx_queue = channel->tx_queue;
- EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
- return tx_queue->channel ? tx_queue + type : NULL;
+ EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+ type >= EFX_TXQ_TYPES);
+ return &channel->tx_queue[type];
+}
+
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+ return !(tx_queue->efx->net_dev->num_tc < 2 &&
+ tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
}
/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
- for (_tx_queue = efx_channel_get_tx_queue(channel, 0); \
- _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+ if (!efx_channel_has_tx_queues(_channel)) \
+ ; \
+ else \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+ efx_tx_queue_used(_tx_queue); \
+ _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
_tx_queue++)
static inline struct efx_rx_queue *
@@ -950,18 +990,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
return &efx->channel[index]->rx_queue;
}
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+ return channel->channel < channel->efx->n_rx_channels;
+}
+
static inline struct efx_rx_queue *
efx_channel_get_rx_queue(struct efx_channel *channel)
{
- return channel->channel < channel->efx->n_rx_channels ?
- &channel->rx_queue : NULL;
+ EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+ return &channel->rx_queue;
}
/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
- for (_rx_queue = efx_channel_get_rx_queue(channel); \
- _rx_queue; \
- _rx_queue = NULL)
+ if (!efx_channel_has_rx_queue(_channel)) \
+ ; \
+ else \
+ for (_rx_queue = &(_channel)->rx_queue; \
+ _rx_queue; \
+ _rx_queue = NULL)
static inline struct efx_channel *
efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
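
The queue-type flags introduced in this header combine into four TX queues per channel. The userspace sketch below, which is not driver code, shows how a flat queue number would decompose under the assumed numbering channel * EFX_TXQ_TYPES + type (the layout the flag tests on tx_queue->queue imply); describe_queue() is a hypothetical helper used only for illustration.

#include <stdio.h>

#define EFX_TXQ_TYPE_OFFLOAD 1   /* flag: checksum-offload queue */
#define EFX_TXQ_TYPE_HIGHPRI 2   /* flag: high-priority (TC 1) queue */
#define EFX_TXQ_TYPES        4   /* all flag combinations */

/* Hypothetical decomposition of a flat queue number into channel and type. */
static void describe_queue(unsigned queue)
{
        unsigned channel = queue / EFX_TXQ_TYPES;
        unsigned type = queue % EFX_TXQ_TYPES;

        printf("queue %u: channel %u, csum offload %s, high priority %s\n",
               queue, channel,
               (type & EFX_TXQ_TYPE_OFFLOAD) ? "yes" : "no",
               (type & EFX_TXQ_TYPE_HIGHPRI) ? "yes" : "no");
}

int main(void)
{
        unsigned queue;

        /* Channel 0 owns queues 0-3, channel 1 owns queues 4-7, and so on. */
        for (queue = 0; queue < 2 * EFX_TXQ_TYPES; queue++)
                describe_queue(queue);
        return 0;
}
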
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 41c36b9a4244..e8396614daf3 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -41,26 +41,6 @@
#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
-/* RX FIFO XOFF watermark
- *
- * When the amount of the RX FIFO increases used increases past this
- * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xoff_thresh = -1;
-module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
-
-/* RX FIFO XON watermark
- *
- * When the amount of the RX FIFO used decreases below this
- * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
- * This also has an effect on RX/TX arbitration
- */
-int efx_nic_rx_xon_thresh = -1;
-module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
-MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
-
/* If EFX_MAX_INT_ERRORS internal errors occur within
* EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
* disable it.
@@ -362,6 +342,35 @@ static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
+/* Write pointer and first descriptor for TX descriptor ring */
+static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned write_ptr;
+ efx_oword_t reg;
+
+ BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
+ BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
+ FRF_AZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
+}
+
+static inline bool
+efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
+{
+ unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+ if (empty_read_count == 0)
+ return false;
+
+ tx_queue->empty_read_count = 0;
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
/* For each entry inserted into the software descriptor ring, create a
* descriptor in the hardware TX descriptor ring (in host memory), and
@@ -373,6 +382,7 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
struct efx_tx_buffer *buffer;
efx_qword_t *txd;
unsigned write_ptr;
+ unsigned old_write_count = tx_queue->write_count;
BUG_ON(tx_queue->write_count == tx_queue->insert_count);
@@ -391,7 +401,15 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
} while (tx_queue->write_count != tx_queue->insert_count);
wmb(); /* Ensure descriptors are written before they are fetched */
- efx_notify_tx_desc(tx_queue);
+
+ if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_notify_tx_desc(tx_queue);
+ }
}
/* Allocate hardware resources for a TX queue */
@@ -407,8 +425,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
- efx_oword_t tx_desc_ptr;
struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t reg;
tx_queue->flushed = FLUSH_NONE;
@@ -416,7 +434,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
efx_init_special_buffer(efx, &tx_queue->txd);
/* Push TX descriptor ring to card */
- EFX_POPULATE_OWORD_10(tx_desc_ptr,
+ EFX_POPULATE_OWORD_10(reg,
FRF_AZ_TX_DESCQ_EN, 1,
FRF_AZ_TX_ISCSI_DDIG_EN, 0,
FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -432,17 +450,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
- EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
- EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
!csum);
}
- efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+ efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
tx_queue->queue);
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
- efx_oword_t reg;
-
/* Only 128 bits in this register */
BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
@@ -453,6 +469,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
set_bit_le(tx_queue->queue, (void *)&reg);
efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
}
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_1(reg,
+ FRF_BZ_TX_PACE,
+ (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ FFE_BZ_TX_PACE_OFF :
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+ tx_queue->queue);
+ }
}
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -894,46 +920,6 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
channel->channel, EFX_QWORD_VAL(*event));
}
-/* Global events are basically PHY events */
-static void
-efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
-{
- struct efx_nic *efx = channel->efx;
- bool handled = false;
-
- if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
- EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
- EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
- /* Ignored */
- handled = true;
- }
-
- if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
- EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
- efx->xmac_poll_required = true;
- handled = true;
- }
-
- if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
- EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
- EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
- netif_err(efx, rx_err, efx->net_dev,
- "channel %d seen global RX_RESET event. Resetting.\n",
- channel->channel);
-
- atomic_inc(&efx->rx_reset);
- efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
- RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
- handled = true;
- }
-
- if (!handled)
- netif_err(efx, hw, efx->net_dev,
- "channel %d unknown global event "
- EFX_QWORD_FMT "\n", channel->channel,
- EFX_QWORD_VAL(*event));
-}
-
static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
@@ -1050,15 +1036,17 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
case FSE_AZ_EV_CODE_DRV_GEN_EV:
efx_handle_generated_event(channel, &event);
break;
- case FSE_AZ_EV_CODE_GLOBAL_EV:
- efx_handle_global_event(channel, &event);
- break;
case FSE_AZ_EV_CODE_DRIVER_EV:
efx_handle_driver_event(channel, &event);
break;
case FSE_CZ_EV_CODE_MCDI_EV:
efx_mcdi_process_event(channel, &event);
break;
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (efx->type->handle_global_event &&
+ efx->type->handle_global_event(channel, &event))
+ break;
+ /* else fall through */
default:
netif_err(channel->efx, hw, channel->efx->net_dev,
"channel %d unknown event type %d (data "
@@ -1238,8 +1226,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
/* Flush all tx queues in parallel */
efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_flush_tx_queue(tx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised)
+ efx_flush_tx_queue(tx_queue);
+ }
}
/* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1252,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
++rx_pending;
}
}
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- if (tx_queue->flushed != FLUSH_DONE)
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised &&
+ tx_queue->flushed != FLUSH_DONE)
++tx_pending;
}
}
@@ -1278,8 +1269,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
/* Mark the queues as all flushed. We're going to return failure
* leading to a reset, or fake up success anyway */
efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- if (tx_queue->flushed != FLUSH_DONE)
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised &&
+ tx_queue->flushed != FLUSH_DONE)
netif_err(efx, hw, efx->net_dev,
"tx queue %d flush command timed out\n",
tx_queue->queue);
@@ -1418,6 +1410,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
u32 queues;
int syserr;
+ /* Could this be ours? If interrupts are disabled then the
+ * channel state may not be valid.
+ */
+ if (!efx->legacy_irq_enabled)
+ return result;
+
/* Read the ISR which also ACKs the interrupts */
efx_readd(efx, &reg, FR_BZ_INT_ISR0);
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
@@ -1664,7 +1662,7 @@ void efx_nic_init_common(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
/* Enable SW_EV to inherit in char driver - assume harmless here */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
@@ -1676,6 +1674,19 @@ void efx_nic_init_common(struct efx_nic *efx)
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_4(temp,
+ /* Default values */
+ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+ FRF_BZ_TX_PACE_SB_AF, 0xb,
+ FRF_BZ_TX_PACE_FB_BASE, 0,
+ /* Allow large pace values in the
+ * fast bin. */
+ FRF_BZ_TX_PACE_BIN_TH,
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+ }
}
/* Register dump */
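
The descriptor-push path added above relies on a small handshake: the completion path records the read pointer, tagged with EFX_EMPTY_COUNT_VALID, when it sees the queue drain, and the xmit path pushes a descriptor through the doorbell only when that snapshot matches the write pointer it is about to use. A minimal userspace sketch of that handshake follows; struct txq, complete_all() and may_push() are illustrative stand-ins, not the driver's own types or functions.

#include <stdbool.h>
#include <stdio.h>

#define EFX_EMPTY_COUNT_VALID 0x80000000u

struct txq {
        unsigned int write_count;      /* owned by the xmit path */
        unsigned int read_count;       /* owned by the completion path */
        unsigned int empty_read_count; /* shared handshake word */
};

/* Completion path: note that the queue became empty at this read pointer. */
static void complete_all(struct txq *q)
{
        q->read_count = q->write_count;
        q->empty_read_count = q->read_count | EFX_EMPTY_COUNT_VALID;
}

/* Xmit path: push the first descriptor only if the queue was last seen
 * empty at exactly the position we are about to write. */
static bool may_push(struct txq *q, unsigned int old_write_count)
{
        unsigned int empty = q->empty_read_count;

        if (empty == 0)
                return false;
        q->empty_read_count = 0;
        return ((empty ^ old_write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}

int main(void)
{
        /* Assume the queue starts out marked as seen empty at position 0. */
        struct txq q = { 0, 0, EFX_EMPTY_COUNT_VALID };

        printf("first xmit may push: %d\n", may_push(&q, q.write_count));
        q.write_count += 3;           /* queue three descriptors, no push */
        printf("busy xmit may push: %d\n", may_push(&q, q.write_count));
        complete_all(&q);             /* completion drains the queue */
        printf("next xmit may push: %d\n", may_push(&q, q.write_count));
        return 0;
}
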
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 0438dc98722d..d9de1b647d41 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -15,6 +15,7 @@
#include "net_driver.h"
#include "efx.h"
#include "mcdi.h"
+#include "spi.h"
/*
* Falcon hardware control
@@ -113,6 +114,11 @@ struct falcon_board {
* @stats_pending: Is there a pending DMA of MAC statistics.
* @stats_timer: A timer for regularly fetching MAC statistics.
* @stats_dma_done: Pointer to the flag which indicates DMA completion.
+ * @spi_flash: SPI flash device
+ * @spi_eeprom: SPI EEPROM device
+ * @spi_lock: SPI bus lock
+ * @mdio_lock: MDIO bus lock
+ * @xmac_poll_required: XMAC link state needs polling
*/
struct falcon_nic_data {
struct pci_dev *pci_dev2;
@@ -121,6 +127,11 @@ struct falcon_nic_data {
bool stats_pending;
struct timer_list stats_timer;
u32 *stats_dma_done;
+ struct efx_spi_device spi_flash;
+ struct efx_spi_device spi_eeprom;
+ struct mutex spi_lock;
+ struct mutex mdio_lock;
+ bool xmac_poll_required;
};
static inline struct falcon_board *falcon_board(struct efx_nic *efx)
@@ -131,21 +142,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
/**
* struct siena_nic_data - Siena NIC state
- * @fw_version: Management controller firmware version
- * @fw_build: Firmware build number
* @mcdi: Management-Controller-to-Driver Interface
* @wol_filter_id: Wake-on-LAN packet filter id
- * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
*/
struct siena_nic_data {
- u64 fw_version;
- u32 fw_build;
struct efx_mcdi_iface mcdi;
int wol_filter_id;
};
-extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-
extern struct efx_nic_type falcon_a1_nic_type;
extern struct efx_nic_type falcon_b0_nic_type;
extern struct efx_nic_type siena_a0_nic_type;
@@ -184,7 +188,6 @@ extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
/* MAC/PHY */
extern void falcon_drain_tx_fifo(struct efx_nic *efx);
extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
-extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
/* Interrupts and test events */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 1dab609757fb..b3b79472421e 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 68813d1d85f3..55f90924247e 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -41,6 +41,8 @@
#define PCS_UC_STATUS_LBN 0
#define PCS_UC_STATUS_WIDTH 8
#define PCS_UC_STATUS_FW_SAVE 0x20
+#define PMA_PMD_MODE_REG 0xc301
+#define PMA_PMD_RXIN_SEL_LBN 6
#define PMA_PMD_FTX_CTRL2_REG 0xc309
#define PMA_PMD_FTX_STATIC_LBN 13
#define PMA_PMD_VEND1_REG 0xc001
@@ -282,6 +284,10 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
* slow) reload of the firmware image (the microcontroller's code
* memory is not affected by the microcontroller reset). */
efx_mdio_write(efx, 1, 0xc317, 0x00ff);
+ /* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
+ * restart doesn't reset it. We need to do that ourselves. */
+ efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
+ 1 << PMA_PMD_RXIN_SEL_LBN, false);
efx_mdio_write(efx, 1, 0xc300, 0x0002);
msleep(20);
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 96430ed81c36..cc2c86b76a7b 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -2907,6 +2907,12 @@
#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
+
/* DRIVER_EV */
/* Sub-fields of an RX flush completion event */
#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 6d0959b5158e..c0fdb59030fb 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -37,7 +37,7 @@
* This driver supports two methods for allocating and using RX buffers:
* each RX buffer may be backed by an skb or by an order-n page.
*
- * When LRO is in use then the second method has a lower overhead,
+ * When GRO is in use then the second method has a lower overhead,
* since we don't have to allocate then free skbs on reassembled frames.
*
* Values:
@@ -50,25 +50,25 @@
*
* - Since pushing and popping descriptors are separated by the rx_queue
* size, so the watermarks should be ~rxd_size.
- * - The performance win by using page-based allocation for LRO is less
- * than the performance hit of using page-based allocation of non-LRO,
+ * - The performance win by using page-based allocation for GRO is less
+ * than the performance hit of using page-based allocation of non-GRO,
* so the watermarks should reflect this.
*
* Per channel we maintain a single variable, updated by each channel:
*
- * rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
+ * rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
* RX_ALLOC_FACTOR_SKB)
* Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
* limits the hysteresis), and update the allocation strategy:
*
- * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
+ * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
* RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
*/
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
-#define RX_ALLOC_LEVEL_LRO 0x2000
+#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
-#define RX_ALLOC_FACTOR_LRO 1
+#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
/* This is the percentage fill level below which new RX descriptors
@@ -89,24 +89,37 @@ static unsigned int rx_refill_limit = 95;
*/
#define EFX_RXD_HEAD_ROOM 2
-static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+/* Offset of ethernet header within page */
+static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
+ struct efx_rx_buffer *buf)
{
/* Offset is always within one page, so we don't need to consider
* the page order.
*/
- return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
+ return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
+ efx->type->rx_buffer_hash_size);
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
return PAGE_SIZE << efx->rx_buffer_order;
}
-static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf)
+static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
+ if (buf->is_page)
+ return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
+ else
+ return ((u8 *)buf->u.skb->data +
+ efx->type->rx_buffer_hash_size);
+}
+
+static inline u32 efx_rx_buf_hash(const u8 *eh)
+{
+ /* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
- return __le32_to_cpup((const __le32 *)(buf->data - 4));
+ return __le32_to_cpup((const __le32 *)(eh - 4));
#else
- const u8 *data = (const u8 *)(buf->data - 4);
+ const u8 *data = eh - 4;
return ((u32)data[0] |
(u32)data[1] << 8 |
(u32)data[2] << 16 |
@@ -129,6 +142,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
struct net_device *net_dev = efx->net_dev;
struct efx_rx_buffer *rx_buf;
+ struct sk_buff *skb;
int skb_len = efx->rx_buffer_len;
unsigned index, count;
@@ -136,24 +150,23 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
- if (unlikely(!rx_buf->skb))
+ rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
+ if (unlikely(!skb))
return -ENOMEM;
- rx_buf->page = NULL;
/* Adjust the SKB for padding and checksum */
- skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+ skb_reserve(skb, NET_IP_ALIGN);
rx_buf->len = skb_len - NET_IP_ALIGN;
- rx_buf->data = (char *)rx_buf->skb->data;
- rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+ rx_buf->is_page = false;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
rx_buf->dma_addr = pci_map_single(efx->pci_dev,
- rx_buf->data, rx_buf->len,
+ skb->data, rx_buf->len,
PCI_DMA_FROMDEVICE);
if (unlikely(pci_dma_mapping_error(efx->pci_dev,
rx_buf->dma_addr))) {
- dev_kfree_skb_any(rx_buf->skb);
- rx_buf->skb = NULL;
+ dev_kfree_skb_any(skb);
+ rx_buf->u.skb = NULL;
return -EIO;
}
@@ -211,10 +224,9 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
- rx_buf->skb = NULL;
- rx_buf->page = page;
- rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->u.page = page;
rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+ rx_buf->is_page = true;
++rx_queue->added_count;
++rx_queue->alloc_page_count;
++state->refcnt;
@@ -235,19 +247,17 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
static void efx_unmap_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
{
- if (rx_buf->page) {
+ if (rx_buf->is_page && rx_buf->u.page) {
struct efx_rx_page_state *state;
- EFX_BUG_ON_PARANOID(rx_buf->skb);
-
- state = page_address(rx_buf->page);
+ state = page_address(rx_buf->u.page);
if (--state->refcnt == 0) {
pci_unmap_page(efx->pci_dev,
state->dma_addr,
efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
}
- } else if (likely(rx_buf->skb)) {
+ } else if (!rx_buf->is_page && rx_buf->u.skb) {
pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
rx_buf->len, PCI_DMA_FROMDEVICE);
}
@@ -256,12 +266,12 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
static void efx_free_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
{
- if (rx_buf->page) {
- __free_pages(rx_buf->page, efx->rx_buffer_order);
- rx_buf->page = NULL;
- } else if (likely(rx_buf->skb)) {
- dev_kfree_skb_any(rx_buf->skb);
- rx_buf->skb = NULL;
+ if (rx_buf->is_page && rx_buf->u.page) {
+ __free_pages(rx_buf->u.page, efx->rx_buffer_order);
+ rx_buf->u.page = NULL;
+ } else if (!rx_buf->is_page && rx_buf->u.skb) {
+ dev_kfree_skb_any(rx_buf->u.skb);
+ rx_buf->u.skb = NULL;
}
}
@@ -277,7 +287,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf)
{
- struct efx_rx_page_state *state = page_address(rx_buf->page);
+ struct efx_rx_page_state *state = page_address(rx_buf->u.page);
struct efx_rx_buffer *new_buf;
unsigned fill_level, index;
@@ -292,16 +302,14 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
}
++state->refcnt;
- get_page(rx_buf->page);
+ get_page(rx_buf->u.page);
index = rx_queue->added_count & rx_queue->ptr_mask;
new_buf = efx_rx_buffer(rx_queue, index);
new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
- new_buf->skb = NULL;
- new_buf->page = rx_buf->page;
- new_buf->data = (void *)
- ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
+ new_buf->u.page = rx_buf->u.page;
new_buf->len = rx_buf->len;
+ new_buf->is_page = true;
++rx_queue->added_count;
}
@@ -315,16 +323,15 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
struct efx_rx_buffer *new_buf;
unsigned index;
- if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
- page_count(rx_buf->page) == 1)
+ if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+ page_count(rx_buf->u.page) == 1)
efx_resurrect_rx_buffer(rx_queue, rx_buf);
index = rx_queue->added_count & rx_queue->ptr_mask;
new_buf = efx_rx_buffer(rx_queue, index);
memcpy(new_buf, rx_buf, sizeof(*new_buf));
- rx_buf->page = NULL;
- rx_buf->skb = NULL;
+ rx_buf->u.page = NULL;
++rx_queue->added_count;
}
@@ -428,7 +435,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
* data at the end of the skb will be trashed. So
* we have no choice but to leak the fragment.
*/
- *leak_packet = (rx_buf->skb != NULL);
+ *leak_packet = !rx_buf->is_page;
efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
} else {
if (net_ratelimit())
@@ -441,26 +448,25 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
-/* Pass a received packet up through the generic LRO stack
+/* Pass a received packet up through the generic GRO stack
*
* Handles driverlink veto, and passes the fragment up via
- * the appropriate LRO method
+ * the appropriate GRO method
*/
-static void efx_rx_packet_lro(struct efx_channel *channel,
+static void efx_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
- bool checksummed)
+ const u8 *eh, bool checksummed)
{
struct napi_struct *napi = &channel->napi_str;
gro_result_t gro_result;
- /* Pass the skb/page into the LRO engine */
- if (rx_buf->page) {
+ /* Pass the skb/page into the GRO engine */
+ if (rx_buf->is_page) {
struct efx_nic *efx = channel->efx;
- struct page *page = rx_buf->page;
+ struct page *page = rx_buf->u.page;
struct sk_buff *skb;
- EFX_BUG_ON_PARANOID(rx_buf->skb);
- rx_buf->page = NULL;
+ rx_buf->u.page = NULL;
skb = napi_get_frags(napi);
if (!skb) {
@@ -469,11 +475,11 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
}
if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(rx_buf);
+ skb->rxhash = efx_rx_buf_hash(eh);
skb_shinfo(skb)->frags[0].page = page;
skb_shinfo(skb)->frags[0].page_offset =
- efx_rx_buf_offset(rx_buf);
+ efx_rx_buf_offset(efx, rx_buf);
skb_shinfo(skb)->frags[0].size = rx_buf->len;
skb_shinfo(skb)->nr_frags = 1;
@@ -487,11 +493,10 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
gro_result = napi_gro_frags(napi);
} else {
- struct sk_buff *skb = rx_buf->skb;
+ struct sk_buff *skb = rx_buf->u.skb;
- EFX_BUG_ON_PARANOID(!skb);
EFX_BUG_ON_PARANOID(!checksummed);
- rx_buf->skb = NULL;
+ rx_buf->u.skb = NULL;
gro_result = napi_gro_receive(napi, skb);
}
@@ -499,7 +504,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
if (gro_result == GRO_NORMAL) {
channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
} else if (gro_result != GRO_DROP) {
- channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
+ channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
channel->irq_mod_score += 2;
}
}
@@ -513,9 +518,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
bool leak_packet = false;
rx_buf = efx_rx_buffer(rx_queue, index);
- EFX_BUG_ON_PARANOID(!rx_buf->data);
- EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
- EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
/* This allows the refill path to post another buffer.
* EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -554,12 +556,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.
*/
- prefetch(rx_buf->data);
+ prefetch(efx_rx_buf_eh(efx, rx_buf));
/* Pipeline receives so that we give time for packet headers to be
* prefetched into cache.
*/
- rx_buf->len = len;
+ rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
if (channel->rx_pkt)
__efx_rx_packet(channel,
@@ -574,45 +576,43 @@ void __efx_rx_packet(struct efx_channel *channel,
{
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
-
- rx_buf->data += efx->type->rx_buffer_hash_size;
- rx_buf->len -= efx->type->rx_buffer_hash_size;
+ u8 *eh = efx_rx_buf_eh(efx, rx_buf);
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
- efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
+ efx_loopback_rx_packet(efx, eh, rx_buf->len);
efx_free_rx_buffer(efx, rx_buf);
return;
}
- if (rx_buf->skb) {
- prefetch(skb_shinfo(rx_buf->skb));
+ if (!rx_buf->is_page) {
+ skb = rx_buf->u.skb;
- skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size);
- skb_put(rx_buf->skb, rx_buf->len);
+ prefetch(skb_shinfo(skb));
+
+ skb_reserve(skb, efx->type->rx_buffer_hash_size);
+ skb_put(skb, rx_buf->len);
if (efx->net_dev->features & NETIF_F_RXHASH)
- rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf);
+ skb->rxhash = efx_rx_buf_hash(eh);
/* Move past the ethernet header. rx_buf->data still points
* at the ethernet header */
- rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
- efx->net_dev);
+ skb->protocol = eth_type_trans(skb, efx->net_dev);
- skb_record_rx_queue(rx_buf->skb, channel->channel);
+ skb_record_rx_queue(skb, channel->channel);
}
- if (likely(checksummed || rx_buf->page)) {
- efx_rx_packet_lro(channel, rx_buf, checksummed);
+ if (likely(checksummed || rx_buf->is_page)) {
+ efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
return;
}
/* We now own the SKB */
- skb = rx_buf->skb;
- rx_buf->skb = NULL;
- EFX_BUG_ON_PARANOID(!skb);
+ skb = rx_buf->u.skb;
+ rx_buf->u.skb = NULL;
/* Set the SKB flags */
skb_checksum_none_assert(skb);
@@ -628,7 +628,7 @@ void efx_rx_strategy(struct efx_channel *channel)
{
enum efx_rx_alloc_method method = rx_alloc_method;
- /* Only makes sense to use page based allocation if LRO is enabled */
+ /* Only makes sense to use page based allocation if GRO is enabled */
if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
method = RX_ALLOC_METHOD_SKB;
} else if (method == RX_ALLOC_METHOD_AUTO) {
@@ -639,7 +639,7 @@ void efx_rx_strategy(struct efx_channel *channel)
channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
/* Decide on the allocation method */
- method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
+ method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
}
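
The allocation-strategy comments above give the GRO/skb heuristic in formula form; the sketch below runs it in userspace with the same constants so the hysteresis is easy to see. account_rx() and choose_method() are illustrative names, not driver functions.

#include <stdbool.h>
#include <stdio.h>

#define RX_ALLOC_LEVEL_GRO   0x2000
#define RX_ALLOC_LEVEL_MAX   0x3000
#define RX_ALLOC_FACTOR_GRO  1
#define RX_ALLOC_FACTOR_SKB  (-2)

enum rx_alloc_method { RX_ALLOC_METHOD_SKB, RX_ALLOC_METHOD_PAGE };

static int rx_alloc_level;

/* Per received frame: nudge the level according to how GRO handled it. */
static void account_rx(bool gro_merged)
{
        rx_alloc_level += gro_merged ? RX_ALLOC_FACTOR_GRO
                                     : RX_ALLOC_FACTOR_SKB;
}

/* Per NAPI poll: clamp the level and pick the allocation method. */
static enum rx_alloc_method choose_method(void)
{
        if (rx_alloc_level < 0)
                rx_alloc_level = 0;
        else if (rx_alloc_level > RX_ALLOC_LEVEL_MAX)
                rx_alloc_level = RX_ALLOC_LEVEL_MAX;

        return rx_alloc_level > RX_ALLOC_LEVEL_GRO ? RX_ALLOC_METHOD_PAGE
                                                   : RX_ALLOC_METHOD_SKB;
}

int main(void)
{
        int i;

        for (i = 0; i < 3 * RX_ALLOC_LEVEL_GRO; i++)
                account_rx(true);       /* a long GRO-friendly burst */
        printf("after GRO burst: %s\n",
               choose_method() == RX_ALLOC_METHOD_PAGE ? "page" : "skb");

        for (i = 0; i < RX_ALLOC_LEVEL_GRO; i++)
                account_rx(false);      /* traffic GRO cannot merge */
        printf("after skb burst: %s\n",
               choose_method() == RX_ALLOC_METHOD_PAGE ? "page" : "skb");
        return 0;
}
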
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0ebfb99f1299..a0f49b348d62 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
goto out;
}
- /* Test both types of TX queue */
+ /* Test all enabled types of TX queue */
efx_for_each_channel_tx_queue(tx_queue, channel) {
state->offload_csum = (tx_queue->queue &
EFX_TXQ_TYPE_OFFLOAD);
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index aed495a4dad7..dba5456e70f3 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2008 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 45236f58a258..e4dd8986b1fe 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -194,13 +194,7 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
static int siena_probe_nvconfig(struct efx_nic *efx)
{
- int rc;
-
- rc = efx_mcdi_get_board_cfg(efx, efx->mac_address, NULL);
- if (rc)
- return rc;
-
- return 0;
+ return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL);
}
static int siena_probe_nic(struct efx_nic *efx)
@@ -233,13 +227,6 @@ static int siena_probe_nic(struct efx_nic *efx)
if (rc)
goto fail1;
- rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Failed to read MCPU firmware version - rc %d\n", rc);
- goto fail1; /* MCPU absent? */
- }
-
/* Let the BMC know that the driver is now in charge of link and
* filter settings. We must do this before we reset the NIC */
rc = efx_mcdi_drv_attach(efx, true, &already_attached);
@@ -354,11 +341,6 @@ static int siena_init_nic(struct efx_nic *efx)
FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
- if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
- /* No MCDI operation has been defined to set thresholds */
- netif_err(efx, hw, efx->net_dev,
- "ignoring RX flow control thresholds\n");
-
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
if (rc)
@@ -520,16 +502,6 @@ static void siena_stop_nic_stats(struct efx_nic *efx)
efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
}
-void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len)
-{
- struct siena_nic_data *nic_data = efx->nic_data;
- snprintf(buf, len, "%u.%u.%u.%u",
- (unsigned int)(nic_data->fw_version >> 48),
- (unsigned int)(nic_data->fw_version >> 32 & 0xffff),
- (unsigned int)(nic_data->fw_version >> 16 & 0xffff),
- (unsigned int)(nic_data->fw_version & 0xffff));
-}
-
/**************************************************************************
*
* Wake on LAN
@@ -562,7 +534,7 @@ static int siena_set_wol(struct efx_nic *efx, u32 type)
if (nic_data->wol_filter_id != -1)
efx_mcdi_wol_filter_remove(efx,
nic_data->wol_filter_id);
- rc = efx_mcdi_wol_filter_set_magic(efx, efx->mac_address,
+ rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
&nic_data->wol_filter_id);
if (rc)
goto fail;
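Two MAC-address sources are now distinguished above: probe time writes the board-configured address into net_dev->perm_addr rather than a private efx->mac_address copy, while the wake-on-LAN magic-packet filter keys on the currently active net_dev->dev_addr. A one-line sketch (not part of this patch) of the usual relationship between the two fields at registration time:

	/* The permanent address read from the board becomes the initial
	 * active address; userspace may later override dev_addr only. */
	memcpy(net_dev->dev_addr, net_dev->perm_addr, ETH_ALEN);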
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 8bf4fce0813a..71f2e3ebe1c7 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005 Fen Systems Ltd.
- * Copyright 2006 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -61,6 +61,11 @@ struct efx_spi_device {
unsigned int block_size;
};
+static inline bool efx_spi_present(const struct efx_spi_device *spi)
+{
+ return spi->size != 0;
+}
+
int falcon_spi_cmd(struct efx_nic *efx,
const struct efx_spi_device *spi, unsigned int command,
int address, const void* in, void *out, size_t len);
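The new efx_spi_present() helper above lets callers test for a probed SPI device through its size field rather than a separate flag. A hedged sketch of a caller (the function name is hypothetical; falcon_spi_cmd() is the real accessor declared just above):

	/* Hypothetical caller: bail out early when no SPI flash/EEPROM was
	 * detected at probe time, i.e. spi->size is still 0. */
	static int example_read_nvram(struct efx_nic *efx,
				      const struct efx_spi_device *spi,
				      void *buf, size_t len)
	{
		if (!efx_spi_present(spi))
			return -ENODEV;
		/* ...issue falcon_spi_cmd() reads against the device... */
		return 0;
	}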
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 1bc6c48c96ee..efdceb35aaae 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Copyright 2007-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -15,9 +15,7 @@
#include "mdio_10g.h"
#include "nic.h"
#include "phy.h"
-#include "regs.h"
#include "workarounds.h"
-#include "selftest.h"
/* We expect these MMDs to be in the package. */
#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
@@ -198,7 +196,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
if (rc < 0)
return rc;
- rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
+ rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
return rc;
}
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 11726989fe2d..139801908217 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -1,7 +1,7 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2009 Solarflare Communications Inc.
+ * Copyright 2005-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -30,50 +30,6 @@
*/
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
-/* We need to be able to nest calls to netif_tx_stop_queue(), partly
- * because of the 2 hardware queues associated with each core queue,
- * but also so that we can inhibit TX for reasons other than a full
- * hardware queue. */
-void efx_stop_queue(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
- if (!tx_queue)
- return;
-
- spin_lock_bh(&channel->tx_stop_lock);
- netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
-
- atomic_inc(&channel->tx_stop_count);
- netif_tx_stop_queue(
- netdev_get_tx_queue(efx->net_dev,
- tx_queue->queue / EFX_TXQ_TYPES));
-
- spin_unlock_bh(&channel->tx_stop_lock);
-}
-
-/* Decrement core TX queue stop count and wake it if the count is 0 */
-void efx_wake_queue(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-
- if (!tx_queue)
- return;
-
- local_bh_disable();
- if (atomic_dec_and_lock(&channel->tx_stop_count,
- &channel->tx_stop_lock)) {
- netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
- netif_tx_wake_queue(
- netdev_get_tx_queue(efx->net_dev,
- tx_queue->queue / EFX_TXQ_TYPES));
- spin_unlock(&channel->tx_stop_lock);
- }
- local_bh_enable();
-}
-
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer)
{
@@ -234,21 +190,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
* checked. Update the xmit path's
* copy of read_count.
*/
- ++tx_queue->stopped;
+ netif_tx_stop_queue(tx_queue->core_txq);
/* This memory barrier protects the
- * change of stopped from the access
+ * change of queue state from the access
* of read_count. */
smp_mb();
tx_queue->old_read_count =
- *(volatile unsigned *)
- &tx_queue->read_count;
+ ACCESS_ONCE(tx_queue->read_count);
fill_level = (tx_queue->insert_count
- tx_queue->old_read_count);
q_space = efx->txq_entries - 1 - fill_level;
- if (unlikely(q_space-- <= 0))
- goto stop;
+ if (unlikely(q_space-- <= 0)) {
+ rc = NETDEV_TX_BUSY;
+ goto unwind;
+ }
smp_mb();
- --tx_queue->stopped;
+ netif_tx_start_queue(tx_queue->core_txq);
}
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -308,13 +265,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Mark the packet as transmitted, and free the SKB ourselves */
dev_kfree_skb_any(skb);
- goto unwind;
-
- stop:
- rc = NETDEV_TX_BUSY;
-
- if (tx_queue->stopped == 1)
- efx_stop_queue(tx_queue->channel);
unwind:
/* Work backwards until we hit the original insert pointer value */
@@ -386,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
+ unsigned index, type;
if (unlikely(efx->port_inhibited))
return NETDEV_TX_BUSY;
- tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
- skb->ip_summed == CHECKSUM_PARTIAL ?
- EFX_TXQ_TYPE_OFFLOAD : 0);
+ index = skb_get_queue_mapping(skb);
+ type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+ if (index >= efx->n_tx_channels) {
+ index -= efx->n_tx_channels;
+ type |= EFX_TXQ_TYPE_HIGHPRI;
+ }
+ tx_queue = efx_get_tx_queue(efx, index, type);
return efx_enqueue_skb(tx_queue, skb);
}
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+
+ /* Must be inverse of queue lookup in efx_hard_start_xmit() */
+ tx_queue->core_txq =
+ netdev_get_tx_queue(efx->net_dev,
+ tx_queue->queue / EFX_TXQ_TYPES +
+ ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ unsigned tc;
+ int rc;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+ return -EINVAL;
+
+ if (num_tc == net_dev->num_tc)
+ return 0;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+ net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+ }
+
+ if (num_tc > net_dev->num_tc) {
+ /* Initialise high-priority queues as necessary */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_possible_channel_tx_queue(tx_queue,
+ channel) {
+ if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+ continue;
+ if (!tx_queue->buffer) {
+ rc = efx_probe_tx_queue(tx_queue);
+ if (rc)
+ return rc;
+ }
+ if (!tx_queue->initialised)
+ efx_init_tx_queue(tx_queue);
+ efx_init_tx_queue_core_txq(tx_queue);
+ }
+ }
+ } else {
+ /* Reduce number of classes before number of queues */
+ net_dev->num_tc = num_tc;
+ }
+
+ rc = netif_set_real_num_tx_queues(net_dev,
+ max_t(int, num_tc, 1) *
+ efx->n_tx_channels);
+ if (rc)
+ return rc;
+
+ /* Do not destroy high-priority queues when they become
+ * unused. We would have to flush them first, and it is
+ * fairly difficult to flush a subset of TX queues. Leave
+ * it to efx_fini_channels().
+ */
+
+ net_dev->num_tc = num_tc;
+ return 0;
+}
+
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned fill_level;
@@ -407,22 +431,25 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
efx_dequeue_buffers(tx_queue, index);
/* See if we need to restart the netif queue. This barrier
- * separates the update of read_count from the test of
- * stopped. */
+ * separates the update of read_count from the test of the
+ * queue state. */
smp_mb();
- if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
+ if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+ likely(efx->port_enabled)) {
fill_level = tx_queue->insert_count - tx_queue->read_count;
if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
+ netif_tx_wake_queue(tx_queue->core_txq);
+ }
+ }
- /* Do this under netif_tx_lock(), to avoid racing
- * with efx_xmit(). */
- netif_tx_lock(efx->net_dev);
- if (tx_queue->stopped) {
- tx_queue->stopped = 0;
- efx_wake_queue(tx_queue->channel);
- }
- netif_tx_unlock(efx->net_dev);
+ /* Check whether the hardware queue is now empty */
+ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+ tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ if (tx_queue->read_count == tx_queue->old_write_count) {
+ smp_mb();
+ tx_queue->empty_read_count =
+ tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
}
}
}
@@ -470,12 +497,15 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->insert_count = 0;
tx_queue->write_count = 0;
+ tx_queue->old_write_count = 0;
tx_queue->read_count = 0;
tx_queue->old_read_count = 0;
- BUG_ON(tx_queue->stopped);
+ tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
/* Set up TX descriptor ring */
efx_nic_init_tx(tx_queue);
+
+ tx_queue->initialised = true;
}
void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -498,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
+ if (!tx_queue->initialised)
+ return;
+
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"shutting down TX queue %d\n", tx_queue->queue);
+ tx_queue->initialised = false;
+
/* Flush TX queue, remove descriptor ring */
efx_nic_fini_tx(tx_queue);
@@ -508,16 +543,13 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
/* Free up TSO header cache */
efx_fini_tso(tx_queue);
-
- /* Release queue's stop on port, if any */
- if (tx_queue->stopped) {
- tx_queue->stopped = 0;
- efx_wake_queue(tx_queue->channel);
- }
}
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
+ if (!tx_queue->buffer)
+ return;
+
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"destroying TX queue %d\n", tx_queue->queue);
efx_nic_remove_tx(tx_queue);
@@ -755,12 +787,12 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
* since the xmit path last checked. Update
* the xmit path's copy of read_count.
*/
- ++tx_queue->stopped;
+ netif_tx_stop_queue(tx_queue->core_txq);
/* This memory barrier protects the change of
- * stopped from the access of read_count. */
+ * queue state from the access of read_count. */
smp_mb();
tx_queue->old_read_count =
- *(volatile unsigned *)&tx_queue->read_count;
+ ACCESS_ONCE(tx_queue->read_count);
fill_level = (tx_queue->insert_count
- tx_queue->old_read_count);
q_space = efx->txq_entries - 1 - fill_level;
@@ -769,7 +801,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
return 1;
}
smp_mb();
- --tx_queue->stopped;
+ netif_tx_start_queue(tx_queue->core_txq);
}
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -1109,8 +1141,10 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
while (1) {
rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
- if (unlikely(rc))
- goto stop;
+ if (unlikely(rc)) {
+ rc2 = NETDEV_TX_BUSY;
+ goto unwind;
+ }
/* Move onto the next fragment? */
if (state.in_len == 0) {
@@ -1139,14 +1173,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
netif_err(efx, tx_err, efx->net_dev,
"Out of memory for TSO headers, or PCI mapping error\n");
dev_kfree_skb_any(skb);
- goto unwind;
-
- stop:
- rc2 = NETDEV_TX_BUSY;
-
- /* Stop the queue if it wasn't stopped before. */
- if (tx_queue->stopped == 1)
- efx_stop_queue(tx_queue->channel);
unwind:
/* Free the DMA mapping we were in the process of writing out */
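The tx.c rework above drops the driver-private stopped counter and the efx_stop_queue()/efx_wake_queue() helpers in favour of the core per-queue state (tx_queue->core_txq, set up by efx_init_tx_queue_core_txq()), with efx_setup_tc() mapping traffic classes onto the extra high-priority queues. A condensed sketch of the resulting stop/wake handshake between the xmit and completion paths (field names as in the hunks above; this is a paraphrase of the patch, not the literal code):

	/* Producer (xmit) side: on apparent exhaustion, stop the core queue,
	 * re-sample read_count across a barrier, and restart the queue if
	 * space turned out to be available after all. */
	netif_tx_stop_queue(tx_queue->core_txq);
	smp_mb();
	tx_queue->old_read_count = ACCESS_ONCE(tx_queue->read_count);
	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	if (fill_level < efx->txq_entries - 1) {
		smp_mb();
		netif_tx_start_queue(tx_queue->core_txq);
	}

	/* Consumer (completion) side: after read_count advances, wake the
	 * core queue once the fill level drops below the threshold. */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    tx_queue->insert_count - tx_queue->read_count <
					EFX_TXQ_THRESHOLD(efx))
		netif_tx_wake_queue(tx_queue->core_txq);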
diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/sfc/txc43128_phy.c
index 351794a79215..d9886addcc99 100644
--- a/drivers/net/sfc/txc43128_phy.c
+++ b/drivers/net/sfc/txc43128_phy.c
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -193,7 +193,7 @@ static int txc_reset_phy(struct efx_nic *efx)
goto fail;
/* Check that all the MMDs we expect are present and responding. */
- rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS, 0);
+ rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
if (rc < 0)
goto fail;
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index e0d63083c3a8..e4dd3a7f304b 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -1,6 +1,6 @@
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 50259dfec583..e9e7a530552c 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -32,35 +32,40 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/ethtool.h>
#include <asm/cacheflush.h>
#include "sh_eth.h"
+#define SH_ETH_DEF_MSG_ENABLE \
+ (NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_RX_ERR| \
+ NETIF_MSG_TX_ERR)
+
/* There is CPU dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT 1
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- u32 ioaddr = ndev->base_addr;
if (mdp->duplex) /* Full */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
else /* Half */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}
static void sh_eth_set_rate(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- u32 ioaddr = ndev->base_addr;
switch (mdp->speed) {
case 10: /* 10BASE */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
break;
case 100:/* 100BASE */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
break;
default:
break;
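The SH_ETH_DEF_MSG_ENABLE mask added above feeds the standard netif_msg_* machinery: later hunks store it in the private msg_enable field (adjustable through the new ethtool get/set_msglevel hooks) and gate each diagnostic printout on the corresponding bit. A two-line sketch of the pattern as used throughout this patch:

	/* Probe time: default verbosity; "mdp" is the per-device private data. */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* Error path: only log when the RX-error class is enabled. */
	if (netif_msg_rx_err(mdp))
		dev_err(&ndev->dev, "Receive FIFO Overflow\n");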
@@ -89,29 +94,28 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
-#define SH_ETH_RESET_DEFAULT 1
+#define SH_ETH_HAS_BOTH_MODULES 1
+#define SH_ETH_HAS_TSU 1
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- u32 ioaddr = ndev->base_addr;
if (mdp->duplex) /* Full */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
else /* Half */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}
static void sh_eth_set_rate(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- u32 ioaddr = ndev->base_addr;
switch (mdp->speed) {
case 10: /* 10BASE */
- ctrl_outl(0, ioaddr + RTRATE);
+ sh_eth_write(ndev, 0, RTRATE);
break;
case 100:/* 100BASE */
- ctrl_outl(1, ioaddr + RTRATE);
+ sh_eth_write(ndev, 1, RTRATE);
break;
default:
break;
@@ -138,24 +142,154 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.no_ade = 1,
};
+#define SH_GIGA_ETH_BASE 0xfee00000
+#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
+#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
+static void sh_eth_chip_reset_giga(struct net_device *ndev)
+{
+ int i;
+ unsigned long mahr[2], malr[2];
+
+ /* save MAHR and MALR */
+ for (i = 0; i < 2; i++) {
+ malr[i] = readl(GIGA_MALR(i));
+ mahr[i] = readl(GIGA_MAHR(i));
+ }
+
+ /* reset device */
+ writel(ARSTR_ARSTR, SH_GIGA_ETH_BASE + 0x1800);
+ mdelay(1);
+
+ /* restore MAHR and MALR */
+ for (i = 0; i < 2; i++) {
+ writel(malr[i], GIGA_MALR(i));
+ writel(mahr[i], GIGA_MAHR(i));
+ }
+}
+
+static int sh_eth_is_gether(struct sh_eth_private *mdp);
+static void sh_eth_reset(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int cnt = 100;
+
+ if (sh_eth_is_gether(mdp)) {
+ sh_eth_write(ndev, 0x03, EDSR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
+ EDMR);
+ while (cnt > 0) {
+ if (!(sh_eth_read(ndev, EDMR) & 0x3))
+ break;
+ mdelay(1);
+ cnt--;
+ }
+		if (cnt <= 0)
+			printk(KERN_ERR "Device reset failed\n");
+
+ /* Table Init */
+ sh_eth_write(ndev, 0x0, TDLAR);
+ sh_eth_write(ndev, 0x0, TDFAR);
+ sh_eth_write(ndev, 0x0, TDFXR);
+ sh_eth_write(ndev, 0x0, TDFFR);
+ sh_eth_write(ndev, 0x0, RDLAR);
+ sh_eth_write(ndev, 0x0, RDFAR);
+ sh_eth_write(ndev, 0x0, RDFXR);
+ sh_eth_write(ndev, 0x0, RDFFR);
+ } else {
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
+ EDMR);
+ mdelay(3);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
+ EDMR);
+ }
+}
+
+static void sh_eth_set_duplex_giga(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (mdp->duplex) /* Full */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+ else /* Half */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate_giga(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ sh_eth_write(ndev, 0x00000000, GECMR);
+ break;
+ case 100:/* 100BASE */
+ sh_eth_write(ndev, 0x00000010, GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ sh_eth_write(ndev, 0x00000020, GECMR);
+ break;
+ default:
+ break;
+ }
+}
+
+/* SH7757(GETHERC) */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
+ .chip_reset = sh_eth_chip_reset_giga,
+ .set_duplex = sh_eth_set_duplex_giga,
+ .set_rate = sh_eth_set_rate_giga,
+
+ .ecsr_value = ECSR_ICD | ECSR_MPD,
+ .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+ EESR_ECI,
+ .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+ EESR_TFE,
+ .fdr_value = 0x0000072f,
+ .rmcr_value = 0x00000001,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .bculr = 1,
+ .hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
+ .no_trimd = 1,
+ .no_ade = 1,
+};
+
+static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
+{
+ if (sh_eth_is_gether(mdp))
+ return &sh_eth_my_cpu_data_giga;
+ else
+ return &sh_eth_my_cpu_data;
+}
+
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU 1
static void sh_eth_chip_reset(struct net_device *ndev)
{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
/* reset device */
- ctrl_outl(ARSTR_ARSTR, ARSTR);
+ sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
mdelay(1);
}
static void sh_eth_reset(struct net_device *ndev)
{
- u32 ioaddr = ndev->base_addr;
int cnt = 100;
- ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
- ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+ sh_eth_write(ndev, EDSR_ENALL, EDSR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
while (cnt > 0) {
- if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
+ if (!(sh_eth_read(ndev, EDMR) & 0x3))
break;
mdelay(1);
cnt--;
@@ -164,41 +298,39 @@ static void sh_eth_reset(struct net_device *ndev)
printk(KERN_ERR "Device reset fail\n");
/* Table Init */
- ctrl_outl(0x0, ioaddr + TDLAR);
- ctrl_outl(0x0, ioaddr + TDFAR);
- ctrl_outl(0x0, ioaddr + TDFXR);
- ctrl_outl(0x0, ioaddr + TDFFR);
- ctrl_outl(0x0, ioaddr + RDLAR);
- ctrl_outl(0x0, ioaddr + RDFAR);
- ctrl_outl(0x0, ioaddr + RDFXR);
- ctrl_outl(0x0, ioaddr + RDFFR);
+ sh_eth_write(ndev, 0x0, TDLAR);
+ sh_eth_write(ndev, 0x0, TDFAR);
+ sh_eth_write(ndev, 0x0, TDFXR);
+ sh_eth_write(ndev, 0x0, TDFFR);
+ sh_eth_write(ndev, 0x0, RDLAR);
+ sh_eth_write(ndev, 0x0, RDFAR);
+ sh_eth_write(ndev, 0x0, RDFXR);
+ sh_eth_write(ndev, 0x0, RDFFR);
}
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- u32 ioaddr = ndev->base_addr;
if (mdp->duplex) /* Full */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
else /* Half */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}
static void sh_eth_set_rate(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- u32 ioaddr = ndev->base_addr;
switch (mdp->speed) {
case 10: /* 10BASE */
- ctrl_outl(GECMR_10, ioaddr + GECMR);
+ sh_eth_write(ndev, GECMR_10, GECMR);
break;
case 100:/* 100BASE */
- ctrl_outl(GECMR_100, ioaddr + GECMR);
+ sh_eth_write(ndev, GECMR_100, GECMR);
break;
case 1000: /* 1000BASE */
- ctrl_outl(GECMR_1000, ioaddr + GECMR);
+ sh_eth_write(ndev, GECMR_1000, GECMR);
break;
default:
break;
@@ -229,6 +361,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.hw_swap = 1,
.no_trimd = 1,
.no_ade = 1,
+ .tsu = 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
@@ -246,6 +379,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
#define SH_ETH_HAS_TSU 1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .tsu = 1,
};
#endif
@@ -281,11 +415,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
/* Chip Reset */
static void sh_eth_reset(struct net_device *ndev)
{
- u32 ioaddr = ndev->base_addr;
-
- ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
mdelay(3);
- ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
}
#endif
@@ -334,13 +466,11 @@ static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
*/
static void update_mac_address(struct net_device *ndev)
{
- u32 ioaddr = ndev->base_addr;
-
- ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
- (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
- ioaddr + MAHR);
- ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
- ioaddr + MALR);
+ sh_eth_write(ndev,
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ sh_eth_write(ndev,
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
/*
@@ -353,21 +483,36 @@ static void update_mac_address(struct net_device *ndev)
*/
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
- u32 ioaddr = ndev->base_addr;
-
if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
memcpy(ndev->dev_addr, mac, 6);
} else {
- ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
- ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
- ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
- ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
- ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
- ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
+ ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
+ ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
+ ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
+ ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
+ ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
+ ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
}
}
+static int sh_eth_is_gether(struct sh_eth_private *mdp)
+{
+ if (mdp->reg_offset == sh_eth_offset_gigabit)
+ return 1;
+ else
+ return 0;
+}
+
+static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
+{
+ if (sh_eth_is_gether(mdp))
+ return EDTRR_TRNS_GETHER;
+ else
+ return EDTRR_TRNS_ETHER;
+}
+
struct bb_info {
+ void (*set_gate)(unsigned long addr);
struct mdiobb_ctrl ctrl;
u32 addr;
u32 mmd_msk;/* MMD */
@@ -379,25 +524,29 @@ struct bb_info {
/* PHY bit set */
static void bb_set(u32 addr, u32 msk)
{
- ctrl_outl(ctrl_inl(addr) | msk, addr);
+ writel(readl(addr) | msk, addr);
}
/* PHY bit clear */
static void bb_clr(u32 addr, u32 msk)
{
- ctrl_outl((ctrl_inl(addr) & ~msk), addr);
+ writel((readl(addr) & ~msk), addr);
}
/* PHY bit read */
static int bb_read(u32 addr, u32 msk)
{
- return (ctrl_inl(addr) & msk) != 0;
+ return (readl(addr) & msk) != 0;
}
/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
if (bit)
bb_set(bitbang->addr, bitbang->mmd_msk);
else
@@ -409,6 +558,9 @@ static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
if (bit)
bb_set(bitbang->addr, bitbang->mdo_msk);
else
@@ -419,6 +571,10 @@ static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
return bb_read(bitbang->addr, bitbang->mdi_msk);
}
@@ -427,6 +583,9 @@ static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+ if (bitbang->set_gate)
+ bitbang->set_gate(bitbang->addr);
+
if (bit)
bb_set(bitbang->addr, bitbang->mdc_msk);
else
@@ -470,7 +629,6 @@ static void sh_eth_ring_free(struct net_device *ndev)
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
- u32 ioaddr = ndev->base_addr;
struct sh_eth_private *mdp = netdev_priv(ndev);
int i;
struct sk_buff *skb;
@@ -506,10 +664,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
/* Rx descriptor address set */
if (i == 0) {
- ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR);
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR);
-#endif
+ sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
+ if (sh_eth_is_gether(mdp))
+ sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
}
}
@@ -528,10 +685,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
txdesc->buffer_length = 0;
if (i == 0) {
/* Tx descriptor address set */
- ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR);
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR);
-#endif
+ sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
+ if (sh_eth_is_gether(mdp))
+ sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
}
}
@@ -613,7 +769,6 @@ static int sh_eth_dev_init(struct net_device *ndev)
{
int ret = 0;
struct sh_eth_private *mdp = netdev_priv(ndev);
- u32 ioaddr = ndev->base_addr;
u_int32_t rx_int_var, tx_int_var;
u32 val;
@@ -623,71 +778,71 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* Descriptor format */
sh_eth_ring_format(ndev);
if (mdp->cd->rpadir)
- ctrl_outl(mdp->cd->rpadir_value, ioaddr + RPADIR);
+ sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
/* all sh_eth int mask */
- ctrl_outl(0, ioaddr + EESIPR);
+ sh_eth_write(ndev, 0, EESIPR);
#if defined(__LITTLE_ENDIAN__)
if (mdp->cd->hw_swap)
- ctrl_outl(EDMR_EL, ioaddr + EDMR);
+ sh_eth_write(ndev, EDMR_EL, EDMR);
else
#endif
- ctrl_outl(0, ioaddr + EDMR);
+ sh_eth_write(ndev, 0, EDMR);
/* FIFO size set */
- ctrl_outl(mdp->cd->fdr_value, ioaddr + FDR);
- ctrl_outl(0, ioaddr + TFTR);
+ sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
+ sh_eth_write(ndev, 0, TFTR);
/* Frame recv control */
- ctrl_outl(mdp->cd->rmcr_value, ioaddr + RMCR);
+ sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
- ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);
+ sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
if (mdp->cd->bculr)
- ctrl_outl(0x800, ioaddr + BCULR); /* Burst sycle set */
+		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */
- ctrl_outl(mdp->cd->fcftr_value, ioaddr + FCFTR);
+ sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
if (!mdp->cd->no_trimd)
- ctrl_outl(0, ioaddr + TRIMD);
+ sh_eth_write(ndev, 0, TRIMD);
/* Recv frame limit set register */
- ctrl_outl(RFLR_VALUE, ioaddr + RFLR);
+ sh_eth_write(ndev, RFLR_VALUE, RFLR);
- ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
- ctrl_outl(mdp->cd->eesipr_value, ioaddr + EESIPR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
/* PAUSE Prohibition */
- val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
+ val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
- ctrl_outl(val, ioaddr + ECMR);
+ sh_eth_write(ndev, val, ECMR);
if (mdp->cd->set_rate)
mdp->cd->set_rate(ndev);
/* E-MAC Status Register clear */
- ctrl_outl(mdp->cd->ecsr_value, ioaddr + ECSR);
+ sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
/* E-MAC Interrupt Enable register */
- ctrl_outl(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
+ sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
/* Set MAC address */
update_mac_address(ndev);
/* mask reset */
if (mdp->cd->apr)
- ctrl_outl(APR_AP, ioaddr + APR);
+ sh_eth_write(ndev, APR_AP, APR);
if (mdp->cd->mpr)
- ctrl_outl(MPR_MP, ioaddr + MPR);
+ sh_eth_write(ndev, MPR_MP, MPR);
if (mdp->cd->tpauser)
- ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
+ sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
/* Setting the Rx mode will start the Rx process. */
- ctrl_outl(EDRRR_R, ioaddr + EDRRR);
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
netif_start_queue(ndev);
@@ -811,24 +966,37 @@ static int sh_eth_rx(struct net_device *ndev)
/* Restart Rx engine if stopped. */
/* If we don't need to check status, don't. -KDU */
- if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
- ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);
+ if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
return 0;
}
+static void sh_eth_rcv_snd_disable(struct net_device *ndev)
+{
+ /* disable tx and rx */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
+ ~(ECMR_RE | ECMR_TE), ECMR);
+}
+
+static void sh_eth_rcv_snd_enable(struct net_device *ndev)
+{
+ /* enable tx and rx */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
+ (ECMR_RE | ECMR_TE), ECMR);
+}
+
/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- u32 ioaddr = ndev->base_addr;
u32 felic_stat;
u32 link_stat;
u32 mask;
if (intr_status & EESR_ECI) {
- felic_stat = ctrl_inl(ioaddr + ECSR);
- ctrl_outl(felic_stat, ioaddr + ECSR); /* clear int */
+ felic_stat = sh_eth_read(ndev, ECSR);
+ sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
if (felic_stat & ECSR_ICD)
mdp->stats.tx_carrier_errors++;
if (felic_stat & ECSR_LCHNG) {
@@ -839,26 +1007,23 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
else
link_stat = PHY_ST_LINK;
} else {
- link_stat = (ctrl_inl(ioaddr + PSR));
+ link_stat = (sh_eth_read(ndev, PSR));
if (mdp->ether_link_active_low)
link_stat = ~link_stat;
}
- if (!(link_stat & PHY_ST_LINK)) {
- /* Link Down : disable tx and rx */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) &
- ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
- } else {
+ if (!(link_stat & PHY_ST_LINK))
+ sh_eth_rcv_snd_disable(ndev);
+ else {
/* Link Up */
- ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
- ~DMAC_M_ECI, ioaddr + EESIPR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
+ ~DMAC_M_ECI, EESIPR);
/*clear int */
- ctrl_outl(ctrl_inl(ioaddr + ECSR),
- ioaddr + ECSR);
- ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
- DMAC_M_ECI, ioaddr + EESIPR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
+ ECSR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
+ DMAC_M_ECI, EESIPR);
/* enable tx and rx */
- ctrl_outl(ctrl_inl(ioaddr + ECMR) |
- (ECMR_RE | ECMR_TE), ioaddr + ECMR);
+ sh_eth_rcv_snd_enable(ndev);
}
}
}
@@ -867,6 +1032,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
/* Write buck end. unused write back interrupt */
if (intr_status & EESR_TABT) /* Transmit Abort int */
mdp->stats.tx_aborted_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Transmit Abort\n");
}
if (intr_status & EESR_RABT) {
@@ -874,28 +1041,47 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (intr_status & EESR_RFRMER) {
/* Receive Frame Overflow int */
mdp->stats.rx_frame_errors++;
- dev_err(&ndev->dev, "Receive Frame Overflow\n");
+ if (netif_msg_rx_err(mdp))
+ dev_err(&ndev->dev, "Receive Abort\n");
}
}
- if (!mdp->cd->no_ade) {
- if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
- intr_status & EESR_TFE)
- mdp->stats.tx_fifo_errors++;
+ if (intr_status & EESR_TDE) {
+ /* Transmit Descriptor Empty int */
+ mdp->stats.tx_fifo_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
+ }
+
+ if (intr_status & EESR_TFE) {
+ /* FIFO under flow */
+ mdp->stats.tx_fifo_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
}
if (intr_status & EESR_RDE) {
/* Receive Descriptor Empty int */
mdp->stats.rx_over_errors++;
- if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
- ctrl_outl(EDRRR_R, ioaddr + EDRRR);
- dev_err(&ndev->dev, "Receive Descriptor Empty\n");
+ if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
+ if (netif_msg_rx_err(mdp))
+ dev_err(&ndev->dev, "Receive Descriptor Empty\n");
}
+
if (intr_status & EESR_RFE) {
/* Receive FIFO Overflow int */
mdp->stats.rx_fifo_errors++;
- dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+ if (netif_msg_rx_err(mdp))
+ dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+ }
+
+ if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
+ /* Address Error */
+ mdp->stats.tx_fifo_errors++;
+ if (netif_msg_tx_err(mdp))
+ dev_err(&ndev->dev, "Address Error\n");
}
mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -903,7 +1089,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
mask &= ~EESR_ADE;
if (intr_status & mask) {
/* Tx error */
- u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
+ u32 edtrr = sh_eth_read(ndev, EDTRR);
/* dmesg */
dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
intr_status, mdp->cur_tx);
@@ -913,9 +1099,9 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
sh_eth_txfree(ndev);
/* SH7712 BUG */
- if (edtrr ^ EDTRR_TRNS) {
+ if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
/* tx dma start */
- ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
+ sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
}
/* wakeup */
netif_wake_queue(ndev);
@@ -928,18 +1114,17 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_cpu_data *cd = mdp->cd;
irqreturn_t ret = IRQ_NONE;
- u32 ioaddr, intr_status = 0;
+ u32 intr_status = 0;
- ioaddr = ndev->base_addr;
spin_lock(&mdp->lock);
/* Get interrpt stat */
- intr_status = ctrl_inl(ioaddr + EESR);
+ intr_status = sh_eth_read(ndev, EESR);
/* Clear interrupt */
if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
cd->tx_check | cd->eesr_err_check)) {
- ctrl_outl(intr_status, ioaddr + EESR);
+ sh_eth_write(ndev, intr_status, EESR);
ret = IRQ_HANDLED;
} else
goto other_irq;
@@ -982,7 +1167,6 @@ static void sh_eth_adjust_link(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct phy_device *phydev = mdp->phydev;
- u32 ioaddr = ndev->base_addr;
int new_state = 0;
if (phydev->link != PHY_DOWN) {
@@ -1000,8 +1184,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
mdp->cd->set_rate(ndev);
}
if (mdp->link == PHY_DOWN) {
- ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
- | ECMR_DM, ioaddr + ECMR);
+ sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF)
+ | ECMR_DM, ECMR);
new_state = 1;
mdp->link = phydev->link;
}
@@ -1012,7 +1196,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
mdp->duplex = -1;
}
- if (new_state)
+ if (new_state && netif_msg_link(mdp))
phy_print_status(phydev);
}
@@ -1032,7 +1216,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
/* Try connect to PHY */
phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
- 0, PHY_INTERFACE_MODE_MII);
+ 0, mdp->phy_interface);
if (IS_ERR(phydev)) {
dev_err(&ndev->dev, "phy_connect failed\n");
return PTR_ERR(phydev);
@@ -1063,6 +1247,131 @@ static int sh_eth_phy_start(struct net_device *ndev)
return 0;
}
+static int sh_eth_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+ ret = phy_ethtool_gset(mdp->phydev, ecmd);
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ return ret;
+}
+
+static int sh_eth_set_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+
+ /* disable tx and rx */
+ sh_eth_rcv_snd_disable(ndev);
+
+ ret = phy_ethtool_sset(mdp->phydev, ecmd);
+ if (ret)
+ goto error_exit;
+
+ if (ecmd->duplex == DUPLEX_FULL)
+ mdp->duplex = 1;
+ else
+ mdp->duplex = 0;
+
+ if (mdp->cd->set_duplex)
+ mdp->cd->set_duplex(ndev);
+
+error_exit:
+ mdelay(1);
+
+ /* enable tx and rx */
+ sh_eth_rcv_snd_enable(ndev);
+
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ return ret;
+}
+
+static int sh_eth_nway_reset(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&mdp->lock, flags);
+ ret = phy_start_aneg(mdp->phydev);
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
+ return ret;
+}
+
+static u32 sh_eth_get_msglevel(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ return mdp->msg_enable;
+}
+
+static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ mdp->msg_enable = value;
+}
+
+static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_current", "tx_current",
+ "rx_dirty", "tx_dirty",
+};
+#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
+
+static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return SH_ETH_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void sh_eth_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i = 0;
+
+ /* device-specific stats */
+ data[i++] = mdp->cur_rx;
+ data[i++] = mdp->cur_tx;
+ data[i++] = mdp->dirty_rx;
+ data[i++] = mdp->dirty_tx;
+}
+
+static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *sh_eth_gstrings_stats,
+ sizeof(sh_eth_gstrings_stats));
+ break;
+ }
+}
+
+static struct ethtool_ops sh_eth_ethtool_ops = {
+ .get_settings = sh_eth_get_settings,
+ .set_settings = sh_eth_set_settings,
+ .nway_reset = sh_eth_nway_reset,
+ .get_msglevel = sh_eth_get_msglevel,
+ .set_msglevel = sh_eth_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = sh_eth_get_strings,
+ .get_ethtool_stats = sh_eth_get_ethtool_stats,
+ .get_sset_count = sh_eth_get_sset_count,
+};
+
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
@@ -1073,8 +1382,8 @@ static int sh_eth_open(struct net_device *ndev)
ret = request_irq(ndev->irq, sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
- defined(CONFIG_CPU_SUBTYPE_SH7764) || \
- defined(CONFIG_CPU_SUBTYPE_SH7757)
+ defined(CONFIG_CPU_SUBTYPE_SH7764) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7757)
IRQF_SHARED,
#else
0,
@@ -1117,15 +1426,14 @@ out_free_irq:
static void sh_eth_tx_timeout(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- u32 ioaddr = ndev->base_addr;
struct sh_eth_rxdesc *rxdesc;
int i;
netif_stop_queue(ndev);
- /* worning message out. */
- printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
- " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));
+ if (netif_msg_timer(mdp))
+ dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
+ " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
/* tx_errors count up */
mdp->stats.tx_errors++;
@@ -1167,6 +1475,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_lock_irqsave(&mdp->lock, flags);
if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
if (!sh_eth_txfree(ndev)) {
+ if (netif_msg_tx_queued(mdp))
+ dev_warn(&ndev->dev, "TxFD exhausted.\n");
netif_stop_queue(ndev);
spin_unlock_irqrestore(&mdp->lock, flags);
return NETDEV_TX_BUSY;
@@ -1196,8 +1506,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
mdp->cur_tx++;
- if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
- ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
+ if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
+ sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
return NETDEV_TX_OK;
}
@@ -1206,17 +1516,16 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static int sh_eth_close(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- u32 ioaddr = ndev->base_addr;
int ringsize;
netif_stop_queue(ndev);
/* Disable interrupts by clearing the interrupt mask. */
- ctrl_outl(0x0000, ioaddr + EESIPR);
+ sh_eth_write(ndev, 0x0000, EESIPR);
/* Stop the chip's Tx and Rx processes. */
- ctrl_outl(0, ioaddr + EDTRR);
- ctrl_outl(0, ioaddr + EDRRR);
+ sh_eth_write(ndev, 0, EDTRR);
+ sh_eth_write(ndev, 0, EDRRR);
/* PHY Disconnect */
if (mdp->phydev) {
@@ -1247,25 +1556,24 @@ static int sh_eth_close(struct net_device *ndev)
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- u32 ioaddr = ndev->base_addr;
pm_runtime_get_sync(&mdp->pdev->dev);
- mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
- ctrl_outl(0, ioaddr + TROCR); /* (write clear) */
- mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
- ctrl_outl(0, ioaddr + CDCR); /* (write clear) */
- mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
- ctrl_outl(0, ioaddr + LCCR); /* (write clear) */
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */
- ctrl_outl(0, ioaddr + CERCR); /* (write clear) */
- mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */
- ctrl_outl(0, ioaddr + CEECR); /* (write clear) */
-#else
- mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
- ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */
-#endif
+ mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
+ sh_eth_write(ndev, 0, TROCR); /* (write clear) */
+ mdp->stats.collisions += sh_eth_read(ndev, CDCR);
+ sh_eth_write(ndev, 0, CDCR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
+ sh_eth_write(ndev, 0, LCCR); /* (write clear) */
+ if (sh_eth_is_gether(mdp)) {
+ mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
+ sh_eth_write(ndev, 0, CERCR); /* (write clear) */
+ mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
+ sh_eth_write(ndev, 0, CEECR); /* (write clear) */
+ } else {
+ mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
+ sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
+ }
pm_runtime_put_sync(&mdp->pdev->dev);
return &mdp->stats;
@@ -1291,48 +1599,46 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
/* Multicast reception directions set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
- u32 ioaddr = ndev->base_addr;
-
if (ndev->flags & IFF_PROMISC) {
/* Set promiscuous. */
- ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
- ioaddr + ECMR);
+ sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
+ ECMR_PRM, ECMR);
} else {
/* Normal, unicast/broadcast-only mode. */
- ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
- ioaddr + ECMR);
+ sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
+ ECMR_MCT, ECMR);
}
}
+#endif /* SH_ETH_HAS_TSU */
/* SuperH's TSU register init function */
-static void sh_eth_tsu_init(u32 ioaddr)
-{
- ctrl_outl(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */
- ctrl_outl(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */
- ctrl_outl(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */
- ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
- ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
- ctrl_outl(0, ioaddr + TSU_PRISL0);
- ctrl_outl(0, ioaddr + TSU_PRISL1);
- ctrl_outl(0, ioaddr + TSU_FWSL0);
- ctrl_outl(0, ioaddr + TSU_FWSL1);
- ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
- ctrl_outl(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */
- ctrl_outl(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */
-#else
- ctrl_outl(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
- ctrl_outl(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
-#endif
- ctrl_outl(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
- ctrl_outl(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
- ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
- ctrl_outl(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */
- ctrl_outl(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */
- ctrl_outl(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */
- ctrl_outl(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */
+static void sh_eth_tsu_init(struct sh_eth_private *mdp)
+{
+ sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
+ sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
+ sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
+ sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
+ sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
+ sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
+ sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
+ sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
+ sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
+ sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
+ if (sh_eth_is_gether(mdp)) {
+ sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
+ sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
+ } else {
+ sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
+ sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
+ }
+ sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */
+ sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */
+ sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+ sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
+ sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
+ sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
+ sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
}
-#endif /* SH_ETH_HAS_TSU */
/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
@@ -1355,7 +1661,8 @@ static int sh_mdio_release(struct net_device *ndev)
}
/* MDIO bus init function */
-static int sh_mdio_init(struct net_device *ndev, int id)
+static int sh_mdio_init(struct net_device *ndev, int id,
+ struct sh_eth_plat_data *pd)
{
int ret, i;
struct bb_info *bitbang;
@@ -1369,7 +1676,8 @@ static int sh_mdio_init(struct net_device *ndev, int id)
}
/* bitbang init */
- bitbang->addr = ndev->base_addr + PIR;
+ bitbang->addr = ndev->base_addr + mdp->reg_offset[PIR];
+ bitbang->set_gate = pd->set_mdio_gate;
bitbang->mdi_msk = 0x08;
bitbang->mdo_msk = 0x04;
bitbang->mmd_msk = 0x02;/* MMD */
@@ -1420,6 +1728,28 @@ out:
return ret;
}
+static const u16 *sh_eth_get_register_offset(int register_type)
+{
+ const u16 *reg_offset = NULL;
+
+ switch (register_type) {
+ case SH_ETH_REG_GIGABIT:
+ reg_offset = sh_eth_offset_gigabit;
+ break;
+ case SH_ETH_REG_FAST_SH4:
+ reg_offset = sh_eth_offset_fast_sh4;
+ break;
+ case SH_ETH_REG_FAST_SH3_SH2:
+ reg_offset = sh_eth_offset_fast_sh3_sh2;
+ break;
+ default:
+ printk(KERN_ERR "Unknown register type (%d)\n", register_type);
+ break;
+ }
+
+ return reg_offset;
+}
+
static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_open = sh_eth_open,
.ndo_stop = sh_eth_close,
@@ -1486,19 +1816,28 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
/* get PHY ID */
mdp->phy_id = pd->phy;
+ mdp->phy_interface = pd->phy_interface;
/* EDMAC endian */
mdp->edmac_endian = pd->edmac_endian;
mdp->no_ether_link = pd->no_ether_link;
mdp->ether_link_active_low = pd->ether_link_active_low;
+ mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
/* set cpu data */
+#if defined(SH_ETH_HAS_BOTH_MODULES)
+ mdp->cd = sh_eth_get_cpu_data(mdp);
+#else
mdp->cd = &sh_eth_my_cpu_data;
+#endif
sh_eth_set_default_cpu_data(mdp->cd);
/* set function */
ndev->netdev_ops = &sh_eth_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
ndev->watchdog_timeo = TX_TIMEOUT;
+ /* debug message level */
+ mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
mdp->post_rx = POST_RX >> (devno << 1);
mdp->post_fw = POST_FW >> (devno << 1);
@@ -1507,13 +1846,23 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* First device only init */
if (!devno) {
+ if (mdp->cd->tsu) {
+ struct resource *rtsu;
+ rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!rtsu) {
+				dev_err(&pdev->dev, "TSU resource not found\n");
+ goto out_release;
+ }
+ mdp->tsu_addr = ioremap(rtsu->start,
+ resource_size(rtsu));
+ }
if (mdp->cd->chip_reset)
mdp->cd->chip_reset(ndev);
-#if defined(SH_ETH_HAS_TSU)
- /* TSU init (Init only)*/
- sh_eth_tsu_init(SH_TSU_ADDR);
-#endif
+ if (mdp->cd->tsu) {
+ /* TSU init (Init only)*/
+ sh_eth_tsu_init(mdp);
+ }
}
/* network device register */
@@ -1522,7 +1871,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
goto out_release;
/* mdio bus init */
- ret = sh_mdio_init(ndev, pdev->id);
+ ret = sh_mdio_init(ndev, pdev->id, pd);
if (ret)
goto out_unregister;
@@ -1539,6 +1888,8 @@ out_unregister:
out_release:
/* net_dev free */
+ if (mdp->tsu_addr)
+ iounmap(mdp->tsu_addr);
if (ndev)
free_netdev(ndev);
@@ -1549,10 +1900,11 @@ out:
static int sh_eth_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ iounmap(mdp->tsu_addr);
sh_mdio_release(ndev);
unregister_netdev(ndev);
- flush_scheduled_work();
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
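Most of the sh_eth.c churn above converts ctrl_inl()/ctrl_outl() on hard-coded offsets into sh_eth_read()/sh_eth_write() accessors indexed by the per-SoC offset tables that sh_eth.h introduces below (selected at probe time by sh_eth_get_register_offset()). A minimal sketch of the assumed shape of those accessors; the real definitions live in the header and may differ in detail:

	/* Assumed shape: a register "name" (enum value) indexes the offset
	 * table chosen for this SoC, so one code path serves chips whose
	 * register maps differ. */
	static inline void sh_eth_write(struct net_device *ndev, u32 data,
					int enum_index)
	{
		struct sh_eth_private *mdp = netdev_priv(ndev);

		writel(data, (void __iomem *)(ndev->base_addr +
					      mdp->reg_offset[enum_index]));
	}

	static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
	{
		struct sh_eth_private *mdp = netdev_priv(ndev);

		return readl((void __iomem *)(ndev->base_addr +
					      mdp->reg_offset[enum_index]));
	}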
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index 8b47763958f2..c3048a6ba676 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -2,7 +2,7 @@
* SuperH Ethernet device driver
*
* Copyright (C) 2006-2008 Nobuhiro Iwamatsu
- * Copyright (C) 2008-2009 Renesas Solutions Corp.
+ * Copyright (C) 2008-2011 Renesas Solutions Corp.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
-#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
@@ -39,278 +38,340 @@
#define ETHERSMALL 60
#define PKT_BUF_SZ 1538
-#if defined(CONFIG_CPU_SUBTYPE_SH7763)
-/* This CPU register maps is very difference by other SH4 CPU */
-
-/* Chip Base Address */
-# define SH_TSU_ADDR 0xFEE01800
-# define ARSTR SH_TSU_ADDR
-
-/* Chip Registers */
-/* E-DMAC */
-# define EDSR 0x000
-# define EDMR 0x400
-# define EDTRR 0x408
-# define EDRRR 0x410
-# define EESR 0x428
-# define EESIPR 0x430
-# define TDLAR 0x010
-# define TDFAR 0x014
-# define TDFXR 0x018
-# define TDFFR 0x01C
-# define RDLAR 0x030
-# define RDFAR 0x034
-# define RDFXR 0x038
-# define RDFFR 0x03C
-# define TRSCER 0x438
-# define RMFCR 0x440
-# define TFTR 0x448
-# define FDR 0x450
-# define RMCR 0x458
-# define RPADIR 0x460
-# define FCFTR 0x468
-
-/* Ether Register */
-# define ECMR 0x500
-# define ECSR 0x510
-# define ECSIPR 0x518
-# define PIR 0x520
-# define PSR 0x528
-# define PIPR 0x52C
-# define RFLR 0x508
-# define APR 0x554
-# define MPR 0x558
-# define PFTCR 0x55C
-# define PFRCR 0x560
-# define TPAUSER 0x564
-# define GECMR 0x5B0
-# define BCULR 0x5B4
-# define MAHR 0x5C0
-# define MALR 0x5C8
-# define TROCR 0x700
-# define CDCR 0x708
-# define LCCR 0x710
-# define CEFCR 0x740
-# define FRECR 0x748
-# define TSFRCR 0x750
-# define TLFRCR 0x758
-# define RFCR 0x760
-# define CERCR 0x768
-# define CEECR 0x770
-# define MAFCR 0x778
-
-/* TSU Absolute Address */
-# define TSU_CTRST 0x004
-# define TSU_FWEN0 0x010
-# define TSU_FWEN1 0x014
-# define TSU_FCM 0x18
-# define TSU_BSYSL0 0x20
-# define TSU_BSYSL1 0x24
-# define TSU_PRISL0 0x28
-# define TSU_PRISL1 0x2C
-# define TSU_FWSL0 0x30
-# define TSU_FWSL1 0x34
-# define TSU_FWSLC 0x38
-# define TSU_QTAG0 0x40
-# define TSU_QTAG1 0x44
-# define TSU_FWSR 0x50
-# define TSU_FWINMK 0x54
-# define TSU_ADQT0 0x48
-# define TSU_ADQT1 0x4C
-# define TSU_VTAG0 0x58
-# define TSU_VTAG1 0x5C
-# define TSU_ADSBSY 0x60
-# define TSU_TEN 0x64
-# define TSU_POST1 0x70
-# define TSU_POST2 0x74
-# define TSU_POST3 0x78
-# define TSU_POST4 0x7C
-# define TSU_ADRH0 0x100
-# define TSU_ADRL0 0x104
-# define TSU_ADRH31 0x1F8
-# define TSU_ADRL31 0x1FC
-
-# define TXNLCR0 0x80
-# define TXALCR0 0x84
-# define RXNLCR0 0x88
-# define RXALCR0 0x8C
-# define FWNLCR0 0x90
-# define FWALCR0 0x94
-# define TXNLCR1 0xA0
-# define TXALCR1 0xA4
-# define RXNLCR1 0xA8
-# define RXALCR1 0xAC
-# define FWNLCR1 0xB0
-# define FWALCR1 0x40
-
-#elif defined(CONFIG_CPU_SH4) /* #if defined(CONFIG_CPU_SUBTYPE_SH7763) */
-/* EtherC */
-#define ECMR 0x100
-#define RFLR 0x108
-#define ECSR 0x110
-#define ECSIPR 0x118
-#define PIR 0x120
-#define PSR 0x128
-#define RDMLR 0x140
-#define IPGR 0x150
-#define APR 0x154
-#define MPR 0x158
-#define TPAUSER 0x164
-#define RFCF 0x160
-#define TPAUSECR 0x168
-#define BCFRR 0x16c
-#define MAHR 0x1c0
-#define MALR 0x1c8
-#define TROCR 0x1d0
-#define CDCR 0x1d4
-#define LCCR 0x1d8
-#define CNDCR 0x1dc
-#define CEFCR 0x1e4
-#define FRECR 0x1e8
-#define TSFRCR 0x1ec
-#define TLFRCR 0x1f0
-#define RFCR 0x1f4
-#define MAFCR 0x1f8
-#define RTRATE 0x1fc
-
-/* E-DMAC */
-#define EDMR 0x000
-#define EDTRR 0x008
-#define EDRRR 0x010
-#define TDLAR 0x018
-#define RDLAR 0x020
-#define EESR 0x028
-#define EESIPR 0x030
-#define TRSCER 0x038
-#define RMFCR 0x040
-#define TFTR 0x048
-#define FDR 0x050
-#define RMCR 0x058
-#define TFUCR 0x064
-#define RFOCR 0x068
-#define FCFTR 0x070
-#define RPADIR 0x078
-#define TRIMD 0x07c
-#define RBWAR 0x0c8
-#define RDFAR 0x0cc
-#define TBRAR 0x0d4
-#define TDFAR 0x0d8
-#else /* #elif defined(CONFIG_CPU_SH4) */
-/* This section is SH3 or SH2 */
-#ifndef CONFIG_CPU_SUBTYPE_SH7619
-/* Chip base address */
-# define SH_TSU_ADDR 0xA7000804
-# define ARSTR 0xA7000800
-#endif
-/* Chip Registers */
-/* E-DMAC */
-# define EDMR 0x0000
-# define EDTRR 0x0004
-# define EDRRR 0x0008
-# define TDLAR 0x000C
-# define RDLAR 0x0010
-# define EESR 0x0014
-# define EESIPR 0x0018
-# define TRSCER 0x001C
-# define RMFCR 0x0020
-# define TFTR 0x0024
-# define FDR 0x0028
-# define RMCR 0x002C
-# define EDOCR 0x0030
-# define FCFTR 0x0034
-# define RPADIR 0x0038
-# define TRIMD 0x003C
-# define RBWAR 0x0040
-# define RDFAR 0x0044
-# define TBRAR 0x004C
-# define TDFAR 0x0050
-
-/* Ether Register */
-# define ECMR 0x0160
-# define ECSR 0x0164
-# define ECSIPR 0x0168
-# define PIR 0x016C
-# define MAHR 0x0170
-# define MALR 0x0174
-# define RFLR 0x0178
-# define PSR 0x017C
-# define TROCR 0x0180
-# define CDCR 0x0184
-# define LCCR 0x0188
-# define CNDCR 0x018C
-# define CEFCR 0x0194
-# define FRECR 0x0198
-# define TSFRCR 0x019C
-# define TLFRCR 0x01A0
-# define RFCR 0x01A4
-# define MAFCR 0x01A8
-# define IPGR 0x01B4
-# if defined(CONFIG_CPU_SUBTYPE_SH7710)
-# define APR 0x01B8
-# define MPR 0x01BC
-# define TPAUSER 0x1C4
-# define BCFR 0x1CC
-# endif /* CONFIG_CPU_SH7710 */
-
-/* TSU */
-# define TSU_CTRST 0x004
-# define TSU_FWEN0 0x010
-# define TSU_FWEN1 0x014
-# define TSU_FCM 0x018
-# define TSU_BSYSL0 0x020
-# define TSU_BSYSL1 0x024
-# define TSU_PRISL0 0x028
-# define TSU_PRISL1 0x02C
-# define TSU_FWSL0 0x030
-# define TSU_FWSL1 0x034
-# define TSU_FWSLC 0x038
-# define TSU_QTAGM0 0x040
-# define TSU_QTAGM1 0x044
-# define TSU_ADQT0 0x048
-# define TSU_ADQT1 0x04C
-# define TSU_FWSR 0x050
-# define TSU_FWINMK 0x054
-# define TSU_ADSBSY 0x060
-# define TSU_TEN 0x064
-# define TSU_POST1 0x070
-# define TSU_POST2 0x074
-# define TSU_POST3 0x078
-# define TSU_POST4 0x07C
-# define TXNLCR0 0x080
-# define TXALCR0 0x084
-# define RXNLCR0 0x088
-# define RXALCR0 0x08C
-# define FWNLCR0 0x090
-# define FWALCR0 0x094
-# define TXNLCR1 0x0A0
-# define TXALCR1 0x0A4
-# define RXNLCR1 0x0A8
-# define RXALCR1 0x0AC
-# define FWNLCR1 0x0B0
-# define FWALCR1 0x0B4
-
-#define TSU_ADRH0 0x0100
-#define TSU_ADRL0 0x0104
-#define TSU_ADRL31 0x01FC
-
-#endif /* CONFIG_CPU_SUBTYPE_SH7763 */
-
-/* There are avoid compile error... */
-#if !defined(BCULR)
-#define BCULR 0x0fc
-#endif
-#if !defined(TRIMD)
-#define TRIMD 0x0fc
-#endif
-#if !defined(APR)
-#define APR 0x0fc
-#endif
-#if !defined(MPR)
-#define MPR 0x0fc
-#endif
-#if !defined(TPAUSER)
-#define TPAUSER 0x0fc
-#endif
+enum {
+ /* E-DMAC registers */
+ EDSR = 0,
+ EDMR,
+ EDTRR,
+ EDRRR,
+ EESR,
+ EESIPR,
+ TDLAR,
+ TDFAR,
+ TDFXR,
+ TDFFR,
+ RDLAR,
+ RDFAR,
+ RDFXR,
+ RDFFR,
+ TRSCER,
+ RMFCR,
+ TFTR,
+ FDR,
+ RMCR,
+ EDOCR,
+ TFUCR,
+ RFOCR,
+ FCFTR,
+ RPADIR,
+ TRIMD,
+ RBWAR,
+ TBRAR,
+
+ /* Ether registers */
+ ECMR,
+ ECSR,
+ ECSIPR,
+ PIR,
+ PSR,
+ RDMLR,
+ PIPR,
+ RFLR,
+ IPGR,
+ APR,
+ MPR,
+ PFTCR,
+ PFRCR,
+ RFCR,
+ RFCF,
+ TPAUSER,
+ TPAUSECR,
+ BCFR,
+ BCFRR,
+ GECMR,
+ BCULR,
+ MAHR,
+ MALR,
+ TROCR,
+ CDCR,
+ LCCR,
+ CNDCR,
+ CEFCR,
+ FRECR,
+ TSFRCR,
+ TLFRCR,
+ CERCR,
+ CEECR,
+ MAFCR,
+ RTRATE,
+
+ /* TSU Absolute address */
+ ARSTR,
+ TSU_CTRST,
+ TSU_FWEN0,
+ TSU_FWEN1,
+ TSU_FCM,
+ TSU_BSYSL0,
+ TSU_BSYSL1,
+ TSU_PRISL0,
+ TSU_PRISL1,
+ TSU_FWSL0,
+ TSU_FWSL1,
+ TSU_FWSLC,
+ TSU_QTAG0,
+ TSU_QTAG1,
+ TSU_QTAGM0,
+ TSU_QTAGM1,
+ TSU_FWSR,
+ TSU_FWINMK,
+ TSU_ADQT0,
+ TSU_ADQT1,
+ TSU_VTAG0,
+ TSU_VTAG1,
+ TSU_ADSBSY,
+ TSU_TEN,
+ TSU_POST1,
+ TSU_POST2,
+ TSU_POST3,
+ TSU_POST4,
+ TSU_ADRH0,
+ TSU_ADRL0,
+ TSU_ADRH31,
+ TSU_ADRL31,
+
+ TXNLCR0,
+ TXALCR0,
+ RXNLCR0,
+ RXALCR0,
+ FWNLCR0,
+ FWALCR0,
+ TXNLCR1,
+ TXALCR1,
+ RXNLCR1,
+ RXALCR1,
+ FWNLCR1,
+ FWALCR1,
+
+	/* This must be the last entry; it gives the size of the register tables. */
+ SH_ETH_MAX_REGISTER_OFFSET,
+};
+
+static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [EDSR] = 0x0000,
+ [EDMR] = 0x0400,
+ [EDTRR] = 0x0408,
+ [EDRRR] = 0x0410,
+ [EESR] = 0x0428,
+ [EESIPR] = 0x0430,
+ [TDLAR] = 0x0010,
+ [TDFAR] = 0x0014,
+ [TDFXR] = 0x0018,
+ [TDFFR] = 0x001c,
+ [RDLAR] = 0x0030,
+ [RDFAR] = 0x0034,
+ [RDFXR] = 0x0038,
+ [RDFFR] = 0x003c,
+ [TRSCER] = 0x0438,
+ [RMFCR] = 0x0440,
+ [TFTR] = 0x0448,
+ [FDR] = 0x0450,
+ [RMCR] = 0x0458,
+ [RPADIR] = 0x0460,
+ [FCFTR] = 0x0468,
+
+ [ECMR] = 0x0500,
+ [ECSR] = 0x0510,
+ [ECSIPR] = 0x0518,
+ [PIR] = 0x0520,
+ [PSR] = 0x0528,
+ [PIPR] = 0x052c,
+ [RFLR] = 0x0508,
+ [APR] = 0x0554,
+ [MPR] = 0x0558,
+ [PFTCR] = 0x055c,
+ [PFRCR] = 0x0560,
+ [TPAUSER] = 0x0564,
+ [GECMR] = 0x05b0,
+ [BCULR] = 0x05b4,
+ [MAHR] = 0x05c0,
+ [MALR] = 0x05c8,
+ [TROCR] = 0x0700,
+ [CDCR] = 0x0708,
+ [LCCR] = 0x0710,
+ [CEFCR] = 0x0740,
+ [FRECR] = 0x0748,
+ [TSFRCR] = 0x0750,
+ [TLFRCR] = 0x0758,
+ [RFCR] = 0x0760,
+ [CERCR] = 0x0768,
+ [CEECR] = 0x0770,
+ [MAFCR] = 0x0778,
+
+ [ARSTR] = 0x0000,
+ [TSU_CTRST] = 0x0004,
+ [TSU_FWEN0] = 0x0010,
+ [TSU_FWEN1] = 0x0014,
+ [TSU_FCM] = 0x0018,
+ [TSU_BSYSL0] = 0x0020,
+ [TSU_BSYSL1] = 0x0024,
+ [TSU_PRISL0] = 0x0028,
+ [TSU_PRISL1] = 0x002c,
+ [TSU_FWSL0] = 0x0030,
+ [TSU_FWSL1] = 0x0034,
+ [TSU_FWSLC] = 0x0038,
+ [TSU_QTAG0] = 0x0040,
+ [TSU_QTAG1] = 0x0044,
+ [TSU_FWSR] = 0x0050,
+ [TSU_FWINMK] = 0x0054,
+ [TSU_ADQT0] = 0x0048,
+ [TSU_ADQT1] = 0x004c,
+ [TSU_VTAG0] = 0x0058,
+ [TSU_VTAG1] = 0x005c,
+ [TSU_ADSBSY] = 0x0060,
+ [TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
+ [TSU_ADRH0] = 0x0100,
+ [TSU_ADRL0] = 0x0104,
+ [TSU_ADRH31] = 0x01f8,
+ [TSU_ADRL31] = 0x01fc,
+
+ [TXNLCR0] = 0x0080,
+ [TXALCR0] = 0x0084,
+ [RXNLCR0] = 0x0088,
+ [RXALCR0] = 0x008c,
+ [FWNLCR0] = 0x0090,
+ [FWALCR0] = 0x0094,
+ [TXNLCR1] = 0x00a0,
+	[TXALCR1]	= 0x00a4,
+ [RXNLCR1] = 0x00a8,
+ [RXALCR1] = 0x00ac,
+ [FWNLCR1] = 0x00b0,
+ [FWALCR1] = 0x00b4,
+};
+
+static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [ECMR] = 0x0100,
+ [RFLR] = 0x0108,
+ [ECSR] = 0x0110,
+ [ECSIPR] = 0x0118,
+ [PIR] = 0x0120,
+ [PSR] = 0x0128,
+ [RDMLR] = 0x0140,
+ [IPGR] = 0x0150,
+ [APR] = 0x0154,
+ [MPR] = 0x0158,
+ [TPAUSER] = 0x0164,
+ [RFCF] = 0x0160,
+ [TPAUSECR] = 0x0168,
+ [BCFRR] = 0x016c,
+ [MAHR] = 0x01c0,
+ [MALR] = 0x01c8,
+ [TROCR] = 0x01d0,
+ [CDCR] = 0x01d4,
+ [LCCR] = 0x01d8,
+ [CNDCR] = 0x01dc,
+ [CEFCR] = 0x01e4,
+ [FRECR] = 0x01e8,
+ [TSFRCR] = 0x01ec,
+ [TLFRCR] = 0x01f0,
+ [RFCR] = 0x01f4,
+ [MAFCR] = 0x01f8,
+ [RTRATE] = 0x01fc,
+
+ [EDMR] = 0x0000,
+ [EDTRR] = 0x0008,
+ [EDRRR] = 0x0010,
+ [TDLAR] = 0x0018,
+ [RDLAR] = 0x0020,
+ [EESR] = 0x0028,
+ [EESIPR] = 0x0030,
+ [TRSCER] = 0x0038,
+ [RMFCR] = 0x0040,
+ [TFTR] = 0x0048,
+ [FDR] = 0x0050,
+ [RMCR] = 0x0058,
+ [TFUCR] = 0x0064,
+ [RFOCR] = 0x0068,
+ [FCFTR] = 0x0070,
+ [RPADIR] = 0x0078,
+ [TRIMD] = 0x007c,
+ [RBWAR] = 0x00c8,
+ [RDFAR] = 0x00cc,
+ [TBRAR] = 0x00d4,
+ [TDFAR] = 0x00d8,
+};
+
+static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [ECMR] = 0x0160,
+ [ECSR] = 0x0164,
+ [ECSIPR] = 0x0168,
+ [PIR] = 0x016c,
+ [MAHR] = 0x0170,
+ [MALR] = 0x0174,
+ [RFLR] = 0x0178,
+ [PSR] = 0x017c,
+ [TROCR] = 0x0180,
+ [CDCR] = 0x0184,
+ [LCCR] = 0x0188,
+ [CNDCR] = 0x018c,
+ [CEFCR] = 0x0194,
+ [FRECR] = 0x0198,
+ [TSFRCR] = 0x019c,
+ [TLFRCR] = 0x01a0,
+ [RFCR] = 0x01a4,
+ [MAFCR] = 0x01a8,
+ [IPGR] = 0x01b4,
+ [APR] = 0x01b8,
+ [MPR] = 0x01bc,
+ [TPAUSER] = 0x01c4,
+ [BCFR] = 0x01cc,
+
+ [ARSTR] = 0x0000,
+ [TSU_CTRST] = 0x0004,
+ [TSU_FWEN0] = 0x0010,
+ [TSU_FWEN1] = 0x0014,
+ [TSU_FCM] = 0x0018,
+ [TSU_BSYSL0] = 0x0020,
+ [TSU_BSYSL1] = 0x0024,
+ [TSU_PRISL0] = 0x0028,
+ [TSU_PRISL1] = 0x002c,
+ [TSU_FWSL0] = 0x0030,
+ [TSU_FWSL1] = 0x0034,
+ [TSU_FWSLC] = 0x0038,
+ [TSU_QTAGM0] = 0x0040,
+ [TSU_QTAGM1] = 0x0044,
+ [TSU_ADQT0] = 0x0048,
+ [TSU_ADQT1] = 0x004c,
+ [TSU_FWSR] = 0x0050,
+ [TSU_FWINMK] = 0x0054,
+ [TSU_ADSBSY] = 0x0060,
+ [TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
+
+ [TXNLCR0] = 0x0080,
+ [TXALCR0] = 0x0084,
+ [RXNLCR0] = 0x0088,
+ [RXALCR0] = 0x008c,
+ [FWNLCR0] = 0x0090,
+ [FWALCR0] = 0x0094,
+ [TXNLCR1] = 0x00a0,
+	[TXALCR1]	= 0x00a4,
+ [RXNLCR1] = 0x00a8,
+ [RXALCR1] = 0x00ac,
+ [FWNLCR1] = 0x00b0,
+ [FWALCR1] = 0x00b4,
+
+ [TSU_ADRH0] = 0x0100,
+ [TSU_ADRL0] = 0x0104,
+ [TSU_ADRL31] = 0x01fc,
+};
/* Driver's parameters */
#if defined(CONFIG_CPU_SH4)
@@ -339,20 +400,14 @@ enum GECMR_BIT {
enum DMAC_M_BIT {
EDMR_EL = 0x40, /* Litte endian */
EDMR_DL1 = 0x20, EDMR_DL0 = 0x10,
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
- EDMR_SRST = 0x03,
-#else /* CONFIG_CPU_SUBTYPE_SH7763 */
- EDMR_SRST = 0x01,
-#endif
+ EDMR_SRST_GETHER = 0x03,
+ EDMR_SRST_ETHER = 0x01,
};
/* EDTRR */
enum DMAC_T_BIT {
-#ifdef CONFIG_CPU_SUBTYPE_SH7763
- EDTRR_TRNS = 0x03,
-#else
- EDTRR_TRNS = 0x01,
-#endif
+ EDTRR_TRNS_GETHER = 0x03,
+ EDTRR_TRNS_ETHER = 0x01,
};
/* EDRRR*/
@@ -696,6 +751,7 @@ struct sh_eth_cpu_data {
unsigned mpr:1; /* EtherC have MPR */
unsigned tpauser:1; /* EtherC have TPAUSER */
unsigned bculr:1; /* EtherC have BCULR */
+ unsigned tsu:1; /* EtherC have TSU */
unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */
unsigned rpadir:1; /* E-DMAC have RPADIR */
unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
@@ -705,6 +761,8 @@ struct sh_eth_cpu_data {
struct sh_eth_private {
struct platform_device *pdev;
struct sh_eth_cpu_data *cd;
+ const u16 *reg_offset;
+ void __iomem *tsu_addr;
dma_addr_t rx_desc_dma;
dma_addr_t tx_desc_dma;
struct sh_eth_rxdesc *rx_ring;
@@ -723,6 +781,7 @@ struct sh_eth_private {
struct mii_bus *mii_bus; /* MDIO bus control */
struct phy_device *phydev; /* PHY device control */
enum phy_state link;
+ phy_interface_t phy_interface;
int msg_enable;
int speed;
int duplex;
@@ -747,4 +806,32 @@ static inline void sh_eth_soft_swap(char *src, int len)
#endif
}
+static inline void sh_eth_write(struct net_device *ndev, unsigned long data,
+ int enum_index)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ writel(data, ndev->base_addr + mdp->reg_offset[enum_index]);
+}
+
+static inline unsigned long sh_eth_read(struct net_device *ndev,
+ int enum_index)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ return readl(ndev->base_addr + mdp->reg_offset[enum_index]);
+}
+
+static inline void sh_eth_tsu_write(struct sh_eth_private *mdp,
+ unsigned long data, int enum_index)
+{
+ writel(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
+static inline unsigned long sh_eth_tsu_read(struct sh_eth_private *mdp,
+ int enum_index)
+{
+ return readl(mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
#endif /* #ifndef __SH_ETH_H__ */
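
For reference, a minimal sketch of how the new offset tables and accessors fit together. The compile-time table assignment below is illustrative only (the driver actually picks the table in its probe path per SoC), and the two register accesses simply show going through the enum index instead of a hard-coded offset.

/* Illustrative sketch, not part of the patch. */
static void sh_eth_example_access(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	mdp->reg_offset = sh_eth_offset_gigabit;
#elif defined(CONFIG_CPU_SH4)
	mdp->reg_offset = sh_eth_offset_fast_sh4;
#else
	mdp->reg_offset = sh_eth_offset_fast_sh3_sh2;
#endif

	sh_eth_write(ndev, 0, EESIPR);	/* mask all E-DMAC interrupts */
	(void)sh_eth_read(ndev, EESR);	/* fetch E-DMAC status */
}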
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index a5d6a6bd0c1a..3406ed870917 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1915,9 +1915,10 @@ err_release_board:
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
+ struct sis190_private *tp = netdev_priv(dev);
sis190_mii_remove(dev);
- flush_scheduled_work();
+ cancel_work_sync(&tp->phy_task);
unregister_netdev(dev);
sis190_release_board(pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 581836867098..84d4167eee9a 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -36,7 +36,7 @@
Rev 1.07.06 Nov. 7 2000 Jeff Garzik <jgarzik@pobox.com> some bug fix and cleaning
Rev 1.07.05 Nov. 6 2000 metapirat<metapirat@gmx.de> contribute media type select by ifconfig
Rev 1.07.04 Sep. 6 2000 Lei-Chun Chang added ICS1893 PHY support
- Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E eqaulizer workaround rule
+ Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E equalizer workaround rule
Rev 1.07.01 Aug. 08 2000 Ollie Lho minor update for SiS 630E and SiS 630E A1
Rev 1.07 Mar. 07 2000 Ollie Lho bug fix in Rx buffer ring
Rev 1.06.04 Feb. 11 2000 Jeff Garzik <jgarzik@pobox.com> softnet and init for kernel 2.4
@@ -495,7 +495,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
sis_priv->mii_info.reg_num_mask = 0x1f;
/* Get Mac address according to the chip revision */
- pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &(sis_priv->chipset_rev));
+ sis_priv->chipset_rev = pci_dev->revision;
if(netif_msg_probe(sis_priv))
printk(KERN_DEBUG "%s: detected revision %2.2x, "
"trying to get MAC address...\n",
@@ -532,7 +532,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
/* save our host bridge revision */
dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL);
if (dev) {
- pci_read_config_byte(dev, PCI_CLASS_REVISION, &sis_priv->host_bridge_rev);
+ sis_priv->host_bridge_rev = dev->revision;
pci_dev_put(dev);
}
@@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev)
"cur_rx:%4.4d, dirty_rx:%4.4d\n",
net_dev->name, sis_priv->cur_rx,
sis_priv->dirty_rx);
+ dev_kfree_skb(skb);
break;
}
diff --git a/drivers/net/skfp/Makefile b/drivers/net/skfp/Makefile
index cb23580fcffa..b0be0234abf6 100644
--- a/drivers/net/skfp/Makefile
+++ b/drivers/net/skfp/Makefile
@@ -17,4 +17,4 @@ skfp-objs := skfddi.o hwmtm.o fplustm.o smt.o cfm.o \
# projects. To keep the source common for all those drivers (and
# thus simplify fixes to it), please do not clean it up!
-EXTRA_CFLAGS += -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
+ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 0a66fed52e8e..16c62659cdd9 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -412,7 +412,7 @@ static int skfp_driver_init(struct net_device *dev)
bp->SharedMemAddr = pci_alloc_consistent(&bp->pdev,
bp->SharedMemSize,
&bp->SharedMemDMA);
- if (!bp->SharedMemSize) {
+ if (!bp->SharedMemAddr) {
printk("could not allocate mem for ");
printk("hardware module: %ld byte\n",
bp->SharedMemSize);
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 2d9941c045bc..1e1bd0c201c8 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -1263,7 +1263,7 @@ void smt_set_timestamp(struct s_smc *smc, u_char *p)
static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy)
{
int i ;
- u_char *map ;
+ const u_char *map ;
u_short in ;
u_short out ;
@@ -1271,7 +1271,7 @@ static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy)
* MIB para 101b (fddiSMTConnectionPolicy) coding
* is different from 0005 coding
*/
- static u_char ansi_weirdness[16] = {
+ static const u_char ansi_weirdness[16] = {
0,7,5,3,8,1,6,4,9,10,2,11,12,13,14,15
} ;
SMTSETPARA(policy,SMT_P_POLICY) ;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 220e0398f1d5..35b28f42d208 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -1191,7 +1191,7 @@ static void genesis_init(struct skge_hw *hw)
static void genesis_reset(struct skge_hw *hw, int port)
{
- const u8 zero[8] = { 0 };
+ static const u8 zero[8] = { 0 };
u32 reg;
skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
@@ -1557,7 +1557,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
int i;
u32 r;
- const u8 zero[6] = { 0 };
+ static const u8 zero[6] = { 0 };
for (i = 0; i < 10; i++) {
skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
@@ -2764,7 +2764,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
td->dma_hi = map >> 32;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const int offset = skb_transport_offset(skb);
+ const int offset = skb_checksum_start_offset(skb);
/* This seems backwards, but it is what the sk98lin
* does. Looks like hardware is wrong?
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- /* device is off until link detection */
- netif_carrier_off(dev);
-
return dev;
}
@@ -4012,8 +4009,6 @@ static void __devexit skge_remove(struct pci_dev *pdev)
if (!hw)
return;
- flush_scheduled_work();
-
dev1 = hw->dev[1];
if (dev1)
unregister_netdev(dev1);
@@ -4044,53 +4039,40 @@ static void __devexit skge_remove(struct pci_dev *pdev)
}
#ifdef CONFIG_PM
-static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
+static int skge_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct skge_hw *hw = pci_get_drvdata(pdev);
- int i, err, wol = 0;
+ int i;
if (!hw)
return 0;
- err = pci_save_state(pdev);
- if (err)
- return err;
-
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
struct skge_port *skge = netdev_priv(dev);
if (netif_running(dev))
skge_down(dev);
+
if (skge->wol)
skge_wol_init(skge);
-
- wol |= skge->wol;
}
skge_write32(hw, B0_IMSK, 0);
- pci_prepare_to_sleep(pdev);
-
return 0;
}
-static int skge_resume(struct pci_dev *pdev)
+static int skge_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct skge_hw *hw = pci_get_drvdata(pdev);
int i, err;
if (!hw)
return 0;
- err = pci_back_from_sleep(pdev);
- if (err)
- goto out;
-
- err = pci_restore_state(pdev);
- if (err)
- goto out;
-
err = skge_reset(hw);
if (err)
goto out;
@@ -4111,12 +4093,19 @@ static int skge_resume(struct pci_dev *pdev)
out:
return err;
}
+
+static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume);
+#define SKGE_PM_OPS (&skge_pm_ops)
+
+#else
+
+#define SKGE_PM_OPS NULL
#endif
static void skge_shutdown(struct pci_dev *pdev)
{
struct skge_hw *hw = pci_get_drvdata(pdev);
- int i, wol = 0;
+ int i;
if (!hw)
return;
@@ -4127,15 +4116,10 @@ static void skge_shutdown(struct pci_dev *pdev)
if (skge->wol)
skge_wol_init(skge);
- wol |= skge->wol;
}
- if (pci_enable_wake(pdev, PCI_D3cold, wol))
- pci_enable_wake(pdev, PCI_D3hot, wol);
-
- pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
pci_set_power_state(pdev, PCI_D3hot);
-
}
static struct pci_driver skge_driver = {
@@ -4143,11 +4127,8 @@ static struct pci_driver skge_driver = {
.id_table = skge_id_table,
.probe = skge_probe,
.remove = __devexit_p(skge_remove),
-#ifdef CONFIG_PM
- .suspend = skge_suspend,
- .resume = skge_resume,
-#endif
.shutdown = skge_shutdown,
+ .driver.pm = SKGE_PM_OPS,
};
static struct dmi_system_id skge_32bit_dma_boards[] = {
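
The suspend/resume rework above follows the generic conversion from legacy PCI power-management hooks to dev_pm_ops; the sketch below shows the bare pattern with placeholder foo_* names (config-space save/restore and wake programming are left to the PCI core).

#include <linux/pci.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev)
{
	/* Device-specific quiesce only; the PCI core saves config space
	 * and picks the sleep state after this returns.
	 */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Device-specific re-init only; config space is already restored. */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL
#endif

/* The pci_driver then points at the ops table via .driver.pm = FOO_PM_OPS
 * instead of filling the legacy .suspend/.resume members.
 */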
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index d6577084ce70..2a91868788f7 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -46,10 +46,6 @@
#include <asm/irq.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define SKY2_VLAN_TAG_USED 1
-#endif
-
#include "sky2.h"
#define DRV_NAME "sky2"
@@ -1326,39 +1322,34 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
}
-#ifdef SKY2_VLAN_TAG_USED
-static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
-{
- if (onoff) {
- sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
- RX_VLAN_STRIP_ON);
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- TX_VLAN_TAG_ON);
- } else {
- sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
- RX_VLAN_STRIP_OFF);
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- TX_VLAN_TAG_OFF);
- }
-}
+#define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)
-static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+static void sky2_vlan_mode(struct net_device *dev)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
u16 port = sky2->port;
- netif_tx_lock_bh(dev);
- napi_disable(&hw->napi);
+ if (dev->features & NETIF_F_HW_VLAN_RX)
+ sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+ RX_VLAN_STRIP_ON);
+ else
+ sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+ RX_VLAN_STRIP_OFF);
- sky2->vlgrp = grp;
- sky2_set_vlan_mode(hw, port, grp != NULL);
+ dev->vlan_features = dev->features &~ NETIF_F_ALL_VLAN;
+ if (dev->features & NETIF_F_HW_VLAN_TX)
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_VLAN_TAG_ON);
+ else {
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+ TX_VLAN_TAG_OFF);
- sky2_read32(hw, B0_Y2_SP_LISR);
- napi_enable(&hw->napi);
- netif_tx_unlock_bh(dev);
+ /* Can't do transmit offload of vlan without hw vlan */
+ dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG
+ | NETIF_F_ALL_CSUM);
+ }
}
-#endif
/* Amount of required worst case padding in rx buffer */
static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
@@ -1635,9 +1626,7 @@ static void sky2_hw_up(struct sky2_port *sky2)
sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
sky2->tx_ring_size - 1);
-#ifdef SKY2_VLAN_TAG_USED
- sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
-#endif
+ sky2_vlan_mode(sky2->netdev);
sky2_rx_start(sky2);
}
@@ -1780,7 +1769,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
}
ctrl = 0;
-#ifdef SKY2_VLAN_TAG_USED
+
/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
if (vlan_tx_tag_present(skb)) {
if (!le) {
@@ -1792,7 +1781,6 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
le->length = cpu_to_be16(vlan_tx_tag_get(skb));
ctrl |= INS_VLAN;
}
-#endif
/* Handle TCP checksum offload */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -1917,8 +1905,10 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
netif_printk(sky2, tx_done, KERN_DEBUG, dev,
"tx done %u\n", idx);
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ u64_stats_update_begin(&sky2->tx_stats.syncp);
+ ++sky2->tx_stats.packets;
+ sky2->tx_stats.bytes += skb->len;
+ u64_stats_update_end(&sky2->tx_stats.syncp);
re->skb = NULL;
dev_kfree_skb_any(skb);
@@ -2430,11 +2420,8 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
struct sk_buff *skb = NULL;
u16 count = (status & GMR_FS_LEN) >> 16;
-#ifdef SKY2_VLAN_TAG_USED
- /* Account for vlan tag */
- if (sky2->vlgrp && (status & GMR_FS_VLAN))
- count -= VLAN_HLEN;
-#endif
+ if (status & GMR_FS_VLAN)
+ count -= VLAN_HLEN; /* Account for vlan tag */
netif_printk(sky2, rx_status, KERN_DEBUG, dev,
"rx slot %u status 0x%x len %d\n",
@@ -2460,7 +2447,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
/* if length reported by DMA does not match PHY, packet was truncated */
if (length != count)
- goto len_error;
+ goto error;
okay:
if (length < copybreak)
@@ -2475,34 +2462,13 @@ resubmit:
return skb;
-len_error:
- /* Truncation of overlength packets
- causes PHY length to not match MAC length */
- ++dev->stats.rx_length_errors;
- if (net_ratelimit())
- netif_info(sky2, rx_err, dev,
- "rx length error: status %#x length %d\n",
- status, length);
- goto resubmit;
-
error:
++dev->stats.rx_errors;
- if (status & GMR_FS_RX_FF_OV) {
- dev->stats.rx_over_errors++;
- goto resubmit;
- }
if (net_ratelimit())
netif_info(sky2, rx_err, dev,
"rx error, status 0x%x length %d\n", status, length);
- if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
- dev->stats.rx_length_errors++;
- if (status & GMR_FS_FRAGMENT)
- dev->stats.rx_frame_errors++;
- if (status & GMR_FS_CRC_ERR)
- dev->stats.rx_crc_errors++;
-
goto resubmit;
}
@@ -2523,17 +2489,9 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
static inline void sky2_skb_rx(const struct sky2_port *sky2,
u32 status, struct sk_buff *skb)
{
-#ifdef SKY2_VLAN_TAG_USED
- u16 vlan_tag = be16_to_cpu(sky2->rx_tag);
- if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
- if (skb->ip_summed == CHECKSUM_NONE)
- vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag);
- else
- vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp,
- vlan_tag, skb);
- return;
- }
-#endif
+ if (status & GMR_FS_VLAN)
+ __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
+
if (skb->ip_summed == CHECKSUM_NONE)
netif_receive_skb(skb);
else
@@ -2543,14 +2501,19 @@ static inline void sky2_skb_rx(const struct sky2_port *sky2,
static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
unsigned packets, unsigned bytes)
{
- if (packets) {
- struct net_device *dev = hw->dev[port];
+ struct net_device *dev = hw->dev[port];
+ struct sky2_port *sky2 = netdev_priv(dev);
- dev->stats.rx_packets += packets;
- dev->stats.rx_bytes += bytes;
- dev->last_rx = jiffies;
- sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
- }
+ if (packets == 0)
+ return;
+
+ u64_stats_update_begin(&sky2->rx_stats.syncp);
+ sky2->rx_stats.packets += packets;
+ sky2->rx_stats.bytes += bytes;
+ u64_stats_update_end(&sky2->rx_stats.syncp);
+
+ dev->last_rx = jiffies;
+ sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
}
static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
@@ -2645,7 +2608,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
goto exit_loop;
break;
-#ifdef SKY2_VLAN_TAG_USED
case OP_RXVLAN:
sky2->rx_tag = length;
break;
@@ -2653,7 +2615,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
case OP_RXCHKSVLAN:
sky2->rx_tag = length;
/* fall through */
-#endif
case OP_RXCHKS:
if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
sky2_rx_checksum(sky2, status);
@@ -3056,6 +3017,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
| SKY2_HW_NEW_LE
| SKY2_HW_AUTO_TX_SUM
| SKY2_HW_ADV_POWER_CTL;
+
+		/* The workaround for FE+ status conflicts with VLAN tag detection. */
+ if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
+ hw->flags |= SKY2_HW_VLAN_BROKEN;
break;
case CHIP_ID_YUKON_SUPR:
@@ -3398,12 +3363,24 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
+ bool enable_wakeup = false;
+ int i;
if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) ||
!device_can_wakeup(&hw->pdev->dev))
return -EOPNOTSUPP;
sky2->wol = wol->wolopts;
+
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
+
+ if (sky2->wol)
+ enable_wakeup = true;
+ }
+ device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup);
+
return 0;
}
@@ -3413,18 +3390,15 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw)
u32 modes = SUPPORTED_10baseT_Half
| SUPPORTED_10baseT_Full
| SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full
- | SUPPORTED_Autoneg | SUPPORTED_TP;
+ | SUPPORTED_100baseT_Full;
if (hw->flags & SKY2_HW_GIGABIT)
modes |= SUPPORTED_1000baseT_Half
| SUPPORTED_1000baseT_Full;
return modes;
} else
- return SUPPORTED_1000baseT_Half
- | SUPPORTED_1000baseT_Full
- | SUPPORTED_Autoneg
- | SUPPORTED_FIBRE;
+ return SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full;
}
static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
@@ -3438,9 +3412,11 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (sky2_is_copper(hw)) {
ecmd->port = PORT_TP;
ecmd->speed = sky2->speed;
+ ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
} else {
ecmd->speed = SPEED_1000;
ecmd->port = PORT_FIBRE;
+ ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
}
ecmd->advertising = sky2->advertising;
@@ -3457,8 +3433,19 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
u32 supported = sky2_supported_modes(hw);
if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (ecmd->advertising & ~supported)
+ return -EINVAL;
+
+ if (sky2_is_copper(hw))
+ sky2->advertising = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ else
+ sky2->advertising = ecmd->advertising |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+
sky2->flags |= SKY2_FLAG_AUTO_SPEED;
- ecmd->advertising = supported;
sky2->duplex = -1;
sky2->speed = -1;
} else {
@@ -3502,8 +3489,6 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
}
- sky2->advertising = ecmd->advertising;
-
if (netif_running(dev)) {
sky2_phy_reinit(sky2);
sky2_set_multicast(dev);
@@ -3614,13 +3599,11 @@ static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
unsigned port = sky2->port;
int i;
- data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
- | (u64) gma_read32(hw, port, GM_TXO_OK_LO);
- data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
- | (u64) gma_read32(hw, port, GM_RXO_OK_LO);
+ data[0] = get_stats64(hw, port, GM_TXO_OK_LO);
+ data[1] = get_stats64(hw, port, GM_RXO_OK_LO);
for (i = 2; i < count; i++)
- data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset);
+ data[i] = get_stats32(hw, port, sky2_stats[i].offset);
}
static void sky2_set_msglevel(struct net_device *netdev, u32 value)
@@ -3738,6 +3721,51 @@ static void sky2_set_multicast(struct net_device *dev)
gma_write16(hw, port, GM_RX_CTRL, reg);
}
+static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ unsigned int start;
+ u64 _bytes, _packets;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp);
+ _bytes = sky2->rx_stats.bytes;
+ _packets = sky2->rx_stats.packets;
+ } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start));
+
+ stats->rx_packets = _packets;
+ stats->rx_bytes = _bytes;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp);
+ _bytes = sky2->tx_stats.bytes;
+ _packets = sky2->tx_stats.packets;
+ } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start));
+
+ stats->tx_packets = _packets;
+ stats->tx_bytes = _bytes;
+
+ stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK)
+ + get_stats32(hw, port, GM_RXF_BC_OK);
+
+ stats->collisions = get_stats32(hw, port, GM_TXF_COL);
+
+ stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR);
+ stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR);
+ stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT)
+ + get_stats32(hw, port, GM_RXE_FRAG);
+ stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV);
+
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
+ stats->tx_fifo_errors = dev->stats.tx_fifo_errors;
+
+ return stats;
+}
+
/* Can have one global because blinking is controlled by
* ethtool and that is always under RTNL mutex
*/
@@ -4188,15 +4216,28 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
static int sky2_set_flags(struct net_device *dev, u32 data)
{
struct sky2_port *sky2 = netdev_priv(dev);
- u32 supported =
- (sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH;
+ unsigned long old_feat = dev->features;
+ u32 supported = 0;
int rc;
+ if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN))
+ supported |= ETH_FLAG_RXHASH;
+
+ if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN))
+ supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
+
+ printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n",
+ supported, data);
+
rc = ethtool_op_set_flags(dev, data, supported);
if (rc)
return rc;
- rx_set_rss(dev);
+ if ((old_feat ^ dev->features) & NETIF_F_RXHASH)
+ rx_set_rss(dev);
+
+ if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN)
+ sky2_vlan_mode(dev);
return 0;
}
@@ -4232,6 +4273,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.get_sset_count = sky2_get_sset_count,
.get_ethtool_stats = sky2_get_ethtool_stats,
.set_flags = sky2_set_flags,
+ .get_flags = ethtool_op_get_flags,
};
#ifdef CONFIG_SKY2_DEBUG
@@ -4512,9 +4554,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_set_multicast_list = sky2_set_multicast,
.ndo_change_mtu = sky2_change_mtu,
.ndo_tx_timeout = sky2_tx_timeout,
-#ifdef SKY2_VLAN_TAG_USED
- .ndo_vlan_rx_register = sky2_vlan_rx_register,
-#endif
+ .ndo_get_stats64 = sky2_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sky2_netpoll,
#endif
@@ -4529,9 +4569,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_set_multicast_list = sky2_set_multicast,
.ndo_change_mtu = sky2_change_mtu,
.ndo_tx_timeout = sky2_tx_timeout,
-#ifdef SKY2_VLAN_TAG_USED
- .ndo_vlan_rx_register = sky2_vlan_rx_register,
-#endif
+ .ndo_get_stats64 = sky2_get_stats,
},
};
@@ -4582,7 +4620,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
sky2->port = port;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
- | NETIF_F_TSO | NETIF_F_GRO;
+ | NETIF_F_TSO | NETIF_F_GRO;
+
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
@@ -4590,13 +4629,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
if (!(hw->flags & SKY2_HW_RSS_BROKEN))
dev->features |= NETIF_F_RXHASH;
-#ifdef SKY2_VLAN_TAG_USED
- /* The workaround for FE+ status conflicts with VLAN tag detection. */
- if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
- sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
+ if (!(hw->flags & SKY2_HW_VLAN_BROKEN))
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- }
-#endif
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
@@ -4920,10 +4954,11 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
+static int sky2_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sky2_hw *hw = pci_get_drvdata(pdev);
- int i, wol = 0;
+ int i;
if (!hw)
return 0;
@@ -4940,41 +4975,24 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
if (sky2->wol)
sky2_wol_init(sky2);
-
- wol |= sky2->wol;
}
- device_set_wakeup_enable(&pdev->dev, wol != 0);
-
sky2_power_aux(hw);
rtnl_unlock();
- pci_save_state(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
}
-#ifdef CONFIG_PM
-static int sky2_resume(struct pci_dev *pdev)
+#ifdef CONFIG_PM_SLEEP
+static int sky2_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sky2_hw *hw = pci_get_drvdata(pdev);
int err;
if (!hw)
return 0;
- err = pci_set_power_state(pdev, PCI_D0);
- if (err)
- goto out;
-
- err = pci_restore_state(pdev);
- if (err)
- goto out;
-
- pci_enable_wake(pdev, PCI_D0, 0);
-
/* Re-enable all clocks */
err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
if (err) {
@@ -4994,11 +5012,20 @@ out:
pci_disable_device(pdev);
return err;
}
+
+static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
+#define SKY2_PM_OPS (&sky2_pm_ops)
+
+#else
+
+#define SKY2_PM_OPS NULL
#endif
static void sky2_shutdown(struct pci_dev *pdev)
{
- sky2_suspend(pdev, PMSG_SUSPEND);
+ sky2_suspend(&pdev->dev);
+ pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
+ pci_set_power_state(pdev, PCI_D3hot);
}
static struct pci_driver sky2_driver = {
@@ -5006,11 +5033,8 @@ static struct pci_driver sky2_driver = {
.id_table = sky2_id_table,
.probe = sky2_probe,
.remove = __devexit_p(sky2_remove),
-#ifdef CONFIG_PM
- .suspend = sky2_suspend,
- .resume = sky2_resume,
-#endif
.shutdown = sky2_shutdown,
+ .driver.pm = SKY2_PM_OPS,
};
static int __init sky2_init_module(void)
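
The sky2 VLAN rework drops the vlan_group bookkeeping and keys everything off dev->features; a reduced sketch of the receive-side idea follows (foo_rx_deliver is a placeholder name, GMR_FS_VLAN is the status bit from sky2.h).

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Placeholder sketch: with feature-based acceleration the tag is simply
 * attached to the skb when RX stripping is enabled; no vlan_group lookup.
 */
static void foo_rx_deliver(struct net_device *dev, struct sk_buff *skb,
			   u32 status, u16 tag)
{
	if ((dev->features & NETIF_F_HW_VLAN_RX) && (status & GMR_FS_VLAN))
		__vlan_hwaccel_put_tag(skb, tag);	/* 2.6.37-era two-arg form */

	netif_receive_skb(skb);
}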
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 61891a6cacc2..6861b0e8db9a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2200,6 +2200,12 @@ enum flow_control {
FC_BOTH = 3,
};
+struct sky2_stats {
+ struct u64_stats_sync syncp;
+ u64 packets;
+ u64 bytes;
+};
+
struct sky2_port {
struct sky2_hw *hw;
struct net_device *netdev;
@@ -2209,6 +2215,8 @@ struct sky2_port {
struct tx_ring_info *tx_ring;
struct sky2_tx_le *tx_le;
+ struct sky2_stats tx_stats;
+
u16 tx_ring_size;
u16 tx_cons; /* next le to check */
u16 tx_prod; /* next le to use */
@@ -2221,17 +2229,15 @@ struct sky2_port {
struct rx_ring_info *rx_ring ____cacheline_aligned_in_smp;
struct sky2_rx_le *rx_le;
+ struct sky2_stats rx_stats;
u16 rx_next; /* next re to check */
u16 rx_put; /* next le index to use */
u16 rx_pending;
u16 rx_data_size;
u16 rx_nfrags;
-
-#ifdef SKY2_VLAN_TAG_USED
u16 rx_tag;
- struct vlan_group *vlgrp;
-#endif
+
struct {
unsigned long last;
u32 mac_rp;
@@ -2275,6 +2281,7 @@ struct sky2_hw {
#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
#define SKY2_HW_RSS_BROKEN 0x00000100
+#define SKY2_HW_VLAN_BROKEN 0x00000200
u8 chip_id;
u8 chip_rev;
@@ -2346,6 +2353,39 @@ static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg)
| (u32) sky2_read16(hw, base+4) << 16;
}
+static inline u64 gma_read64(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+ unsigned base = SK_GMAC_REG(port, reg);
+
+ return (u64) sky2_read16(hw, base)
+ | (u64) sky2_read16(hw, base+4) << 16
+ | (u64) sky2_read16(hw, base+8) << 32
+ | (u64) sky2_read16(hw, base+12) << 48;
+}
+
+/* There is no way to atomically read 32 bit counter values from the hardware, so retry until two reads agree */
+static inline u32 get_stats32(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+ u32 val;
+
+ do {
+ val = gma_read32(hw, port, reg);
+ } while (gma_read32(hw, port, reg) != val);
+
+ return val;
+}
+
+static inline u64 get_stats64(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+ u64 val;
+
+ do {
+ val = gma_read64(hw, port, reg);
+ } while (gma_read64(hw, port, reg) != val);
+
+ return val;
+}
+
static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v)
{
sky2_write16(hw, SK_GMAC_REG(port,r), v);
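
The new sky2_stats counters rely on the u64_stats_sync seqcount; a short usage sketch pairing the writer (hot path) with the reader (ndo_get_stats64), using the fields added above and placeholder example_* names:

static void example_account_tx(struct sky2_port *sky2, unsigned int len)
{
	u64_stats_update_begin(&sky2->tx_stats.syncp);
	sky2->tx_stats.packets++;
	sky2->tx_stats.bytes += len;
	u64_stats_update_end(&sky2->tx_stats.syncp);
}

static u64 example_read_tx_bytes(struct sky2_port *sky2)
{
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp);
		bytes = sky2->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start));

	return bytes;
}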
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index d2dd8e6113ab..235a3c6c9f91 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -277,8 +277,12 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
dev->base_addr = ioaddr+ULTRA_NIC_OFFSET;
{
- int addr_tbl[4] = {0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000};
- short num_pages_tbl[4] = {0x20, 0x40, 0x80, 0xff};
+ static const int addr_tbl[4] = {
+ 0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000
+ };
+ static const short num_pages_tbl[4] = {
+ 0x20, 0x40, 0x80, 0xff
+ };
dev->mem_start = ((addr & 0x0f) << 13) + addr_tbl[(addr >> 6) & 3] ;
num_pages = num_pages_tbl[(addr >> 4) & 3];
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 726df611ee17..43654a3bb0ec 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -81,6 +81,7 @@ static const char version[] =
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/workqueue.h>
+#include <linux/of.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -2394,6 +2395,15 @@ static int smc_drv_resume(struct device *dev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id smc91x_match[] = {
+ { .compatible = "smsc,lan91c94", },
+ { .compatible = "smsc,lan91c111", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, smc91x_match);
+#endif
+
static struct dev_pm_ops smc_drv_pm_ops = {
.suspend = smc_drv_suspend,
.resume = smc_drv_resume,
@@ -2406,6 +2416,9 @@ static struct platform_driver smc_driver = {
.name = CARDNAME,
.owner = THIS_MODULE,
.pm = &smc_drv_pm_ops,
+#ifdef CONFIG_OF
+ .of_match_table = smc91x_match,
+#endif
},
};
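
With the of_device_id table registered, the platform bus can bind the driver from a device-tree node; a hedged sketch of how a probe path could check which compatible matched (of_match_device is the stock helper, example_probe is hypothetical):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
#ifdef CONFIG_OF
	const struct of_device_id *match =
		of_match_device(smc91x_match, &pdev->dev);

	if (match)
		dev_info(&pdev->dev, "bound via DT compatible %s\n",
			 match->compatible);
#endif
	return 0;
}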
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index ee747919a766..68d48ab6eacf 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -206,68 +206,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define RPC_LSA_DEFAULT RPC_LED_TX_RX
#define RPC_LSB_DEFAULT RPC_LED_100_10
-#elif defined(CONFIG_MACH_LPD79520) || \
- defined(CONFIG_MACH_LPD7A400) || \
- defined(CONFIG_MACH_LPD7A404)
-
-/* The LPD7X_IOBARRIER is necessary to overcome a mismatch between the
- * way that the CPU handles chip selects and the way that the SMC chip
- * expects the chip select to operate. Refer to
- * Documentation/arm/Sharp-LH/IOBarrier for details. The read from
- * IOBARRIER is a byte, in order that we read the least-common
- * denominator. It would be wasteful to read 32 bits from an 8-bit
- * accessible region.
- *
- * There is no explicit protection against interrupts intervening
- * between the writew and the IOBARRIER. In SMC ISR there is a
- * preamble that performs an IOBARRIER in the extremely unlikely event
- * that the driver interrupts itself between a writew to the chip an
- * the IOBARRIER that follows *and* the cache is large enough that the
- * first off-chip access while handing the interrupt is to the SMC
- * chip. Other devices in the same address space as the SMC chip must
- * be aware of the potential for trouble and perform a similar
- * IOBARRIER on entry to their ISR.
- */
-
-#include <mach/constants.h> /* IOBARRIER_VIRT */
-
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 0
-#define LPD7X_IOBARRIER readb (IOBARRIER_VIRT)
-
-#define SMC_inw(a,r)\
- ({ unsigned short v = readw ((void*) ((a) + (r))); LPD7X_IOBARRIER; v; })
-#define SMC_outw(v,a,r) ({ writew ((v), (a) + (r)); LPD7X_IOBARRIER; })
-
-#define SMC_insw LPD7_SMC_insw
-static inline void LPD7_SMC_insw (unsigned char* a, int r,
- unsigned char* p, int l)
-{
- unsigned short* ps = (unsigned short*) p;
- while (l-- > 0) {
- *ps++ = readw (a + r);
- LPD7X_IOBARRIER;
- }
-}
-
-#define SMC_outsw LPD7_SMC_outsw
-static inline void LPD7_SMC_outsw (unsigned char* a, int r,
- unsigned char* p, int l)
-{
- unsigned short* ps = (unsigned short*) p;
- while (l-- > 0) {
- writew (*ps++, a + r);
- LPD7X_IOBARRIER;
- }
-}
-
-#define SMC_INTERRUPT_PREAMBLE LPD7X_IOBARRIER
-
-#define RPC_LSA_DEFAULT RPC_LED_TX_RX
-#define RPC_LSB_DEFAULT RPC_LED_100_10
-
#elif defined(CONFIG_ARCH_VERSATILE)
#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 64bfdae5956f..1566259c1f27 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -791,8 +791,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
return -ENODEV;
}
- SMSC_TRACE(PROBE, "PHY %d: addr %d, phy_id 0x%08X",
- phy_addr, phydev->addr, phydev->phy_id);
+ SMSC_TRACE(PROBE, "PHY: addr %d, phy_id 0x%08X",
+ phydev->addr, phydev->phy_id);
ret = phy_connect_direct(dev, phydev,
&smsc911x_phy_adjust_link, 0,
@@ -1178,6 +1178,11 @@ static int smsc911x_open(struct net_device *dev)
smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740);
+ /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */
+ spin_lock_irq(&pdata->mac_lock);
+ smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q);
+ spin_unlock_irq(&pdata->mac_lock);
+
/* Make sure EEPROM has finished loading before setting GPIO_CFG */
timeout = 50;
while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) &&
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 4adf12422787..a4f2bd52e546 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -148,7 +148,7 @@ static int full_duplex[MAX_UNITS] = {0, };
* This SUCKS.
* We need a much better method to determine if dma_addr_t is 64-bit.
*/
-#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) || (defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT))
+#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || (defined(CONFIG_MIPS) && ((defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) || defined(CONFIG_64BIT))) || (defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT))
/* 64-bit dma_addr_t */
#define ADDR_64BITS /* This chip uses 64 bit addresses. */
#define netdrv_addr_t __le64
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 79bdc2e13224..5f06c4706abe 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Apr_2010"
+#define DRV_MODULE_VERSION "Nov_2010"
#include <linux/platform_device.h>
#include <linux/stmmac.h>
@@ -37,7 +37,6 @@ struct stmmac_priv {
unsigned int cur_tx;
unsigned int dirty_tx;
unsigned int dma_tx_size;
- int tx_coe;
int tx_coalesce;
struct dma_desc *dma_rx ;
@@ -48,7 +47,6 @@ struct stmmac_priv {
struct sk_buff_head rx_recycle;
struct net_device *dev;
- int is_gmac;
dma_addr_t dma_rx_phy;
unsigned int dma_rx_size;
unsigned int dma_buf_sz;
@@ -60,14 +58,11 @@ struct stmmac_priv {
struct napi_struct napi;
phy_interface_t phy_interface;
- int pbl;
- int bus_id;
int phy_addr;
int phy_mask;
int (*phy_reset) (void *priv);
- void (*fix_mac_speed) (void *priv, unsigned int speed);
- void (*bus_setup)(void __iomem *ioaddr);
- void *bsp_priv;
+ int rx_coe;
+ int no_csum_insertion;
int phy_irq;
struct phy_device *phydev;
@@ -77,47 +72,20 @@ struct stmmac_priv {
unsigned int flow_ctrl;
unsigned int pause;
struct mii_bus *mii;
- int mii_clk_csr;
u32 msg_enable;
spinlock_t lock;
int wolopts;
int wolenabled;
- int shutdown;
#ifdef CONFIG_STMMAC_TIMER
struct stmmac_timer *tm;
#endif
#ifdef STMMAC_VLAN_TAG_USED
struct vlan_group *vlgrp;
#endif
- int enh_desc;
- int rx_coe;
- int bugged_jumbo;
- int no_csum_insertion;
+ struct plat_stmmacenet_data *plat;
};
-#ifdef CONFIG_STM_DRIVERS
-#include <linux/stm/pad.h>
-static inline int stmmac_claim_resource(struct platform_device *pdev)
-{
- int ret = 0;
- struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
-
- /* Pad routing setup */
- if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
- dev_name(&pdev->dev)))) {
- printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
- ret = -ENODEV;
- }
- return ret;
-}
-#else
-static inline int stmmac_claim_resource(struct platform_device *pdev)
-{
- return 0;
-}
-#endif
-
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
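
With the per-field copies gone, everything is reached through priv->plat; a hedged sketch of what a board file might hand in (the field names are the ones this patch dereferences, while the values and st_example_* callbacks are invented for illustration):

static int st_example_init(struct platform_device *pdev)
{
	/* board-specific pad/clock setup would go here */
	return 0;
}

static void st_example_fix_mac_speed(void *bsp_priv, unsigned int speed)
{
	/* poke the board's syscon register for the negotiated speed */
}

static struct plat_stmmacenet_data st_example_plat = {
	.bus_id		= 0,
	.pbl		= 32,
	.has_gmac	= 1,
	.enh_desc	= 1,
	.tx_coe		= 1,
	.fix_mac_speed	= st_example_fix_mac_speed,
	.init		= st_example_init,
};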
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 6d65482e789a..fd719edc7f7c 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -94,7 +94,7 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!priv->is_gmac)
+ if (!priv->plat->has_gmac)
strcpy(info->driver, MAC100_ETHTOOL_NAME);
else
strcpy(info->driver, GMAC_ETHTOOL_NAME);
@@ -176,7 +176,7 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
memset(reg_space, 0x0, REG_SPACE_SIZE);
- if (!priv->is_gmac) {
+ if (!priv->plat->has_gmac) {
/* MAC registers */
for (i = 0; i < 12; i++)
reg_space[i] = readl(priv->ioaddr + (i * 4));
@@ -197,16 +197,6 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
}
}
-static int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
-{
- if (data)
- netdev->features |= NETIF_F_HW_CSUM;
- else
- netdev->features &= ~NETIF_F_HW_CSUM;
-
- return 0;
-}
-
static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -370,7 +360,7 @@ static struct ethtool_ops stmmac_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_rx_csum = stmmac_ethtool_get_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = stmmac_ethtool_set_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_pauseparam = stmmac_get_pauseparam,
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 06bc6034ce81..0e5f03135b50 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -186,6 +186,18 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
}
+/* On some ST platforms, some HW system configuration registers have to be
+ * set according to the link speed negotiated.
+ */
+static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+
+ if (likely(priv->plat->fix_mac_speed))
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv,
+ phydev->speed);
+}
+
/**
* stmmac_adjust_link
* @dev: net device structure
@@ -228,15 +240,13 @@ static void stmmac_adjust_link(struct net_device *dev)
new_state = 1;
switch (phydev->speed) {
case 1000:
- if (likely(priv->is_gmac))
+ if (likely(priv->plat->has_gmac))
ctrl &= ~priv->hw->link.port;
- if (likely(priv->fix_mac_speed))
- priv->fix_mac_speed(priv->bsp_priv,
- phydev->speed);
+ stmmac_hw_fix_mac_speed(priv);
break;
case 100:
case 10:
- if (priv->is_gmac) {
+ if (priv->plat->has_gmac) {
ctrl |= priv->hw->link.port;
if (phydev->speed == SPEED_100) {
ctrl |= priv->hw->link.speed;
@@ -246,9 +256,7 @@ static void stmmac_adjust_link(struct net_device *dev)
} else {
ctrl &= ~priv->hw->link.port;
}
- if (likely(priv->fix_mac_speed))
- priv->fix_mac_speed(priv->bsp_priv,
- phydev->speed);
+ stmmac_hw_fix_mac_speed(priv);
break;
default:
if (netif_msg_link(priv))
@@ -305,7 +313,7 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
- snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+ snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
@@ -552,7 +560,7 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
- if (likely((priv->tx_coe) && (!priv->no_csum_insertion))) {
+ if (likely((priv->plat->tx_coe) && (!priv->no_csum_insertion))) {
/* In case of GMAC, SF mode has to be enabled
* to perform the TX COE. This depends on:
* 1) TX COE if actually supported
@@ -814,7 +822,7 @@ static int stmmac_open(struct net_device *dev)
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
- if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->pbl,
+ if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
priv->dma_tx_phy,
priv->dma_rx_phy) < 0)) {
@@ -825,19 +833,17 @@ static int stmmac_open(struct net_device *dev)
/* Copy the MAC addr into the HW */
priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
/* If required, perform hw setup of the bus. */
- if (priv->bus_setup)
- priv->bus_setup(priv->ioaddr);
+ if (priv->plat->bus_setup)
+ priv->plat->bus_setup(priv->ioaddr);
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->ioaddr);
priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
if (priv->rx_coe)
pr_info("stmmac: Rx Checksum Offload Engine supported\n");
- if (priv->tx_coe)
+ if (priv->plat->tx_coe)
pr_info("\tTX Checksum insertion supported\n");
- priv->shutdown = 0;
-
/* Initialise the MMC (if present) to disable all interrupts. */
writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
@@ -943,7 +949,7 @@ static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
skb, skb->len);
segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
- if (unlikely(IS_ERR(segs)))
+ if (IS_ERR(segs))
goto sw_tso_end;
do {
@@ -1042,7 +1048,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return stmmac_sw_tso(priv, skb);
if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
- if (unlikely((!priv->tx_coe) || (priv->no_csum_insertion)))
+ if (unlikely((!priv->plat->tx_coe) ||
+ (priv->no_csum_insertion)))
skb_checksum_help(skb);
else
csum_insertion = 1;
@@ -1146,7 +1153,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
DMA_FROM_DEVICE);
(p + entry)->des2 = priv->rx_skbuff_dma[entry];
- if (unlikely(priv->is_gmac)) {
+ if (unlikely(priv->plat->has_gmac)) {
if (bfsize >= BUF_SIZE_8KiB)
(p + entry)->des3 =
(p + entry)->des2 + BUF_SIZE_8KiB;
@@ -1356,7 +1363,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return -EBUSY;
}
- if (priv->is_gmac)
+ if (priv->plat->has_gmac)
max_mtu = JUMBO_LEN;
else
max_mtu = ETH_DATA_LEN;
@@ -1370,7 +1377,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
* needs to have the Tx COE disabled for oversized frames
* (due to limited buffer sizes). In this case we disable
* the TX csum insertionin the TDES and not use SF. */
- if ((priv->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
+ if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
priv->no_csum_insertion = 1;
else
priv->no_csum_insertion = 0;
@@ -1390,7 +1397,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
- if (priv->is_gmac)
+ if (priv->plat->has_gmac)
/* To handle GMAC own interrupts */
priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
@@ -1487,7 +1494,8 @@ static int stmmac_probe(struct net_device *dev)
dev->netdev_ops = &stmmac_netdev_ops;
stmmac_set_ethtool_ops(dev);
- dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA);
+ dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
@@ -1509,6 +1517,8 @@ static int stmmac_probe(struct net_device *dev)
pr_warning("\tno valid MAC address;"
"please, use ifconfig or nwhwconfig!\n");
+ spin_lock_init(&priv->lock);
+
ret = register_netdev(dev);
if (ret) {
pr_err("%s: ERROR %i registering the device\n",
@@ -1518,9 +1528,7 @@ static int stmmac_probe(struct net_device *dev)
DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
- (dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
-
- spin_lock_init(&priv->lock);
+ (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
return ret;
}
@@ -1536,7 +1544,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
struct mac_device_info *device;
- if (priv->is_gmac)
+ if (priv->plat->has_gmac)
device = dwmac1000_setup(priv->ioaddr);
else
device = dwmac100_setup(priv->ioaddr);
@@ -1544,7 +1552,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
if (!device)
return -ENOMEM;
- if (priv->enh_desc) {
+ if (priv->plat->enh_desc) {
device->desc = &enh_desc_ops;
pr_info("\tEnhanced descriptor structure\n");
} else
@@ -1552,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev)
priv->hw = device;
- if (device_can_wakeup(priv->device))
+ if (device_can_wakeup(priv->device)) {
priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+ enable_irq_wake(dev->irq);
+ }
return 0;
}
@@ -1598,7 +1608,7 @@ static int stmmac_associate_phy(struct device *dev, void *data)
plat_dat->bus_id);
/* Check that this phy is for the MAC being initialised */
- if (priv->bus_id != plat_dat->bus_id)
+ if (priv->plat->bus_id != plat_dat->bus_id)
return 0;
/* OK, this PHY is connected to the MAC.
@@ -1634,15 +1644,13 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *addr = NULL;
struct net_device *ndev = NULL;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = NULL;
struct plat_stmmacenet_data *plat_dat;
pr_info("STMMAC driver:\n\tplatform registration... ");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- goto out;
- }
+ if (!res)
+ return -ENODEV;
pr_info("\tdone!\n");
if (!request_mem_region(res->start, resource_size(res),
@@ -1650,22 +1658,21 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
pr_err("%s: ERROR: memory allocation failed"
"cannot get the I/O addr 0x%x\n",
__func__, (unsigned int)res->start);
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
addr = ioremap(res->start, resource_size(res));
if (!addr) {
pr_err("%s: ERROR: memory mapping failed\n", __func__);
ret = -ENOMEM;
- goto out;
+ goto out_release_region;
}
ndev = alloc_etherdev(sizeof(struct stmmac_priv));
if (!ndev) {
pr_err("%s: ERROR: allocating the device\n", __func__);
ret = -ENOMEM;
- goto out;
+ goto out_unmap;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1675,21 +1682,17 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
if (ndev->irq == -ENXIO) {
pr_err("%s: ERROR: MAC IRQ configuration "
"information not found\n", __func__);
- ret = -ENODEV;
- goto out;
+ ret = -ENXIO;
+ goto out_free_ndev;
}
priv = netdev_priv(ndev);
priv->device = &(pdev->dev);
priv->dev = ndev;
plat_dat = pdev->dev.platform_data;
- priv->bus_id = plat_dat->bus_id;
- priv->pbl = plat_dat->pbl; /* TLI */
- priv->mii_clk_csr = plat_dat->clk_csr;
- priv->tx_coe = plat_dat->tx_coe;
- priv->bugged_jumbo = plat_dat->bugged_jumbo;
- priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
- priv->enh_desc = plat_dat->enh_desc;
+
+ priv->plat = plat_dat;
+
priv->ioaddr = addr;
/* PMT module is not integrated in all the MAC devices. */
@@ -1703,20 +1706,22 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
/* Set the I/O base addr */
ndev->base_addr = (unsigned long)addr;
- /* Verify embedded resource for the platform */
- ret = stmmac_claim_resource(pdev);
- if (ret < 0)
- goto out;
+ /* Custom initialisation */
+ if (priv->plat->init) {
+ ret = priv->plat->init(pdev);
+ if (unlikely(ret))
+ goto out_free_ndev;
+ }
/* MAC HW revice detection */
ret = stmmac_mac_device_setup(ndev);
if (ret < 0)
- goto out;
+ goto out_plat_exit;
/* Network Device Registration */
ret = stmmac_probe(ndev);
if (ret < 0)
- goto out;
+ goto out_plat_exit;
/* associate a PHY - it is provided by another platform bus */
if (!driver_for_each_device
@@ -1724,31 +1729,33 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
stmmac_associate_phy)) {
pr_err("No PHY device is associated with this MAC!\n");
ret = -ENODEV;
- goto out;
+ goto out_unregister;
}
- priv->fix_mac_speed = plat_dat->fix_mac_speed;
- priv->bus_setup = plat_dat->bus_setup;
- priv->bsp_priv = plat_dat->bsp_priv;
-
pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
"\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
pdev->id, ndev->irq, addr);
/* MDIO bus Registration */
- pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
+ pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
ret = stmmac_mdio_register(ndev);
if (ret < 0)
- goto out;
+ goto out_unregister;
pr_debug("registered!\n");
+ return 0;
-out:
- if (ret < 0) {
- platform_set_drvdata(pdev, NULL);
- release_mem_region(res->start, resource_size(res));
- if (addr != NULL)
- iounmap(addr);
- }
+out_unregister:
+ unregister_netdev(ndev);
+out_plat_exit:
+ if (priv->plat->exit)
+ priv->plat->exit(pdev);
+out_free_ndev:
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+out_unmap:
+ iounmap(addr);
+out_release_region:
+ release_mem_region(res->start, resource_size(res));
return ret;
}
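The reworked stmmac_dvr_probe() error handling above replaces the single catch-all out: label with a chain of targeted labels, so each failure point unwinds only what was actually set up (unregister, platform exit hook, free_netdev, iounmap, release_mem_region). A minimal userspace sketch of the same staged-unwind idiom, with hypothetical acquire/undo helpers standing in for the real resources:

#include <stdio.h>

/* Hypothetical three-stage setup standing in for request_mem_region(),
 * ioremap() and alloc_etherdev() in the probe path. */
static int acquire(const char *name, int fail)
{
    if (fail) {
        fprintf(stderr, "%s failed\n", name);
        return 0;
    }
    printf("%s ok\n", name);
    return 1;
}

static void undo(const char *name)
{
    printf("undo %s\n", name);
}

/* Each error label unwinds exactly the steps completed before it. */
static int probe(int fail_at)
{
    int ret = -1;

    if (!acquire("mem region", fail_at == 1))
        return ret;
    if (!acquire("ioremap", fail_at == 2))
        goto out_release_region;
    if (!acquire("netdev", fail_at == 3))
        goto out_unmap;

    return 0;           /* success: resources stay live until remove() */

out_unmap:
    undo("ioremap");
out_release_region:
    undo("mem region");
    return ret;
}

int main(void)
{
    probe(3);           /* exercises the full unwind chain */
    return probe(0);    /* succeeds */
}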
@@ -1777,6 +1784,9 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
stmmac_mdio_unregister(ndev);
+ if (priv->plat->exit)
+ priv->plat->exit(pdev);
+
platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
@@ -1790,70 +1800,55 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
+static int stmmac_suspend(struct device *dev)
{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
int dis_ic = 0;
- if (!dev || !netif_running(dev))
+ if (!ndev || !netif_running(ndev))
return 0;
spin_lock(&priv->lock);
- if (state.event == PM_EVENT_SUSPEND) {
- netif_device_detach(dev);
- netif_stop_queue(dev);
- if (priv->phydev)
- phy_stop(priv->phydev);
+ netif_device_detach(ndev);
+ netif_stop_queue(ndev);
+ if (priv->phydev)
+ phy_stop(priv->phydev);
#ifdef CONFIG_STMMAC_TIMER
- priv->tm->timer_stop();
- if (likely(priv->tm->enable))
- dis_ic = 1;
+ priv->tm->timer_stop();
+ if (likely(priv->tm->enable))
+ dis_ic = 1;
#endif
- napi_disable(&priv->napi);
-
- /* Stop TX/RX DMA */
- priv->hw->dma->stop_tx(priv->ioaddr);
- priv->hw->dma->stop_rx(priv->ioaddr);
- /* Clear the Rx/Tx descriptors */
- priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
- dis_ic);
- priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
-
- /* Enable Power down mode by programming the PMT regs */
- if (device_can_wakeup(priv->device))
- priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
- else
- stmmac_disable_mac(priv->ioaddr);
- } else {
- priv->shutdown = 1;
- /* Although this can appear slightly redundant it actually
- * makes fast the standby operation and guarantees the driver
- * working if hibernation is on media. */
- stmmac_release(dev);
- }
+ napi_disable(&priv->napi);
+
+ /* Stop TX/RX DMA */
+ priv->hw->dma->stop_tx(priv->ioaddr);
+ priv->hw->dma->stop_rx(priv->ioaddr);
+ /* Clear the Rx/Tx descriptors */
+ priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
+ dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+
+ /* Enable Power down mode by programming the PMT regs */
+ if (device_may_wakeup(priv->device))
+ priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
+ else
+ stmmac_disable_mac(priv->ioaddr);
spin_unlock(&priv->lock);
return 0;
}
-static int stmmac_resume(struct platform_device *pdev)
+static int stmmac_resume(struct device *dev)
{
- struct net_device *dev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
- if (!netif_running(dev))
+ if (!netif_running(ndev))
return 0;
- if (priv->shutdown) {
- /* Re-open the interface and re-init the MAC/DMA
- and the rings (i.e. on hibernation stage) */
- stmmac_open(dev);
- return 0;
- }
-
spin_lock(&priv->lock);
/* Power Down bit, into the PM register, is cleared
@@ -1861,10 +1856,10 @@ static int stmmac_resume(struct platform_device *pdev)
* is received. Anyway, it's better to manually clear
* this bit because it can generate problems while resuming
* from other devices (e.g. serial console). */
- if (device_can_wakeup(priv->device))
+ if (device_may_wakeup(priv->device))
priv->hw->mac->pmt(priv->ioaddr, 0);
- netif_device_attach(dev);
+ netif_device_attach(ndev);
/* Enable the MAC and DMA */
stmmac_enable_mac(priv->ioaddr);
@@ -1872,31 +1867,59 @@ static int stmmac_resume(struct platform_device *pdev)
priv->hw->dma->start_rx(priv->ioaddr);
#ifdef CONFIG_STMMAC_TIMER
- priv->tm->timer_start(tmrate);
+ if (likely(priv->tm->enable))
+ priv->tm->timer_start(tmrate);
#endif
napi_enable(&priv->napi);
if (priv->phydev)
phy_start(priv->phydev);
- netif_start_queue(dev);
+ netif_start_queue(ndev);
spin_unlock(&priv->lock);
return 0;
}
-#endif
-static struct platform_driver stmmac_driver = {
- .driver = {
- .name = STMMAC_RESOURCE_NAME,
- },
- .probe = stmmac_dvr_probe,
- .remove = stmmac_dvr_remove,
-#ifdef CONFIG_PM
+static int stmmac_freeze(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ return stmmac_release(ndev);
+}
+
+static int stmmac_restore(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ return stmmac_open(ndev);
+}
+
+static const struct dev_pm_ops stmmac_pm_ops = {
.suspend = stmmac_suspend,
.resume = stmmac_resume,
-#endif
+ .freeze = stmmac_freeze,
+ .thaw = stmmac_restore,
+ .restore = stmmac_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pm_ops;
+#endif /* CONFIG_PM */
+static struct platform_driver stmmac_driver = {
+ .probe = stmmac_dvr_probe,
+ .remove = stmmac_dvr_remove,
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &stmmac_pm_ops,
+ },
};
/**
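The suspend path above also switches from the legacy platform suspend/resume pair to a dev_pm_ops table, which lets hibernation (freeze/thaw/restore) take a different route from normal suspend instead of overloading one callback with a pm_message_t switch. The sketch below only models the callback-table shape in plain C; it is not the kernel API.

#include <stdio.h>

/* Illustrative stand-in for dev_pm_ops: one entry point per PM event. */
struct pm_ops {
    int (*suspend)(void);
    int (*resume)(void);
    int (*freeze)(void);
    int (*restore)(void);
};

static int do_suspend(void) { puts("suspend: stop DMA, program WoL"); return 0; }
static int do_resume(void)  { puts("resume: clear PMT, restart DMA"); return 0; }
static int do_freeze(void)  { puts("freeze: full release (hibernation)"); return 0; }
static int do_restore(void) { puts("restore: full re-open"); return 0; }

static const struct pm_ops ops = {
    .suspend = do_suspend,
    .resume  = do_resume,
    .freeze  = do_freeze,
    .restore = do_restore,
};

int main(void)
{
    ops.suspend();
    ops.resume();
    ops.freeze();
    ops.restore();
    return 0;
}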
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index d7441616357d..234b4068a1fc 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -53,7 +53,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
int data;
u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
((phyreg << 6) & (0x000007C0)));
- regValue |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
+ regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
writel(regValue, priv->ioaddr + mii_address);
@@ -85,7 +85,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
(((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
| MII_WRITE;
- value |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
+ value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
/* Wait until any existing MII operation is complete */
@@ -114,7 +114,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
if (priv->phy_reset) {
pr_debug("stmmac_mdio_reset: calling phy_reset\n");
- priv->phy_reset(priv->bsp_priv);
+ priv->phy_reset(priv->plat->bsp_priv);
}
/* This is a workaround for problems with the STE101P PHY.
@@ -157,7 +157,7 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->read = &stmmac_mdio_read;
new_bus->write = &stmmac_mdio_write;
new_bus->reset = &stmmac_mdio_reset;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
new_bus->priv = ndev;
new_bus->irq = irqlist;
new_bus->phy_mask = priv->phy_mask;
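The MDIO accessors now pull the CSR clock selector from priv->plat, but the register layout they build is unchanged: PHY address in bits 15:11, register number in bits 10:6, clock code in bits 4:2, plus busy and write flags. A small compilable sketch of that bitfield assembly; the MII_BUSY/MII_WRITE values are placeholders, the real ones live in the driver headers.

#include <stdio.h>
#include <stdint.h>

#define MII_BUSY    0x1     /* placeholder bit positions */
#define MII_WRITE   0x2

static uint32_t mii_addr_value(int phyaddr, int phyreg, int clk_csr, int write)
{
    uint32_t v = ((phyaddr << 11) & 0x0000F800) |
                 ((phyreg  <<  6) & 0x000007C0);

    v |= MII_BUSY | ((clk_csr & 7) << 2);
    if (write)
        v |= MII_WRITE;
    return v;
}

int main(void)
{
    /* PHY 1, register 2, CSR clock code 3 */
    printf("read : 0x%08x\n", (unsigned)mii_addr_value(1, 2, 3, 0));
    printf("write: 0x%08x\n", (unsigned)mii_addr_value(1, 2, 3, 1));
    return 0;
}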
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 0a6a5ced3c1c..aa4765803a4c 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -1242,8 +1242,7 @@ fail_and_cleanup:
/* QEC can be the parent of either QuadEthernet or a BigMAC. We want
* the latter.
*/
-static int __devinit bigmac_sbus_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int __devinit bigmac_sbus_probe(struct platform_device *op)
{
struct device *parent = op->dev.parent;
struct platform_device *qec_op;
@@ -1289,7 +1288,7 @@ static const struct of_device_id bigmac_sbus_match[] = {
MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
-static struct of_platform_driver bigmac_sbus_driver = {
+static struct platform_driver bigmac_sbus_driver = {
.driver = {
.name = "sunbmac",
.owner = THIS_MODULE,
@@ -1301,12 +1300,12 @@ static struct of_platform_driver bigmac_sbus_driver = {
static int __init bigmac_init(void)
{
- return of_register_platform_driver(&bigmac_sbus_driver);
+ return platform_driver_register(&bigmac_sbus_driver);
}
static void __exit bigmac_exit(void)
{
- of_unregister_platform_driver(&bigmac_sbus_driver);
+ platform_driver_unregister(&bigmac_sbus_driver);
}
module_init(bigmac_init);
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 3ed2a67bd6d3..4793df843c24 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -294,6 +294,9 @@ enum alta_offsets {
/* Aliased and bogus values! */
RxStatus = 0x0c,
};
+
+#define ASIC_HI_WORD(x) ((x) + 2)
+
enum ASICCtrl_HiWord_bit {
GlobalReset = 0x0001,
RxReset = 0x0002,
@@ -431,6 +434,7 @@ static void netdev_error(struct net_device *dev, int intr_status);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
+static int sundance_set_mac_addr(struct net_device *dev, void *data);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
@@ -464,7 +468,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = tx_timeout,
.ndo_change_mtu = change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = sundance_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1016,7 +1020,7 @@ static void init_ring(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2);
np->rx_skbuff[i] = skb;
if (skb == NULL)
break;
@@ -1407,7 +1411,7 @@ static void refill_rx (struct net_device *dev)
struct sk_buff *skb;
entry = np->dirty_rx % RX_RING_SIZE;
if (np->rx_skbuff[entry] == NULL) {
- skb = dev_alloc_skb(np->rx_buf_sz);
+ skb = dev_alloc_skb(np->rx_buf_sz + 2);
np->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
@@ -1592,6 +1596,19 @@ static int __set_mac_addr(struct net_device *dev)
return 0;
}
+/* Invoked with rtnl_lock held */
+static int sundance_set_mac_addr(struct net_device *dev, void *data)
+{
+ const struct sockaddr *addr = data;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ __set_mac_addr(dev);
+
+ return 0;
+}
+
static const struct {
const char name[ETH_GSTRING_LEN];
} sundance_stats[] = {
@@ -1772,10 +1789,10 @@ static int netdev_close(struct net_device *dev)
}
iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
- ioaddr +ASICCtrl + 2);
+ ioaddr + ASIC_HI_WORD(ASICCtrl));
for (i = 2000; i > 0; i--) {
- if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
+ if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
break;
mdelay(1);
}
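The new sundance_set_mac_addr() refuses invalid addresses before copying them into dev->dev_addr and pushing them to the chip with __set_mac_addr(). A userspace approximation of that flow, with a hand-rolled stand-in for is_valid_ether_addr():

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define ETH_ALEN 6

/* Reject multicast (LSB of the first octet set, which also covers
 * broadcast) and the all-zeros address, as the driver relies on. */
static bool valid_ether_addr(const unsigned char *a)
{
    static const unsigned char zero[ETH_ALEN];

    if (a[0] & 0x01)
        return false;
    if (!memcmp(a, zero, ETH_ALEN))
        return false;
    return true;
}

static int set_mac_addr(unsigned char *dev_addr, const unsigned char *new_addr)
{
    if (!valid_ether_addr(new_addr))
        return -1;          /* -EINVAL in the driver */
    memcpy(dev_addr, new_addr, ETH_ALEN);
    /* the driver then writes dev_addr into the NIC registers */
    return 0;
}

int main(void)
{
    unsigned char dev_addr[ETH_ALEN] = { 0 };
    const unsigned char good[ETH_ALEN] = { 0x02, 0x00, 0x5e, 0x00, 0x53, 0x01 };
    const unsigned char bad[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

    printf("good: %d\n", set_mac_addr(dev_addr, good));
    printf("bad : %d\n", set_mac_addr(dev_addr, bad));
    return 0;
}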
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 4ceb3cf6a9a9..c1a344829b54 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -320,28 +320,28 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
if (txmac_stat & MAC_TXSTAT_URUN) {
netdev_err(dev, "TX MAC xmit underrun\n");
- gp->net_stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
}
if (txmac_stat & MAC_TXSTAT_MPE) {
netdev_err(dev, "TX MAC max packet size error\n");
- gp->net_stats.tx_errors++;
+ dev->stats.tx_errors++;
}
/* The rest are all cases of one of the 16-bit TX
* counters expiring.
*/
if (txmac_stat & MAC_TXSTAT_NCE)
- gp->net_stats.collisions += 0x10000;
+ dev->stats.collisions += 0x10000;
if (txmac_stat & MAC_TXSTAT_ECE) {
- gp->net_stats.tx_aborted_errors += 0x10000;
- gp->net_stats.collisions += 0x10000;
+ dev->stats.tx_aborted_errors += 0x10000;
+ dev->stats.collisions += 0x10000;
}
if (txmac_stat & MAC_TXSTAT_LCE) {
- gp->net_stats.tx_aborted_errors += 0x10000;
- gp->net_stats.collisions += 0x10000;
+ dev->stats.tx_aborted_errors += 0x10000;
+ dev->stats.collisions += 0x10000;
}
/* We do not keep track of MAC_TXSTAT_FCE and
@@ -469,20 +469,20 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
u32 smac = readl(gp->regs + MAC_SMACHINE);
netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
- gp->net_stats.rx_over_errors++;
- gp->net_stats.rx_fifo_errors++;
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_fifo_errors++;
ret = gem_rxmac_reset(gp);
}
if (rxmac_stat & MAC_RXSTAT_ACE)
- gp->net_stats.rx_frame_errors += 0x10000;
+ dev->stats.rx_frame_errors += 0x10000;
if (rxmac_stat & MAC_RXSTAT_CCE)
- gp->net_stats.rx_crc_errors += 0x10000;
+ dev->stats.rx_crc_errors += 0x10000;
if (rxmac_stat & MAC_RXSTAT_LCE)
- gp->net_stats.rx_length_errors += 0x10000;
+ dev->stats.rx_length_errors += 0x10000;
/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
* events.
@@ -594,7 +594,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
if (netif_msg_rx_err(gp))
printk(KERN_DEBUG "%s: no buffer for rx frame\n",
gp->dev->name);
- gp->net_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
}
if (gem_status & GREG_STAT_RXTAGERR) {
@@ -602,7 +602,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
if (netif_msg_rx_err(gp))
printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
gp->dev->name);
- gp->net_stats.rx_errors++;
+ dev->stats.rx_errors++;
goto do_reset;
}
@@ -684,7 +684,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
break;
}
gp->tx_skbs[entry] = NULL;
- gp->net_stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
txd = &gp->init_block->txd[entry];
@@ -696,7 +696,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
entry = NEXT_TX(entry);
}
- gp->net_stats.tx_packets++;
+ dev->stats.tx_packets++;
dev_kfree_skb_irq(skb);
}
gp->tx_old = entry;
@@ -738,6 +738,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
static int gem_rx(struct gem *gp, int work_to_do)
{
+ struct net_device *dev = gp->dev;
int entry, drops, work_done = 0;
u32 done;
__sum16 csum;
@@ -782,15 +783,15 @@ static int gem_rx(struct gem *gp, int work_to_do)
len = (status & RXDCTRL_BUFSZ) >> 16;
if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
- gp->net_stats.rx_errors++;
+ dev->stats.rx_errors++;
if (len < ETH_ZLEN)
- gp->net_stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
if (len & RXDCTRL_BAD)
- gp->net_stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
/* We'll just return it to GEM. */
drop_it:
- gp->net_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
goto next;
}
@@ -843,8 +844,8 @@ static int gem_rx(struct gem *gp, int work_to_do)
netif_receive_skb(skb);
- gp->net_stats.rx_packets++;
- gp->net_stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
next:
entry = NEXT_RX(entry);
@@ -1004,7 +1005,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
ctrl = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const u64 csum_start_off = skb_transport_offset(skb);
+ const u64 csum_start_off = skb_checksum_start_offset(skb);
const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
ctrl = (TXDCTRL_CENAB |
@@ -2380,10 +2381,8 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
*/
mutex_unlock(&gp->pm_mutex);
- /* Wait for a pending reset task to complete */
- while (gp->reset_task_pending)
- yield();
- flush_scheduled_work();
+ /* Wait for the pending reset task to complete */
+ flush_work_sync(&gp->reset_task);
/* Shut the PHY down eventually and setup WOL */
gem_stop_phy(gp, gp->asleep_wol);
@@ -2474,7 +2473,6 @@ static int gem_resume(struct pci_dev *pdev)
static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
struct gem *gp = netdev_priv(dev);
- struct net_device_stats *stats = &gp->net_stats;
spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock);
@@ -2483,17 +2481,17 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
* so we shield against this
*/
if (gp->running) {
- stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
+ dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
writel(0, gp->regs + MAC_FCSERR);
- stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
+ dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
writel(0, gp->regs + MAC_AERR);
- stats->rx_length_errors += readl(gp->regs + MAC_LERR);
+ dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
writel(0, gp->regs + MAC_LERR);
- stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
- stats->collisions +=
+ dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
+ dev->stats.collisions +=
(readl(gp->regs + MAC_ECOLL) +
readl(gp->regs + MAC_LCOLL));
writel(0, gp->regs + MAC_ECOLL);
@@ -2503,7 +2501,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
- return &gp->net_stats;
+ return &dev->stats;
}
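gem_get_stats() keeps the same read-and-clear pattern while folding the hardware counters into dev->stats instead of the private net_stats copy. The model below uses plain variables in place of readl()/writel() to show why each register is zeroed after it is added into the running totals:

#include <stdio.h>
#include <stdint.h>

static uint32_t reg_fcserr, reg_aerr, reg_lerr;    /* fake hardware counters */

struct stats {
    unsigned long rx_crc_errors;
    unsigned long rx_frame_errors;
    unsigned long rx_length_errors;
};

static uint32_t read_and_clear(uint32_t *reg)
{
    uint32_t v = *reg;

    *reg = 0;               /* writel(0, ...) in the driver */
    return v;
}

static void fold_stats(struct stats *s)
{
    s->rx_crc_errors    += read_and_clear(&reg_fcserr);
    s->rx_frame_errors  += read_and_clear(&reg_aerr);
    s->rx_length_errors += read_and_clear(&reg_lerr);
}

int main(void)
{
    struct stats s = { 0 };

    reg_fcserr = 3; reg_aerr = 1; reg_lerr = 2;
    fold_stats(&s);
    fold_stats(&s);         /* second pass sees the cleared counters */
    printf("crc=%lu frame=%lu len=%lu\n",
           s.rx_crc_errors, s.rx_frame_errors, s.rx_length_errors);
    return 0;
}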
static int gem_set_mac_address(struct net_device *dev, void *addr)
@@ -2928,10 +2926,8 @@ static void gem_remove_one(struct pci_dev *pdev)
/* We shouldn't need any locking here */
gem_get_cell(gp);
- /* Wait for a pending reset task to complete */
- while (gp->reset_task_pending)
- yield();
- flush_scheduled_work();
+ /* Cancel reset task */
+ cancel_work_sync(&gp->reset_task);
/* Shut the PHY down */
gem_stop_phy(gp, 0);
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 19905460def6..d225077964e2 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -843,7 +843,7 @@ struct gem_txd {
/* GEM requires that RX descriptors are provided four at a time,
* aligned. Also, the RX ring may not wrap around. This means that
- * there will be at least 4 unused desciptor entries in the middle
+ * there will be at least 4 unused descriptor entries in the middle
* of the RX ring at all times.
*
* Similar to HME, GEM assumes that it can write garbage bytes before
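The comment above spells out GEM's posting rule: RX descriptors are handed to the chip only in aligned groups of four, and the ring may not wrap, so a few entries always remain software-owned. A tiny compilable sketch of the resulting round-down (ring size and indices are illustrative, not the driver's):

#include <stdio.h>

#define RX_RING_SIZE 128    /* illustrative; a power of two, as in the driver */

static int postable(int first_dirty, int last_clean)
{
    int avail = (last_clean - first_dirty) & (RX_RING_SIZE - 1);

    return avail & ~3;      /* hand back only aligned groups of four */
}

int main(void)
{
    printf("%d\n", postable(10, 13));   /* 3 clean entries  -> post 0 */
    printf("%d\n", postable(10, 22));   /* 12 clean entries -> post 12 */
    return 0;
}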
@@ -994,7 +994,6 @@ struct gem {
u32 status;
struct napi_struct napi;
- struct net_device_stats net_stats;
int tx_fifo_sz;
int rx_fifo_sz;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 5e28c414421a..eb4f59fb01e9 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2266,7 +2266,7 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
tx_flags = TXFLAG_OWN;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const u32 csum_start_off = skb_transport_offset(skb);
+ const u32 csum_start_off = skb_checksum_start_offset(skb);
const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
@@ -3237,11 +3237,15 @@ static void happy_meal_pci_exit(void)
#endif
#ifdef CONFIG_SBUS
-static int __devinit hme_sbus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit hme_sbus_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
const char *model = of_get_property(dp, "model", NULL);
- int is_qfe = (match->data != NULL);
+ int is_qfe;
+
+ if (!op->dev.of_match)
+ return -EINVAL;
+ is_qfe = (op->dev.of_match->data != NULL);
if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
is_qfe = 1;
@@ -3292,7 +3296,7 @@ static const struct of_device_id hme_sbus_match[] = {
MODULE_DEVICE_TABLE(of, hme_sbus_match);
-static struct of_platform_driver hme_sbus_driver = {
+static struct platform_driver hme_sbus_driver = {
.driver = {
.name = "hme",
.owner = THIS_MODULE,
@@ -3306,7 +3310,7 @@ static int __init happy_meal_sbus_init(void)
{
int err;
- err = of_register_platform_driver(&hme_sbus_driver);
+ err = platform_driver_register(&hme_sbus_driver);
if (!err)
err = quattro_sbus_register_irqs();
@@ -3315,7 +3319,7 @@ static int __init happy_meal_sbus_init(void)
static void happy_meal_sbus_exit(void)
{
- of_unregister_platform_driver(&hme_sbus_driver);
+ platform_driver_unregister(&hme_sbus_driver);
quattro_sbus_free_irqs();
while (qfe_sbus_list) {
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 2cf84e5968b2..32a5c7f63c43 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1295,17 +1295,9 @@ static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvin
strcpy(info->version, "2.02");
}
-static u32 sparc_lance_get_link(struct net_device *dev)
-{
- /* We really do not keep track of this, but this
- * is better than not reporting anything at all.
- */
- return 1;
-}
-
static const struct ethtool_ops sparc_lance_ethtool_ops = {
.get_drvinfo = sparc_lance_get_drvinfo,
- .get_link = sparc_lance_get_link,
+ .get_link = ethtool_op_get_link,
};
static const struct net_device_ops sparc_lance_ops = {
@@ -1503,7 +1495,7 @@ fail:
return -ENODEV;
}
-static int __devinit sunlance_sbus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit sunlance_sbus_probe(struct platform_device *op)
{
struct platform_device *parent = to_platform_device(op->dev.parent);
struct device_node *parent_dp = parent->dev.of_node;
@@ -1544,7 +1536,7 @@ static const struct of_device_id sunlance_sbus_match[] = {
MODULE_DEVICE_TABLE(of, sunlance_sbus_match);
-static struct of_platform_driver sunlance_sbus_driver = {
+static struct platform_driver sunlance_sbus_driver = {
.driver = {
.name = "sunlance",
.owner = THIS_MODULE,
@@ -1558,12 +1550,12 @@ static struct of_platform_driver sunlance_sbus_driver = {
/* Find all the lance cards on the system and initialize them */
static int __init sparc_lance_init(void)
{
- return of_register_platform_driver(&sunlance_sbus_driver);
+ return platform_driver_register(&sunlance_sbus_driver);
}
static void __exit sparc_lance_exit(void)
{
- of_unregister_platform_driver(&sunlance_sbus_driver);
+ platform_driver_unregister(&sunlance_sbus_driver);
}
module_init(sparc_lance_init);
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 9536b2f010be..18ecdc303751 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -941,7 +941,7 @@ fail:
return res;
}
-static int __devinit qec_sbus_probe(struct platform_device *op, const struct of_device_id *match)
+static int __devinit qec_sbus_probe(struct platform_device *op)
{
return qec_ether_init(op);
}
@@ -976,7 +976,7 @@ static const struct of_device_id qec_sbus_match[] = {
MODULE_DEVICE_TABLE(of, qec_sbus_match);
-static struct of_platform_driver qec_sbus_driver = {
+static struct platform_driver qec_sbus_driver = {
.driver = {
.name = "qec",
.owner = THIS_MODULE,
@@ -988,12 +988,12 @@ static struct of_platform_driver qec_sbus_driver = {
static int __init qec_init(void)
{
- return of_register_platform_driver(&qec_sbus_driver);
+ return platform_driver_register(&qec_sbus_driver);
}
static void __exit qec_exit(void)
{
- of_unregister_platform_driver(&qec_sbus_driver);
+ platform_driver_unregister(&qec_sbus_driver);
while (root_qec_dev) {
struct sunqec *next = root_qec_dev->next_module;
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 8b3dc1eb4015..3397618d4d96 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -12,7 +12,7 @@
/*
* RX HW/SW interaction overview
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * There are 2 types of RX communication channels betwean driver and NIC.
+ * There are 2 types of RX communication channels between driver and NIC.
* 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming
* traffic. This Fifo is filled by SW and is read by HW. Each descriptor holds
* info about buffer's location, size and ID. An ID field is used to identify a
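The overview above describes the RXF free FIFO: software posts empty buffers tagged with an ID, and the hardware hands completions back carrying that same ID. The toy model below shows the ID-indexed lookup this implies; names and sizes are invented for the example and are not the driver's.

#include <stdio.h>
#include <stddef.h>

#define NBUF 4

static char bufs[NBUF][64];     /* the "empty buffers" */
static char *by_id[NBUF];       /* ID -> buffer lookup table */

static int post_free(int id, char *buf)
{
    if (id < 0 || id >= NBUF || by_id[id])
        return -1;
    by_id[id] = buf;            /* descriptor written to RXF carries the ID */
    return 0;
}

static char *complete(int id)   /* HW reports a filled buffer by its ID */
{
    char *b = (id >= 0 && id < NBUF) ? by_id[id] : NULL;

    if (b)
        by_id[id] = NULL;
    return b;
}

int main(void)
{
    post_free(0, bufs[0]);
    post_free(1, bufs[1]);
    printf("completion for id 1 -> buffer offset %td\n",
           complete(1) - &bufs[0][0]);
    return 0;
}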
@@ -324,7 +324,7 @@ static int bdx_fw_load(struct bdx_priv *priv)
ENTER;
master = READ_REG(priv, regINIT_SEMAPHORE);
if (!READ_REG(priv, regINIT_STATUS) && master) {
- rc = request_firmware(&fw, "tehuti/firmware.bin", &priv->pdev->dev);
+ rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
if (rc)
goto out;
bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
@@ -821,7 +821,7 @@ static void bdx_setmulti(struct net_device *ndev)
}
/* use PMF to accept first MAC_MCST_NUM (15) addresses */
- /* TBD: sort addreses and write them in ascending order
+ /* TBD: sort addresses and write them in ascending order
* into RX_MAC_MCST regs. we skip this phase now and accept ALL
* multicast frames through IMF */
/* accept the rest of addresses through IMF */
@@ -1346,7 +1346,7 @@ static void print_rxfd(struct rxf_desc *rxfd)
/*
* TX HW/SW interaction overview
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * There are 2 types of TX communication channels betwean driver and NIC.
+ * There are 2 types of TX communication channels between driver and NIC.
* 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
* 2) TX Data Fifo - TXD - holds descriptors of full buffers.
*
@@ -2510,4 +2510,4 @@ module_exit(bdx_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(BDX_DRV_DESC);
-MODULE_FIRMWARE("tehuti/firmware.bin");
+MODULE_FIRMWARE("tehuti/bdx.bin");
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 30ccbb6d097a..ebec88882c3b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2010 Broadcom Corporation.
+ * Copyright (C) 2005-2011 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -32,6 +32,7 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
+#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
@@ -59,20 +60,14 @@
#define BAR_0 0
#define BAR_2 2
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define TG3_VLAN_TAG_USED 1
-#else
-#define TG3_VLAN_TAG_USED 0
-#endif
-
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 115
+#define TG3_MIN_NUM 117
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "October 14, 2010"
+#define DRV_MODULE_RELDATE "January 25, 2011"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -133,9 +128,6 @@
TG3_TX_RING_SIZE)
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
-#define TG3_RX_DMA_ALIGN 16
-#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
-
#define TG3_DMA_BYTE_ENAB 64
#define TG3_RX_STD_DMA_SZ 1536
@@ -1769,9 +1761,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
if (tp->link_config.autoneg == AUTONEG_ENABLE &&
current_link_up == 1 &&
- (tp->link_config.active_speed == SPEED_1000 ||
- (tp->link_config.active_speed == SPEED_100 &&
- tp->link_config.active_duplex == DUPLEX_FULL))) {
+ tp->link_config.active_duplex == DUPLEX_FULL &&
+ (tp->link_config.active_speed == SPEED_100 ||
+ tp->link_config.active_speed == SPEED_1000)) {
u32 eeectl;
if (tp->link_config.active_speed == SPEED_1000)
@@ -1781,11 +1773,32 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
tw32(TG3_CPMU_EEE_CTRL, eeectl);
- tg3_phy_cl45_read(tp, 0x7, TG3_CL45_D7_EEERES_STAT, &val);
-
- if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
- val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
+ tg3_phy_cl45_read(tp, MDIO_MMD_AN,
+ TG3_CL45_D7_EEERES_STAT, &val);
+
+ switch (val) {
+ case TG3_CL45_D7_EEERES_STAT_LP_1000T:
+ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ case ASIC_REV_5717:
+ case ASIC_REV_5719:
+ case ASIC_REV_57765:
+ /* Enable SM_DSP clock and tx 6dB coding. */
+ val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+ MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
+ MII_TG3_AUXCTL_ACTL_TX_6DB;
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+
+ /* Turn off SM_DSP clock. */
+ val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+ MII_TG3_AUXCTL_ACTL_TX_6DB;
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+ }
+ /* Fallthrough */
+ case TG3_CL45_D7_EEERES_STAT_LP_100TX:
tp->setlpicnt = 2;
+ }
}
if (!tp->setlpicnt) {
@@ -2107,7 +2120,7 @@ out:
static void tg3_frob_aux_power(struct tg3 *tp)
{
- struct tg3 *tp_peer = tp;
+ bool need_vaux = false;
/* The GPIOs do something completely different on 57765. */
if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
@@ -2115,23 +2128,32 @@ static void tg3_frob_aux_power(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
return;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) &&
+ tp->pdev_peer != tp->pdev) {
struct net_device *dev_peer;
dev_peer = pci_get_drvdata(tp->pdev_peer);
+
/* remove_one() may have been run on the peer. */
- if (!dev_peer)
- tp_peer = tp;
- else
- tp_peer = netdev_priv(dev_peer);
+ if (dev_peer) {
+ struct tg3 *tp_peer = netdev_priv(dev_peer);
+
+ if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE)
+ return;
+
+ if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
+ (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF))
+ need_vaux = true;
+ }
}
- if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
- (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
- (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
- (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
+ if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
+ (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+ need_vaux = true;
+
+ if (need_vaux) {
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
@@ -2161,10 +2183,6 @@ static void tg3_frob_aux_power(struct tg3 *tp)
u32 no_gpio2;
u32 grc_local_ctrl = 0;
- if (tp_peer != tp &&
- (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
- return;
-
/* Workaround to prevent overdrawing Amps. */
if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
ASIC_REV_5714) {
@@ -2203,10 +2221,6 @@ static void tg3_frob_aux_power(struct tg3 *tp)
} else {
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
- if (tp_peer != tp &&
- (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
- return;
-
tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
(GRC_LCLCTRL_GPIO_OE1 |
GRC_LCLCTRL_GPIO_OUTPUT1), 100);
@@ -2549,39 +2563,35 @@ static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
-static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
+static void tg3_enable_register_access(struct tg3 *tp)
{
- u32 misc_host_ctrl;
- bool device_should_wake, do_low_power;
-
- /* Make sure register accesses (indirect or otherwise)
- * will function correctly.
+ /*
+ * Make sure register accesses (indirect or otherwise) will function
+ * correctly.
*/
pci_write_config_dword(tp->pdev,
- TG3PCI_MISC_HOST_CTRL,
- tp->misc_host_ctrl);
+ TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
+}
- switch (state) {
- case PCI_D0:
- pci_enable_wake(tp->pdev, state, false);
- pci_set_power_state(tp->pdev, PCI_D0);
+static int tg3_power_up(struct tg3 *tp)
+{
+ tg3_enable_register_access(tp);
- /* Switch out of Vaux if it is a NIC */
- if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
- tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
+ pci_set_power_state(tp->pdev, PCI_D0);
- return 0;
+ /* Switch out of Vaux if it is a NIC */
+ if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
+ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
- case PCI_D1:
- case PCI_D2:
- case PCI_D3hot:
- break;
+ return 0;
+}
- default:
- netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
- state);
- return -EINVAL;
- }
+static int tg3_power_down_prepare(struct tg3 *tp)
+{
+ u32 misc_host_ctrl;
+ bool device_should_wake, do_low_power;
+
+ tg3_enable_register_access(tp);
/* Restore the CLKREQ setting. */
if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
@@ -2600,8 +2610,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
tw32(TG3PCI_MISC_HOST_CTRL,
misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
- device_should_wake = pci_pme_capable(tp->pdev, state) &&
- device_may_wakeup(&tp->pdev->dev) &&
+ device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
(tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
@@ -2728,12 +2737,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
- mac_mode |= tp->mac_mode &
- (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
- if (mac_mode & MAC_MODE_APE_TX_EN)
- mac_mode |= MAC_MODE_TDE_ENABLE;
- }
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ mac_mode |= MAC_MODE_APE_TX_EN |
+ MAC_MODE_APE_RX_EN |
+ MAC_MODE_TDE_ENABLE;
tw32_f(MAC_MODE, mac_mode);
udelay(100);
@@ -2823,13 +2830,15 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
- if (device_should_wake)
- pci_enable_wake(tp->pdev, state, true);
+ return 0;
+}
- /* Finally, set the new power state. */
- pci_set_power_state(tp->pdev, state);
+static void tg3_power_down(struct tg3 *tp)
+{
+ tg3_power_down_prepare(tp);
- return 0;
+ pci_wake_from_d3(tp->pdev, tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
+ pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
@@ -2969,7 +2978,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
}
if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
- u32 val = 0;
+ u32 val;
tw32(TG3_CPMU_EEE_MODE,
tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
@@ -2980,25 +2989,32 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
MII_TG3_AUXCTL_ACTL_TX_6DB;
tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
- !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
- tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
- val | MII_TG3_DSP_CH34TP2_HIBW01);
+ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ case ASIC_REV_5717:
+ case ASIC_REV_57765:
+ if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
+ tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
+ MII_TG3_DSP_CH34TP2_HIBW01);
+ /* Fall through */
+ case ASIC_REV_5719:
+ val = MII_TG3_DSP_TAP26_ALNOKO |
+ MII_TG3_DSP_TAP26_RMRXSTO |
+ MII_TG3_DSP_TAP26_OPCSINPT;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+ }
+ val = 0;
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Advertise 100-BaseTX EEE ability */
if (tp->link_config.advertising &
- (ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full))
- val |= TG3_CL45_D7_EEEADV_CAP_100TX;
+ ADVERTISED_100baseT_Full)
+ val |= MDIO_AN_EEE_ADV_100TX;
/* Advertise 1000-BaseT EEE ability */
if (tp->link_config.advertising &
- (ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full))
- val |= TG3_CL45_D7_EEEADV_CAP_1000T;
+ ADVERTISED_1000baseT_Full)
+ val |= MDIO_AN_EEE_ADV_1000T;
}
- tg3_phy_cl45_write(tp, 0x7, TG3_CL45_D7_EEEADV_CAP, val);
+ tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
/* Turn off SM_DSP clock. */
val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
@@ -4726,8 +4742,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
- bool hw_vlan __maybe_unused = false;
- u16 vtag __maybe_unused = 0;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
@@ -4786,12 +4800,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
+ copy_skb = netdev_alloc_skb(tp->dev, len +
TG3_RAW_IP_ALIGN);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
+ skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
skb_put(copy_skb, len);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4818,30 +4832,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
}
if (desc->type_flags & RXD_FLAG_VLAN &&
- !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
- vtag = desc->err_vlan & RXD_VLAN_MASK;
-#if TG3_VLAN_TAG_USED
- if (tp->vlgrp)
- hw_vlan = true;
- else
-#endif
- {
- struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
- __skb_push(skb, VLAN_HLEN);
-
- memmove(ve, skb->data + VLAN_HLEN,
- ETH_ALEN * 2);
- ve->h_vlan_proto = htons(ETH_P_8021Q);
- ve->h_vlan_TCI = htons(vtag);
- }
- }
+ !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
+ __vlan_hwaccel_put_tag(skb,
+ desc->err_vlan & RXD_VLAN_MASK);
-#if TG3_VLAN_TAG_USED
- if (hw_vlan)
- vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
- else
-#endif
- napi_gro_receive(&tnapi->napi, skb);
+ napi_gro_receive(&tnapi->napi, skb);
received++;
budget--;
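The hunk above drops the manual VLAN fallback in favour of __vlan_hwaccel_put_tag() plus a single napi_gro_receive() call. For reference, the deleted path amounted to opening a four-byte gap after the MAC addresses and writing the 802.1Q header there, roughly as in this simplified userspace version (buffer handling is deliberately naive):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN    6
#define VLAN_HLEN   4
#define ETH_P_8021Q 0x8100

static size_t vlan_insert(uint8_t *frame, size_t len, uint16_t tci)
{
    /* make room for the tag right after the two MAC addresses */
    memmove(frame + 2 * ETH_ALEN + VLAN_HLEN,
            frame + 2 * ETH_ALEN, len - 2 * ETH_ALEN);
    frame[12] = ETH_P_8021Q >> 8;
    frame[13] = ETH_P_8021Q & 0xff;
    frame[14] = tci >> 8;
    frame[15] = tci & 0xff;
    return len + VLAN_HLEN;
}

int main(void)
{
    uint8_t frame[64] = {
        /* dst, src, then the original EtherType 0x0800 (IPv4) */
        0xff,0xff,0xff,0xff,0xff,0xff, 0x02,0,0,0,0,1, 0x08,0x00,
    };
    size_t len = vlan_insert(frame, 14, 5);     /* VID 5 */

    for (size_t i = 0; i < len; i++)
        printf("%02x%s", frame[i], (i % 8 == 7) ? "\n" : " ");
    printf("\n");
    return 0;
}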
@@ -5744,11 +5739,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
base_flags |= TXD_FLAG_TCPUDP_CSUM;
}
-#if TG3_VLAN_TAG_USED
if (vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
(vlan_tx_tag_get(skb) << 16));
-#endif
len = skb_headlen(skb);
@@ -5763,7 +5756,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
- !mss && skb->len > ETH_DATA_LEN)
+ !mss && skb->len > VLAN_ETH_FRAME_LEN)
base_flags |= TXD_FLAG_JMB_PKT;
tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -5990,14 +5983,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
}
}
}
-#if TG3_VLAN_TAG_USED
+
if (vlan_tx_tag_present(skb))
base_flags |= (TXD_FLAG_VLAN |
(vlan_tx_tag_get(skb) << 16));
-#endif
if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
- !mss && skb->len > ETH_DATA_LEN)
+ !mss && skb->len > VLAN_ETH_FRAME_LEN)
base_flags |= TXD_FLAG_JMB_PKT;
len = skb_headlen(skb);
@@ -6339,13 +6331,13 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
kfree(tpr->rx_jmb_buffers);
tpr->rx_jmb_buffers = NULL;
if (tpr->rx_std) {
- pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
- tpr->rx_std, tpr->rx_std_mapping);
+ dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
+ tpr->rx_std, tpr->rx_std_mapping);
tpr->rx_std = NULL;
}
if (tpr->rx_jmb) {
- pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp),
- tpr->rx_jmb, tpr->rx_jmb_mapping);
+ dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
+ tpr->rx_jmb, tpr->rx_jmb_mapping);
tpr->rx_jmb = NULL;
}
}
@@ -6358,8 +6350,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
if (!tpr->rx_std_buffers)
return -ENOMEM;
- tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
- &tpr->rx_std_mapping);
+ tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_STD_RING_BYTES(tp),
+ &tpr->rx_std_mapping,
+ GFP_KERNEL);
if (!tpr->rx_std)
goto err_out;
@@ -6370,9 +6364,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
if (!tpr->rx_jmb_buffers)
goto err_out;
- tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
- TG3_RX_JMB_RING_BYTES(tp),
- &tpr->rx_jmb_mapping);
+ tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_JMB_RING_BYTES(tp),
+ &tpr->rx_jmb_mapping,
+ GFP_KERNEL);
if (!tpr->rx_jmb)
goto err_out;
}
@@ -6491,7 +6486,7 @@ static void tg3_free_consistent(struct tg3 *tp)
struct tg3_napi *tnapi = &tp->napi[i];
if (tnapi->tx_ring) {
- pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
+ dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
tnapi->tx_ring, tnapi->tx_desc_mapping);
tnapi->tx_ring = NULL;
}
@@ -6500,25 +6495,26 @@ static void tg3_free_consistent(struct tg3 *tp)
tnapi->tx_buffers = NULL;
if (tnapi->rx_rcb) {
- pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
- tnapi->rx_rcb,
- tnapi->rx_rcb_mapping);
+ dma_free_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ tnapi->rx_rcb,
+ tnapi->rx_rcb_mapping);
tnapi->rx_rcb = NULL;
}
tg3_rx_prodring_fini(tp, &tnapi->prodring);
if (tnapi->hw_status) {
- pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
- tnapi->hw_status,
- tnapi->status_mapping);
+ dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
+ tnapi->hw_status,
+ tnapi->status_mapping);
tnapi->hw_status = NULL;
}
}
if (tp->hw_stats) {
- pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
- tp->hw_stats, tp->stats_mapping);
+ dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+ tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
}
@@ -6531,9 +6527,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
{
int i;
- tp->hw_stats = pci_alloc_consistent(tp->pdev,
- sizeof(struct tg3_hw_stats),
- &tp->stats_mapping);
+ tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
+ sizeof(struct tg3_hw_stats),
+ &tp->stats_mapping,
+ GFP_KERNEL);
if (!tp->hw_stats)
goto err_out;
@@ -6543,9 +6540,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk;
- tnapi->hw_status = pci_alloc_consistent(tp->pdev,
- TG3_HW_STATUS_SIZE,
- &tnapi->status_mapping);
+ tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_HW_STATUS_SIZE,
+ &tnapi->status_mapping,
+ GFP_KERNEL);
if (!tnapi->hw_status)
goto err_out;
@@ -6566,9 +6564,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
if (!tnapi->tx_buffers)
goto err_out;
- tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
- TG3_TX_RING_BYTES,
- &tnapi->tx_desc_mapping);
+ tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_TX_RING_BYTES,
+ &tnapi->tx_desc_mapping,
+ GFP_KERNEL);
if (!tnapi->tx_ring)
goto err_out;
}
@@ -6601,9 +6600,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
continue;
- tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
- TG3_RX_RCB_RING_BYTES(tp),
- &tnapi->rx_rcb_mapping);
+ tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping,
+ GFP_KERNEL);
if (!tnapi->rx_rcb)
goto err_out;
@@ -6987,7 +6987,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
- pcie_set_readrq(tp->pdev, 4096);
+ pcie_set_readrq(tp->pdev, tp->pcie_readrq);
else {
pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
tp->pci_cacheline_sz);
@@ -7181,7 +7181,7 @@ static int tg3_chip_reset(struct tg3 *tp)
tp->pcie_cap + PCI_EXP_DEVCTL,
val16);
- pcie_set_readrq(tp->pdev, 4096);
+ pcie_set_readrq(tp->pdev, tp->pcie_readrq);
/* Clear error status */
pci_write_config_word(tp->pdev,
@@ -7222,19 +7222,21 @@ static int tg3_chip_reset(struct tg3 *tp)
tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
}
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ tp->mac_mode = MAC_MODE_APE_TX_EN |
+ MAC_MODE_APE_RX_EN |
+ MAC_MODE_TDE_ENABLE;
+
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
- tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
- tw32_f(MAC_MODE, tp->mac_mode);
+ tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
+ val = tp->mac_mode;
} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
- tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
- tw32_f(MAC_MODE, tp->mac_mode);
- } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
- tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
- if (tp->mac_mode & MAC_MODE_APE_TX_EN)
- tp->mac_mode |= MAC_MODE_TDE_ENABLE;
- tw32_f(MAC_MODE, tp->mac_mode);
+ tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ val = tp->mac_mode;
} else
- tw32_f(MAC_MODE, 0);
+ val = 0;
+
+ tw32_f(MAC_MODE, val);
udelay(40);
tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
@@ -7801,6 +7803,37 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
tg3_abort_hw(tp, 1);
+ /* Enable MAC control of LPI */
+ if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
+ tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
+ TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
+ TG3_CPMU_EEE_LNKIDL_UART_IDL);
+
+ tw32_f(TG3_CPMU_EEE_CTRL,
+ TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
+
+ val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
+ TG3_CPMU_EEEMD_LPI_IN_TX |
+ TG3_CPMU_EEEMD_LPI_IN_RX |
+ TG3_CPMU_EEEMD_EEE_ENABLE;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
+
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
+
+ tw32_f(TG3_CPMU_EEE_MODE, val);
+
+ tw32_f(TG3_CPMU_EEE_DBTMR1,
+ TG3_CPMU_DBTMR1_PCIEXIT_2047US |
+ TG3_CPMU_DBTMR1_LNKIDLE_2047US);
+
+ tw32_f(TG3_CPMU_EEE_DBTMR2,
+ TG3_CPMU_DBTMR2_APE_TX_2047US |
+ TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
+ }
+
if (reset_phy)
tg3_phy_reset(tp);
@@ -7860,18 +7893,21 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
- u32 grc_mode = tr32(GRC_MODE);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ u32 grc_mode = tr32(GRC_MODE);
- /* Access the lower 1K of PL PCIE block registers. */
- val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
- tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
- val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
- tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
- val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
+ val = tr32(TG3_PCIE_TLDLPL_PORT +
+ TG3_PCIE_PL_LO_PHYCTL5);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
+ val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
- tw32(GRC_MODE, grc_mode);
+ tw32(GRC_MODE, grc_mode);
+ }
val = tr32(TG3_CPMU_LSPD_10MB_CLK);
val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
@@ -7879,22 +7915,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3_CPMU_LSPD_10MB_CLK, val);
}
- /* Enable MAC control of LPI */
- if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
- tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
- TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
- TG3_CPMU_EEE_LNKIDL_UART_IDL);
-
- tw32_f(TG3_CPMU_EEE_CTRL,
- TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
-
- tw32_f(TG3_CPMU_EEE_MODE,
- TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
- TG3_CPMU_EEEMD_LPI_IN_TX |
- TG3_CPMU_EEEMD_LPI_IN_RX |
- TG3_CPMU_EEEMD_EEE_ENABLE);
- }
-
/* This works around an issue with Athlon chipsets on
* B3 tigon3 silicon. This bit has no effect on any
* other revision. But do not set this on PCI Express
@@ -8084,8 +8104,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Program the jumbo buffer descriptor ring control
* blocks on those devices that have them.
*/
- if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
- !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
+ !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
/* Setup replenish threshold. */
tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
@@ -8162,8 +8183,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
RDMAC_MODE_LNGREAD_ENAB);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -8173,10 +8193,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
- /* If statement applies to 5705 and 5750 PCI devices only */
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
@@ -8203,6 +8221,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
val = tr32(TG3_RDMA_RSRVCTRL_REG);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
+ val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
+ TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
+ TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
+ }
tw32(TG3_RDMA_RSRVCTRL_REG,
val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
}
@@ -8280,7 +8306,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
- tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+ tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
else
tp->mac_mode = 0;
tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
@@ -8323,7 +8349,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
udelay(100);
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
+ if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+ tp->irq_cnt > 1) {
val = tr32(MSGINT_MODE);
val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
tw32(MSGINT_MODE, val);
@@ -8340,17 +8367,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
WDMAC_MODE_LNGREAD_ENAB);
- /* If statement applies to 5705 and 5750 PCI devices only */
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
(tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
/* nothing */
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
- !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
- !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+ !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
val |= WDMAC_MODE_RX_ACCEL;
}
}
@@ -9031,8 +9055,14 @@ static bool tg3_enable_msix(struct tg3 *tp)
pci_disable_msix(tp->pdev);
return false;
}
- if (tp->irq_cnt > 1)
+
+ if (tp->irq_cnt > 1) {
tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
+ netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
+ }
+ }
return true;
}
@@ -9057,7 +9087,8 @@ static void tg3_ints_init(struct tg3 *tp)
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
u32 msi_mode = tr32(MSGINT_MODE);
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+ tp->irq_cnt > 1)
msi_mode |= MSGINT_MODE_MULTIVEC_EN;
tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
}
@@ -9101,7 +9132,7 @@ static int tg3_open(struct net_device *dev)
netif_carrier_off(tp->dev);
- err = tg3_set_power_state(tp, PCI_D0);
+ err = tg3_power_up(tp);
if (err)
return err;
@@ -9266,7 +9297,7 @@ static int tg3_close(struct net_device *dev)
tg3_free_consistent(tp);
- tg3_set_power_state(tp, PCI_D3hot);
+ tg3_power_down(tp);
netif_carrier_off(tp->dev);
@@ -9499,17 +9530,10 @@ static void __tg3_set_rx_mode(struct net_device *dev)
rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
RX_MODE_KEEP_VLAN_TAG);
+#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
* flag clear.
*/
-#if TG3_VLAN_TAG_USED
- if (!tp->vlgrp &&
- !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
- rx_mode |= RX_MODE_KEEP_VLAN_TAG;
-#else
- /* By definition, VLAN is disabled always in this
- * case.
- */
if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif
@@ -10459,16 +10483,53 @@ static int tg3_test_nvram(struct tg3 *tp)
goto out;
}
+ err = -EIO;
+
/* Bootstrap checksum at offset 0x10 */
csum = calc_crc((unsigned char *) buf, 0x10);
- if (csum != be32_to_cpu(buf[0x10/4]))
+ if (csum != le32_to_cpu(buf[0x10/4]))
goto out;
/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
- if (csum != be32_to_cpu(buf[0xfc/4]))
+ if (csum != le32_to_cpu(buf[0xfc/4]))
goto out;
+ for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
+ /* The data is in little-endian format in NVRAM.
+ * Use the big-endian read routines to preserve
+ * the byte order as it exists in NVRAM.
+ */
+ if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &buf[i/4]))
+ goto out;
+ }
+
+ i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
+ PCI_VPD_LRDT_RO_DATA);
+ if (i > 0) {
+ j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
+ if (j < 0)
+ goto out;
+
+ if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
+ goto out;
+
+ i += PCI_VPD_LRDT_TAG_SIZE;
+ j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
+ PCI_VPD_RO_KEYWORD_CHKSUM);
+ if (j > 0) {
+ u8 csum8 = 0;
+
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+ for (i = 0; i <= j; i++)
+ csum8 += ((u8 *)buf)[i];
+
+ if (csum8)
+ goto out;
+ }
+ }
+
err = 0;
out:
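The added VPD check walks to the CHKSUM keyword and verifies that every byte from the start of the VPD data up to and including the checksum byte sums to zero modulo 256. A self-contained illustration of that check; the offsets here are made up, whereas the driver locates them with the pci_vpd_* helpers:

#include <stdio.h>
#include <stdint.h>

static int vpd_csum_ok(const uint8_t *vpd, size_t csum_off)
{
    uint8_t sum = 0;

    for (size_t i = 0; i <= csum_off; i++)
        sum += vpd[i];
    return sum == 0;
}

int main(void)
{
    uint8_t vpd[8] = { 0x82, 0x10, 0x23, 0x05, 0x11, 0x00, 0x00, 0x00 };
    size_t csum_off = 5;
    uint8_t sum = 0;

    for (size_t i = 0; i < csum_off; i++)
        sum += vpd[i];
    vpd[csum_off] = (uint8_t)(0x100 - sum);     /* make the region sum to zero */

    printf("checksum %s\n", vpd_csum_ok(vpd, csum_off) ? "ok" : "bad");
    return 0;
}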
@@ -10840,13 +10901,16 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
if (loopback_mode == TG3_MAC_LOOPBACK) {
/* HW errata - mac loopback fails in some cases on 5780.
* Normal traffic and PHY loopback are not affected by
- * errata.
+ * errata. Also, the MAC loopback test is deprecated for
+ * all newer ASIC revisions.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
+ (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
return 0;
- mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
- MAC_MODE_PORT_INT_LPBACK;
+ mac_mode = tp->mac_mode &
+ ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+ mac_mode |= MAC_MODE_PORT_INT_LPBACK;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
mac_mode |= MAC_MODE_LINK_POLARITY;
if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
@@ -10868,7 +10932,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
tg3_writephy(tp, MII_BMCR, val);
udelay(40);
- mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
+ mac_mode = tp->mac_mode &
+ ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
tg3_writephy(tp, MII_TG3_FET_PTEST,
MII_TG3_FET_PTEST_FRC_TX_LINK |
@@ -10896,6 +10961,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
}
tw32(MAC_MODE, mac_mode);
+
+ /* Wait for link */
+ for (i = 0; i < 100; i++) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+ break;
+ mdelay(1);
+ }
} else {
return -EINVAL;
}
@@ -11002,14 +11074,19 @@ out:
static int tg3_test_loopback(struct tg3 *tp)
{
int err = 0;
- u32 cpmuctrl = 0;
+ u32 eee_cap, cpmuctrl = 0;
if (!netif_running(tp->dev))
return TG3_LOOPBACK_FAILED;
+ eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
+ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+
err = tg3_reset_hw(tp, 1);
- if (err)
- return TG3_LOOPBACK_FAILED;
+ if (err) {
+ err = TG3_LOOPBACK_FAILED;
+ goto done;
+ }
/* Turn off gphy autopowerdown. */
if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
@@ -11029,8 +11106,10 @@ static int tg3_test_loopback(struct tg3 *tp)
udelay(10);
}
- if (status != CPMU_MUTEX_GNT_DRIVER)
- return TG3_LOOPBACK_FAILED;
+ if (status != CPMU_MUTEX_GNT_DRIVER) {
+ err = TG3_LOOPBACK_FAILED;
+ goto done;
+ }
/* Turn off link-based power management. */
cpmuctrl = tr32(TG3_CPMU_CTRL);
@@ -11059,6 +11138,9 @@ static int tg3_test_loopback(struct tg3 *tp)
if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
tg3_phy_toggle_apd(tp, true);
+done:
+ tp->phy_flags |= eee_cap;
+
return err;
}
@@ -11068,7 +11150,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
struct tg3 *tp = netdev_priv(dev);
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- tg3_set_power_state(tp, PCI_D0);
+ tg3_power_up(tp);
memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
@@ -11136,7 +11218,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_phy_start(tp);
}
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- tg3_set_power_state(tp, PCI_D3hot);
+ tg3_power_down(tp);
}
@@ -11165,7 +11247,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
break; /* We have no PHY */
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+ ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !netif_running(dev)))
return -EAGAIN;
spin_lock_bh(&tp->lock);
@@ -11181,7 +11265,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
break; /* We have no PHY */
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+ ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !netif_running(dev)))
return -EAGAIN;
spin_lock_bh(&tp->lock);
@@ -11197,31 +11283,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
-#if TG3_VLAN_TAG_USED
-static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct tg3 *tp = netdev_priv(dev);
-
- if (!netif_running(dev)) {
- tp->vlgrp = grp;
- return;
- }
-
- tg3_netif_stop(tp);
-
- tg3_full_lock(tp, 0);
-
- tp->vlgrp = grp;
-
- /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
- __tg3_set_rx_mode(dev);
-
- tg3_netif_start(tp);
-
- tg3_full_unlock(tp);
-}
-#endif
-
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct tg3 *tp = netdev_priv(dev);
@@ -12411,8 +12472,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
if (cfg2 & (1 << 18))
tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
- if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
+ if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) ||
+ ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
(cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
@@ -12434,9 +12496,11 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
}
done:
- device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
- device_set_wakeup_enable(&tp->pdev->dev,
+ if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
+ device_set_wakeup_enable(&tp->pdev->dev,
tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
+ else
+ device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
@@ -12488,12 +12552,45 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
+static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
+{
+ u32 adv = ADVERTISED_Autoneg |
+ ADVERTISED_Pause;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+ adv |= ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full;
+
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+ adv |= ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_TP;
+ else
+ adv |= ADVERTISED_FIBRE;
+
+ tp->link_config.advertising = adv;
+ tp->link_config.speed = SPEED_INVALID;
+ tp->link_config.duplex = DUPLEX_INVALID;
+ tp->link_config.autoneg = AUTONEG_ENABLE;
+ tp->link_config.active_speed = SPEED_INVALID;
+ tp->link_config.active_duplex = DUPLEX_INVALID;
+ tp->link_config.orig_speed = SPEED_INVALID;
+ tp->link_config.orig_duplex = DUPLEX_INVALID;
+ tp->link_config.orig_autoneg = AUTONEG_INVALID;
+}
+
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
u32 hw_phy_id_1, hw_phy_id_2;
u32 hw_phy_id, hw_phy_id_masked;
int err;
+ /* flow control autonegotiation is default behavior */
+ tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+ tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+
if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
return tg3_phy_init(tp);
@@ -12548,11 +12645,15 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
}
}
- if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
- tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+ ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+ tg3_phy_init_link_config(tp);
+
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
@@ -12608,17 +12709,6 @@ skip_phy_reset:
err = tg3_init_5401phy_dsp(tp);
}
- if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
- tp->link_config.advertising =
- (ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_Autoneg |
- ADVERTISED_FIBRE);
- if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
- tp->link_config.advertising &=
- ~(ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full);
-
return err;
}
@@ -12658,7 +12748,7 @@ static void __devinit tg3_read_vpd(struct tg3 *tp)
cnt = pci_read_vpd(tp->pdev, pos,
TG3_NVM_VPD_LEN - pos,
&vpd_data[pos]);
- if (cnt == -ETIMEDOUT || -EINTR)
+ if (cnt == -ETIMEDOUT || cnt == -EINTR)
cnt = 0;
else if (cnt < 0)
goto out_not_found;
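
The one-character change above fixes an operator-precedence slip: "cnt == -ETIMEDOUT || -EINTR" parses as "(cnt == -ETIMEDOUT) || (-EINTR)", and since -EINTR is a nonzero constant the whole test is always true, so every VPD read result was being thrown away. A small standalone C illustration (the value of cnt is made up):

#include <errno.h>
#include <stdio.h>

int main(void)
{
	int cnt = 32;	/* e.g. the byte count of a successful partial read */

	/* Old form: parses as (cnt == -ETIMEDOUT) || (-EINTR); the right-hand
	 * operand is a nonzero constant, so this branch is always taken. */
	if (cnt == -ETIMEDOUT || -EINTR)
		printf("buggy test fires even for cnt = %d\n", cnt);

	/* Fixed form fires only for the two error codes. */
	if (cnt == -ETIMEDOUT || cnt == -EINTR)
		printf("fixed test fires\n");
	else
		printf("fixed test correctly ignores cnt = %d\n", cnt);
	return 0;
}
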
@@ -13030,9 +13120,7 @@ static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
{
-#if TG3_VLAN_TAG_USED
dev->vlan_features |= flags;
-#endif
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
@@ -13047,17 +13135,15 @@ static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
return 512;
}
+static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
+ { },
+};
+
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
- static struct pci_device_id write_reorder_chipsets[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_FE_GATE_700C) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD,
- PCI_DEVICE_ID_AMD_8131_BRIDGE) },
- { PCI_DEVICE(PCI_VENDOR_ID_VIA,
- PCI_DEVICE_ID_VIA_8385_0) },
- { },
- };
u32 misc_ctrl_reg;
u32 pci_state_reg, grc_misc_cfg;
u32 val;
@@ -13291,7 +13377,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
/* Determine TSO capabilities */
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ ; /* Do nothing. HW bug. */
+ else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13342,7 +13430,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
}
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+ if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13359,7 +13448,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
- pcie_set_readrq(tp->pdev, 4096);
+ tp->pcie_readrq = 4096;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tp->pcie_readrq = 2048;
+
+ pcie_set_readrq(tp->pdev, tp->pcie_readrq);
pci_read_config_word(tp->pdev,
tp->pcie_cap + PCI_EXP_LNKCTL,
@@ -13396,7 +13489,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
* every mailbox register write to force the writes to be
* posted to the chip in order.
*/
- if (pci_dev_present(write_reorder_chipsets) &&
+ if (pci_dev_present(tg3_write_reorder_chipsets) &&
!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
@@ -13546,7 +13639,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
(tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
- /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
+ /* Set up tp->grc_local_ctrl before calling tg3_power_up().
* GPIO1 driven high will bring 5700's external PHY out of reset.
* It is also used as eeprom write protect on LOMs.
*/
@@ -13577,7 +13670,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
/* Force the chip into D0. */
- err = tg3_set_power_state(tp, PCI_D0);
+ err = tg3_power_up(tp);
if (err) {
dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
return err;
@@ -13722,8 +13815,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Preserve the APE MAC_MODE bits */
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
- tp->mac_mode = tr32(MAC_MODE) |
- MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+ tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
else
tp->mac_mode = TG3_DEF_MAC_MODE;
@@ -13790,11 +13882,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
- tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
+ tp->rx_offset = NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
(tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
- tp->rx_offset -= NET_IP_ALIGN;
+ tp->rx_offset = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
tp->rx_copy_thresh = ~(u16)0;
#endif
@@ -14153,13 +14245,19 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
#define TEST_BUFFER_SIZE 0x2000
+static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
+ { },
+};
+
static int __devinit tg3_test_dma(struct tg3 *tp)
{
dma_addr_t buf_dma;
u32 *buf, saved_dma_rwctrl;
int ret = 0;
- buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
+ buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
+ &buf_dma, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto out_nofree;
@@ -14321,17 +14419,12 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
}
if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
DMA_RWCTRL_WRITE_BNDRY_16) {
- static struct pci_device_id dma_wait_state_chipsets[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
- PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
- { },
- };
/* DMA test passed without adjusting DMA boundary,
* now look for chipsets that are known to expose the
* DMA bug without failing the test.
*/
- if (pci_dev_present(dma_wait_state_chipsets)) {
+ if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
} else {
@@ -14343,28 +14436,11 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
}
out:
- pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
+ dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
return ret;
}
-static void __devinit tg3_init_link_config(struct tg3 *tp)
-{
- tp->link_config.advertising =
- (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
- ADVERTISED_Autoneg | ADVERTISED_MII);
- tp->link_config.speed = SPEED_INVALID;
- tp->link_config.duplex = DUPLEX_INVALID;
- tp->link_config.autoneg = AUTONEG_ENABLE;
- tp->link_config.active_speed = SPEED_INVALID;
- tp->link_config.active_duplex = DUPLEX_INVALID;
- tp->link_config.orig_speed = SPEED_INVALID;
- tp->link_config.orig_duplex = DUPLEX_INVALID;
- tp->link_config.orig_autoneg = AUTONEG_INVALID;
-}
-
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
@@ -14557,9 +14633,6 @@ static const struct net_device_ops tg3_netdev_ops = {
.ndo_do_ioctl = tg3_ioctl,
.ndo_tx_timeout = tg3_tx_timeout,
.ndo_change_mtu = tg3_change_mtu,
-#if TG3_VLAN_TAG_USED
- .ndo_vlan_rx_register = tg3_vlan_rx_register,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
@@ -14576,9 +14649,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
.ndo_do_ioctl = tg3_ioctl,
.ndo_tx_timeout = tg3_tx_timeout,
.ndo_change_mtu = tg3_change_mtu,
-#if TG3_VLAN_TAG_USED
- .ndo_vlan_rx_register = tg3_vlan_rx_register,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
@@ -14628,9 +14698,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
-#if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-#endif
tp = netdev_priv(dev);
tp->pdev = pdev;
@@ -14676,8 +14744,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
goto err_out_free_dev;
}
- tg3_init_link_config(tp);
-
tp->rx_pending = TG3_DEF_RX_RING_PENDING;
tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
@@ -14825,10 +14891,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
goto err_out_apeunmap;
}
- /* flow control autonegotiation is default behavior */
- tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
- tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
-
intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
@@ -14957,7 +15019,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
if (tp->fw)
release_firmware(tp->fw);
- flush_scheduled_work();
+ cancel_work_sync(&tp->reset_task);
if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
tg3_phy_fini(tp);
@@ -14980,23 +15042,18 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
}
}
-static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int tg3_suspend(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
- pci_power_t target_state;
int err;
- /* PCI register 4 needs to be saved whether netif_running() or not.
- * MSI address and data need to be saved if using MSI and
- * netif_running().
- */
- pci_save_state(pdev);
-
if (!netif_running(dev))
return 0;
- flush_scheduled_work();
+ flush_work_sync(&tp->reset_task);
tg3_phy_stop(tp);
tg3_netif_stop(tp);
@@ -15013,9 +15070,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
tg3_full_unlock(tp);
- target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
-
- err = tg3_set_power_state(tp, target_state);
+ err = tg3_power_down_prepare(tp);
if (err) {
int err2;
@@ -15042,21 +15097,16 @@ out:
return err;
}
-static int tg3_resume(struct pci_dev *pdev)
+static int tg3_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
int err;
- pci_restore_state(tp->pdev);
-
if (!netif_running(dev))
return 0;
- err = tg3_set_power_state(tp, PCI_D0);
- if (err)
- return err;
-
netif_device_attach(dev);
tg3_full_lock(tp, 0);
@@ -15080,13 +15130,21 @@ out:
return err;
}
+static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+#define TG3_PM_OPS (&tg3_pm_ops)
+
+#else
+
+#define TG3_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
static struct pci_driver tg3_driver = {
.name = DRV_MODULE_NAME,
.id_table = tg3_pci_tbl,
.probe = tg3_init_one,
.remove = __devexit_p(tg3_remove_one),
- .suspend = tg3_suspend,
- .resume = tg3_resume
+ .driver.pm = TG3_PM_OPS,
};
static int __init tg3_init(void)
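
For context on the suspend/resume rework above: with the legacy PCI .suspend/.resume hooks the driver had to save and restore PCI config space itself, while drivers using dev_pm_ops leave that to the PCI core, which is why the explicit pci_save_state()/pci_restore_state() calls disappear. A minimal sketch of the resulting shape (the "foo" names are hypothetical, not from tg3; probe/remove details omitted):

#include <linux/module.h>
#include <linux/pci.h>

static int foo_suspend(struct device *dev)
{
	/* Quiesce the device; the PCI core saves config space itself. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Bring the device back up; config space is already restored. */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name      = "foo",
	.driver.pm = &foo_pm_ops,
};
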
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4a1974804b9f..73884b69b749 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2010 Broadcom Corporation.
+ * Copyright (C) 2007-2011 Broadcom Corporation.
*/
#ifndef _T3_H
@@ -141,6 +141,7 @@
#define CHIPREV_ID_57780_A1 0x57780001
#define CHIPREV_ID_5717_A0 0x05717000
#define CHIPREV_ID_57765_A0 0x57785000
+#define CHIPREV_ID_5719_A0 0x05719000
#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
@@ -1094,13 +1095,19 @@
/* 0x3664 --> 0x36b0 unused */
#define TG3_CPMU_EEE_MODE 0x000036b0
-#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008
-#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080
-#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100
-#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200
-#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000
-/* 0x36b4 --> 0x36b8 unused */
-
+#define TG3_CPMU_EEEMD_APE_TX_DET_EN 0x00000004
+#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008
+#define TG3_CPMU_EEEMD_SND_IDX_DET_EN 0x00000040
+#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080
+#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100
+#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200
+#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000
+#define TG3_CPMU_EEE_DBTMR1 0x000036b4
+#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
+#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff
+#define TG3_CPMU_EEE_DBTMR2 0x000036b8
+#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
+#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff
#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004
@@ -1327,6 +1334,12 @@
#define TG3_RDMA_RSRVCTRL_REG 0x00004900
#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000
+#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
+#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
/* 0x4904 --> 0x4910 unused */
#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
@@ -2100,6 +2113,10 @@
#define MII_TG3_DSP_TAP1 0x0001
#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007
+#define MII_TG3_DSP_TAP26 0x001a
+#define MII_TG3_DSP_TAP26_ALNOKO 0x0001
+#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002
+#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004
#define MII_TG3_DSP_AADJ1CH0 0x001f
#define MII_TG3_DSP_CH34TP2 0x4022
#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010
@@ -2170,9 +2187,6 @@
#define MII_TG3_TEST1_CRC_EN 0x8000
/* Clause 45 expansion registers */
-#define TG3_CL45_D7_EEEADV_CAP 0x003c
-#define TG3_CL45_D7_EEEADV_CAP_100TX 0x0002
-#define TG3_CL45_D7_EEEADV_CAP_1000T 0x0004
#define TG3_CL45_D7_EEERES_STAT 0x803e
#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002
#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
@@ -2562,10 +2576,6 @@ struct ring_info {
DEFINE_DMA_UNMAP_ADDR(mapping);
};
-struct tg3_config_info {
- u32 flags;
-};
-
struct tg3_link_config {
/* Describes what we're trying to get. */
u32 advertising;
@@ -2713,17 +2723,17 @@ struct tg3_napi {
u32 last_irq_tag;
u32 int_mbox;
u32 coal_now;
- u32 tx_prod;
- u32 tx_cons;
- u32 tx_pending;
- u32 prodmbox;
- u32 consmbox;
+ u32 consmbox ____cacheline_aligned;
u32 rx_rcb_ptr;
u16 *rx_rcb_prod_idx;
struct tg3_rx_prodring_set prodring;
-
struct tg3_rx_buffer_desc *rx_rcb;
+
+ u32 tx_prod ____cacheline_aligned;
+ u32 tx_cons;
+ u32 tx_pending;
+ u32 prodmbox;
struct tg3_tx_buffer_desc *tx_ring;
struct ring_info *tx_buffers;
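
The field reordering above is a false-sharing fix: the RX completion fields and the TX producer fields now each start on their own cache line, so the transmit path no longer dirties the line the receive path is reading. A generic sketch of the idiom (hypothetical structure, not the driver's):

#include <linux/cache.h>
#include <linux/types.h>

/* Sketch only: keep fields hammered by different hot paths on separate
 * cache lines, so updates on one side do not invalidate the other. */
struct demo_ring {
	/* RX/consumer side starts one cache line... */
	u32 rx_cons ____cacheline_aligned;
	u32 rx_pending;

	/* ...TX/producer side starts another. */
	u32 tx_prod ____cacheline_aligned;
	u32 tx_cons;
};
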
@@ -2807,9 +2817,6 @@ struct tg3 {
u32 rx_std_max_post;
u32 rx_offset;
u32 rx_pkt_map_sz;
-#if TG3_VLAN_TAG_USED
- struct vlan_group *vlgrp;
-#endif
/* begin "everything else" cacheline(s) section */
@@ -2946,6 +2953,7 @@ struct tg3 {
int pcix_cap;
int pcie_cap;
};
+ int pcie_readrq;
struct mii_bus *mdio_bus;
int mdio_irq[PHY_MAX_ADDR];
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 0e6bac5ec65b..0825db6d883f 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -44,10 +44,6 @@
#include <linux/tcp.h>
-/* There is no singlethread_cpu, so schedule work on the current cpu. */
-#define singlethread_cpu -1
-
-
/*
* First, "tile_net_init_module()" initializes all four "devices" which
* can be used by linux.
@@ -73,15 +69,16 @@
* return, knowing we will be called again later. Otherwise, we
* reenable the ingress interrupt, and call "napi_complete()".
*
+ * HACK: Since disabling the ingress interrupt is not reliable, we
+ * ignore the interrupt if the global "active" flag is false.
+ *
*
* NOTE: The use of "native_driver" ensures that EPP exists, and that
- * "epp_sendv" is legal, and that "LIPP" is being used.
+ * we are using "LIPP" and "LEPP".
*
* NOTE: Failing to free completions for an arbitrarily long time
* (which is defined to be illegal) does in fact cause bizarre
* problems. The "egress_timer" helps prevent this from happening.
- *
- * NOTE: The egress code can be interrupted by the interrupt handler.
*/
@@ -143,13 +140,6 @@ MODULE_AUTHOR("Tilera");
MODULE_LICENSE("GPL");
-#define IS_MULTICAST(mac_addr) \
- (((u8 *)(mac_addr))[0] & 0x01)
-
-#define IS_BROADCAST(mac_addr) \
- (((u16 *)(mac_addr))[0] == 0xffff)
-
-
/*
* Queue of incoming packets for a specific cpu and device.
*
@@ -185,7 +175,7 @@ struct tile_net_cpu {
struct tile_netio_queue queue;
/* Statistics. */
struct tile_net_stats_t stats;
- /* ISSUE: Is this needed? */
+ /* True iff NAPI is enabled. */
bool napi_enabled;
/* True if this tile has successfully registered with the IPP. */

bool registered;
@@ -208,20 +198,20 @@ struct tile_net_cpu {
struct tile_net_priv {
/* Our network device. */
struct net_device *dev;
- /* The actual egress queue. */
- lepp_queue_t *epp_queue;
- /* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */
- spinlock_t cmd_lock;
- /* Protects "epp_queue->comp_head". */
- spinlock_t comp_lock;
+ /* Pages making up the egress queue. */
+ struct page *eq_pages;
+ /* Address of the actual egress queue. */
+ lepp_queue_t *eq;
+ /* Protects "eq". */
+ spinlock_t eq_lock;
/* The hypervisor handle for this interface. */
int hv_devhdl;
/* The intr bit mask that IDs this device. */
u32 intr_id;
/* True iff "tile_net_open_aux()" has succeeded. */
- int partly_opened;
- /* True iff "tile_net_open_inner()" has succeeded. */
- int fully_opened;
+ bool partly_opened;
+ /* True iff the device is "active". */
+ bool active;
/* Effective network cpus. */
struct cpumask network_cpus_map;
/* Number of network cpus. */
@@ -236,6 +226,10 @@ struct tile_net_priv {
struct tile_net_cpu *cpu[NR_CPUS];
};
+/* Log2 of the number of small pages needed for the egress queue. */
+#define EQ_ORDER get_order(sizeof(lepp_queue_t))
+/* Size of the egress queue's pages. */
+#define EQ_SIZE (1 << (PAGE_SHIFT + EQ_ORDER))
/*
* The actual devices (xgbe0, xgbe1, gbe0, gbe1).
@@ -292,7 +286,11 @@ static void net_printk(char *fmt, ...)
*/
static void dump_packet(unsigned char *data, unsigned long length, char *s)
{
+ int my_cpu = smp_processor_id();
+
unsigned long i;
+ char buf[128];
+
static unsigned int count;
pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
@@ -302,10 +300,12 @@ static void dump_packet(unsigned char *data, unsigned long length, char *s)
for (i = 0; i < length; i++) {
if ((i & 0xf) == 0)
- sprintf(buf, "%8.8lx:", i);
+ sprintf(buf, "[%02d] %8.8lx:", my_cpu, i);
sprintf(buf + strlen(buf), " %2.2x", data[i]);
- if ((i & 0xf) == 0xf || i == length - 1)
- pr_info("%s\n", buf);
+ if ((i & 0xf) == 0xf || i == length - 1) {
+ strcat(buf, "\n");
+ pr_info("%s", buf);
+ }
}
}
#endif
@@ -359,60 +359,109 @@ static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
/*
* Provide a linux buffer for LIPP.
+ *
+ * Note that the ACTUAL allocation for each buffer is a "struct sk_buff",
+ * plus a chunk of memory that includes not only the requested bytes, but
+ * also NET_SKB_PAD bytes of initial padding, and a "struct skb_shared_info".
+ *
+ * Note that "struct skb_shared_info" is 88 bytes with 64K pages and
+ * 268 bytes with 4K pages (since the frags[] array needs 18 entries).
+ *
+ * Without jumbo packets, the maximum packet size will be 1536 bytes,
+ * and we use 2 bytes (NET_IP_ALIGN) of padding. ISSUE: If we told
+ * the hardware to clip at 1518 bytes instead of 1536 bytes, then we
+ * could save an entire cache line, but in practice, we don't need it.
+ *
+ * Since CPAs are 38 bits, and we can only encode the high 31 bits in
+ * a "linux_buffer_t", the low 7 bits must be zero, and thus, we must
+ * align the actual "va" mod 128.
+ *
+ * We assume that the underlying "head" will be aligned mod 64. Note
+ * that in practice, we have seen "head" NOT aligned mod 128 even when
+ * using 2048 byte allocations, which is surprising.
+ *
+ * If "head" WAS always aligned mod 128, we could change LIPP to
+ * assume that the low SIX bits are zero, and the 7th bit is one, that
+ * is, align the actual "va" mod 128 plus 64, which would be "free".
+ *
+ * For now, the actual "head" pointer points at NET_SKB_PAD bytes of
+ * padding, plus 28 or 92 bytes of extra padding, plus the sk_buff
+ * pointer, plus the NET_IP_ALIGN padding, plus 126 or 1536 bytes for
+ * the actual packet, plus 62 bytes of empty padding, plus some
+ * padding and the "struct skb_shared_info".
+ *
+ * With 64K pages, a large buffer thus needs 32+92+4+2+1536+62+88
+ * bytes, or 1816 bytes, which fits comfortably into 2048 bytes.
+ *
+ * With 64K pages, a small buffer thus needs 32+92+4+2+126+88
+ * bytes, or 344 bytes, which means we are wasting 64+ bytes, and
+ * could presumably increase the size of small buffers.
+ *
+ * With 4K pages, a large buffer thus needs 32+92+4+2+1536+62+268
+ * bytes, or 1996 bytes, which fits comfortably into 2048 bytes.
+ *
+ * With 4K pages, a small buffer thus needs 32+92+4+2+126+268
+ * bytes, or 524 bytes, which is annoyingly wasteful.
+ *
+ * Maybe we should increase LIPP_SMALL_PACKET_SIZE to 192?
+ *
+ * ISSUE: Maybe we should increase "NET_SKB_PAD" to 64?
*/
static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
bool small)
{
- /* ISSUE: What should we use here? */
+#if TILE_NET_MTU <= 1536
+ /* Without "jumbo", 2 + 1536 should be sufficient. */
+ unsigned int large_size = NET_IP_ALIGN + 1536;
+#else
+ /* ISSUE: This has not been tested. */
unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
+#endif
- /* Round up to ensure to avoid "false sharing" with last cache line. */
- unsigned int buffer_size =
+ /* Avoid "false sharing" with last cache line. */
+ /* ISSUE: This is already done by "dev_alloc_skb()". */
+ unsigned int len =
(((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());
- /*
- * ISSUE: Since CPAs are 38 bits, and we can only encode the
- * high 31 bits in a "linux_buffer_t", the low 7 bits must be
- * zero, and thus, we must align the actual "va" mod 128.
- */
- const unsigned long align = 128;
+ unsigned int padding = 128 - NET_SKB_PAD;
+ unsigned int align;
struct sk_buff *skb;
void *va;
struct sk_buff **skb_ptr;
- /* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */
- /* and also "reserves" that many bytes. */
- /* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */
- int len = sizeof(*skb_ptr) + align + buffer_size;
-
- while (1) {
-
- /* Allocate (or fail). */
- skb = dev_alloc_skb(len);
- if (skb == NULL)
- return false;
+ /* Request 96 extra bytes for alignment purposes. */
+ skb = dev_alloc_skb(len + padding);
+ if (skb == NULL)
+ return false;
- /* Make room for a back-pointer to 'skb'. */
- skb_reserve(skb, sizeof(*skb_ptr));
+ /* Skip 32 or 96 bytes to align "data" mod 128. */
+ align = -(long)skb->data & (128 - 1);
+ BUG_ON(align > padding);
+ skb_reserve(skb, align);
- /* Make sure we are aligned. */
- skb_reserve(skb, -(long)skb->data & (align - 1));
+ /* This address is given to IPP. */
+ va = skb->data;
- /* This address is given to IPP. */
- va = skb->data;
+ /* Buffers must not span a huge page. */
+ BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0);
- if (small)
- break;
-
- /* ISSUE: This has never been observed! */
- /* Large buffers must not span a huge page. */
- if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0)
- break;
- pr_err("Leaking unaligned linux buffer at %p.\n", va);
+#ifdef TILE_NET_PARANOIA
+#if CHIP_HAS_CBOX_HOME_MAP()
+ if (hash_default) {
+ HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
+ if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
+ panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx",
+ va, hv_pte_get_mode(pte), hv_pte_val(pte));
}
+#endif
+#endif
+
+ /* Invalidate the packet buffer. */
+ if (!hash_default)
+ __inv_buffer(va, len);
/* Skip two bytes to satisfy LIPP assumptions. */
/* Note that this aligns IP on a 16 byte boundary. */
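
The allocation rewrite above leans on a compact round-up idiom: for a power-of-two boundary B, -(long)p & (B - 1) is the number of bytes needed to advance p to the next B-byte boundary (zero if p is already aligned), which is what the skb_reserve(skb, align) call uses to align "data" mod 128. A standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char buf[512];
	uintptr_t p = (uintptr_t)buf + 5;              /* deliberately misaligned */
	unsigned int pad = (unsigned int)(-p & (128 - 1));

	/* p + pad is the next 128-byte boundary; pad is 0 when already aligned. */
	printf("p mod 128 = %u, pad = %u, (p + pad) mod 128 = %u\n",
	       (unsigned int)(p % 128), pad, (unsigned int)((p + pad) % 128));
	return 0;
}
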
@@ -423,23 +472,9 @@ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
skb_ptr = va - sizeof(*skb_ptr);
*skb_ptr = skb;
- /* Invalidate the packet buffer. */
- if (!hash_default)
- __inv_buffer(skb->data, buffer_size);
-
/* Make sure "skb_ptr" has been flushed. */
__insn_mf();
-#ifdef TILE_NET_PARANOIA
-#if CHIP_HAS_CBOX_HOME_MAP()
- if (hash_default) {
- HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
- if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
- panic("Non-coherent ingress buffer!");
- }
-#endif
-#endif
-
/* Provide the new buffer. */
tile_net_provide_linux_buffer(info, va, small);
@@ -477,48 +512,64 @@ oops:
* Grab some LEPP completions, and store them in "comps", of size
* "comps_size", and return the number of completions which were
* stored, so the caller can free them.
- *
- * If "pending" is not NULL, it will be set to true if there might
- * still be some pending completions caused by this tile, else false.
*/
-static unsigned int tile_net_lepp_grab_comps(struct net_device *dev,
+static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq,
struct sk_buff *comps[],
unsigned int comps_size,
- bool *pending)
+ unsigned int min_size)
{
- struct tile_net_priv *priv = netdev_priv(dev);
-
- lepp_queue_t *eq = priv->epp_queue;
-
unsigned int n = 0;
- unsigned int comp_head;
- unsigned int comp_busy;
- unsigned int comp_tail;
-
- spin_lock(&priv->comp_lock);
-
- comp_head = eq->comp_head;
- comp_busy = eq->comp_busy;
- comp_tail = eq->comp_tail;
+ unsigned int comp_head = eq->comp_head;
+ unsigned int comp_busy = eq->comp_busy;
while (comp_head != comp_busy && n < comps_size) {
comps[n++] = eq->comps[comp_head];
LEPP_QINC(comp_head);
}
- if (pending != NULL)
- *pending = (comp_head != comp_tail);
+ if (n < min_size)
+ return 0;
eq->comp_head = comp_head;
- spin_unlock(&priv->comp_lock);
-
return n;
}
/*
+ * Free some comps, and return true iff there are still some pending.
+ */
+static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ lepp_queue_t *eq = priv->eq;
+
+ struct sk_buff *olds[64];
+ unsigned int wanted = 64;
+ unsigned int i, n;
+ bool pending;
+
+ spin_lock(&priv->eq_lock);
+
+ if (all)
+ eq->comp_busy = eq->comp_tail;
+
+ n = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
+
+ pending = (eq->comp_head != eq->comp_tail);
+
+ spin_unlock(&priv->eq_lock);
+
+ for (i = 0; i < n; i++)
+ kfree_skb(olds[i]);
+
+ return pending;
+}
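
One detail of the new helper above worth calling out: the completion slots are harvested while eq_lock is held, but the kfree_skb() calls are issued only after the unlock, which keeps the critical section short. The same collect-then-free shape in generic form (the "demo" context and helpers are hypothetical):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Sketch only: gather pointers under the lock, release them afterwards. */
static void demo_drain_and_free(struct demo_ctx *ctx)
{
	struct sk_buff *batch[64];
	unsigned int i, n = 0;

	spin_lock(&ctx->lock);
	while (n < ARRAY_SIZE(batch) && demo_have_completion(ctx))
		batch[n++] = demo_pop_completion(ctx);
	spin_unlock(&ctx->lock);

	for (i = 0; i < n; i++)
		kfree_skb(batch[i]);	/* freeing happens outside the lock */
}
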
+
+
+/*
* Make sure the egress timer is scheduled.
*
* Note that we use "schedule if not scheduled" logic instead of the more
@@ -552,21 +603,11 @@ static void tile_net_handle_egress_timer(unsigned long arg)
struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
struct net_device *dev = info->napi.dev;
- struct sk_buff *olds[32];
- unsigned int wanted = 32;
- unsigned int i, nolds = 0;
- bool pending;
-
/* The timer is no longer scheduled. */
info->egress_timer_scheduled = false;
- nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending);
-
- for (i = 0; i < nolds; i++)
- kfree_skb(olds[i]);
-
- /* Reschedule timer if needed. */
- if (pending)
+ /* Free comps, and reschedule timer if more are pending. */
+ if (tile_net_lepp_free_comps(dev, false))
tile_net_schedule_egress_timer(info);
}
@@ -644,8 +685,39 @@ static bool is_dup_ack(char *s1, char *s2, unsigned int len)
+static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
+{
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_impl_t *qsp = queue->__system_part;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+
+ int index2_aux = index + sizeof(netio_pkt_t);
+ int index2 =
+ ((index2_aux ==
+ qsp->__packet_receive_queue.__last_packet_plus_one) ?
+ 0 : index2_aux);
+
+ netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
+
+ /* Extract the "linux_buffer_t". */
+ unsigned int buffer = pkt->__packet.word;
+
+ /* Convert "linux_buffer_t" to "va". */
+ void *va = __va((phys_addr_t)(buffer >> 1) << 7);
+
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ kfree_skb(skb);
+
+ /* Consume this packet. */
+ qup->__packet_receive_read = index2;
+}
+
+
/*
- * Like "tile_net_handle_packets()", but just discard packets.
+ * Like "tile_net_poll()", but just discard packets.
*/
static void tile_net_discard_packets(struct net_device *dev)
{
@@ -658,32 +730,8 @@ static void tile_net_discard_packets(struct net_device *dev)
while (qup->__packet_receive_read !=
qsp->__packet_receive_queue.__packet_write) {
-
int index = qup->__packet_receive_read;
-
- int index2_aux = index + sizeof(netio_pkt_t);
- int index2 =
- ((index2_aux ==
- qsp->__packet_receive_queue.__last_packet_plus_one) ?
- 0 : index2_aux);
-
- netio_pkt_t *pkt = (netio_pkt_t *)
- ((unsigned long) &qsp[1] + index);
-
- /* Extract the "linux_buffer_t". */
- unsigned int buffer = pkt->__packet.word;
-
- /* Convert "linux_buffer_t" to "va". */
- void *va = __va((phys_addr_t)(buffer >> 1) << 7);
-
- /* Acquire the associated "skb". */
- struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
- struct sk_buff *skb = *skb_ptr;
-
- kfree_skb(skb);
-
- /* Consume this packet. */
- qup->__packet_receive_read = index2;
+ tile_net_discard_aux(info, index);
}
}
@@ -712,7 +760,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
- /* Extract the packet size. */
+ /* Extract the packet size. FIXME: Shouldn't the second line */
+ /* get subtracted? Mostly moot, since it should be "zero". */
unsigned long len =
(NETIO_PKT_CUSTOM_LENGTH(pkt) +
NET_IP_ALIGN - NETIO_PACKET_PADDING);
@@ -730,15 +779,6 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
/* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
unsigned char *buf = va + NET_IP_ALIGN;
-#ifdef IGNORE_DUP_ACKS
-
- static int other;
- static int final;
- static int keep;
- static int skip;
-
-#endif
-
/* Invalidate the packet buffer. */
if (!hash_default)
__inv_buffer(buf, len);
@@ -753,16 +793,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
#ifdef TILE_NET_VERIFY_INGRESS
if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
- /*
- * FIXME: This complains about UDP packets
- * with a "zero" checksum (bug 6624).
- */
-#ifdef TILE_NET_PANIC_ON_BAD
- dump_packet(buf, len, "rx");
- panic("Bad L4 checksum.");
-#else
+ /* Bug 6624: Includes UDP packets with a "zero" checksum. */
pr_warning("Bad L4 checksum on %d byte packet.\n", len);
-#endif
}
if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
@@ -777,90 +809,29 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
}
break;
case NETIO_PKT_STATUS_BAD:
-#ifdef TILE_NET_PANIC_ON_BAD
- dump_packet(buf, len, "rx");
- panic("Unexpected BAD packet.");
-#else
- pr_warning("Unexpected BAD %d byte packet.\n", len);
-#endif
+ pr_warning("Unexpected BAD %ld byte packet.\n", len);
}
#endif
filter = 0;
+ /* ISSUE: Filter TCP packets with "bad" checksums? */
+
if (!(dev->flags & IFF_UP)) {
/* Filter packets received before we're up. */
filter = 1;
+ } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) {
+ /* Filter "truncated" packets. */
+ filter = 1;
} else if (!(dev->flags & IFF_PROMISC)) {
- /*
- * FIXME: Implement HW multicast filter.
- */
- if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) {
+ /* FIXME: Implement HW multicast filter. */
+ if (!is_multicast_ether_addr(buf)) {
/* Filter packets not for our address. */
const u8 *mine = dev->dev_addr;
filter = compare_ether_addr(mine, buf);
}
}
-#ifdef IGNORE_DUP_ACKS
-
- if (len != 66) {
- /* FIXME: Must check "is_tcp_ack(buf, len)" somehow. */
-
- other++;
-
- } else if (index2 ==
- qsp->__packet_receive_queue.__packet_write) {
-
- final++;
-
- } else {
-
- netio_pkt_t *pkt2 = (netio_pkt_t *)
- ((unsigned long) &qsp[1] + index2);
-
- netio_pkt_metadata_t *metadata2 =
- NETIO_PKT_METADATA(pkt2);
-
- /* Extract the packet size. */
- unsigned long len2 =
- (NETIO_PKT_CUSTOM_LENGTH(pkt2) +
- NET_IP_ALIGN - NETIO_PACKET_PADDING);
-
- if (len2 == 66 &&
- NETIO_PKT_FLOW_HASH_M(metadata, pkt) ==
- NETIO_PKT_FLOW_HASH_M(metadata2, pkt2)) {
-
- /* Extract the "linux_buffer_t". */
- unsigned int buffer2 = pkt2->__packet.word;
-
- /* Convert "linux_buffer_t" to "va". */
- void *va2 =
- __va((phys_addr_t)(buffer2 >> 1) << 7);
-
- /* Extract the packet data pointer. */
- /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
- unsigned char *buf2 = va2 + NET_IP_ALIGN;
-
- /* Invalidate the packet buffer. */
- if (!hash_default)
- __inv_buffer(buf2, len2);
-
- if (is_dup_ack(buf, buf2, len)) {
- skip++;
- filter = 1;
- } else {
- keep++;
- }
- }
- }
-
- if (net_ratelimit())
- pr_info("Other %d Final %d Keep %d Skip %d.\n",
- other, final, keep, skip);
-
-#endif
-
if (filter) {
/* ISSUE: Update "drop" statistics? */
@@ -885,10 +856,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
/* NOTE: This call also sets "skb->dev = dev". */
skb->protocol = eth_type_trans(skb, dev);
- /* ISSUE: Discard corrupt packets? */
- /* ISSUE: Discard packets with bad checksums? */
-
- /* Avoid recomputing TCP/UDP checksums. */
+ /* Avoid recomputing "good" TCP/UDP checksums. */
if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -920,9 +888,14 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
/*
* Handle some packets for the given device on the current CPU.
*
- * ISSUE: The "rotting packet" race condition occurs if a packet
- * arrives after the queue appears to be empty, and before the
- * hypervisor interrupt is re-enabled.
+ * If "tile_net_stop()" is called on some other tile while this
+ * function is running, we will return, hopefully before that
+ * other tile asks us to call "napi_disable()".
+ *
+ * The "rotting packet" race condition occurs if a packet arrives
+ * during the extremely narrow window between the queue appearing to
+ * be empty, and the ingress interrupt being re-enabled. This happens
+ * a LOT under heavy network load.
*/
static int tile_net_poll(struct napi_struct *napi, int budget)
{
@@ -936,7 +909,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
unsigned int work = 0;
- while (1) {
+ while (priv->active) {
int index = qup->__packet_receive_read;
if (index == qsp->__packet_receive_queue.__packet_write)
break;
@@ -949,19 +922,24 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
napi_complete(&info->napi);
- /* Re-enable hypervisor interrupts. */
+ if (!priv->active)
+ goto done;
+
+ /* Re-enable the ingress interrupt. */
enable_percpu_irq(priv->intr_id);
- /* HACK: Avoid the "rotting packet" problem. */
+ /* HACK: Avoid the "rotting packet" problem (see above). */
if (qup->__packet_receive_read !=
- qsp->__packet_receive_queue.__packet_write)
- napi_schedule(&info->napi);
-
- /* ISSUE: Handle completions? */
+ qsp->__packet_receive_queue.__packet_write) {
+ /* ISSUE: Sometimes this returns zero, presumably */
+ /* because an interrupt was handled for this tile. */
+ (void)napi_reschedule(&info->napi);
+ }
done:
- tile_net_provide_needed_buffers(info);
+ if (priv->active)
+ tile_net_provide_needed_buffers(info);
return work;
}
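
The rescheduling logic above is the usual cure for the "rotting packet" window the comment describes: complete NAPI, re-enable the interrupt, then peek at the queue once more and reschedule if something arrived in between. Stripped of driver specifics, the poll routine follows roughly this shape (the "demo" helpers are hypothetical):

/* Sketch of the poll/complete/recheck pattern. */
static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_queue *q = container_of(napi, struct demo_queue, napi);
	int work = 0;

	while (work < budget && demo_queue_has_packets(q))
		work += demo_handle_one_packet(q);

	if (work < budget) {
		napi_complete(napi);
		demo_enable_ingress_irq(q);

		/* Close the race: a packet may have landed after the queue
		 * looked empty but before the interrupt was re-enabled. */
		if (demo_queue_has_packets(q))
			napi_reschedule(napi);
	}
	return work;
}
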
@@ -969,6 +947,12 @@ done:
/*
* Handle an ingress interrupt for the given device on the current cpu.
+ *
+ * ISSUE: Sometimes this gets called after "disable_percpu_irq()" has
+ * been called! This is probably due to "pending hypervisor downcalls".
+ *
+ * ISSUE: Is there any race condition between the "napi_schedule()" here
+ * and the "napi_complete()" call above?
*/
static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
{
@@ -977,9 +961,15 @@ static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
int my_cpu = smp_processor_id();
struct tile_net_cpu *info = priv->cpu[my_cpu];
- /* Disable hypervisor interrupt. */
+ /* Disable the ingress interrupt. */
disable_percpu_irq(priv->intr_id);
+ /* Ignore unwanted interrupts. */
+ if (!priv->active)
+ return IRQ_HANDLED;
+
+ /* ISSUE: Sometimes "info->napi_enabled" is false here. */
+
napi_schedule(&info->napi);
return IRQ_HANDLED;
@@ -1013,8 +1003,7 @@ static int tile_net_open_aux(struct net_device *dev)
*/
{
int epp_home = hv_lotar_to_cpu(epp_lotar);
- struct page *page = virt_to_page(priv->epp_queue);
- homecache_change_page_home(page, 0, epp_home);
+ homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home);
}
/*
@@ -1023,9 +1012,9 @@ static int tile_net_open_aux(struct net_device *dev)
{
netio_ipp_address_t ea = {
.va = 0,
- .pa = __pa(priv->epp_queue),
+ .pa = __pa(priv->eq),
.pte = hv_pte(0),
- .size = PAGE_SIZE,
+ .size = EQ_SIZE,
};
ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
@@ -1051,7 +1040,7 @@ static int tile_net_open_aux(struct net_device *dev)
/*
- * Register with hypervisor on each CPU.
+ * Register with hypervisor on the current CPU.
*
* Strangely, this function does important things even if it "fails",
* which is especially common if the link is not up yet. Hopefully
@@ -1100,7 +1089,8 @@ static void tile_net_register(void *dev_ptr)
priv->cpu[my_cpu] = info;
/*
- * Register ourselves with the IPP.
+ * Register ourselves with LIPP. This does a lot of stuff,
+ * including invoking the LIPP registration code.
*/
ret = hv_dev_pwrite(priv->hv_devhdl, 0,
(HV_VirtAddr)&config,
@@ -1109,8 +1099,11 @@ static void tile_net_register(void *dev_ptr)
PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
ret);
if (ret < 0) {
- printk(KERN_DEBUG "hv_dev_pwrite NETIO_IPP_INPUT_REGISTER_OFF"
- " failure %d\n", ret);
+ if (ret != NETIO_LINK_DOWN) {
+ printk(KERN_DEBUG "hv_dev_pwrite "
+ "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n",
+ ret);
+ }
info->link_down = (ret == NETIO_LINK_DOWN);
return;
}
@@ -1153,15 +1146,47 @@ static void tile_net_register(void *dev_ptr)
NETIO_IPP_GET_FASTIO_OFF);
PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);
- netif_napi_add(dev, &info->napi, tile_net_poll, 64);
-
/* Now we are registered. */
info->registered = true;
}
/*
- * Unregister with hypervisor on each CPU.
+ * Deregister with hypervisor on the current CPU.
+ *
+ * This simply discards all our credits, so no more packets will be
+ * delivered to this tile. There may still be packets in our queue.
+ *
+ * Also, disable the ingress interrupt.
+ */
+static void tile_net_deregister(void *dev_ptr)
+{
+ struct net_device *dev = (struct net_device *)dev_ptr;
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int my_cpu = smp_processor_id();
+ struct tile_net_cpu *info = priv->cpu[my_cpu];
+
+ /* Disable the ingress interrupt. */
+ disable_percpu_irq(priv->intr_id);
+
+ /* Do nothing else if not registered. */
+ if (info == NULL || !info->registered)
+ return;
+
+ {
+ struct tile_netio_queue *queue = &info->queue;
+ netio_queue_user_impl_t *qup = &queue->__user_part;
+
+ /* Discard all our credits. */
+ __netio_fastio_return_credits(qup->__fastio_index, -1);
+ }
+}
+
+
+/*
+ * Unregister with hypervisor on the current CPU.
+ *
+ * Also, disable the ingress interrupt.
*/
static void tile_net_unregister(void *dev_ptr)
{
@@ -1170,35 +1195,23 @@ static void tile_net_unregister(void *dev_ptr)
int my_cpu = smp_processor_id();
struct tile_net_cpu *info = priv->cpu[my_cpu];
- int ret = 0;
+ int ret;
int dummy = 0;
- /* Do nothing if never registered. */
- if (info == NULL)
- return;
+ /* Disable the ingress interrupt. */
+ disable_percpu_irq(priv->intr_id);
- /* Do nothing if already unregistered. */
- if (!info->registered)
+ /* Do nothing else if not registered. */
+ if (info == NULL || !info->registered)
return;
- /*
- * Unregister ourselves with LIPP.
- */
+ /* Unregister ourselves with LIPP/LEPP. */
ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
- PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_UNREGISTER_OFF) returned %d\n",
- ret);
- if (ret < 0) {
- /* FIXME: Just panic? */
- pr_err("hv_dev_pwrite NETIO_IPP_INPUT_UNREGISTER_OFF"
- " failure %d\n", ret);
- }
+ if (ret < 0)
+ panic("Failed to unregister with LIPP/LEPP!\n");
- /*
- * Discard all packets still in our NetIO queue. Hopefully,
- * once the unregister call is complete, there will be no
- * packets still in flight on the IDN.
- */
+ /* Discard all packets still in our NetIO queue. */
tile_net_discard_packets(dev);
/* Reset state. */
@@ -1208,11 +1221,6 @@ static void tile_net_unregister(void *dev_ptr)
/* Cancel egress timer. */
del_timer(&info->egress_timer);
info->egress_timer_scheduled = false;
-
- netif_napi_del(&info->napi);
-
- /* Now we are unregistered. */
- info->registered = false;
}
@@ -1220,18 +1228,28 @@ static void tile_net_unregister(void *dev_ptr)
* Helper function for "tile_net_stop()".
*
* Also used to handle registration failure in "tile_net_open_inner()",
- * when "fully_opened" is known to be false, and the various extra
- * steps in "tile_net_stop()" are not necessary. ISSUE: It might be
- * simpler if we could just call "tile_net_stop()" anyway.
+ * when the various extra steps in "tile_net_stop()" are not necessary.
*/
static void tile_net_stop_aux(struct net_device *dev)
{
struct tile_net_priv *priv = netdev_priv(dev);
+ int i;
int dummy = 0;
- /* Unregister all tiles, so LIPP will stop delivering packets. */
+ /*
+ * Unregister all tiles, so LIPP will stop delivering packets.
+ * Also, delete all the "napi" objects (sequentially, to protect
+ * "dev->napi_list").
+ */
on_each_cpu(tile_net_unregister, (void *)dev, 1);
+ for_each_online_cpu(i) {
+ struct tile_net_cpu *info = priv->cpu[i];
+ if (info != NULL && info->registered) {
+ netif_napi_del(&info->napi);
+ info->registered = false;
+ }
+ }
/* Stop LIPP/LEPP. */
if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
@@ -1243,18 +1261,15 @@ static void tile_net_stop_aux(struct net_device *dev)
/*
- * Disable ingress interrupts for the given device on the current cpu.
+ * Disable NAPI for the given device on the current cpu.
*/
-static void tile_net_disable_intr(void *dev_ptr)
+static void tile_net_stop_disable(void *dev_ptr)
{
struct net_device *dev = (struct net_device *)dev_ptr;
struct tile_net_priv *priv = netdev_priv(dev);
int my_cpu = smp_processor_id();
struct tile_net_cpu *info = priv->cpu[my_cpu];
- /* Disable hypervisor interrupt. */
- disable_percpu_irq(priv->intr_id);
-
/* Disable NAPI if needed. */
if (info != NULL && info->napi_enabled) {
napi_disable(&info->napi);
@@ -1264,21 +1279,24 @@ static void tile_net_disable_intr(void *dev_ptr)
/*
- * Enable ingress interrupts for the given device on the current cpu.
+ * Enable NAPI and the ingress interrupt for the given device
+ * on the current cpu.
+ *
+ * ISSUE: Only do this for "network cpus"?
*/
-static void tile_net_enable_intr(void *dev_ptr)
+static void tile_net_open_enable(void *dev_ptr)
{
struct net_device *dev = (struct net_device *)dev_ptr;
struct tile_net_priv *priv = netdev_priv(dev);
int my_cpu = smp_processor_id();
struct tile_net_cpu *info = priv->cpu[my_cpu];
- /* Enable hypervisor interrupt. */
- enable_percpu_irq(priv->intr_id);
-
/* Enable NAPI. */
napi_enable(&info->napi);
info->napi_enabled = true;
+
+ /* Enable the ingress interrupt. */
+ enable_percpu_irq(priv->intr_id);
}
@@ -1296,8 +1314,9 @@ static int tile_net_open_inner(struct net_device *dev)
int my_cpu = smp_processor_id();
struct tile_net_cpu *info;
struct tile_netio_queue *queue;
- unsigned int irq;
+ int result = 0;
int i;
+ int dummy = 0;
/*
* First try to register just on the local CPU, and handle any
@@ -1315,42 +1334,52 @@ static int tile_net_open_inner(struct net_device *dev)
/*
* Now register everywhere else. If any registration fails,
* even for "link down" (which might not be possible), we
- * clean up using "tile_net_stop_aux()".
+ * clean up using "tile_net_stop_aux()". Also, add all the
+ * "napi" objects (sequentially, to protect "dev->napi_list").
+ * ISSUE: Only use "netif_napi_add()" for "network cpus"?
*/
smp_call_function(tile_net_register, (void *)dev, 1);
for_each_online_cpu(i) {
- if (!priv->cpu[i]->registered) {
- tile_net_stop_aux(dev);
- return -EAGAIN;
- }
+ struct tile_net_cpu *info = priv->cpu[i];
+ if (info->registered)
+ netif_napi_add(dev, &info->napi, tile_net_poll, 64);
+ else
+ result = -EAGAIN;
+ }
+ if (result != 0) {
+ tile_net_stop_aux(dev);
+ return result;
}
queue = &info->queue;
- /*
- * Set the device intr bit mask.
- * The tile_net_register above sets per tile __intr_id.
- */
- priv->intr_id = queue->__system_part->__intr_id;
- BUG_ON(!priv->intr_id);
+ if (priv->intr_id == 0) {
+ unsigned int irq;
- /*
- * Register the device interrupt handler.
- * The __ffs() function returns the index into the interrupt handler
- * table from the interrupt bit mask which should have one bit
- * and one bit only set.
- */
- irq = __ffs(priv->intr_id);
- tile_irq_activate(irq, TILE_IRQ_PERCPU);
- BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
- 0, dev->name, (void *)dev) != 0);
-
- /* ISSUE: How could "priv->fully_opened" ever be "true" here? */
-
- if (!priv->fully_opened) {
+ /*
+ * Acquire the irq allocated by the hypervisor. Every
+ * queue gets the same irq. The "__intr_id" field is
+ * "1 << irq", so we use "__ffs()" to extract "irq".
+ */
+ priv->intr_id = queue->__system_part->__intr_id;
+ BUG_ON(priv->intr_id == 0);
+ irq = __ffs(priv->intr_id);
- int dummy = 0;
+ /*
+ * Register the ingress interrupt handler for this
+ * device, permanently.
+ *
+ * We used to call "free_irq()" in "tile_net_stop()",
+ * and then re-register the handler here every time,
+ * but that caused DNP errors in "handle_IRQ_event()"
+ * because "desc->action" was NULL. See bug 9143.
+ */
+ tile_irq_activate(irq, TILE_IRQ_PERCPU);
+ BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
+ 0, dev->name, (void *)dev) != 0);
+ }
+ {
/* Allocate initial buffers. */
int max_buffers =
@@ -1367,18 +1396,21 @@ static int tile_net_open_inner(struct net_device *dev)
if (info->num_needed_small_buffers != 0 ||
info->num_needed_large_buffers != 0)
panic("Insufficient memory for buffer stack!");
+ }
- /* Start LIPP/LEPP and activate "ingress" at the shim. */
- if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
- sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
- panic("Failed to activate the LIPP Shim!\n");
+ /* We are about to be active. */
+ priv->active = true;
- priv->fully_opened = 1;
- }
+ /* Make sure "active" is visible to all tiles. */
+ mb();
- /* On each tile, enable the hypervisor to trigger interrupts. */
- /* ISSUE: Do this before starting LIPP/LEPP? */
- on_each_cpu(tile_net_enable_intr, (void *)dev, 1);
+ /* On each tile, enable NAPI and the ingress interrupt. */
+ on_each_cpu(tile_net_open_enable, (void *)dev, 1);
+
+ /* Start LIPP/LEPP and activate "ingress" at the shim. */
+ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
+ sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
+ panic("Failed to activate the LIPP Shim!\n");
/* Start our transmit queue. */
netif_start_queue(dev);
@@ -1404,9 +1436,9 @@ static void tile_net_open_retry(struct work_struct *w)
* ourselves to try again later; otherwise, tell Linux we now have
* a working link. ISSUE: What if the return value is negative?
*/
- if (tile_net_open_inner(priv->dev))
- schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
- TILE_NET_RETRY_INTERVAL);
+ if (tile_net_open_inner(priv->dev) != 0)
+ schedule_delayed_work(&priv->retry_work,
+ TILE_NET_RETRY_INTERVAL);
else
netif_carrier_on(priv->dev);
}
@@ -1420,8 +1452,8 @@ static void tile_net_open_retry(struct work_struct *w)
* The open entry point is called when a network interface is made
* active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
- * and the stack is notified that the interface is ready.
+ * handler is registered with the OS (if needed), the watchdog timer
+ * is started, and the stack is notified that the interface is ready.
*
* If the actual link is not available yet, then we tell Linux that
* we have no carrier, and we keep checking until the link comes up.
@@ -1476,6 +1508,10 @@ static int tile_net_open(struct net_device *dev)
#endif
priv->partly_opened = 1;
+
+ } else {
+ /* FIXME: Is this possible? */
+ /* printk("Already partly opened.\n"); */
}
/*
@@ -1495,57 +1531,17 @@ static int tile_net_open(struct net_device *dev)
* and then remember to try again later.
*/
netif_carrier_off(dev);
- schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
- TILE_NET_RETRY_INTERVAL);
+ schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL);
return 0;
}
-/*
- * Disables a network interface.
- *
- * Returns 0, this is not allowed to fail.
- *
- * The close entry point is called when an interface is de-activated
- * by the OS. The hardware is still under the drivers control, but
- * needs to be disabled. A global MAC reset is issued to stop the
- * hardware, and all transmit and receive resources are freed.
- *
- * ISSUE: Can this can be called while "tile_net_poll()" is running?
- */
-static int tile_net_stop(struct net_device *dev)
+static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv)
{
- struct tile_net_priv *priv = netdev_priv(dev);
-
- bool pending = true;
-
- PDEBUG("tile_net_stop()\n");
-
- /* ISSUE: Only needed if not yet fully open. */
- cancel_delayed_work_sync(&priv->retry_work);
-
- /* Can't transmit any more. */
- netif_stop_queue(dev);
-
- /*
- * Disable hypervisor interrupts on each tile.
- */
- on_each_cpu(tile_net_disable_intr, (void *)dev, 1);
-
- /*
- * Unregister the interrupt handler.
- * The __ffs() function returns the index into the interrupt handler
- * table from the interrupt bit mask which should have one bit
- * and one bit only set.
- */
- if (priv->intr_id)
- free_irq(__ffs(priv->intr_id), dev);
-
- /*
- * Drain all the LIPP buffers.
- */
+ int n = 0;
+ /* Drain all the LIPP buffers. */
while (true) {
int buffer;
@@ -1568,43 +1564,105 @@ static int tile_net_stop(struct net_device *dev)
kfree_skb(skb);
}
+
+ n++;
}
- /* Stop LIPP/LEPP. */
- tile_net_stop_aux(dev);
+ return n;
+}
- priv->fully_opened = 0;
+/*
+ * Disables a network interface.
+ *
+ * Returns 0, this is not allowed to fail.
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ *
+ * ISSUE: How closely does "netif_running(dev)" mirror "priv->active"?
+ *
+ * Before we are called by "__dev_close()", "netif_running()" will
+ * have been cleared, so no NEW calls to "tile_net_poll()" will be
+ * made by "netpoll_poll_dev()".
+ *
+ * Often, this can cause some tiles to still have packets in their
+ * queues, so we must call "tile_net_discard_packets()" later.
+ *
+ * Note that some other tile may still be INSIDE "tile_net_poll()",
+ * and in fact, many will be, if there is heavy network load.
+ *
+ * Calling "on_each_cpu(tile_net_stop_disable, (void *)dev, 1)" when
+ * any tile is still "napi_schedule()"'d will induce a horrible crash
+ * when "msleep()" is called. This includes tiles which are inside
+ * "tile_net_poll()" which have not yet called "napi_complete()".
+ *
+ * So, we must first try to wait long enough for other tiles to finish
+ * with any current "tile_net_poll()" call, and, hopefully, to clear
+ * the "scheduled" flag. ISSUE: It is unclear what happens to tiles
+ * which have called "napi_schedule()" but which had not yet tried to
+ * call "tile_net_poll()", or which exhausted their budget inside
+ * "tile_net_poll()" just before this function was called.
+ */
+static int tile_net_stop(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ PDEBUG("tile_net_stop()\n");
+ /* Start discarding packets. */
+ priv->active = false;
+
+ /* Make sure "active" is visible to all tiles. */
+ mb();
/*
- * XXX: ISSUE: It appears that, in practice anyway, by the
- * time we get here, there are no pending completions.
+ * On each tile, make sure no NEW packets get delivered, and
+ * disable the ingress interrupt.
+ *
+ * Note that the ingress interrupt can fire AFTER this,
+ * presumably due to packets which were recently delivered,
+ * but it will have no effect.
*/
- while (pending) {
+ on_each_cpu(tile_net_deregister, (void *)dev, 1);
- struct sk_buff *olds[32];
- unsigned int wanted = 32;
- unsigned int i, nolds = 0;
+ /* Optimistically drain LIPP buffers. */
+ (void)tile_net_drain_lipp_buffers(priv);
- nolds = tile_net_lepp_grab_comps(dev, olds,
- wanted, &pending);
+ /* ISSUE: Only needed if not yet fully open. */
+ cancel_delayed_work_sync(&priv->retry_work);
- /* ISSUE: We have never actually seen this debug spew. */
- if (nolds != 0)
- pr_info("During tile_net_stop(), grabbed %d comps.\n",
- nolds);
+ /* Can't transmit any more. */
+ netif_stop_queue(dev);
- for (i = 0; i < nolds; i++)
- kfree_skb(olds[i]);
- }
+ /* Disable NAPI on each tile. */
+ on_each_cpu(tile_net_stop_disable, (void *)dev, 1);
+
+ /*
+ * Drain any remaining LIPP buffers. NOTE: This "printk()"
+ * has never been observed, but in theory it could happen.
+ */
+ if (tile_net_drain_lipp_buffers(priv) != 0)
+ printk("Had to drain some extra LIPP buffers!\n");
+ /* Stop LIPP/LEPP. */
+ tile_net_stop_aux(dev);
+
+ /*
+ * ISSUE: It appears that, in practice anyway, by the time we
+ * get here, there are no pending completions, but just in case,
+ * we free (all of) them anyway.
+ */
+ while (tile_net_lepp_free_comps(dev, true))
+ /* loop */;
/* Wipe the EPP queue. */
- memset(priv->epp_queue, 0, sizeof(lepp_queue_t));
+ memset(priv->eq, 0, sizeof(lepp_queue_t));
/* Evict the EPP queue. */
- finv_buffer(priv->epp_queue, PAGE_SIZE);
+ finv_buffer(priv->eq, EQ_SIZE);
return 0;
}
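
(For orientation only, not part of the patch: the shutdown ordering the comment above argues for condenses to roughly the sequence below; names mirror the new tile_net_stop(), with casts and error handling omitted.)

	priv->active = false;                         /* start discarding packets  */
	mb();                                         /* publish the flag          */
	on_each_cpu(tile_net_deregister, dev, 1);     /* no new ingress, IRQ off   */
	tile_net_drain_lipp_buffers(priv);            /* optimistic first drain    */
	cancel_delayed_work_sync(&priv->retry_work);
	netif_stop_queue(dev);                        /* no more transmits         */
	on_each_cpu(tile_net_stop_disable, dev, 1);   /* only now tear down NAPI   */
	tile_net_drain_lipp_buffers(priv);            /* catch late deliveries     */
	tile_net_stop_aux(dev);                       /* finally stop LIPP/LEPP    */
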
@@ -1628,7 +1686,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
if (b_len != 0) {
if (!hash_default)
- finv_buffer_remote(b_data, b_len);
+ finv_buffer_remote(b_data, b_len, 0);
cpa = __pa(b_data);
frags[n].cpa_lo = cpa;
@@ -1651,7 +1709,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
if (!hash_default) {
void *va = pfn_to_kaddr(pfn) + f->page_offset;
BUG_ON(PageHighMem(f->page));
- finv_buffer_remote(va, f->size);
+ finv_buffer_remote(va, f->size, 0);
}
cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
@@ -1750,17 +1808,15 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
unsigned long irqflags;
- lepp_queue_t *eq = priv->epp_queue;
+ lepp_queue_t *eq = priv->eq;
- struct sk_buff *olds[4];
- unsigned int wanted = 4;
+ struct sk_buff *olds[8];
+ unsigned int wanted = 8;
unsigned int i, nolds = 0;
unsigned int cmd_head, cmd_tail, cmd_next;
unsigned int comp_tail;
- unsigned int free_slots;
-
/* Paranoia. */
BUG_ON(skb->protocol != htons(ETH_P_IP));
@@ -1788,34 +1844,32 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
/* Enqueue the command. */
- spin_lock_irqsave(&priv->cmd_lock, irqflags);
+ spin_lock_irqsave(&priv->eq_lock, irqflags);
/*
* Handle completions if needed to make room.
* HACK: Spin until there is sufficient room.
*/
- free_slots = lepp_num_free_comp_slots(eq);
- if (free_slots < 1) {
-spin:
- nolds += tile_net_lepp_grab_comps(dev, olds + nolds,
- wanted - nolds, NULL);
- if (lepp_num_free_comp_slots(eq) < 1)
- goto spin;
+ if (lepp_num_free_comp_slots(eq) == 0) {
+ nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
+ if (nolds == 0) {
+busy:
+ spin_unlock_irqrestore(&priv->eq_lock, irqflags);
+ return NETDEV_TX_BUSY;
+ }
}
cmd_head = eq->cmd_head;
cmd_tail = eq->cmd_tail;
- /* NOTE: The "gotos" below are untested. */
-
/* Prepare to advance, detecting full queue. */
cmd_next = cmd_tail + cmd_size;
if (cmd_tail < cmd_head && cmd_next >= cmd_head)
- goto spin;
+ goto busy;
if (cmd_next > LEPP_CMD_LIMIT) {
cmd_next = 0;
if (cmd_next == cmd_head)
- goto spin;
+ goto busy;
}
/* Copy the command. */
@@ -1831,14 +1885,18 @@ spin:
eq->comp_tail = comp_tail;
/* Flush before allowing LEPP to handle the command. */
+ /* ISSUE: Is this the optimal location for the flush? */
__insn_mf();
eq->cmd_tail = cmd_tail;
- spin_unlock_irqrestore(&priv->cmd_lock, irqflags);
-
+ /* NOTE: Using "4" here is more efficient than "0" or "2", */
+ /* and, strangely, more efficient than pre-checking the number */
+ /* of available completions, and comparing it to 4. */
if (nolds == 0)
- nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL);
+ nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);
+
+ spin_unlock_irqrestore(&priv->eq_lock, irqflags);
/* Handle completions. */
for (i = 0; i < nolds; i++)
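
(Aside, not part of the patch: the hunk above replaces the old "spin until a completion slot frees up" loop with the usual backpressure contract, i.e. release the lock and return NETDEV_TX_BUSY so the core requeues the skb. A minimal sketch of that shape, using hypothetical example_* names:)

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);
		unsigned long flags;

		spin_lock_irqsave(&priv->eq_lock, flags);

		/* Try to make room; if we cannot, ask the stack to retry later. */
		if (example_queue_full(priv) && !example_reap_completions(priv)) {
			spin_unlock_irqrestore(&priv->eq_lock, flags);
			return NETDEV_TX_BUSY;
		}

		example_post_command(priv, skb);
		spin_unlock_irqrestore(&priv->eq_lock, flags);
		return NETDEV_TX_OK;
	}
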
@@ -1878,10 +1936,10 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
unsigned int num_frags;
- lepp_queue_t *eq = priv->epp_queue;
+ lepp_queue_t *eq = priv->eq;
- struct sk_buff *olds[4];
- unsigned int wanted = 4;
+ struct sk_buff *olds[8];
+ unsigned int wanted = 8;
unsigned int i, nolds = 0;
unsigned int cmd_size = sizeof(lepp_cmd_t);
@@ -1891,8 +1949,6 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
lepp_cmd_t cmds[LEPP_MAX_FRAGS];
- unsigned int free_slots;
-
/*
* This is paranoia, since we think that if the link doesn't come
@@ -1913,7 +1969,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
if (hash_default) {
HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data);
if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
- panic("Non-coherent egress buffer!");
+ panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx",
+ data, hv_pte_get_mode(pte), hv_pte_val(pte));
}
#endif
#endif
@@ -1966,37 +2023,35 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
/* Enqueue the commands. */
- spin_lock_irqsave(&priv->cmd_lock, irqflags);
+ spin_lock_irqsave(&priv->eq_lock, irqflags);
/*
* Handle completions if needed to make room.
* HACK: Spin until there is sufficient room.
*/
- free_slots = lepp_num_free_comp_slots(eq);
- if (free_slots < 1) {
-spin:
- nolds += tile_net_lepp_grab_comps(dev, olds + nolds,
- wanted - nolds, NULL);
- if (lepp_num_free_comp_slots(eq) < 1)
- goto spin;
+ if (lepp_num_free_comp_slots(eq) == 0) {
+ nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
+ if (nolds == 0) {
+busy:
+ spin_unlock_irqrestore(&priv->eq_lock, irqflags);
+ return NETDEV_TX_BUSY;
+ }
}
cmd_head = eq->cmd_head;
cmd_tail = eq->cmd_tail;
- /* NOTE: The "gotos" below are untested. */
-
/* Copy the commands, or fail. */
for (i = 0; i < num_frags; i++) {
/* Prepare to advance, detecting full queue. */
cmd_next = cmd_tail + cmd_size;
if (cmd_tail < cmd_head && cmd_next >= cmd_head)
- goto spin;
+ goto busy;
if (cmd_next > LEPP_CMD_LIMIT) {
cmd_next = 0;
if (cmd_next == cmd_head)
- goto spin;
+ goto busy;
}
/* Copy the command. */
@@ -2013,14 +2068,18 @@ spin:
eq->comp_tail = comp_tail;
/* Flush before allowing LEPP to handle the command. */
+ /* ISSUE: Is this the optimal location for the flush? */
__insn_mf();
eq->cmd_tail = cmd_tail;
- spin_unlock_irqrestore(&priv->cmd_lock, irqflags);
-
+ /* NOTE: Using "4" here is more efficient than "0" or "2", */
+ /* and, strangely, more efficient than pre-checking the number */
+ /* of available completions, and comparing it to 4. */
if (nolds == 0)
- nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL);
+ nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);
+
+ spin_unlock_irqrestore(&priv->eq_lock, irqflags);
/* Handle completions. */
for (i = 0; i < nolds; i++)
@@ -2269,7 +2328,6 @@ static struct net_device *tile_net_dev_init(const char *name)
int ret;
struct net_device *dev;
struct tile_net_priv *priv;
- struct page *page;
/*
* Allocate the device structure. This allocates "priv", calls
@@ -2293,23 +2351,21 @@ static struct net_device *tile_net_dev_init(const char *name)
INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry);
- spin_lock_init(&priv->cmd_lock);
- spin_lock_init(&priv->comp_lock);
+ spin_lock_init(&priv->eq_lock);
- /* Allocate "epp_queue". */
- BUG_ON(get_order(sizeof(lepp_queue_t)) != 0);
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
- if (!page) {
+ /* Allocate "eq". */
+ priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER);
+ if (!priv->eq_pages) {
free_netdev(dev);
return NULL;
}
- priv->epp_queue = page_address(page);
+ priv->eq = page_address(priv->eq_pages);
/* Register the network device. */
ret = register_netdev(dev);
if (ret) {
pr_err("register_netdev %s failed %d\n", dev->name, ret);
- free_page((unsigned long)priv->epp_queue);
+ __free_pages(priv->eq_pages, EQ_ORDER);
free_netdev(dev);
return NULL;
}
@@ -2318,7 +2374,7 @@ static struct net_device *tile_net_dev_init(const char *name)
ret = tile_net_get_mac(dev);
if (ret < 0) {
unregister_netdev(dev);
- free_page((unsigned long)priv->epp_queue);
+ __free_pages(priv->eq_pages, EQ_ORDER);
free_netdev(dev);
return NULL;
}
@@ -2329,6 +2385,9 @@ static struct net_device *tile_net_dev_init(const char *name)
/*
* Module cleanup.
+ *
+ * FIXME: If compiled as a module, this module cannot be "unloaded",
+ * because the "ingress interrupt handler" is registered permanently.
*/
static void tile_net_cleanup(void)
{
@@ -2339,8 +2398,8 @@ static void tile_net_cleanup(void)
struct net_device *dev = tile_net_devs[i];
struct tile_net_priv *priv = netdev_priv(dev);
unregister_netdev(dev);
- finv_buffer(priv->epp_queue, PAGE_SIZE);
- free_page((unsigned long)priv->epp_queue);
+ finv_buffer(priv->eq, EQ_SIZE);
+ __free_pages(priv->eq_pages, EQ_ORDER);
free_netdev(dev);
}
}
@@ -2363,7 +2422,12 @@ static int tile_net_init_module(void)
}
+module_init(tile_net_init_module);
+module_exit(tile_net_cleanup);
+
+
#ifndef MODULE
+
/*
* The "network_cpus" boot argument specifies the cpus that are dedicated
* to handle ingress packets.
@@ -2399,8 +2463,5 @@ static int __init network_cpus_setup(char *str)
return 0;
}
__setup("network_cpus=", network_cpus_setup);
-#endif
-
-module_init(tile_net_init_module);
-module_exit(tile_net_cleanup);
+#endif
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index f8e463cd8ecc..ace6404e2fac 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -25,150 +25,9 @@
* Microchip Technology, 24C01A/02A/04A Data Sheet
* available in PDF format from www.microchip.com
*
- * Change History
- *
- * Tigran Aivazian <tigran@sco.com>: TLan_PciProbe() now uses
- * new PCI BIOS interface.
- * Alan Cox <alan@lxorguk.ukuu.org.uk>:
- * Fixed the out of memory
- * handling.
- *
- * Torben Mathiasen <torben.mathiasen@compaq.com> New Maintainer!
- *
- * v1.1 Dec 20, 1999 - Removed linux version checking
- * Patch from Tigran Aivazian.
- * - v1.1 includes Alan's SMP updates.
- * - We still have problems on SMP though,
- * but I'm looking into that.
- *
- * v1.2 Jan 02, 2000 - Hopefully fixed the SMP deadlock.
- * - Removed dependency of HZ being 100.
- * - We now allow higher priority timers to
- * overwrite timers like TLAN_TIMER_ACTIVITY
- * Patch from John Cagle <john.cagle@compaq.com>.
- * - Fixed a few compiler warnings.
- *
- * v1.3 Feb 04, 2000 - Fixed the remaining HZ issues.
- * - Removed call to pci_present().
- * - Removed SA_INTERRUPT flag from irq handler.
- * - Added __init and __initdata to reduce resisdent
- * code size.
- * - Driver now uses module_init/module_exit.
- * - Rewrote init_module and tlan_probe to
- * share a lot more code. We now use tlan_probe
- * with builtin and module driver.
- * - Driver ported to new net API.
- * - tlan.txt has been reworked to reflect current
- * driver (almost)
- * - Other minor stuff
- *
- * v1.4 Feb 10, 2000 - Updated with more changes required after Dave's
- * network cleanup in 2.3.43pre7 (Tigran & myself)
- * - Minor stuff.
- *
- * v1.5 March 22, 2000 - Fixed another timer bug that would hang the driver
- * if no cable/link were present.
- * - Cosmetic changes.
- * - TODO: Port completely to new PCI/DMA API
- * Auto-Neg fallback.
- *
- * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters. Haven't
- * tested it though, as the kernel support is currently
- * broken (2.3.99p4p3).
- * - Updated tlan.txt accordingly.
- * - Adjusted minimum/maximum frame length.
- * - There is now a TLAN website up at
- * http://hp.sourceforge.net/
- *
- * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
- * reports PHY information when used with Donald
- * Beckers userspace MII diagnostics utility.
- *
- * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings.
- * - Added link information to Auto-Neg and forced
- * modes. When NIC operates with auto-neg the driver
- * will report Link speed & duplex modes as well as
- * link partner abilities. When forced link is used,
- * the driver will report status of the established
- * link.
- * Please read tlan.txt for additional information.
- * - Removed call to check_region(), and used
- * return value of request_region() instead.
- *
- * v1.8a May 28, 2000 - Minor updates.
- *
- * v1.9 July 25, 2000 - Fixed a few remaining Full-Duplex issues.
- * - Updated with timer fixes from Andrew Morton.
- * - Fixed module race in TLan_Open.
- * - Added routine to monitor PHY status.
- * - Added activity led support for Proliant devices.
- *
- * v1.10 Aug 30, 2000 - Added support for EISA based tlan controllers
- * like the Compaq NetFlex3/E.
- * - Rewrote tlan_probe to better handle multiple
- * bus probes. Probing and device setup is now
- * done through TLan_Probe and TLan_init_one. Actual
- * hardware probe is done with kernel API and
- * TLan_EisaProbe.
- * - Adjusted debug information for probing.
- * - Fixed bug that would cause general debug information
- * to be printed after driver removal.
- * - Added transmit timeout handling.
- * - Fixed OOM return values in tlan_probe.
- * - Fixed possible mem leak in tlan_exit
- * (now tlan_remove_one).
- * - Fixed timer bug in TLan_phyMonitor.
- * - This driver version is alpha quality, please
- * send me any bug issues you may encounter.
- *
- * v1.11 Aug 31, 2000 - Do not try to register irq 0 if no irq line was
- * set for EISA cards.
- * - Added support for NetFlex3/E with nibble-rate
- * 10Base-T PHY. This is untestet as I haven't got
- * one of these cards.
- * - Fixed timer being added twice.
- * - Disabled PhyMonitoring by default as this is
- * work in progress. Define MONITOR to enable it.
- * - Now we don't display link info with PHYs that
- * doesn't support it (level1).
- * - Incresed tx_timeout beacuse of auto-neg.
- * - Adjusted timers for forced speeds.
- *
- * v1.12 Oct 12, 2000 - Minor fixes (memleak, init, etc.)
- *
- * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues
- * when link can't be established.
- * - Added the bbuf option as a kernel parameter.
- * - Fixed ioaddr probe bug.
- * - Fixed stupid deadlock with MII interrupts.
- * - Added support for speed/duplex selection with
- * multiple nics.
- * - Added partly fix for TX Channel lockup with
- * TLAN v1.0 silicon. This needs to be investigated
- * further.
- *
- * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per.
- * interrupt. Thanks goes to
- * Adam Keys <adam@ti.com>
- * Denis Beaudoin <dbeaudoin@ti.com>
- * for providing the patch.
- * - Fixed auto-neg output when using multiple
- * adapters.
- * - Converted to use new taskq interface.
- *
- * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.)
- *
- * Samuel Chessman <chessman@tux.org> New Maintainer!
- *
- * v1.15 Apr 4, 2002 - Correct operation when aui=1 to be
- * 10T half duplex no loopback
- * Thanks to Gunnar Eikman
- *
- * Sakari Ailus <sakari.ailus@iki.fi>:
- *
- * v1.15a Dec 15 2008 - Remove bbuf support, it doesn't work anyway.
- *
- *******************************************************************************/
+ ******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
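
(Side note, not from the patch: defining pr_fmt before the first include is what lets the later hunks drop the hard-coded "TLAN: " prefixes, since the pr_* helpers expand it automatically. Roughly:)

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */
	#include <linux/printk.h>

	/* pr_err("Could not reserve IO regions\n") now prints
	 * "tlan: Could not reserve IO regions"
	 */
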
@@ -185,13 +44,11 @@
#include "tlan.h"
-typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 );
-
/* For removing EISA devices */
-static struct net_device *TLan_Eisa_Devices;
+static struct net_device *tlan_eisa_devices;
-static int TLanDevicesInstalled;
+static int tlan_devices_installed;
/* Set speed, duplex and aui settings */
static int aui[MAX_TLAN_BOARDS];
@@ -202,8 +59,9 @@ module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
-MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
-MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)");
+MODULE_PARM_DESC(duplex,
+ "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
+MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
@@ -218,139 +76,144 @@ static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
-static const char TLanSignature[] = "TLAN";
-static const char tlan_banner[] = "ThunderLAN driver v1.15a\n";
+static const char tlan_signature[] = "TLAN";
+static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
static int tlan_have_pci;
static int tlan_have_eisa;
-static const char *media[] = {
- "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ",
- "100baseTx-FD", "100baseT4", NULL
+static const char * const media[] = {
+ "10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
+ "100BaseTx-FD", "100BaseT4", NULL
};
static struct board {
- const char *deviceLabel;
- u32 flags;
- u16 addrOfs;
+ const char *device_label;
+ u32 flags;
+ u16 addr_ofs;
} board_info[] = {
{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
- { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent 10/100 TX PCI UTP",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq NetFlex-3/P",
TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq Netelligent Integrated 10/100 TX UTP",
TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
- { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 },
- { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent Dual 10/100 TX PCI UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent 10/100 TX Embedded UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
- { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 },
- { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 },
+ { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+ { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
- { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq NetFlex-3/E",
- TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
+ TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
- { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
+ { "Compaq NetFlex-3/E",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
};
static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
-static void TLan_EisaProbe( void );
-static void TLan_Eisa_Cleanup( void );
-static int TLan_Init( struct net_device * );
-static int TLan_Open( struct net_device *dev );
-static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *);
-static irqreturn_t TLan_HandleInterrupt( int, void *);
-static int TLan_Close( struct net_device *);
-static struct net_device_stats *TLan_GetStats( struct net_device *);
-static void TLan_SetMulticastList( struct net_device *);
-static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
-static int TLan_probe1( struct pci_dev *pdev, long ioaddr,
- int irq, int rev, const struct pci_device_id *ent);
-static void TLan_tx_timeout( struct net_device *dev);
-static void TLan_tx_timeout_work(struct work_struct *work);
-static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
-
-static u32 TLan_HandleTxEOF( struct net_device *, u16 );
-static u32 TLan_HandleStatOverflow( struct net_device *, u16 );
-static u32 TLan_HandleRxEOF( struct net_device *, u16 );
-static u32 TLan_HandleDummy( struct net_device *, u16 );
-static u32 TLan_HandleTxEOC( struct net_device *, u16 );
-static u32 TLan_HandleStatusCheck( struct net_device *, u16 );
-static u32 TLan_HandleRxEOC( struct net_device *, u16 );
-
-static void TLan_Timer( unsigned long );
-
-static void TLan_ResetLists( struct net_device * );
-static void TLan_FreeLists( struct net_device * );
-static void TLan_PrintDio( u16 );
-static void TLan_PrintList( TLanList *, char *, int );
-static void TLan_ReadAndClearStats( struct net_device *, int );
-static void TLan_ResetAdapter( struct net_device * );
-static void TLan_FinishReset( struct net_device * );
-static void TLan_SetMac( struct net_device *, int areg, char *mac );
-
-static void TLan_PhyPrint( struct net_device * );
-static void TLan_PhyDetect( struct net_device * );
-static void TLan_PhyPowerDown( struct net_device * );
-static void TLan_PhyPowerUp( struct net_device * );
-static void TLan_PhyReset( struct net_device * );
-static void TLan_PhyStartLink( struct net_device * );
-static void TLan_PhyFinishAutoNeg( struct net_device * );
+static void tlan_eisa_probe(void);
+static void tlan_eisa_cleanup(void);
+static int tlan_init(struct net_device *);
+static int tlan_open(struct net_device *dev);
+static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
+static irqreturn_t tlan_handle_interrupt(int, void *);
+static int tlan_close(struct net_device *);
+static struct net_device_stats *tlan_get_stats(struct net_device *);
+static void tlan_set_multicast_list(struct net_device *);
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
+ int irq, int rev, const struct pci_device_id *ent);
+static void tlan_tx_timeout(struct net_device *dev);
+static void tlan_tx_timeout_work(struct work_struct *work);
+static int tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static u32 tlan_handle_tx_eof(struct net_device *, u16);
+static u32 tlan_handle_stat_overflow(struct net_device *, u16);
+static u32 tlan_handle_rx_eof(struct net_device *, u16);
+static u32 tlan_handle_dummy(struct net_device *, u16);
+static u32 tlan_handle_tx_eoc(struct net_device *, u16);
+static u32 tlan_handle_status_check(struct net_device *, u16);
+static u32 tlan_handle_rx_eoc(struct net_device *, u16);
+
+static void tlan_timer(unsigned long);
+
+static void tlan_reset_lists(struct net_device *);
+static void tlan_free_lists(struct net_device *);
+static void tlan_print_dio(u16);
+static void tlan_print_list(struct tlan_list *, char *, int);
+static void tlan_read_and_clear_stats(struct net_device *, int);
+static void tlan_reset_adapter(struct net_device *);
+static void tlan_finish_reset(struct net_device *);
+static void tlan_set_mac(struct net_device *, int areg, char *mac);
+
+static void tlan_phy_print(struct net_device *);
+static void tlan_phy_detect(struct net_device *);
+static void tlan_phy_power_down(struct net_device *);
+static void tlan_phy_power_up(struct net_device *);
+static void tlan_phy_reset(struct net_device *);
+static void tlan_phy_start_link(struct net_device *);
+static void tlan_phy_finish_auto_neg(struct net_device *);
#ifdef MONITOR
-static void TLan_PhyMonitor( struct net_device * );
+static void tlan_phy_monitor(struct net_device *);
#endif
/*
-static int TLan_PhyNop( struct net_device * );
-static int TLan_PhyInternalCheck( struct net_device * );
-static int TLan_PhyInternalService( struct net_device * );
-static int TLan_PhyDp83840aCheck( struct net_device * );
+ static int tlan_phy_nop(struct net_device *);
+ static int tlan_phy_internal_check(struct net_device *);
+ static int tlan_phy_internal_service(struct net_device *);
+ static int tlan_phy_dp83840a_check(struct net_device *);
*/
-static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
-static void TLan_MiiSendData( u16, u32, unsigned );
-static void TLan_MiiSync( u16 );
-static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
+static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void tlan_mii_send_data(u16, u32, unsigned);
+static void tlan_mii_sync(u16);
+static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
-static void TLan_EeSendStart( u16 );
-static int TLan_EeSendByte( u16, u8, int );
-static void TLan_EeReceiveByte( u16, u8 *, int );
-static int TLan_EeReadByte( struct net_device *, u8, u8 * );
+static void tlan_ee_send_start(u16);
+static int tlan_ee_send_byte(u16, u8, int);
+static void tlan_ee_receive_byte(u16, u8 *, int);
+static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
static inline void
-TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
+tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
{
unsigned long addr = (unsigned long)skb;
tag->buffer[9].address = addr;
@@ -358,7 +221,7 @@ TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
}
static inline struct sk_buff *
-TLan_GetSKB( const struct tlan_list_tag *tag)
+tlan_get_skb(const struct tlan_list *tag)
{
unsigned long addr;
@@ -367,50 +230,50 @@ TLan_GetSKB( const struct tlan_list_tag *tag)
return (struct sk_buff *) addr;
}
-
-static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
+static u32
+(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
NULL,
- TLan_HandleTxEOF,
- TLan_HandleStatOverflow,
- TLan_HandleRxEOF,
- TLan_HandleDummy,
- TLan_HandleTxEOC,
- TLan_HandleStatusCheck,
- TLan_HandleRxEOC
+ tlan_handle_tx_eof,
+ tlan_handle_stat_overflow,
+ tlan_handle_rx_eof,
+ tlan_handle_dummy,
+ tlan_handle_tx_eoc,
+ tlan_handle_status_check,
+ tlan_handle_rx_eoc
};
static inline void
-TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
+tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
if (!in_irq())
spin_lock_irqsave(&priv->lock, flags);
- if ( priv->timer.function != NULL &&
- priv->timerType != TLAN_TIMER_ACTIVITY ) {
+ if (priv->timer.function != NULL &&
+ priv->timer_type != TLAN_TIMER_ACTIVITY) {
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
return;
}
- priv->timer.function = TLan_Timer;
+ priv->timer.function = tlan_timer;
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
priv->timer.data = (unsigned long) dev;
- priv->timerSetAt = jiffies;
- priv->timerType = type;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = type;
mod_timer(&priv->timer, jiffies + ticks);
-} /* TLan_SetTimer */
+}
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Primary Functions
+ThunderLAN driver primary functions
- These functions are more or less common to all Linux network drivers.
+these functions are more or less common to all linux network drivers.
******************************************************************************
*****************************************************************************/
@@ -419,56 +282,124 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
- /***************************************************************
- * tlan_remove_one
- *
- * Returns:
- * Nothing
- * Parms:
- * None
- *
- * Goes through the TLanDevices list and frees the device
- * structs and memory associated with each device (lists
- * and buffers). It also ureserves the IO port regions
- * associated with this device.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_remove_one
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * None
+ *
+ * Goes through the TLanDevices list and frees the device
+ * structs and memory associated with each device (lists
+ * and buffers). It also unreserves the IO port regions
+ * associated with this device.
+ *
+ **************************************************************/
-static void __devexit tlan_remove_one( struct pci_dev *pdev)
+static void __devexit tlan_remove_one(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata( pdev );
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tlan_priv *priv = netdev_priv(dev);
- unregister_netdev( dev );
+ unregister_netdev(dev);
- if ( priv->dmaStorage ) {
- pci_free_consistent(priv->pciDev,
- priv->dmaSize, priv->dmaStorage,
- priv->dmaStorageDMA );
+ if (priv->dma_storage) {
+ pci_free_consistent(priv->pci_dev,
+ priv->dma_size, priv->dma_storage,
+ priv->dma_storage_dma);
}
#ifdef CONFIG_PCI
pci_release_regions(pdev);
#endif
- free_netdev( dev );
+ free_netdev(dev);
- pci_set_drvdata( pdev, NULL );
+ pci_set_drvdata(pdev, NULL);
}
+static void tlan_start(struct net_device *dev)
+{
+ tlan_reset_lists(dev);
+ /* NOTE: It might not be necessary to read the stats before a
+ reset if you don't care what the values are.
+ */
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
+ netif_wake_queue(dev);
+}
+
+static void tlan_stop(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+ /* Reset and power down phy */
+ tlan_reset_adapter(dev);
+ if (priv->timer.function != NULL) {
+ del_timer_sync(&priv->timer);
+ priv->timer.function = NULL;
+ }
+}
+
+#ifdef CONFIG_PM
+
+static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (netif_running(dev))
+ tlan_stop(dev);
+
+ netif_device_detach(dev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int tlan_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, 0, 0);
+ netif_device_attach(dev);
+
+ if (netif_running(dev))
+ tlan_start(dev);
+
+ return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define tlan_suspend NULL
+#define tlan_resume NULL
+
+#endif /* CONFIG_PM */
+
+
static struct pci_driver tlan_driver = {
.name = "tlan",
.id_table = tlan_pci_tbl,
.probe = tlan_init_one,
.remove = __devexit_p(tlan_remove_one),
+ .suspend = tlan_suspend,
+ .resume = tlan_resume,
};
static int __init tlan_probe(void)
{
int rc = -ENODEV;
- printk(KERN_INFO "%s", tlan_banner);
+ pr_info("%s", tlan_banner);
TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
@@ -477,18 +408,18 @@ static int __init tlan_probe(void)
rc = pci_register_driver(&tlan_driver);
if (rc != 0) {
- printk(KERN_ERR "TLAN: Could not register pci driver.\n");
+ pr_err("Could not register pci driver\n");
goto err_out_pci_free;
}
TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
- TLan_EisaProbe();
+ tlan_eisa_probe();
- printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d EISA: %d\n",
- TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s",
- tlan_have_pci, tlan_have_eisa);
+ pr_info("%d device%s installed, PCI: %d EISA: %d\n",
+ tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
+ tlan_have_pci, tlan_have_eisa);
- if (TLanDevicesInstalled == 0) {
+ if (tlan_devices_installed == 0) {
rc = -ENODEV;
goto err_out_pci_unreg;
}
@@ -501,39 +432,39 @@ err_out_pci_free:
}
-static int __devinit tlan_init_one( struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __devinit tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- return TLan_probe1( pdev, -1, -1, 0, ent);
+ return tlan_probe1(pdev, -1, -1, 0, ent);
}
/*
- ***************************************************************
- * tlan_probe1
- *
- * Returns:
- * 0 on success, error code on error
- * Parms:
- * none
- *
- * The name is lower case to fit in with all the rest of
- * the netcard_probe names. This function looks for
- * another TLan based adapter, setting it up with the
- * allocated device struct if one is found.
- * tlan_probe has been ported to the new net API and
- * now allocates its own device structure. This function
- * is also used by modules.
- *
- **************************************************************/
-
-static int __devinit TLan_probe1(struct pci_dev *pdev,
+***************************************************************
+* tlan_probe1
+*
+* Returns:
+* 0 on success, error code on error
+* Parms:
+* none
+*
+* The name is lower case to fit in with all the rest of
+* the netcard_probe names. This function looks for
+* another TLan based adapter, setting it up with the
+* allocated device struct if one is found.
+* tlan_probe has been ported to the new net API and
+* now allocates its own device structure. This function
+* is also used by modules.
+*
+**************************************************************/
+
+static int __devinit tlan_probe1(struct pci_dev *pdev,
long ioaddr, int irq, int rev,
- const struct pci_device_id *ent )
+ const struct pci_device_id *ent)
{
struct net_device *dev;
- TLanPrivateInfo *priv;
+ struct tlan_priv *priv;
u16 device_id;
int reg, rc = -ENODEV;
@@ -543,17 +474,17 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
if (rc)
return rc;
- rc = pci_request_regions(pdev, TLanSignature);
+ rc = pci_request_regions(pdev, tlan_signature);
if (rc) {
- printk(KERN_ERR "TLAN: Could not reserve IO regions\n");
+ pr_err("Could not reserve IO regions\n");
goto err_out;
}
}
#endif /* CONFIG_PCI */
- dev = alloc_etherdev(sizeof(TLanPrivateInfo));
+ dev = alloc_etherdev(sizeof(struct tlan_priv));
if (dev == NULL) {
- printk(KERN_ERR "TLAN: Could not allocate memory for device.\n");
+ pr_err("Could not allocate memory for device\n");
rc = -ENOMEM;
goto err_out_regions;
}
@@ -561,38 +492,39 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
priv = netdev_priv(dev);
- priv->pciDev = pdev;
+ priv->pci_dev = pdev;
priv->dev = dev;
/* Is this a PCI device? */
if (pdev) {
- u32 pci_io_base = 0;
+ u32 pci_io_base = 0;
priv->adapter = &board_info[ent->driver_data];
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n");
+ pr_err("No suitable PCI mapping available\n");
goto err_out_free_dev;
}
- for ( reg= 0; reg <= 5; reg ++ ) {
+ for (reg = 0; reg <= 5; reg++) {
if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
pci_io_base = pci_resource_start(pdev, reg);
- TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n",
- pci_io_base);
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "IO mapping is available at %x.\n",
+ pci_io_base);
break;
}
}
if (!pci_io_base) {
- printk(KERN_ERR "TLAN: No IO mappings available\n");
+ pr_err("No IO mappings available\n");
rc = -EIO;
goto err_out_free_dev;
}
dev->base_addr = pci_io_base;
dev->irq = pdev->irq;
- priv->adapterRev = pdev->revision;
+ priv->adapter_rev = pdev->revision;
pci_set_master(pdev);
pci_set_drvdata(pdev, dev);
@@ -602,11 +534,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
device_id = inw(ioaddr + EISA_ID2);
priv->is_eisa = 1;
if (device_id == 0x20F1) {
- priv->adapter = &board_info[13]; /* NetFlex-3/E */
- priv->adapterRev = 23; /* TLAN 2.3 */
+ priv->adapter = &board_info[13]; /* NetFlex-3/E */
+ priv->adapter_rev = 23; /* TLAN 2.3 */
} else {
priv->adapter = &board_info[14];
- priv->adapterRev = 10; /* TLAN 1.0 */
+ priv->adapter_rev = 10; /* TLAN 1.0 */
}
dev->base_addr = ioaddr;
dev->irq = irq;
@@ -620,11 +552,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
: (dev->mem_start & 0x18) >> 3;
- if (priv->speed == 0x1) {
+ if (priv->speed == 0x1)
priv->speed = TLAN_SPEED_10;
- } else if (priv->speed == 0x2) {
+ else if (priv->speed == 0x2)
priv->speed = TLAN_SPEED_100;
- }
+
debug = priv->debug = dev->mem_end;
} else {
priv->aui = aui[boards_found];
@@ -635,46 +567,45 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
/* This will be used when we get an adapter error from
* within our irq handler */
- INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
+ INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
spin_lock_init(&priv->lock);
- rc = TLan_Init(dev);
+ rc = tlan_init(dev);
if (rc) {
- printk(KERN_ERR "TLAN: Could not set up device.\n");
+ pr_err("Could not set up device\n");
goto err_out_free_dev;
}
rc = register_netdev(dev);
if (rc) {
- printk(KERN_ERR "TLAN: Could not register device.\n");
+ pr_err("Could not register device\n");
goto err_out_uninit;
}
- TLanDevicesInstalled++;
+ tlan_devices_installed++;
boards_found++;
/* pdev is NULL if this is an EISA device */
if (pdev)
tlan_have_pci++;
else {
- priv->nextDevice = TLan_Eisa_Devices;
- TLan_Eisa_Devices = dev;
+ priv->next_device = tlan_eisa_devices;
+ tlan_eisa_devices = dev;
tlan_have_eisa++;
}
- printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n",
- dev->name,
- (int) dev->irq,
- (int) dev->base_addr,
- priv->adapter->deviceLabel,
- priv->adapterRev);
+ netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
+ (int)dev->irq,
+ (int)dev->base_addr,
+ priv->adapter->device_label,
+ priv->adapter_rev);
return 0;
err_out_uninit:
- pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage,
- priv->dmaStorageDMA );
+ pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
+ priv->dma_storage_dma);
err_out_free_dev:
free_netdev(dev);
err_out_regions:
@@ -689,22 +620,23 @@ err_out:
}
-static void TLan_Eisa_Cleanup(void)
+static void tlan_eisa_cleanup(void)
{
struct net_device *dev;
- TLanPrivateInfo *priv;
+ struct tlan_priv *priv;
- while( tlan_have_eisa ) {
- dev = TLan_Eisa_Devices;
+ while (tlan_have_eisa) {
+ dev = tlan_eisa_devices;
priv = netdev_priv(dev);
- if (priv->dmaStorage) {
- pci_free_consistent(priv->pciDev, priv->dmaSize,
- priv->dmaStorage, priv->dmaStorageDMA );
+ if (priv->dma_storage) {
+ pci_free_consistent(priv->pci_dev, priv->dma_size,
+ priv->dma_storage,
+ priv->dma_storage_dma);
}
- release_region( dev->base_addr, 0x10);
- unregister_netdev( dev );
- TLan_Eisa_Devices = priv->nextDevice;
- free_netdev( dev );
+ release_region(dev->base_addr, 0x10);
+ unregister_netdev(dev);
+ tlan_eisa_devices = priv->next_device;
+ free_netdev(dev);
tlan_have_eisa--;
}
}
@@ -715,7 +647,7 @@ static void __exit tlan_exit(void)
pci_unregister_driver(&tlan_driver);
if (tlan_have_eisa)
- TLan_Eisa_Cleanup();
+ tlan_eisa_cleanup();
}
@@ -726,24 +658,24 @@ module_exit(tlan_exit);
- /**************************************************************
- * TLan_EisaProbe
- *
- * Returns: 0 on success, 1 otherwise
- *
- * Parms: None
- *
- *
- * This functions probes for EISA devices and calls
- * TLan_probe1 when one is found.
- *
- *************************************************************/
+/**************************************************************
+ * tlan_eisa_probe
+ *
+ * Returns: 0 on success, 1 otherwise
+ *
+ * Parms: None
+ *
+ *
+ * This function probes for EISA devices and calls
+ * tlan_probe1 when one is found.
+ *
+ *************************************************************/
-static void __init TLan_EisaProbe (void)
+static void __init tlan_eisa_probe(void)
{
- long ioaddr;
- int rc = -ENODEV;
- int irq;
+ long ioaddr;
+ int rc = -ENODEV;
+ int irq;
u16 device_id;
if (!EISA_bus) {
@@ -754,15 +686,16 @@ static void __init TLan_EisaProbe (void)
/* Loop through all slots of the EISA bus */
for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
- (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID));
- TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
- (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2));
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
- TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ",
- (int) ioaddr);
- if (request_region(ioaddr, 0x10, TLanSignature) == NULL)
+ TLAN_DBG(TLAN_DEBUG_PROBE,
+ "Probing for EISA adapter at IO: 0x%4x : ",
+ (int) ioaddr);
+ if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
goto out;
if (inw(ioaddr + EISA_ID) != 0x110E) {
@@ -772,326 +705,324 @@ static void __init TLan_EisaProbe (void)
device_id = inw(ioaddr + EISA_ID2);
if (device_id != 0x20F1 && device_id != 0x40F1) {
- release_region (ioaddr, 0x10);
+ release_region(ioaddr, 0x10);
goto out;
}
- if (inb(ioaddr + EISA_CR) != 0x1) { /* Check if adapter is enabled */
- release_region (ioaddr, 0x10);
+ /* check if adapter is enabled */
+ if (inb(ioaddr + EISA_CR) != 0x1) {
+ release_region(ioaddr, 0x10);
goto out2;
}
if (debug == 0x10)
- printk("Found one\n");
+ pr_info("Found one\n");
/* Get irq from board */
- switch (inb(ioaddr + 0xCC0)) {
- case(0x10):
- irq=5;
- break;
- case(0x20):
- irq=9;
- break;
- case(0x40):
- irq=10;
- break;
- case(0x80):
- irq=11;
- break;
- default:
- goto out;
+ switch (inb(ioaddr + 0xcc0)) {
+ case(0x10):
+ irq = 5;
+ break;
+ case(0x20):
+ irq = 9;
+ break;
+ case(0x40):
+ irq = 10;
+ break;
+ case(0x80):
+ irq = 11;
+ break;
+ default:
+ goto out;
}
/* Setup the newly found eisa adapter */
- rc = TLan_probe1( NULL, ioaddr, irq,
- 12, NULL);
+ rc = tlan_probe1(NULL, ioaddr, irq,
+ 12, NULL);
continue;
- out:
- if (debug == 0x10)
- printk("None found\n");
- continue;
+out:
+ if (debug == 0x10)
+ pr_info("None found\n");
+ continue;
- out2: if (debug == 0x10)
- printk("Card found but it is not enabled, skipping\n");
- continue;
+out2:
+ if (debug == 0x10)
+ pr_info("Card found but it is not enabled, skipping\n");
+ continue;
}
-} /* TLan_EisaProbe */
+}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static void TLan_Poll(struct net_device *dev)
+static void tlan_poll(struct net_device *dev)
{
disable_irq(dev->irq);
- TLan_HandleInterrupt(dev->irq, dev);
+ tlan_handle_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
-static const struct net_device_ops TLan_netdev_ops = {
- .ndo_open = TLan_Open,
- .ndo_stop = TLan_Close,
- .ndo_start_xmit = TLan_StartTx,
- .ndo_tx_timeout = TLan_tx_timeout,
- .ndo_get_stats = TLan_GetStats,
- .ndo_set_multicast_list = TLan_SetMulticastList,
- .ndo_do_ioctl = TLan_ioctl,
+static const struct net_device_ops tlan_netdev_ops = {
+ .ndo_open = tlan_open,
+ .ndo_stop = tlan_close,
+ .ndo_start_xmit = tlan_start_tx,
+ .ndo_tx_timeout = tlan_tx_timeout,
+ .ndo_get_stats = tlan_get_stats,
+ .ndo_set_multicast_list = tlan_set_multicast_list,
+ .ndo_do_ioctl = tlan_ioctl,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = TLan_Poll,
+ .ndo_poll_controller = tlan_poll,
#endif
};
- /***************************************************************
- * TLan_Init
- *
- * Returns:
- * 0 on success, error code otherwise.
- * Parms:
- * dev The structure of the device to be
- * init'ed.
- *
- * This function completes the initialization of the
- * device structure and driver. It reserves the IO
- * addresses, allocates memory for the lists and bounce
- * buffers, retrieves the MAC address from the eeprom
- * and assignes the device's methods.
- *
- **************************************************************/
-
-static int TLan_Init( struct net_device *dev )
+/***************************************************************
+ * tlan_init
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev The structure of the device to be
+ * init'ed.
+ *
+ * This function completes the initialization of the
+ * device structure and driver. It reserves the IO
+ * addresses, allocates memory for the lists and bounce
+ * buffers, retrieves the MAC address from the eeprom
+ * and assigns the device's methods.
+ *
+ **************************************************************/
+
+static int tlan_init(struct net_device *dev)
{
int dma_size;
- int err;
+ int err;
int i;
- TLanPrivateInfo *priv;
+ struct tlan_priv *priv;
priv = netdev_priv(dev);
- dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
- * ( sizeof(TLanList) );
- priv->dmaStorage = pci_alloc_consistent(priv->pciDev,
- dma_size, &priv->dmaStorageDMA);
- priv->dmaSize = dma_size;
+ dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
+ * (sizeof(struct tlan_list));
+ priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
+ dma_size,
+ &priv->dma_storage_dma);
+ priv->dma_size = dma_size;
- if ( priv->dmaStorage == NULL ) {
- printk(KERN_ERR "TLAN: Could not allocate lists and buffers for %s.\n",
- dev->name );
+ if (priv->dma_storage == NULL) {
+ pr_err("Could not allocate lists and buffers for %s\n",
+ dev->name);
return -ENOMEM;
}
- memset( priv->dmaStorage, 0, dma_size );
- priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8);
- priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8);
- priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
- priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;
+ memset(priv->dma_storage, 0, dma_size);
+ priv->rx_list = (struct tlan_list *)
+ ALIGN((unsigned long)priv->dma_storage, 8);
+ priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
+ priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
+ priv->tx_list_dma =
+ priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
err = 0;
- for ( i = 0; i < 6 ; i++ )
- err |= TLan_EeReadByte( dev,
- (u8) priv->adapter->addrOfs + i,
- (u8 *) &dev->dev_addr[i] );
- if ( err ) {
- printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n",
- dev->name,
- err );
+ for (i = 0; i < 6 ; i++)
+ err |= tlan_ee_read_byte(dev,
+ (u8) priv->adapter->addr_ofs + i,
+ (u8 *) &dev->dev_addr[i]);
+ if (err) {
+ pr_err("%s: Error reading MAC from eeprom: %d\n",
+ dev->name, err);
}
dev->addr_len = 6;
netif_carrier_off(dev);
/* Device methods */
- dev->netdev_ops = &TLan_netdev_ops;
+ dev->netdev_ops = &tlan_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
return 0;
-} /* TLan_Init */
+}
- /***************************************************************
- * TLan_Open
- *
- * Returns:
- * 0 on success, error code otherwise.
- * Parms:
- * dev Structure of device to be opened.
- *
- * This routine puts the driver and TLAN adapter in a
- * state where it is ready to send and receive packets.
- * It allocates the IRQ, resets and brings the adapter
- * out of reset, and allows interrupts. It also delays
- * the startup for autonegotiation or sends a Rx GO
- * command to the adapter, as appropriate.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_open
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev Structure of device to be opened.
+ *
+ * This routine puts the driver and TLAN adapter in a
+ * state where it is ready to send and receive packets.
+ * It allocates the IRQ, resets and brings the adapter
+ * out of reset, and allows interrupts. It also delays
+ * the startup for autonegotiation or sends a Rx GO
+ * command to the adapter, as appropriate.
+ *
+ **************************************************************/
-static int TLan_Open( struct net_device *dev )
+static int tlan_open(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int err;
- priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
- err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED,
- dev->name, dev );
+ priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
+ err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
+ dev->name, dev);
- if ( err ) {
- pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n",
- dev->name, dev->irq );
+ if (err) {
+ netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
+ dev->irq);
return err;
}
init_timer(&priv->timer);
- netif_start_queue(dev);
- /* NOTE: It might not be necessary to read the stats before a
- reset if you don't care what the values are.
- */
- TLan_ResetLists( dev );
- TLan_ReadAndClearStats( dev, TLAN_IGNORE );
- TLan_ResetAdapter( dev );
+ tlan_start(dev);
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
- dev->name, priv->tlanRev );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
+ dev->name, priv->tlan_rev);
return 0;
-} /* TLan_Open */
+}
- /**************************************************************
- * TLan_ioctl
- *
- * Returns:
- * 0 on success, error code otherwise
- * Params:
- * dev structure of device to receive ioctl.
- *
- * rq ifreq structure to hold userspace data.
- *
- * cmd ioctl command.
- *
- *
- *************************************************************/
+/**************************************************************
+ * tlan_ioctl
+ *
+ * Returns:
+ * 0 on success, error code otherwise
+ * Params:
+ * dev structure of device to receive ioctl.
+ *
+ * rq ifreq structure to hold userspace data.
+ *
+ * cmd ioctl command.
+ *
+ *
+ *************************************************************/
-static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
- u32 phy = priv->phy[priv->phyNum];
+ u32 phy = priv->phy[priv->phy_num];
- if (!priv->phyOnline)
+ if (!priv->phy_online)
return -EAGAIN;
- switch(cmd) {
- case SIOCGMIIPHY: /* Get address of MII PHY in use. */
- data->phy_id = phy;
+ switch (cmd) {
+ case SIOCGMIIPHY: /* get address of MII PHY in use. */
+ data->phy_id = phy;
- case SIOCGMIIREG: /* Read MII PHY register. */
- TLan_MiiReadReg(dev, data->phy_id & 0x1f,
- data->reg_num & 0x1f, &data->val_out);
- return 0;
+ case SIOCGMIIREG: /* read MII PHY register. */
+ tlan_mii_read_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, &data->val_out);
+ return 0;
- case SIOCSMIIREG: /* Write MII PHY register. */
- TLan_MiiWriteReg(dev, data->phy_id & 0x1f,
- data->reg_num & 0x1f, data->val_in);
- return 0;
- default:
- return -EOPNOTSUPP;
+ case SIOCSMIIREG: /* write MII PHY register. */
+ tlan_mii_write_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
}
-} /* tlan_ioctl */
+}
- /***************************************************************
- * TLan_tx_timeout
- *
- * Returns: nothing
- *
- * Params:
- * dev structure of device which timed out
- * during transmit.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_tx_timeout
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * dev structure of device which timed out
+ * during transmit.
+ *
+ **************************************************************/
-static void TLan_tx_timeout(struct net_device *dev)
+static void tlan_tx_timeout(struct net_device *dev)
{
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
/* Ok so we timed out, lets see what we can do about it...*/
- TLan_FreeLists( dev );
- TLan_ResetLists( dev );
- TLan_ReadAndClearStats( dev, TLAN_IGNORE );
- TLan_ResetAdapter( dev );
+ tlan_free_lists(dev);
+ tlan_reset_lists(dev);
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue( dev );
+ netif_wake_queue(dev);
}
- /***************************************************************
- * TLan_tx_timeout_work
- *
- * Returns: nothing
- *
- * Params:
- * work work item of device which timed out
- *
- **************************************************************/
+/***************************************************************
+ * tlan_tx_timeout_work
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * work work item of device which timed out
+ *
+ **************************************************************/
-static void TLan_tx_timeout_work(struct work_struct *work)
+static void tlan_tx_timeout_work(struct work_struct *work)
{
- TLanPrivateInfo *priv =
- container_of(work, TLanPrivateInfo, tlan_tqueue);
+ struct tlan_priv *priv =
+ container_of(work, struct tlan_priv, tlan_tqueue);
- TLan_tx_timeout(priv->dev);
+ tlan_tx_timeout(priv->dev);
}
- /***************************************************************
- * TLan_StartTx
- *
- * Returns:
- * 0 on success, non-zero on failure.
- * Parms:
- * skb A pointer to the sk_buff containing the
- * frame to be sent.
- * dev The device to send the data on.
- *
- * This function adds a frame to the Tx list to be sent
- * ASAP. First it verifies that the adapter is ready and
- * there is room in the queue. Then it sets up the next
- * available list, copies the frame to the corresponding
- * buffer. If the adapter Tx channel is idle, it gives
- * the adapter a Tx Go command on the list, otherwise it
- * sets the forward address of the previous list to point
- * to this one. Then it frees the sk_buff.
- *
- **************************************************************/
-
-static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
+/***************************************************************
+ * tlan_start_tx
+ *
+ * Returns:
+ * 0 on success, non-zero on failure.
+ * Parms:
+ * skb A pointer to the sk_buff containing the
+ * frame to be sent.
+ * dev The device to send the data on.
+ *
+ * This function adds a frame to the Tx list to be sent
+ * ASAP. First it verifies that the adapter is ready and
+ * there is room in the queue. Then it sets up the next
+ * available list, copies the frame to the corresponding
+ * buffer. If the adapter Tx channel is idle, it gives
+ * the adapter a Tx Go command on the list, otherwise it
+ * sets the forward address of the previous list to point
+ * to this one. Then it frees the sk_buff.
+ *
+ **************************************************************/
+
+static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
dma_addr_t tail_list_phys;
- TLanList *tail_list;
+ struct tlan_list *tail_list;
unsigned long flags;
unsigned int txlen;
- if ( ! priv->phyOnline ) {
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
- dev->name );
+ if (!priv->phy_online) {
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
+ dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1100,218 +1031,214 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
return NETDEV_TX_OK;
txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
- tail_list = priv->txList + priv->txTail;
- tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
+ tail_list = priv->tx_list + priv->tx_tail;
+ tail_list_phys =
+ priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
- if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
- dev->name, priv->txHead, priv->txTail );
+ if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
+ dev->name, priv->tx_head, priv->tx_tail);
netif_stop_queue(dev);
- priv->txBusyCount++;
+ priv->tx_busy_count++;
return NETDEV_TX_BUSY;
}
tail_list->forward = 0;
- tail_list->buffer[0].address = pci_map_single(priv->pciDev,
+ tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
skb->data, txlen,
PCI_DMA_TODEVICE);
- TLan_StoreSKB(tail_list, skb);
+ tlan_store_skb(tail_list, skb);
- tail_list->frameSize = (u16) txlen;
+ tail_list->frame_size = (u16) txlen;
tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
tail_list->buffer[1].count = 0;
tail_list->buffer[1].address = 0;
spin_lock_irqsave(&priv->lock, flags);
- tail_list->cStat = TLAN_CSTAT_READY;
- if ( ! priv->txInProgress ) {
- priv->txInProgress = 1;
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: Starting TX on buffer %d\n", priv->txTail );
- outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM );
- outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD );
+ tail_list->c_stat = TLAN_CSTAT_READY;
+ if (!priv->tx_in_progress) {
+ priv->tx_in_progress = 1;
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Starting TX on buffer %d\n",
+ priv->tx_tail);
+ outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
} else {
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n",
- priv->txTail );
- if ( priv->txTail == 0 ) {
- ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Adding buffer %d to TX channel\n",
+ priv->tx_tail);
+ if (priv->tx_tail == 0) {
+ (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
= tail_list_phys;
} else {
- ( priv->txList + ( priv->txTail - 1 ) )->forward
+ (priv->tx_list + (priv->tx_tail - 1))->forward
= tail_list_phys;
}
}
spin_unlock_irqrestore(&priv->lock, flags);
- CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
+ CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
return NETDEV_TX_OK;
-} /* TLan_StartTx */
+}
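For readers tracing the ring bookkeeping above, here is a minimal user-space sketch of the head/tail handling that tlan_start_tx() and tlan_handle_tx_eof() share. The wrap-around macro mimics the effect of the driver's CIRC_INC; the slot structure, NUM_SLOTS value and main() are invented purely for illustration.

#include <stdio.h>

#define NUM_SLOTS 8				/* stands in for TLAN_NUM_TX_LISTS */
#define CIRC_INC(a, b) ((a) = ((a) + 1) % (b))	/* wrap-around index increment */

struct demo_slot {
	int ready;				/* models c_stat == TLAN_CSTAT_READY */
};

int main(void)
{
	struct demo_slot ring[NUM_SLOTS] = { { 0 } };
	int head = 0, tail = 0;			/* head: oldest queued, tail: next free */
	int i;

	/* producer side, one iteration per frame, as tlan_start_tx() does */
	for (i = 0; i < 5; i++) {
		if (ring[tail].ready) {		/* slot still owned by the chip */
			printf("ring full, would return NETDEV_TX_BUSY\n");
			break;
		}
		ring[tail].ready = 1;
		CIRC_INC(tail, NUM_SLOTS);
	}

	/* consumer side, as tlan_handle_tx_eof() does on completion */
	while (ring[head].ready) {
		ring[head].ready = 0;
		CIRC_INC(head, NUM_SLOTS);
	}

	printf("head=%d tail=%d\n", head, tail);
	return 0;
}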
- /***************************************************************
- * TLan_HandleInterrupt
- *
- * Returns:
- * Nothing
- * Parms:
- * irq The line on which the interrupt
- * occurred.
- * dev_id A pointer to the device assigned to
- * this irq line.
- *
- * This function handles an interrupt generated by its
- * assigned TLAN adapter. The function deactivates
- * interrupts on its adapter, records the type of
- * interrupt, executes the appropriate subhandler, and
- * acknowdges the interrupt to the adapter (thus
- * re-enabling adapter interrupts.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_interrupt
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * irq The line on which the interrupt
+ * occurred.
+ * dev_id A pointer to the device assigned to
+ * this irq line.
+ *
+ * This function handles an interrupt generated by its
+ * assigned TLAN adapter. The function deactivates
+ * interrupts on its adapter, records the type of
+ * interrupt, executes the appropriate subhandler, and
+ *	acknowledges the interrupt to the adapter (thus
+ *	re-enabling adapter interrupts).
+ *
+ **************************************************************/
-static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id)
+static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 host_int;
u16 type;
spin_lock(&priv->lock);
- host_int = inw( dev->base_addr + TLAN_HOST_INT );
- type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
- if ( type ) {
+ host_int = inw(dev->base_addr + TLAN_HOST_INT);
+ type = (host_int & TLAN_HI_IT_MASK) >> 2;
+ if (type) {
u32 ack;
u32 host_cmd;
- outw( host_int, dev->base_addr + TLAN_HOST_INT );
- ack = TLanIntVector[type]( dev, host_int );
+ outw(host_int, dev->base_addr + TLAN_HOST_INT);
+ ack = tlan_int_vector[type](dev, host_int);
- if ( ack ) {
- host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
- outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
+ if (ack) {
+ host_cmd = TLAN_HC_ACK | ack | (type << 18);
+ outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
}
}
spin_unlock(&priv->lock);
return IRQ_RETVAL(type);
-} /* TLan_HandleInterrupts */
+}
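A self-contained sketch of the dispatch pattern used by tlan_handle_interrupt(): the type field extracted from HOST_INT indexes a table of handlers, and the handler's return value becomes the acknowledge written back to HOST_CMD. The handler names, mask value and sample host_int below are assumptions made for the example, not the driver's definitions.

#include <stdio.h>

typedef unsigned short u16;
typedef unsigned int u32;

/* toy handlers standing in for the tlan_handle_* subhandlers */
static u32 demo_invalid(u16 host_int)       { (void)host_int; return 0; }
static u32 demo_tx_eof(u16 host_int)        { (void)host_int; return 1; }
static u32 demo_stat_overflow(u16 host_int) { (void)host_int; return 1; }

static u32 (*demo_int_vector[])(u16) = {
	demo_invalid,		/* type 0 */
	demo_tx_eof,		/* type 1 */
	demo_stat_overflow,	/* type 2 */
};

#define DEMO_IT_MASK 0x000c	/* assumed: bits 2..3 carry the vector type here */

int main(void)
{
	u16 host_int = 0x0004;	/* pretend the chip latched interrupt type 1 */
	u16 type = (host_int & DEMO_IT_MASK) >> 2;
	u32 ack = 0;

	if (type < sizeof(demo_int_vector) / sizeof(demo_int_vector[0]))
		ack = demo_int_vector[type](host_int);

	/* a non-zero ack would be or-ed into the HOST_CMD acknowledge write */
	printf("type=%u ack=%u\n", type, ack);
	return 0;
}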
- /***************************************************************
- * TLan_Close
- *
- * Returns:
- * An error code.
- * Parms:
- * dev The device structure of the device to
- * close.
- *
- * This function shuts down the adapter. It records any
- * stats, puts the adapter into reset state, deactivates
- * its time as needed, and frees the irq it is using.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_close
+ *
+ * Returns:
+ * An error code.
+ * Parms:
+ * dev The device structure of the device to
+ * close.
+ *
+ * This function shuts down the adapter. It records any
+ * stats, puts the adapter into reset state, deactivates
+ *	its timer as needed, and frees the irq it is using.
+ *
+ **************************************************************/
-static int TLan_Close(struct net_device *dev)
+static int tlan_close(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
- netif_stop_queue(dev);
priv->neg_be_verbose = 0;
+ tlan_stop(dev);
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
- outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
- if ( priv->timer.function != NULL ) {
- del_timer_sync( &priv->timer );
- priv->timer.function = NULL;
- }
-
- free_irq( dev->irq, dev );
- TLan_FreeLists( dev );
- TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name );
+ free_irq(dev->irq, dev);
+ tlan_free_lists(dev);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
return 0;
-} /* TLan_Close */
+}
- /***************************************************************
- * TLan_GetStats
- *
- * Returns:
- * A pointer to the device's statistics structure.
- * Parms:
- * dev The device structure to return the
- * stats for.
- *
- * This function updates the devices statistics by reading
- * the TLAN chip's onboard registers. Then it returns the
- * address of the statistics structure.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_get_stats
+ *
+ * Returns:
+ * A pointer to the device's statistics structure.
+ * Parms:
+ * dev The device structure to return the
+ * stats for.
+ *
+ *	This function updates the device's statistics by reading
+ * the TLAN chip's onboard registers. Then it returns the
+ * address of the statistics structure.
+ *
+ **************************************************************/
-static struct net_device_stats *TLan_GetStats( struct net_device *dev )
+static struct net_device_stats *tlan_get_stats(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
/* Should only read stats if open ? */
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
- TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
- priv->rxEocCount );
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
- priv->txBusyCount );
- if ( debug & TLAN_DEBUG_GNRL ) {
- TLan_PrintDio( dev->base_addr );
- TLan_PhyPrint( dev );
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
+ priv->rx_eoc_count);
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
+ priv->tx_busy_count);
+ if (debug & TLAN_DEBUG_GNRL) {
+ tlan_print_dio(dev->base_addr);
+ tlan_phy_print(dev);
}
- if ( debug & TLAN_DEBUG_LIST ) {
- for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ )
- TLan_PrintList( priv->rxList + i, "RX", i );
- for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ )
- TLan_PrintList( priv->txList + i, "TX", i );
+ if (debug & TLAN_DEBUG_LIST) {
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
+ tlan_print_list(priv->rx_list + i, "RX", i);
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
+ tlan_print_list(priv->tx_list + i, "TX", i);
}
return &dev->stats;
-} /* TLan_GetStats */
+}
- /***************************************************************
- * TLan_SetMulticastList
- *
- * Returns:
- * Nothing
- * Parms:
- * dev The device structure to set the
- * multicast list for.
- *
- * This function sets the TLAN adaptor to various receive
- * modes. If the IFF_PROMISC flag is set, promiscuous
- * mode is acitviated. Otherwise, promiscuous mode is
- * turned off. If the IFF_ALLMULTI flag is set, then
- * the hash table is set to receive all group addresses.
- * Otherwise, the first three multicast addresses are
- * stored in AREG_1-3, and the rest are selected via the
- * hash table, as necessary.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_set_multicast_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure to set the
+ * multicast list for.
+ *
+ * This function sets the TLAN adaptor to various receive
+ * modes. If the IFF_PROMISC flag is set, promiscuous
+ *	mode is activated.  Otherwise, promiscuous mode is
+ * turned off. If the IFF_ALLMULTI flag is set, then
+ * the hash table is set to receive all group addresses.
+ * Otherwise, the first three multicast addresses are
+ * stored in AREG_1-3, and the rest are selected via the
+ * hash table, as necessary.
+ *
+ **************************************************************/
-static void TLan_SetMulticastList( struct net_device *dev )
+static void tlan_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
u32 hash1 = 0;
@@ -1320,53 +1247,56 @@ static void TLan_SetMulticastList( struct net_device *dev )
u32 offset;
u8 tmp;
- if ( dev->flags & IFF_PROMISC ) {
- tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
- TLan_DioWrite8( dev->base_addr,
- TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
+ if (dev->flags & IFF_PROMISC) {
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
} else {
- tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
- TLan_DioWrite8( dev->base_addr,
- TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
- if ( dev->flags & IFF_ALLMULTI ) {
- for ( i = 0; i < 3; i++ )
- TLan_SetMac( dev, i + 1, NULL );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
+ 0xffffffff);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
+ 0xffffffff);
} else {
i = 0;
netdev_for_each_mc_addr(ha, dev) {
- if ( i < 3 ) {
- TLan_SetMac( dev, i + 1,
+ if (i < 3) {
+ tlan_set_mac(dev, i + 1,
(char *) &ha->addr);
} else {
- offset = TLan_HashFunc((u8 *)&ha->addr);
- if ( offset < 32 )
- hash1 |= ( 1 << offset );
+ offset =
+ tlan_hash_func((u8 *)&ha->addr);
+ if (offset < 32)
+ hash1 |= (1 << offset);
else
- hash2 |= ( 1 << ( offset - 32 ) );
+ hash2 |= (1 << (offset - 32));
}
i++;
}
- for ( ; i < 3; i++ )
- TLan_SetMac( dev, i + 1, NULL );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 );
+ for ( ; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
}
}
-} /* TLan_SetMulticastList */
+}
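The filter split described in the comment above (first three multicast addresses into AREG_1..3, everything else hashed into the 64-bit HASH_1/HASH_2 pair) can be summarized in a short sketch; demo_hash() is a deliberately crude placeholder and does not reproduce the driver's tlan_hash_func().

#include <stdio.h>
#include <stdint.h>

/* placeholder hash: fold the address into a 6-bit offset (0..63) */
static unsigned int demo_hash(const uint8_t *addr)
{
	unsigned int h = 0;
	int i;

	for (i = 0; i < 6; i++)
		h ^= addr[i];
	return h & 0x3f;
}

int main(void)
{
	uint8_t mc[5][6] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x03 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x04 },
		{ 0x01, 0x00, 0x5e, 0x7f, 0xff, 0xfa },
	};
	uint32_t hash1 = 0, hash2 = 0;
	int i;

	for (i = 0; i < 5; i++) {
		if (i < 3) {
			printf("address %d -> AREG_%d\n", i, i + 1);
		} else {
			unsigned int off = demo_hash(mc[i]);

			if (off < 32)
				hash1 |= 1u << off;
			else
				hash2 |= 1u << (off - 32);
		}
	}
	printf("HASH_1=0x%08x HASH_2=0x%08x\n",
	       (unsigned int)hash1, (unsigned int)hash2);
	return 0;
}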
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Interrupt Vectors and Table
+ThunderLAN driver interrupt vectors and table
- Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN
- Programmer's Guide" for more informations on handling interrupts
- generated by TLAN based adapters.
+please see chap. 4, "Interrupt Handling" of the "ThunderLAN
+Programmer's Guide" for more information on handling interrupts
+generated by TLAN based adapters.
******************************************************************************
*****************************************************************************/
@@ -1374,46 +1304,48 @@ static void TLan_SetMulticastList( struct net_device *dev )
- /***************************************************************
- * TLan_HandleTxEOF
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles Tx EOF interrupts which are raised
- * by the adapter when it has completed sending the
- * contents of a buffer. If detemines which list/buffer
- * was completed and resets it. If the buffer was the last
- * in the channel (EOC), then the function checks to see if
- * another buffer is ready to send, and if so, sends a Tx
- * Go command. Finally, the driver activates/continues the
- * activity LED.
- *
- **************************************************************/
-
-static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
+/***************************************************************
+ * tlan_handle_tx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Tx EOF interrupts which are raised
+ * by the adapter when it has completed sending the
+ *	contents of a buffer.  It determines which list/buffer
+ * was completed and resets it. If the buffer was the last
+ * in the channel (EOC), then the function checks to see if
+ * another buffer is ready to send, and if so, sends a Tx
+ * Go command. Finally, the driver activates/continues the
+ * activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int eoc = 0;
- TLanList *head_list;
+ struct tlan_list *head_list;
dma_addr_t head_list_phys;
u32 ack = 0;
- u16 tmpCStat;
+ u16 tmp_c_stat;
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
- priv->txHead, priv->txTail );
- head_list = priv->txList + priv->txHead;
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
- while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
- struct sk_buff *skb = TLan_GetSKB(head_list);
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ struct sk_buff *skb = tlan_get_skb(head_list);
ack++;
- pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
+ pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
max(skb->len,
(unsigned int)TLAN_MIN_FRAME_SIZE),
PCI_DMA_TODEVICE);
@@ -1421,304 +1353,313 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
head_list->buffer[8].address = 0;
head_list->buffer[9].address = 0;
- if ( tmpCStat & TLAN_CSTAT_EOC )
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
eoc = 1;
- dev->stats.tx_bytes += head_list->frameSize;
+ dev->stats.tx_bytes += head_list->frame_size;
- head_list->cStat = TLAN_CSTAT_UNUSED;
+ head_list->c_stat = TLAN_CSTAT_UNUSED;
netif_start_queue(dev);
- CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS );
- head_list = priv->txList + priv->txHead;
+ CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
+ head_list = priv->tx_list + priv->tx_head;
}
if (!ack)
- printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n");
-
- if ( eoc ) {
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n",
- priv->txHead, priv->txTail );
- head_list = priv->txList + priv->txHead;
- head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
- if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
- outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ netdev_info(dev,
+ "Received interrupt for uncompleted TX frame\n");
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if ((head_list->c_stat & TLAN_CSTAT_READY)
+ == TLAN_CSTAT_READY) {
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO;
} else {
- priv->txInProgress = 0;
+ priv->tx_in_progress = 0;
}
}
- if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
- TLan_DioWrite8( dev->base_addr,
- TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
- if ( priv->timer.function == NULL ) {
- priv->timer.function = TLan_Timer;
- priv->timer.data = (unsigned long) dev;
- priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
- priv->timerSetAt = jiffies;
- priv->timerType = TLAN_TIMER_ACTIVITY;
- add_timer(&priv->timer);
- } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
- priv->timerSetAt = jiffies;
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
+ priv->timer.data = (unsigned long) dev;
+ priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
+ add_timer(&priv->timer);
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
}
}
return ack;
-} /* TLan_HandleTxEOF */
+}
- /***************************************************************
- * TLan_HandleStatOverflow
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles the Statistics Overflow interrupt
- * which means that one or more of the TLAN statistics
- * registers has reached 1/2 capacity and needs to be read.
- *
- **************************************************************/
+/***************************************************************
+ *	tlan_handle_stat_overflow
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Statistics Overflow interrupt
+ * which means that one or more of the TLAN statistics
+ * registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
-static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
{
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
return 1;
-} /* TLan_HandleStatOverflow */
-
-
-
-
- /***************************************************************
- * TLan_HandleRxEOF
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles the Rx EOF interrupt which
- * indicates a frame has been received by the adapter from
- * the net and the frame has been transferred to memory.
- * The function determines the bounce buffer the frame has
- * been loaded into, creates a new sk_buff big enough to
- * hold the frame, and sends it to protocol stack. It
- * then resets the used buffer and appends it to the end
- * of the list. If the frame was the last in the Rx
- * channel (EOC), the function restarts the receive channel
- * by sending an Rx Go command to the adapter. Then it
- * activates/continues the activity LED.
- *
- **************************************************************/
-
-static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
+}
+
+
+
+
+/***************************************************************
+ *	tlan_handle_rx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Rx EOF interrupt which
+ * indicates a frame has been received by the adapter from
+ * the net and the frame has been transferred to memory.
+ * The function determines the bounce buffer the frame has
+ * been loaded into, creates a new sk_buff big enough to
+ * hold the frame, and sends it to protocol stack. It
+ * then resets the used buffer and appends it to the end
+ * of the list. If the frame was the last in the Rx
+ * channel (EOC), the function restarts the receive channel
+ * by sending an Rx Go command to the adapter. Then it
+ * activates/continues the activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u32 ack = 0;
int eoc = 0;
- TLanList *head_list;
+ struct tlan_list *head_list;
struct sk_buff *skb;
- TLanList *tail_list;
- u16 tmpCStat;
+ struct tlan_list *tail_list;
+ u16 tmp_c_stat;
dma_addr_t head_list_phys;
- TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n",
- priv->rxHead, priv->rxTail );
- head_list = priv->rxList + priv->rxHead;
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys =
+ priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
- while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
- dma_addr_t frameDma = head_list->buffer[0].address;
- u32 frameSize = head_list->frameSize;
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ dma_addr_t frame_dma = head_list->buffer[0].address;
+ u32 frame_size = head_list->frame_size;
struct sk_buff *new_skb;
ack++;
- if (tmpCStat & TLAN_CSTAT_EOC)
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
eoc = 1;
new_skb = netdev_alloc_skb_ip_align(dev,
TLAN_MAX_FRAME_SIZE + 5);
- if ( !new_skb )
+ if (!new_skb)
goto drop_and_reuse;
- skb = TLan_GetSKB(head_list);
- pci_unmap_single(priv->pciDev, frameDma,
+ skb = tlan_get_skb(head_list);
+ pci_unmap_single(priv->pci_dev, frame_dma,
TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
- skb_put( skb, frameSize );
+ skb_put(skb, frame_size);
- dev->stats.rx_bytes += frameSize;
+ dev->stats.rx_bytes += frame_size;
- skb->protocol = eth_type_trans( skb, dev );
- netif_rx( skb );
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
- head_list->buffer[0].address = pci_map_single(priv->pciDev,
- new_skb->data,
- TLAN_MAX_FRAME_SIZE,
- PCI_DMA_FROMDEVICE);
+ head_list->buffer[0].address =
+ pci_map_single(priv->pci_dev, new_skb->data,
+ TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
- TLan_StoreSKB(head_list, new_skb);
+ tlan_store_skb(head_list, new_skb);
drop_and_reuse:
head_list->forward = 0;
- head_list->cStat = 0;
- tail_list = priv->rxList + priv->rxTail;
+ head_list->c_stat = 0;
+ tail_list = priv->rx_list + priv->rx_tail;
tail_list->forward = head_list_phys;
- CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS );
- CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS );
- head_list = priv->rxList + priv->rxHead;
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+ CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
+ CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
}
if (!ack)
- printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n");
-
-
- if ( eoc ) {
- TLAN_DBG( TLAN_DEBUG_RX,
- "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n",
- priv->rxHead, priv->rxTail );
- head_list = priv->rxList + priv->rxHead;
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
- outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ netdev_info(dev,
+ "Received interrupt for uncompleted RX frame\n");
+
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO | TLAN_HC_RT;
- priv->rxEocCount++;
+ priv->rx_eoc_count++;
}
- if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
- TLan_DioWrite8( dev->base_addr,
- TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
- if ( priv->timer.function == NULL ) {
- priv->timer.function = TLan_Timer;
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
priv->timer.data = (unsigned long) dev;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
- priv->timerSetAt = jiffies;
- priv->timerType = TLAN_TIMER_ACTIVITY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
add_timer(&priv->timer);
- } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
- priv->timerSetAt = jiffies;
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
}
}
return ack;
-} /* TLan_HandleRxEOF */
+}
- /***************************************************************
- * TLan_HandleDummy
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles the Dummy interrupt, which is
- * raised whenever a test interrupt is generated by setting
- * the Req_Int bit of HOST_CMD to 1.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_dummy
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Dummy interrupt, which is
+ * raised whenever a test interrupt is generated by setting
+ * the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
-static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
{
- printk( "TLAN: Test interrupt on %s.\n", dev->name );
+ netdev_info(dev, "Test interrupt\n");
return 1;
-} /* TLan_HandleDummy */
+}
- /***************************************************************
- * TLan_HandleTxEOC
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This driver is structured to determine EOC occurrences by
- * reading the CSTAT member of the list structure. Tx EOC
- * interrupts are disabled via the DIO INTDIS register.
- * However, TLAN chips before revision 3.0 didn't have this
- * functionality, so process EOC events if this is the
- * case.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_tx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Tx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * functionality, so process EOC events if this is the
+ * case.
+ *
+ **************************************************************/
-static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
- TLanList *head_list;
+ struct tlan_priv *priv = netdev_priv(dev);
+ struct tlan_list *head_list;
dma_addr_t head_list_phys;
u32 ack = 1;
host_int = 0;
- if ( priv->tlanRev < 0x30 ) {
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
- priv->txHead, priv->txTail );
- head_list = priv->txList + priv->txHead;
- head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
- if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if ((head_list->c_stat & TLAN_CSTAT_READY)
+ == TLAN_CSTAT_READY) {
netif_stop_queue(dev);
- outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO;
} else {
- priv->txInProgress = 0;
+ priv->tx_in_progress = 0;
}
}
return ack;
-} /* TLan_HandleTxEOC */
+}
- /***************************************************************
- * TLan_HandleStatusCheck
- *
- * Returns:
- * 0 if Adapter check, 1 if Network Status check.
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles Adapter Check/Network Status
- * interrupts generated by the adapter. It checks the
- * vector in the HOST_INT register to determine if it is
- * an Adapter Check interrupt. If so, it resets the
- * adapter. Otherwise it clears the status registers
- * and services the PHY.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_status_check
+ *
+ * Returns:
+ * 0 if Adapter check, 1 if Network Status check.
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Adapter Check/Network Status
+ * interrupts generated by the adapter. It checks the
+ * vector in the HOST_INT register to determine if it is
+ * an Adapter Check interrupt. If so, it resets the
+ * adapter. Otherwise it clears the status registers
+ * and services the PHY.
+ *
+ **************************************************************/
-static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u32 ack;
u32 error;
u8 net_sts;
@@ -1727,92 +1668,94 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
u16 tlphy_sts;
ack = 1;
- if ( host_int & TLAN_HI_IV_MASK ) {
- netif_stop_queue( dev );
- error = inl( dev->base_addr + TLAN_CH_PARM );
- printk( "TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error );
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
- outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+ if (host_int & TLAN_HI_IV_MASK) {
+ netif_stop_queue(dev);
+ error = inl(dev->base_addr + TLAN_CH_PARM);
+ netdev_info(dev, "Adaptor Error = 0x%x\n", error);
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
schedule_work(&priv->tlan_tqueue);
netif_wake_queue(dev);
ack = 0;
} else {
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name );
- phy = priv->phy[priv->phyNum];
-
- net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
- if ( net_sts ) {
- TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
- dev->name, (unsigned) net_sts );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
+ phy = priv->phy[priv->phy_num];
+
+ net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
+ if (net_sts) {
+ tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
+ dev->name, (unsigned) net_sts);
}
- if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) {
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
- if ( ! ( tlphy_sts & TLAN_TS_POLOK ) &&
- ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
- tlphy_ctl |= TLAN_TC_SWAPOL;
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
- } else if ( ( tlphy_sts & TLAN_TS_POLOK ) &&
- ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
- tlphy_ctl &= ~TLAN_TC_SWAPOL;
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
- }
-
- if (debug) {
- TLan_PhyPrint( dev );
+ if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ if (!(tlphy_sts & TLAN_TS_POLOK) &&
+ !(tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl |= TLAN_TC_SWAPOL;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
+ } else if ((tlphy_sts & TLAN_TS_POLOK) &&
+ (tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl &= ~TLAN_TC_SWAPOL;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
}
+
+ if (debug)
+ tlan_phy_print(dev);
}
}
return ack;
-} /* TLan_HandleStatusCheck */
+}
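The polarity handling in the status-check path above reduces to a small hysteresis rule, sketched here with made-up bit positions (the real TLAN_TS_POLOK and TLAN_TC_SWAPOL values are defined in tlan.h):

#include <stdio.h>
#include <stdint.h>

#define DEMO_TS_POLOK  0x0001	/* assumed bit: PHY reports polarity OK */
#define DEMO_TC_SWAPOL 0x0002	/* assumed bit: polarity swap enabled */

int main(void)
{
	uint16_t tlphy_sts = 0;		/* pretend the PHY reports bad polarity */
	uint16_t tlphy_ctl = 0;		/* swap currently off */

	if (!(tlphy_sts & DEMO_TS_POLOK) && !(tlphy_ctl & DEMO_TC_SWAPOL))
		tlphy_ctl |= DEMO_TC_SWAPOL;	/* bad polarity, enable swap */
	else if ((tlphy_sts & DEMO_TS_POLOK) && (tlphy_ctl & DEMO_TC_SWAPOL))
		tlphy_ctl &= ~DEMO_TC_SWAPOL;	/* polarity recovered, disable swap */

	printf("tlphy_ctl=0x%04x\n", (unsigned int)tlphy_ctl);
	return 0;
}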
- /***************************************************************
- * TLan_HandleRxEOC
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This driver is structured to determine EOC occurrences by
- * reading the CSTAT member of the list structure. Rx EOC
- * interrupts are disabled via the DIO INTDIS register.
- * However, TLAN chips before revision 3.0 didn't have this
- * CSTAT member or a INTDIS register, so if this chip is
- * pre-3.0, process EOC interrupts normally.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_rx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Rx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ *	CSTAT member or an INTDIS register, so if this chip is
+ * pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
-static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
dma_addr_t head_list_phys;
u32 ack = 1;
- if ( priv->tlanRev < 0x30 ) {
- TLAN_DBG( TLAN_DEBUG_RX,
- "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n",
- priv->rxHead, priv->rxTail );
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
- outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
+ priv->rx_head, priv->rx_tail);
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO | TLAN_HC_RT;
- priv->rxEocCount++;
+ priv->rx_eoc_count++;
}
return ack;
-} /* TLan_HandleRxEOC */
+}
@@ -1820,98 +1763,98 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Timer Function
+ThunderLAN driver timer function
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_Timer
- *
- * Returns:
- * Nothing
- * Parms:
- * data A value given to add timer when
- * add_timer was called.
- *
- * This function handles timed functionality for the
- * TLAN driver. The two current timer uses are for
- * delaying for autonegotionation and driving the ACT LED.
- * - Autonegotiation requires being allowed about
- * 2 1/2 seconds before attempting to transmit a
- * packet. It would be a very bad thing to hang
- * the kernel this long, so the driver doesn't
- * allow transmission 'til after this time, for
- * certain PHYs. It would be much nicer if all
- * PHYs were interrupt-capable like the internal
- * PHY.
- * - The ACT LED, which shows adapter activity, is
- * driven by the driver, and so must be left on
- * for a short period to power up the LED so it
- * can be seen. This delay can be changed by
- * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
- * if desired. 100 ms produces a slightly
- * sluggish response.
- *
- **************************************************************/
-
-static void TLan_Timer( unsigned long data )
+/***************************************************************
+ * tlan_timer
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * data A value given to add timer when
+ * add_timer was called.
+ *
+ * This function handles timed functionality for the
+ * TLAN driver. The two current timer uses are for
+ *	delaying for autonegotiation and driving the ACT LED.
+ * - Autonegotiation requires being allowed about
+ * 2 1/2 seconds before attempting to transmit a
+ * packet. It would be a very bad thing to hang
+ * the kernel this long, so the driver doesn't
+ * allow transmission 'til after this time, for
+ * certain PHYs. It would be much nicer if all
+ * PHYs were interrupt-capable like the internal
+ * PHY.
+ * - The ACT LED, which shows adapter activity, is
+ * driven by the driver, and so must be left on
+ * for a short period to power up the LED so it
+ * can be seen. This delay can be changed by
+ * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ * if desired. 100 ms produces a slightly
+ * sluggish response.
+ *
+ **************************************************************/
+
+static void tlan_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u32 elapsed;
unsigned long flags = 0;
priv->timer.function = NULL;
- switch ( priv->timerType ) {
+ switch (priv->timer_type) {
#ifdef MONITOR
- case TLAN_TIMER_LINK_BEAT:
- TLan_PhyMonitor( dev );
- break;
+ case TLAN_TIMER_LINK_BEAT:
+ tlan_phy_monitor(dev);
+ break;
#endif
- case TLAN_TIMER_PHY_PDOWN:
- TLan_PhyPowerDown( dev );
- break;
- case TLAN_TIMER_PHY_PUP:
- TLan_PhyPowerUp( dev );
- break;
- case TLAN_TIMER_PHY_RESET:
- TLan_PhyReset( dev );
- break;
- case TLAN_TIMER_PHY_START_LINK:
- TLan_PhyStartLink( dev );
- break;
- case TLAN_TIMER_PHY_FINISH_AN:
- TLan_PhyFinishAutoNeg( dev );
- break;
- case TLAN_TIMER_FINISH_RESET:
- TLan_FinishReset( dev );
- break;
- case TLAN_TIMER_ACTIVITY:
- spin_lock_irqsave(&priv->lock, flags);
- if ( priv->timer.function == NULL ) {
- elapsed = jiffies - priv->timerSetAt;
- if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
- TLan_DioWrite8( dev->base_addr,
- TLAN_LED_REG, TLAN_LED_LINK );
- } else {
- priv->timer.function = TLan_Timer;
- priv->timer.expires = priv->timerSetAt
- + TLAN_TIMER_ACT_DELAY;
- spin_unlock_irqrestore(&priv->lock, flags);
- add_timer( &priv->timer );
- break;
- }
+ case TLAN_TIMER_PHY_PDOWN:
+ tlan_phy_power_down(dev);
+ break;
+ case TLAN_TIMER_PHY_PUP:
+ tlan_phy_power_up(dev);
+ break;
+ case TLAN_TIMER_PHY_RESET:
+ tlan_phy_reset(dev);
+ break;
+ case TLAN_TIMER_PHY_START_LINK:
+ tlan_phy_start_link(dev);
+ break;
+ case TLAN_TIMER_PHY_FINISH_AN:
+ tlan_phy_finish_auto_neg(dev);
+ break;
+ case TLAN_TIMER_FINISH_RESET:
+ tlan_finish_reset(dev);
+ break;
+ case TLAN_TIMER_ACTIVITY:
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->timer.function == NULL) {
+ elapsed = jiffies - priv->timer_set_at;
+ if (elapsed >= TLAN_TIMER_ACT_DELAY) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK);
+ } else {
+ priv->timer.function = tlan_timer;
+ priv->timer.expires = priv->timer_set_at
+ + TLAN_TIMER_ACT_DELAY;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ add_timer(&priv->timer);
+ break;
}
- spin_unlock_irqrestore(&priv->lock, flags);
- break;
- default:
- break;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ default:
+ break;
}
-} /* TLan_Timer */
+}
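A compact model of the TLAN_TIMER_ACTIVITY branch above, using plain counters in place of jiffies and an assumed delay constant:

#include <stdio.h>

#define DEMO_ACT_DELAY 10	/* stands in for TLAN_TIMER_ACT_DELAY */

int main(void)
{
	unsigned long set_at = 100;	/* tick at which the ACT LED was lit */
	unsigned long now = 105;	/* current tick when the timer fires */
	unsigned long elapsed = now - set_at;

	if (elapsed >= DEMO_ACT_DELAY)
		printf("delay expired: drop back to LINK-only LED\n");
	else
		printf("re-arm timer to fire at tick %lu\n",
		       set_at + DEMO_ACT_DELAY);
	return 0;
}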
@@ -1919,39 +1862,39 @@ static void TLan_Timer( unsigned long data )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Adapter Related Routines
+ThunderLAN driver adapter related routines
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_ResetLists
- *
- * Returns:
- * Nothing
- * Parms:
- * dev The device structure with the list
- * stuctures to be reset.
- *
- * This routine sets the variables associated with managing
- * the TLAN lists to their initial values.
- *
- **************************************************************/
-
-static void TLan_ResetLists( struct net_device *dev )
+/***************************************************************
+ * tlan_reset_lists
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure with the list
+ *	structures to be reset.
+ *
+ * This routine sets the variables associated with managing
+ * the TLAN lists to their initial values.
+ *
+ **************************************************************/
+
+static void tlan_reset_lists(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
- TLanList *list;
+ struct tlan_list *list;
dma_addr_t list_phys;
struct sk_buff *skb;
- priv->txHead = 0;
- priv->txTail = 0;
- for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
- list = priv->txList + i;
- list->cStat = TLAN_CSTAT_UNUSED;
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ list->c_stat = TLAN_CSTAT_UNUSED;
list->buffer[0].address = 0;
list->buffer[2].count = 0;
list->buffer[2].address = 0;
@@ -1959,169 +1902,169 @@ static void TLan_ResetLists( struct net_device *dev )
list->buffer[9].address = 0;
}
- priv->rxHead = 0;
- priv->rxTail = TLAN_NUM_RX_LISTS - 1;
- for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
- list = priv->rxList + i;
- list_phys = priv->rxListDMA + sizeof(TLanList) * i;
- list->cStat = TLAN_CSTAT_READY;
- list->frameSize = TLAN_MAX_FRAME_SIZE;
+ priv->rx_head = 0;
+ priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
+ list->c_stat = TLAN_CSTAT_READY;
+ list->frame_size = TLAN_MAX_FRAME_SIZE;
list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
- if ( !skb ) {
- pr_err("TLAN: out of memory for received data.\n" );
+ if (!skb) {
+ netdev_err(dev, "Out of memory for received data\n");
break;
}
- list->buffer[0].address = pci_map_single(priv->pciDev,
+ list->buffer[0].address = pci_map_single(priv->pci_dev,
skb->data,
TLAN_MAX_FRAME_SIZE,
PCI_DMA_FROMDEVICE);
- TLan_StoreSKB(list, skb);
+ tlan_store_skb(list, skb);
list->buffer[1].count = 0;
list->buffer[1].address = 0;
- list->forward = list_phys + sizeof(TLanList);
+ list->forward = list_phys + sizeof(struct tlan_list);
}
/* in case ran out of memory early, clear bits */
while (i < TLAN_NUM_RX_LISTS) {
- TLan_StoreSKB(priv->rxList + i, NULL);
+ tlan_store_skb(priv->rx_list + i, NULL);
++i;
}
list->forward = 0;
-} /* TLan_ResetLists */
+}
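To picture the forward-pointer chaining set up in tlan_reset_lists(), here is a toy model in which array indices stand in for the DMA addresses the driver stores; the names and sizes are illustrative only.

#include <stdio.h>

#define DEMO_NUM_RX 4	/* stands in for TLAN_NUM_RX_LISTS */

struct demo_list {
	unsigned int forward;	/* "address" of the next descriptor, 0 = end of chain */
};

int main(void)
{
	struct demo_list rx[DEMO_NUM_RX];
	int i;

	/* chain every descriptor to its successor, terminate the last one */
	for (i = 0; i < DEMO_NUM_RX; i++)
		rx[i].forward = (i + 1 < DEMO_NUM_RX) ? (unsigned int)(i + 1) : 0;

	for (i = 0; i < DEMO_NUM_RX; i++)
		printf("rx[%d].forward = %u\n", i, rx[i].forward);
	return 0;
}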
-static void TLan_FreeLists( struct net_device *dev )
+static void tlan_free_lists(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
- TLanList *list;
+ struct tlan_list *list;
struct sk_buff *skb;
- for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
- list = priv->txList + i;
- skb = TLan_GetSKB(list);
- if ( skb ) {
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
pci_unmap_single(
- priv->pciDev,
+ priv->pci_dev,
list->buffer[0].address,
max(skb->len,
(unsigned int)TLAN_MIN_FRAME_SIZE),
PCI_DMA_TODEVICE);
- dev_kfree_skb_any( skb );
+ dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
}
}
- for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
- list = priv->rxList + i;
- skb = TLan_GetSKB(list);
- if ( skb ) {
- pci_unmap_single(priv->pciDev,
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
+ pci_unmap_single(priv->pci_dev,
list->buffer[0].address,
TLAN_MAX_FRAME_SIZE,
PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any( skb );
+ dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
}
}
-} /* TLan_FreeLists */
+}
- /***************************************************************
- * TLan_PrintDio
- *
- * Returns:
- * Nothing
- * Parms:
- * io_base Base IO port of the device of
- * which to print DIO registers.
- *
- * This function prints out all the internal (DIO)
- * registers of a TLAN chip.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_print_dio
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base Base IO port of the device of
+ * which to print DIO registers.
+ *
+ * This function prints out all the internal (DIO)
+ * registers of a TLAN chip.
+ *
+ **************************************************************/
-static void TLan_PrintDio( u16 io_base )
+static void tlan_print_dio(u16 io_base)
{
u32 data0, data1;
int i;
- printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n",
- io_base );
- printk( "TLAN: Off. +0 +4\n" );
- for ( i = 0; i < 0x4C; i+= 8 ) {
- data0 = TLan_DioRead32( io_base, i );
- data1 = TLan_DioRead32( io_base, i + 0x4 );
- printk( "TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1 );
+ pr_info("Contents of internal registers for io base 0x%04hx\n",
+ io_base);
+ pr_info("Off. +0 +4\n");
+ for (i = 0; i < 0x4C; i += 8) {
+ data0 = tlan_dio_read32(io_base, i);
+ data1 = tlan_dio_read32(io_base, i + 0x4);
+ pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1);
}
-} /* TLan_PrintDio */
+}
- /***************************************************************
- * TLan_PrintList
- *
- * Returns:
- * Nothing
- * Parms:
- * list A pointer to the TLanList structure to
- * be printed.
- * type A string to designate type of list,
- * "Rx" or "Tx".
- * num The index of the list.
- *
- * This function prints out the contents of the list
- * pointed to by the list parameter.
- *
- **************************************************************/
+/***************************************************************
+ *	tlan_print_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * list A pointer to the struct tlan_list structure to
+ * be printed.
+ * type A string to designate type of list,
+ * "Rx" or "Tx".
+ * num The index of the list.
+ *
+ * This function prints out the contents of the list
+ * pointed to by the list parameter.
+ *
+ **************************************************************/
-static void TLan_PrintList( TLanList *list, char *type, int num)
+static void tlan_print_list(struct tlan_list *list, char *type, int num)
{
int i;
- printk( "TLAN: %s List %d at %p\n", type, num, list );
- printk( "TLAN: Forward = 0x%08x\n", list->forward );
- printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat );
- printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize );
- /* for ( i = 0; i < 10; i++ ) { */
- for ( i = 0; i < 2; i++ ) {
- printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
- i, list->buffer[i].count, list->buffer[i].address );
+ pr_info("%s List %d at %p\n", type, num, list);
+ pr_info(" Forward = 0x%08x\n", list->forward);
+ pr_info(" CSTAT = 0x%04hx\n", list->c_stat);
+ pr_info(" Frame Size = 0x%04hx\n", list->frame_size);
+ /* for (i = 0; i < 10; i++) { */
+ for (i = 0; i < 2; i++) {
+ pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
+ i, list->buffer[i].count, list->buffer[i].address);
}
-} /* TLan_PrintList */
+}
- /***************************************************************
- * TLan_ReadAndClearStats
- *
- * Returns:
- * Nothing
- * Parms:
- * dev Pointer to device structure of adapter
- * to which to read stats.
- * record Flag indicating whether to add
- *
- * This functions reads all the internal status registers
- * of the TLAN chip, which clears them as a side effect.
- * It then either adds the values to the device's status
- * struct, or discards them, depending on whether record
- * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
- *
- **************************************************************/
+/***************************************************************
+ * tlan_read_and_clear_stats
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ *				from which to read stats.
+ *		record		Flag indicating whether to record the values
+ *
+ *	This function reads all the internal status registers
+ * of the TLAN chip, which clears them as a side effect.
+ * It then either adds the values to the device's status
+ * struct, or discards them, depending on whether record
+ * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
-static void TLan_ReadAndClearStats( struct net_device *dev, int record )
+static void tlan_read_and_clear_stats(struct net_device *dev, int record)
{
u32 tx_good, tx_under;
u32 rx_good, rx_over;
@@ -2129,41 +2072,42 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
u32 multi_col, single_col;
u32 excess_col, late_col, loss;
- outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR );
- tx_good = inb( dev->base_addr + TLAN_DIO_DATA );
- tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
- tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
- outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR );
- rx_good = inb( dev->base_addr + TLAN_DIO_DATA );
- rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
- rx_over = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
- outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR );
- def_tx = inb( dev->base_addr + TLAN_DIO_DATA );
- def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- crc = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
- code = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
- outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
- multi_col = inb( dev->base_addr + TLAN_DIO_DATA );
- multi_col += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- single_col = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
- single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8;
-
- outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
- excess_col = inb( dev->base_addr + TLAN_DIO_DATA );
- late_col = inb( dev->base_addr + TLAN_DIO_DATA + 1 );
- loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-
- if ( record ) {
+ outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
+ def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
+ def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ code = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
+
+ outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
+ loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+
+ if (record) {
dev->stats.rx_packets += rx_good;
dev->stats.rx_errors += rx_over + crc + code;
dev->stats.tx_packets += tx_good;
dev->stats.tx_errors += tx_under + loss;
- dev->stats.collisions += multi_col + single_col + excess_col + late_col;
+ dev->stats.collisions += multi_col
+ + single_col + excess_col + late_col;
dev->stats.rx_over_errors += rx_over;
dev->stats.rx_crc_errors += crc;
@@ -2173,39 +2117,39 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
dev->stats.tx_carrier_errors += loss;
}
-} /* TLan_ReadAndClearStats */
+}
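The statistics path above assembles multi-byte counters from single-byte data-port reads; this sketch shows the same assembly on an invented four-byte window, with no hardware access involved.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* pretend these bytes came from TLAN_DIO_DATA, +1, +2, +3 */
	uint8_t dio_window[4] = { 0x34, 0x12, 0x01, 0x02 };

	uint32_t tx_good = dio_window[0]
		| (uint32_t)dio_window[1] << 8
		| (uint32_t)dio_window[2] << 16;	/* 24-bit good-frames count */
	uint32_t tx_under = dio_window[3];		/* 8-bit underrun count */

	printf("tx_good=%u tx_under=%u\n",
	       (unsigned int)tx_good, (unsigned int)tx_under);
	return 0;
}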
- /***************************************************************
- * TLan_Reset
- *
- * Returns:
- * 0
- * Parms:
- * dev Pointer to device structure of adapter
- * to be reset.
- *
- * This function resets the adapter and it's physical
- * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
- * Programmer's Guide" for details. The routine tries to
- * implement what is detailed there, though adjustments
- * have been made.
- *
- **************************************************************/
+/***************************************************************
+ *	tlan_reset_adapter
+ *
+ * Returns:
+ * 0
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * to be reset.
+ *
+ *	This function resets the adapter and its physical
+ * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
+ * Programmer's Guide" for details. The routine tries to
+ * implement what is detailed there, though adjustments
+ * have been made.
+ *
+ **************************************************************/
static void
-TLan_ResetAdapter( struct net_device *dev )
+tlan_reset_adapter(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
u32 addr;
u32 data;
u8 data8;
- priv->tlanFullDuplex = false;
- priv->phyOnline=0;
+ priv->tlan_full_duplex = false;
+ priv->phy_online = 0;
netif_carrier_off(dev);
/* 1. Assert reset bit. */
@@ -2216,7 +2160,7 @@ TLan_ResetAdapter( struct net_device *dev )
udelay(1000);
-/* 2. Turn off interrupts. ( Probably isn't necessary ) */
+/* 2. Turn off interrupts. (Probably isn't necessary) */
data = inl(dev->base_addr + TLAN_HOST_CMD);
data |= TLAN_HC_INT_OFF;
@@ -2224,207 +2168,204 @@ TLan_ResetAdapter( struct net_device *dev )
/* 3. Clear AREGs and HASHs. */
- for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) {
- TLan_DioWrite32( dev->base_addr, (u16) i, 0 );
- }
+ for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
+ tlan_dio_write32(dev->base_addr, (u16) i, 0);
/* 4. Setup NetConfig register. */
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
- outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD );
- outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD );
+ outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
+ outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
- outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_SetBit( TLAN_NET_SIO_NMRST, addr );
+ tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
/* 7. Setup the remaining registers. */
- if ( priv->tlanRev >= 0x30 ) {
+ if (priv->tlan_rev >= 0x30) {
data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
- TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 );
+ tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
}
- TLan_PhyDetect( dev );
+ tlan_phy_detect(dev);
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
- if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) {
+ if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
data |= TLAN_NET_CFG_BIT;
- if ( priv->aui == 1 ) {
- TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
- } else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
- TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
- priv->tlanFullDuplex = true;
+ if (priv->aui == 1) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
+ } else if (priv->duplex == TLAN_DUPLEX_FULL) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
+ priv->tlan_full_duplex = true;
} else {
- TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
}
}
- if ( priv->phyNum == 0 ) {
+ if (priv->phy_num == 0)
data |= TLAN_NET_CFG_PHY_EN;
- }
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
- if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
- TLan_FinishReset( dev );
- } else {
- TLan_PhyPowerDown( dev );
- }
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
+ tlan_finish_reset(dev);
+ else
+ tlan_phy_power_down(dev);
-} /* TLan_ResetAdapter */
+}
static void
-TLan_FinishReset( struct net_device *dev )
+tlan_finish_reset(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u8 data;
u32 phy;
u8 sio;
u16 status;
u16 partner;
u16 tlphy_ctl;
- u16 tlphy_par;
+ u16 tlphy_par;
u16 tlphy_id1, tlphy_id2;
- int i;
+ int i;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
- if ( priv->tlanFullDuplex ) {
+ if (priv->tlan_full_duplex)
data |= TLAN_NET_CMD_DUPLEX;
- }
- TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
+ tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
- if ( priv->phyNum == 0 ) {
+ if (priv->phy_num == 0)
data |= TLAN_NET_MASK_MASK7;
- }
- TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data );
- TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
+ tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
+ tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
- if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) ||
- ( priv->aui ) ) {
+ if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
+ (priv->aui)) {
status = MII_GS_LINK;
- printk( "TLAN: %s: Link forced.\n", dev->name );
+ netdev_info(dev, "Link forced\n");
} else {
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- udelay( 1000 );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- if ( (status & MII_GS_LINK) &&
- /* We only support link info on Nat.Sem. PHY's */
- (tlphy_id1 == NAT_SEM_ID1) &&
- (tlphy_id2 == NAT_SEM_ID2) ) {
- TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner );
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par );
-
- printk( "TLAN: %s: Link active with ", dev->name );
- if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) {
- printk( "forced 10%sMbps %s-Duplex\n",
- tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
- tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
- } else {
- printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n",
- tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
- tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
- printk("TLAN: Partner capability: ");
- for (i = 5; i <= 10; i++)
- if (partner & (1<<i))
- printk("%s",media[i-5]);
- printk("\n");
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ if ((status & MII_GS_LINK) &&
+ /* We only support link info on Nat.Sem. PHY's */
+ (tlphy_id1 == NAT_SEM_ID1) &&
+ (tlphy_id2 == NAT_SEM_ID2)) {
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
+
+ netdev_info(dev,
+ "Link active with %s %uMbps %s-Duplex\n",
+ !(tlphy_par & TLAN_PHY_AN_EN_STAT)
+ ? "forced" : "Autonegotiation enabled,",
+ tlphy_par & TLAN_PHY_SPEED_100
+ ? 100 : 10,
+ tlphy_par & TLAN_PHY_DUPLEX_FULL
+ ? "Full" : "Half");
+
+ if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
+ netdev_info(dev, "Partner capability:");
+ for (i = 5; i < 10; i++)
+ if (partner & (1 << i))
+ pr_cont(" %s", media[i-5]);
+ pr_cont("\n");
}
- TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+ TLAN_LED_LINK);
#ifdef MONITOR
/* We have link beat..for now anyway */
- priv->link = 1;
- /*Enabling link beat monitoring */
- TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT );
+ priv->link = 1;
+ /*Enabling link beat monitoring */
+ tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
#endif
} else if (status & MII_GS_LINK) {
- printk( "TLAN: %s: Link active\n", dev->name );
- TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ netdev_info(dev, "Link active\n");
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+ TLAN_LED_LINK);
}
}
- if ( priv->phyNum == 0 ) {
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
- tlphy_ctl |= TLAN_TC_INTEN;
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl );
- sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO );
- sio |= TLAN_NET_SIO_MINTEN;
- TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio );
- }
-
- if ( status & MII_GS_LINK ) {
- TLan_SetMac( dev, 0, dev->dev_addr );
- priv->phyOnline = 1;
- outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
- if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) {
- outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
- }
- outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM );
- outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
+ if (priv->phy_num == 0) {
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ tlphy_ctl |= TLAN_TC_INTEN;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+ sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
+ sio |= TLAN_NET_SIO_MINTEN;
+ tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
+ }
+
+ if (status & MII_GS_LINK) {
+ tlan_set_mac(dev, 0, dev->dev_addr);
+ priv->phy_online = 1;
+ outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
+ if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
+ outb((TLAN_HC_REQ_INT >> 8),
+ dev->base_addr + TLAN_HOST_CMD + 1);
+ outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
netif_carrier_on(dev);
} else {
- printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n",
- dev->name );
- TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
+ netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
+ tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
return;
}
- TLan_SetMulticastList(dev);
+ tlan_set_multicast_list(dev);
-} /* TLan_FinishReset */
+}
- /***************************************************************
- * TLan_SetMac
- *
- * Returns:
- * Nothing
- * Parms:
- * dev Pointer to device structure of adapter
- * on which to change the AREG.
- * areg The AREG to set the address in (0 - 3).
- * mac A pointer to an array of chars. Each
- * element stores one byte of the address.
- * IE, it isn't in ascii.
- *
- * This function transfers a MAC address to one of the
- * TLAN AREGs (address registers). The TLAN chip locks
- * the register on writing to offset 0 and unlocks the
- * register after writing to offset 5. If NULL is passed
- * in mac, then the AREG is filled with 0's.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_set_mac
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * on which to change the AREG.
+ * areg The AREG to set the address in (0 - 3).
+ * mac A pointer to an array of chars. Each
+ * element stores one byte of the address.
+ * I.e., it isn't in ASCII.
+ *
+ * This function transfers a MAC address to one of the
+ * TLAN AREGs (address registers). The TLAN chip locks
+ * the register on writing to offset 0 and unlocks the
+ * register after writing to offset 5. If NULL is passed
+ * in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
-static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
+static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
{
int i;
areg *= 6;
- if ( mac != NULL ) {
- for ( i = 0; i < 6; i++ )
- TLan_DioWrite8( dev->base_addr,
- TLAN_AREG_0 + areg + i, mac[i] );
+ if (mac != NULL) {
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, mac[i]);
} else {
- for ( i = 0; i < 6; i++ )
- TLan_DioWrite8( dev->base_addr,
- TLAN_AREG_0 + areg + i, 0 );
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, 0);
}
-} /* TLan_SetMac */
+}
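
A minimal caller sketch, illustrative only and not part of this patch (the helper name example_program_aregs is hypothetical): the lock-on-offset-0 / unlock-after-offset-5 behaviour described above lets a caller hand the whole address, or NULL, to tlan_set_mac():

static void example_program_aregs(struct net_device *dev)
{
	/* tlan_set_mac() performs the full six-byte AREG write,
	 * so the chip's lock/unlock sequence happens internally.
	 */
	tlan_set_mac(dev, 0, dev->dev_addr);	/* program station address */
	tlan_set_mac(dev, 1, NULL);		/* fill AREG 1 with zeros */
}
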
@@ -2432,205 +2373,199 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver PHY Layer Routines
+ThunderLAN driver PHY layer routines
******************************************************************************
*****************************************************************************/
- /*********************************************************************
- * TLan_PhyPrint
- *
- * Returns:
- * Nothing
- * Parms:
- * dev A pointer to the device structure of the
- * TLAN device having the PHYs to be detailed.
- *
- * This function prints the registers a PHY (aka transceiver).
- *
- ********************************************************************/
+/*********************************************************************
+ * tlan_phy_print
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the
+ * TLAN device having the PHYs to be detailed.
+ *
+ * This function prints the registers of a PHY (aka transceiver).
+ *
+ ********************************************************************/
-static void TLan_PhyPrint( struct net_device *dev )
+static void tlan_phy_print(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 i, data0, data1, data2, data3, phy;
- phy = priv->phy[priv->phyNum];
-
- if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
- printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
- } else if ( phy <= TLAN_PHY_MAX_ADDR ) {
- printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
- printk( "TLAN: Off. +0 +1 +2 +3\n" );
- for ( i = 0; i < 0x20; i+= 4 ) {
- printk( "TLAN: 0x%02x", i );
- TLan_MiiReadReg( dev, phy, i, &data0 );
- printk( " 0x%04hx", data0 );
- TLan_MiiReadReg( dev, phy, i + 1, &data1 );
- printk( " 0x%04hx", data1 );
- TLan_MiiReadReg( dev, phy, i + 2, &data2 );
- printk( " 0x%04hx", data2 );
- TLan_MiiReadReg( dev, phy, i + 3, &data3 );
- printk( " 0x%04hx\n", data3 );
+ phy = priv->phy[priv->phy_num];
+
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ netdev_info(dev, "Unmanaged PHY\n");
+ } else if (phy <= TLAN_PHY_MAX_ADDR) {
+ netdev_info(dev, "PHY 0x%02x\n", phy);
+ pr_info(" Off. +0 +1 +2 +3\n");
+ for (i = 0; i < 0x20; i += 4) {
+ tlan_mii_read_reg(dev, phy, i, &data0);
+ tlan_mii_read_reg(dev, phy, i + 1, &data1);
+ tlan_mii_read_reg(dev, phy, i + 2, &data2);
+ tlan_mii_read_reg(dev, phy, i + 3, &data3);
+ pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
+ i, data0, data1, data2, data3);
}
} else {
- printk( "TLAN: Device %s, Invalid PHY.\n", dev->name );
+ netdev_info(dev, "Invalid PHY\n");
}
-} /* TLan_PhyPrint */
+}
- /*********************************************************************
- * TLan_PhyDetect
- *
- * Returns:
- * Nothing
- * Parms:
- * dev A pointer to the device structure of the adapter
- * for which the PHY needs determined.
- *
- * So far I've found that adapters which have external PHYs
- * may also use the internal PHY for part of the functionality.
- * (eg, AUI/Thinnet). This function finds out if this TLAN
- * chip has an internal PHY, and then finds the first external
- * PHY (starting from address 0) if it exists).
- *
- ********************************************************************/
+/*********************************************************************
+ * tlan_phy_detect
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the adapter
+ * for which the PHY needs to be determined.
+ *
+ * So far I've found that adapters which have external PHYs
+ * may also use the internal PHY for part of the functionality.
+ * (e.g., AUI/Thinnet). This function finds out if this TLAN
+ * chip has an internal PHY, and then finds the first external
+ * PHY (starting from address 0), if it exists.
+ *
+ ********************************************************************/
-static void TLan_PhyDetect( struct net_device *dev )
+static void tlan_phy_detect(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 control;
u16 hi;
u16 lo;
u32 phy;
- if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
- priv->phyNum = 0xFFFF;
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ priv->phy_num = 0xffff;
return;
}
- TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi );
+ tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
- if ( hi != 0xFFFF ) {
+ if (hi != 0xffff)
priv->phy[0] = TLAN_PHY_MAX_ADDR;
- } else {
+ else
priv->phy[0] = TLAN_PHY_NONE;
- }
priv->phy[1] = TLAN_PHY_NONE;
- for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) {
- TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
- if ( ( control != 0xFFFF ) ||
- ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
- TLAN_DBG( TLAN_DEBUG_GNRL,
- "PHY found at %02x %04x %04x %04x\n",
- phy, control, hi, lo );
- if ( ( priv->phy[1] == TLAN_PHY_NONE ) &&
- ( phy != TLAN_PHY_MAX_ADDR ) ) {
+ for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
+ if ((control != 0xffff) ||
+ (hi != 0xffff) || (lo != 0xffff)) {
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "PHY found at %02x %04x %04x %04x\n",
+ phy, control, hi, lo);
+ if ((priv->phy[1] == TLAN_PHY_NONE) &&
+ (phy != TLAN_PHY_MAX_ADDR)) {
priv->phy[1] = phy;
}
}
}
- if ( priv->phy[1] != TLAN_PHY_NONE ) {
- priv->phyNum = 1;
- } else if ( priv->phy[0] != TLAN_PHY_NONE ) {
- priv->phyNum = 0;
- } else {
- printk( "TLAN: Cannot initialize device, no PHY was found!\n" );
- }
+ if (priv->phy[1] != TLAN_PHY_NONE)
+ priv->phy_num = 1;
+ else if (priv->phy[0] != TLAN_PHY_NONE)
+ priv->phy_num = 0;
+ else
+ netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
-} /* TLan_PhyDetect */
+}
-static void TLan_PhyPowerDown( struct net_device *dev )
+static void tlan_phy_power_down(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 value;
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
- TLan_MiiSync( dev->base_addr );
- TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
- if ( ( priv->phyNum == 0 ) &&
- ( priv->phy[1] != TLAN_PHY_NONE ) &&
- ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
- TLan_MiiSync( dev->base_addr );
- TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ if ((priv->phy_num == 0) &&
+ (priv->phy[1] != TLAN_PHY_NONE) &&
+ (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
}
/* Wait for 50 ms and powerup
* This is abitrary. It is intended to make sure the
* transceiver settles.
*/
- TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP );
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
-} /* TLan_PhyPowerDown */
+}
-static void TLan_PhyPowerUp( struct net_device *dev )
+static void tlan_phy_power_up(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 value;
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name );
- TLan_MiiSync( dev->base_addr );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
value = MII_GC_LOOPBK;
- TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
- TLan_MiiSync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ tlan_mii_sync(dev->base_addr);
/* Wait for 500 ms and reset the
* transceiver. The TLAN docs say both 50 ms and
* 500 ms, so do the longer, just in case.
*/
- TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET );
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
-} /* TLan_PhyPowerUp */
+}
-static void TLan_PhyReset( struct net_device *dev )
+static void tlan_phy_reset(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 phy;
u16 value;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name );
- TLan_MiiSync( dev->base_addr );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
value = MII_GC_LOOPBK | MII_GC_RESET;
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value );
- TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
- while ( value & MII_GC_RESET ) {
- TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
- }
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+ while (value & MII_GC_RESET)
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
/* Wait for 500 ms and initialize.
* I don't remember why I wait this long.
* I've changed this to 50ms, as it seems long enough.
*/
- TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK );
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
-} /* TLan_PhyReset */
+}
-static void TLan_PhyStartLink( struct net_device *dev )
+static void tlan_phy_start_link(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 ability;
u16 control;
u16 data;
@@ -2638,86 +2573,87 @@ static void TLan_PhyStartLink( struct net_device *dev )
u16 status;
u16 tctl;
- phy = priv->phy[priv->phyNum];
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability );
+ phy = priv->phy[priv->phy_num];
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
- if ( ( status & MII_GS_AUTONEG ) &&
- ( ! priv->aui ) ) {
+ if ((status & MII_GS_AUTONEG) &&
+ (!priv->aui)) {
ability = status >> 11;
- if ( priv->speed == TLAN_SPEED_10 &&
- priv->duplex == TLAN_DUPLEX_HALF) {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
- } else if ( priv->speed == TLAN_SPEED_10 &&
- priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = true;
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
- } else if ( priv->speed == TLAN_SPEED_100 &&
- priv->duplex == TLAN_DUPLEX_HALF) {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
- } else if ( priv->speed == TLAN_SPEED_100 &&
- priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = true;
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
+ if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
+ } else if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
} else {
/* Set Auto-Neg advertisement */
- TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1);
+ tlan_mii_write_reg(dev, phy, MII_AN_ADV,
+ (ability << 5) | 1);
/* Enablee Auto-Neg */
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
/* Restart Auto-Neg */
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
/* Wait for 4 sec for autonegotiation
- * to complete. The max spec time is less than this
- * but the card need additional time to start AN.
- * .5 sec should be plenty extra.
- */
- printk( "TLAN: %s: Starting autonegotiation.\n", dev->name );
- TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN );
+ * to complete. The max spec time is less than this
+ * but the card needs additional time to start AN.
+ * .5 sec should be plenty extra.
+ */
+ netdev_info(dev, "Starting autonegotiation\n");
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
return;
}
}
- if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) {
- priv->phyNum = 0;
- data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
- TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+ if ((priv->aui) && (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+ | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+ tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
return;
- } else if ( priv->phyNum == 0 ) {
+ } else if (priv->phy_num == 0) {
control = 0;
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl );
- if ( priv->aui ) {
- tctl |= TLAN_TC_AUISEL;
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
+ if (priv->aui) {
+ tctl |= TLAN_TC_AUISEL;
} else {
- tctl &= ~TLAN_TC_AUISEL;
- if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+ tctl &= ~TLAN_TC_AUISEL;
+ if (priv->duplex == TLAN_DUPLEX_FULL) {
control |= MII_GC_DUPLEX;
- priv->tlanFullDuplex = true;
+ priv->tlan_full_duplex = true;
}
- if ( priv->speed == TLAN_SPEED_100 ) {
+ if (priv->speed == TLAN_SPEED_100)
control |= MII_GC_SPEEDSEL;
- }
}
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control );
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
}
/* Wait for 2 sec to give the transceiver time
* to establish link.
*/
- TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET );
+ tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
-} /* TLan_PhyStartLink */
+}
-static void TLan_PhyFinishAutoNeg( struct net_device *dev )
+static void tlan_phy_finish_auto_neg(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 an_adv;
u16 an_lpa;
u16 data;
@@ -2725,115 +2661,118 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
u16 phy;
u16 status;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- udelay( 1000 );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
- if ( ! ( status & MII_GS_AUTOCMPLT ) ) {
+ if (!(status & MII_GS_AUTOCMPLT)) {
/* Wait for 8 sec to give the process
* more time. Perhaps we should fail after a while.
*/
- if (!priv->neg_be_verbose++) {
- pr_info("TLAN: Giving autonegotiation more time.\n");
- pr_info("TLAN: Please check that your adapter has\n");
- pr_info("TLAN: been properly connected to a HUB or Switch.\n");
- pr_info("TLAN: Trying to establish link in the background...\n");
- }
- TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN );
+ if (!priv->neg_be_verbose++) {
+ pr_info("Giving autonegotiation more time.\n");
+ pr_info("Please check that your adapter has\n");
+ pr_info("been properly connected to a HUB or Switch.\n");
+ pr_info("Trying to establish link in the background...\n");
+ }
+ tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
return;
}
- printk( "TLAN: %s: Autonegotiation complete.\n", dev->name );
- TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv );
- TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
+ netdev_info(dev, "Autonegotiation complete\n");
+ tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
mode = an_adv & an_lpa & 0x03E0;
- if ( mode & 0x0100 ) {
- priv->tlanFullDuplex = true;
- } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
- priv->tlanFullDuplex = true;
- }
-
- if ( ( ! ( mode & 0x0180 ) ) &&
- ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) &&
- ( priv->phyNum != 0 ) ) {
- priv->phyNum = 0;
- data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
- TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+ if (mode & 0x0100)
+ priv->tlan_full_duplex = true;
+ else if (!(mode & 0x0080) && (mode & 0x0040))
+ priv->tlan_full_duplex = true;
+
+ if ((!(mode & 0x0180)) &&
+ (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
+ (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+ | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+ tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
return;
}
- if ( priv->phyNum == 0 ) {
- if ( ( priv->duplex == TLAN_DUPLEX_FULL ) ||
- ( an_adv & an_lpa & 0x0040 ) ) {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL,
- MII_GC_AUTOENB | MII_GC_DUPLEX );
- pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n" );
+ if (priv->phy_num == 0) {
+ if ((priv->duplex == TLAN_DUPLEX_FULL) ||
+ (an_adv & an_lpa & 0x0040)) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB | MII_GC_DUPLEX);
+ netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
} else {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
- pr_info( "TLAN: Starting internal PHY with HALF-DUPLEX\n" );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB);
+ netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
}
}
/* Wait for 100 ms. No reason in partiticular.
*/
- TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET );
+ tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
-} /* TLan_PhyFinishAutoNeg */
+}
#ifdef MONITOR
- /*********************************************************************
- *
- * TLan_phyMonitor
- *
- * Returns:
- * None
- *
- * Params:
- * dev The device structure of this device.
- *
- *
- * This function monitors PHY condition by reading the status
- * register via the MII bus. This can be used to give info
- * about link changes (up/down), and possible switch to alternate
- * media.
- *
- * ******************************************************************/
-
-void TLan_PhyMonitor( struct net_device *dev )
+/*********************************************************************
+ *
+ * tlan_phy_monitor
+ *
+ * Returns:
+ * None
+ *
+ * Params:
+ * dev The device structure of this device.
+ *
+ *
+ * This function monitors PHY condition by reading the status
+ * register via the MII bus. This can be used to give info
+ * about link changes (up/down), and possible switch to alternate
+ * media.
+ *
+ *******************************************************************/
+
+void tlan_phy_monitor(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 phy;
u16 phy_status;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
- /* Get PHY status register */
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status );
+ /* Get PHY status register */
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
- /* Check if link has been lost */
- if (!(phy_status & MII_GS_LINK)) {
- if (priv->link) {
- priv->link = 0;
- printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name);
- netif_carrier_off(dev);
- TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
- return;
+ /* Check if link has been lost */
+ if (!(phy_status & MII_GS_LINK)) {
+ if (priv->link) {
+ priv->link = 0;
+ printk(KERN_DEBUG "TLAN: %s has lost link\n",
+ dev->name);
+ netif_carrier_off(dev);
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+ return;
}
}
- /* Link restablished? */
- if ((phy_status & MII_GS_LINK) && !priv->link) {
- priv->link = 1;
- printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name);
+ /* Link restablished? */
+ if ((phy_status & MII_GS_LINK) && !priv->link) {
+ priv->link = 1;
+ printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
+ dev->name);
netif_carrier_on(dev);
- }
+ }
/* Setup a new monitor */
- TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
}
#endif /* MONITOR */
@@ -2842,47 +2781,48 @@ void TLan_PhyMonitor( struct net_device *dev )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver MII Routines
+ThunderLAN driver MII routines
- These routines are based on the information in Chap. 2 of the
- "ThunderLAN Programmer's Guide", pp. 15-24.
+these routines are based on the information in chap. 2 of the
+"ThunderLAN Programmer's Guide", pp. 15-24.
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_MiiReadReg
- *
- * Returns:
- * false if ack received ok
- * true if no ack received or other error
- *
- * Parms:
- * dev The device structure containing
- * The io address and interrupt count
- * for this device.
- * phy The address of the PHY to be queried.
- * reg The register whose contents are to be
- * retrieved.
- * val A pointer to a variable to store the
- * retrieved value.
- *
- * This function uses the TLAN's MII bus to retrieve the contents
- * of a given register on a PHY. It sends the appropriate info
- * and then reads the 16-bit register value from the MII bus via
- * the TLAN SIO register.
- *
- **************************************************************/
-
-static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+/***************************************************************
+ * tlan_mii_read_reg
+ *
+ * Returns:
+ * false if ack received ok
+ * true if no ack received or other error
+ *
+ * Parms:
+ * dev The device structure containing
+ * The io address and interrupt count
+ * for this device.
+ * phy The address of the PHY to be queried.
+ * reg The register whose contents are to be
+ * retrieved.
+ * val A pointer to a variable to store the
+ * retrieved value.
+ *
+ * This function uses the TLAN's MII bus to retrieve the contents
+ * of a given register on a PHY. It sends the appropriate info
+ * and then reads the 16-bit register value from the MII bus via
+ * the TLAN SIO register.
+ *
+ **************************************************************/
+
+static bool
+tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
u8 nack;
u16 sio, tmp;
- u32 i;
+ u32 i;
bool err;
int minten;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
err = false;
@@ -2892,48 +2832,48 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
if (!in_irq())
spin_lock_irqsave(&priv->lock, flags);
- TLan_MiiSync(dev->base_addr);
+ tlan_mii_sync(dev->base_addr);
- minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
- if ( minten )
- TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio);
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
- TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
- TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Read ( 10b ) */
- TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
- TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
- TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio); /* Change direction */
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Clock Idle bit */
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Wait 300ns */
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */
- nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio); /* Check for ACK */
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio); /* Finish ACK */
- if (nack) { /* No ACK, so fake it */
+ nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */
+ if (nack) { /* no ACK, so fake it */
for (i = 0; i < 16; i++) {
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
tmp = 0xffff;
err = true;
} else { /* ACK, so read data */
for (tmp = 0, i = 0x8000; i; i >>= 1) {
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
- if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio))
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
tmp |= i;
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
}
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Idle cycle */
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
- if ( minten )
- TLan_SetBit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
*val = tmp;
@@ -2942,116 +2882,117 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
return err;
-} /* TLan_MiiReadReg */
+}
- /***************************************************************
- * TLan_MiiSendData
- *
- * Returns:
- * Nothing
- * Parms:
- * base_port The base IO port of the adapter in
- * question.
- * dev The address of the PHY to be queried.
- * data The value to be placed on the MII bus.
- * num_bits The number of bits in data that are to
- * be placed on the MII bus.
- *
- * This function sends on sequence of bits on the MII
- * configuration bus.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_mii_send_data
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ * dev The address of the PHY to be queried.
+ * data The value to be placed on the MII bus.
+ * num_bits The number of bits in data that are to
+ * be placed on the MII bus.
+ *
+ * This function sends a sequence of bits on the MII
+ * configuration bus.
+ *
+ **************************************************************/
-static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
+static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
{
u16 sio;
u32 i;
- if ( num_bits == 0 )
+ if (num_bits == 0)
return;
- outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_SetBit( TLAN_NET_SIO_MTXEN, sio );
+ tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
- for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) {
- TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
- (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
- if ( data & i )
- TLan_SetBit( TLAN_NET_SIO_MDATA, sio );
+ for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+ if (data & i)
+ tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
else
- TLan_ClearBit( TLAN_NET_SIO_MDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
- (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
}
-} /* TLan_MiiSendData */
+}
- /***************************************************************
- * TLan_MiiSync
- *
- * Returns:
- * Nothing
- * Parms:
- * base_port The base IO port of the adapter in
- * question.
- *
- * This functions syncs all PHYs in terms of the MII configuration
- * bus.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_mii_sync
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ *
+ * This function syncs all PHYs in terms of the MII configuration
+ * bus.
+ *
+ **************************************************************/
-static void TLan_MiiSync( u16 base_port )
+static void tlan_mii_sync(u16 base_port)
{
int i;
u16 sio;
- outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio );
- for ( i = 0; i < 32; i++ ) {
- TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
- TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
+ for (i = 0; i < 32; i++) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
-} /* TLan_MiiSync */
+}
- /***************************************************************
- * TLan_MiiWriteReg
- *
- * Returns:
- * Nothing
- * Parms:
- * dev The device structure for the device
- * to write to.
- * phy The address of the PHY to be written to.
- * reg The register whose contents are to be
- * written.
- * val The value to be written to the register.
- *
- * This function uses the TLAN's MII bus to write the contents of a
- * given register on a PHY. It sends the appropriate info and then
- * writes the 16-bit register value from the MII configuration bus
- * via the TLAN SIO register.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_mii_write_reg
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure for the device
+ * to write to.
+ * phy The address of the PHY to be written to.
+ * reg The register whose contents are to be
+ * written.
+ * val The value to be written to the register.
+ *
+ * This function uses the TLAN's MII bus to write the contents of a
+ * given register on a PHY. It sends the appropriate info and then
+ * writes the 16-bit register value from the MII configuration bus
+ * via the TLAN SIO register.
+ *
+ **************************************************************/
-static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
+static void
+tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
u16 sio;
int minten;
unsigned long flags = 0;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -3059,30 +3000,30 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
if (!in_irq())
spin_lock_irqsave(&priv->lock, flags);
- TLan_MiiSync( dev->base_addr );
+ tlan_mii_sync(dev->base_addr);
- minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
- if ( minten )
- TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio );
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
- TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
- TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Write ( 01b ) */
- TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
- TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
- TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Send ACK */
- TLan_MiiSendData( dev->base_addr, val, 16 ); /* Send Data */
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */
+ tlan_mii_send_data(dev->base_addr, val, 16); /* send data */
- TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); /* Idle cycle */
- TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
- if ( minten )
- TLan_SetBit( TLAN_NET_SIO_MINTEN, sio );
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
-} /* TLan_MiiWriteReg */
+}
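
A minimal sketch, illustrative only and not part of this patch (example_restart_aneg is a hypothetical name), showing the renamed MII helpers used together to restart autonegotiation on the active PHY with the same raw MII_GEN_CTL values tlan_phy_start_link() writes:

static void example_restart_aneg(struct net_device *dev)
{
	struct tlan_priv *priv = netdev_priv(dev);
	u16 phy = priv->phy[priv->phy_num];
	u16 status;

	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
	if (status & MII_GS_AUTONEG) {
		/* enable autonegotiation, then restart it */
		tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
		tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
	}
}
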
@@ -3090,229 +3031,226 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Eeprom routines
+ThunderLAN driver eeprom routines
- The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
- EEPROM. These functions are based on information in Microchip's
- data sheet. I don't know how well this functions will work with
- other EEPROMs.
+the Compaq netelligent 10 and 10/100 cards use a microchip 24C02A
+EEPROM. these functions are based on information in microchip's
+data sheet. I don't know how well these functions will work with
+other EEPROMs.
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_EeSendStart
- *
- * Returns:
- * Nothing
- * Parms:
- * io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- *
- * This function sends a start cycle to an EEPROM attached
- * to a TLAN chip.
- *
- **************************************************************/
-
-static void TLan_EeSendStart( u16 io_base )
+/***************************************************************
+ * tlan_ee_send_start
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ *
+ * This function sends a start cycle to an EEPROM attached
+ * to a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_ee_send_start(u16 io_base)
{
u16 sio;
- outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
-
-} /* TLan_EeSendStart */
-
-
-
-
- /***************************************************************
- * TLan_EeSendByte
- *
- * Returns:
- * If the correct ack was received, 0, otherwise 1
- * Parms: io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- * data The 8 bits of information to
- * send to the EEPROM.
- * stop If TLAN_EEPROM_STOP is passed, a
- * stop cycle is sent after the
- * byte is sent after the ack is
- * read.
- *
- * This function sends a byte on the serial EEPROM line,
- * driving the clock to send each bit. The function then
- * reverses transmission direction and reads an acknowledge
- * bit.
- *
- **************************************************************/
-
-static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_send_byte
+ *
+ * Returns:
+ * If the correct ack was received, 0, otherwise 1
+ * Parms: io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data The 8 bits of information to
+ * send to the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is sent after the ack is
+ * read.
+ *
+ * This function sends a byte on the serial EEPROM line,
+ * driving the clock to send each bit. The function then
+ * reverses transmission direction and reads an acknowledge
+ * bit.
+ *
+ **************************************************************/
+
+static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
{
int err;
u8 place;
u16 sio;
- outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
/* Assume clock is low, tx is enabled; */
- for ( place = 0x80; place != 0; place >>= 1 ) {
- if ( place & data )
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ for (place = 0x80; place != 0; place >>= 1) {
+ if (place & data)
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
else
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
}
- TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
- if ( ( ! err ) && stop ) {
+ if ((!err) && stop) {
/* STOP, raise data while clock is high */
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
}
return err;
-} /* TLan_EeSendByte */
-
-
-
-
- /***************************************************************
- * TLan_EeReceiveByte
- *
- * Returns:
- * Nothing
- * Parms:
- * io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- * data An address to a char to hold the
- * data sent from the EEPROM.
- * stop If TLAN_EEPROM_STOP is passed, a
- * stop cycle is sent after the
- * byte is received, and no ack is
- * sent.
- *
- * This function receives 8 bits of data from the EEPROM
- * over the serial link. It then sends and ack bit, or no
- * ack and a stop bit. This function is used to retrieve
- * data after the address of a byte in the EEPROM has been
- * sent.
- *
- **************************************************************/
-
-static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_receive_byte
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data An address to a char to hold the
+ * data sent from the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is received, and no ack is
+ * sent.
+ *
+ * This function receives 8 bits of data from the EEPROM
+ * over the serial link. It then sends an ack bit, or no
+ * ack and a stop bit. This function is used to retrieve
+ * data after the address of a byte in the EEPROM has been
+ * sent.
+ *
+ **************************************************************/
+
+static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
{
u8 place;
u16 sio;
- outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
*data = 0;
/* Assume clock is low, tx is enabled; */
- TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
- for ( place = 0x80; place; place >>= 1 ) {
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) )
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ for (place = 0x80; place; place >>= 1) {
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
*data |= place;
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
}
- TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
- if ( ! stop ) {
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* Ack = 0 */
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ if (!stop) {
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
} else {
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
/* STOP, raise data while clock is high */
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
- }
-
-} /* TLan_EeReceiveByte */
-
-
-
-
- /***************************************************************
- * TLan_EeReadByte
- *
- * Returns:
- * No error = 0, else, the stage at which the error
- * occurred.
- * Parms:
- * io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- * ee_addr The address of the byte in the
- * EEPROM whose contents are to be
- * retrieved.
- * data An address to a char to hold the
- * data obtained from the EEPROM.
- *
- * This function reads a byte of information from an byte
- * cell in the EEPROM.
- *
- **************************************************************/
-
-static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data )
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_read_byte
+ *
+ * Returns:
+ * No error = 0, else, the stage at which the error
+ * occurred.
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * ee_addr The address of the byte in the
+ * EEPROM whose contents are to be
+ * retrieved.
+ * data An address to a char to hold the
+ * data obtained from the EEPROM.
+ *
+ * This function reads a byte of information from a byte
+ * cell in the EEPROM.
+ *
+ **************************************************************/
+
+static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
{
int err;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
- int ret=0;
+ int ret = 0;
spin_lock_irqsave(&priv->lock, flags);
- TLan_EeSendStart( dev->base_addr );
- err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK );
- if (err)
- {
- ret=1;
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 1;
goto fail;
}
- err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK );
- if (err)
- {
- ret=2;
+ err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 2;
goto fail;
}
- TLan_EeSendStart( dev->base_addr );
- err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK );
- if (err)
- {
- ret=3;
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 3;
goto fail;
}
- TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP );
+ tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
fail:
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
-} /* TLan_EeReadByte */
+}
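
A minimal sketch, illustrative only and not part of this patch (example_read_station_addr is a hypothetical name), of the start / control-byte / address / repeated-start / read-with-stop sequence in use: reading a six-byte station address one tlan_ee_read_byte() call at a time, assuming the adapter's addr_ofs field gives the EEPROM offset of the address, as the probe path does:

static int example_read_station_addr(struct net_device *dev, u8 *mac)
{
	struct tlan_priv *priv = netdev_priv(dev);
	int i, err;

	for (i = 0; i < 6; i++) {
		err = tlan_ee_read_byte(dev,
					(u8)(priv->adapter->addr_ofs + i),
					&mac[i]);
		if (err)
			return err;	/* stage at which the read failed */
	}
	return 0;
}
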
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 3315ced774e2..5fc98a8e4889 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -20,8 +20,8 @@
********************************************************************/
-#include <asm/io.h>
-#include <asm/types.h>
+#include <linux/io.h>
+#include <linux/types.h>
#include <linux/netdevice.h>
@@ -40,8 +40,11 @@
#define TLAN_IGNORE 0
#define TLAN_RECORD 1
-#define TLAN_DBG(lvl, format, args...) \
- do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0)
+#define TLAN_DBG(lvl, format, args...) \
+ do { \
+ if (debug&lvl) \
+ printk(KERN_DEBUG "TLAN: " format, ##args); \
+ } while (0)
#define TLAN_DEBUG_GNRL 0x0001
#define TLAN_DEBUG_TX 0x0002
@@ -50,7 +53,8 @@
#define TLAN_DEBUG_PROBE 0x0010
#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */
-#define MAX_TLAN_BOARDS 8 /* Max number of boards installed at a time */
+#define MAX_TLAN_BOARDS 8 /* Max number of boards installed
+ at a time */
/*****************************************************************
@@ -70,13 +74,13 @@
#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
#endif
-typedef struct tlan_adapter_entry {
- u16 vendorId;
- u16 deviceId;
- char *deviceLabel;
+struct tlan_adapter_entry {
+ u16 vendor_id;
+ u16 device_id;
+ char *device_label;
u32 flags;
- u16 addrOfs;
-} TLanAdapterEntry;
+ u16 addr_ofs;
+};
#define TLAN_ADAPTER_NONE 0x00000000
#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
@@ -129,18 +133,18 @@ typedef struct tlan_adapter_entry {
#define TLAN_CSTAT_DP_PR 0x0100
-typedef struct tlan_buffer_ref_tag {
+struct tlan_buffer {
u32 count;
u32 address;
-} TLanBufferRef;
+};
-typedef struct tlan_list_tag {
+struct tlan_list {
u32 forward;
- u16 cStat;
- u16 frameSize;
- TLanBufferRef buffer[TLAN_BUFFERS_PER_LIST];
-} TLanList;
+ u16 c_stat;
+ u16 frame_size;
+ struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
+};
typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
@@ -164,49 +168,49 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
*
****************************************************************/
-typedef struct tlan_private_tag {
- struct net_device *nextDevice;
- struct pci_dev *pciDev;
+struct tlan_priv {
+ struct net_device *next_device;
+ struct pci_dev *pci_dev;
struct net_device *dev;
- void *dmaStorage;
- dma_addr_t dmaStorageDMA;
- unsigned int dmaSize;
- u8 *padBuffer;
- TLanList *rxList;
- dma_addr_t rxListDMA;
- u8 *rxBuffer;
- dma_addr_t rxBufferDMA;
- u32 rxHead;
- u32 rxTail;
- u32 rxEocCount;
- TLanList *txList;
- dma_addr_t txListDMA;
- u8 *txBuffer;
- dma_addr_t txBufferDMA;
- u32 txHead;
- u32 txInProgress;
- u32 txTail;
- u32 txBusyCount;
- u32 phyOnline;
- u32 timerSetAt;
- u32 timerType;
+ void *dma_storage;
+ dma_addr_t dma_storage_dma;
+ unsigned int dma_size;
+ u8 *pad_buffer;
+ struct tlan_list *rx_list;
+ dma_addr_t rx_list_dma;
+ u8 *rx_buffer;
+ dma_addr_t rx_buffer_dma;
+ u32 rx_head;
+ u32 rx_tail;
+ u32 rx_eoc_count;
+ struct tlan_list *tx_list;
+ dma_addr_t tx_list_dma;
+ u8 *tx_buffer;
+ dma_addr_t tx_buffer_dma;
+ u32 tx_head;
+ u32 tx_in_progress;
+ u32 tx_tail;
+ u32 tx_busy_count;
+ u32 phy_online;
+ u32 timer_set_at;
+ u32 timer_type;
struct timer_list timer;
struct board *adapter;
- u32 adapterRev;
+ u32 adapter_rev;
u32 aui;
u32 debug;
u32 duplex;
u32 phy[2];
- u32 phyNum;
+ u32 phy_num;
u32 speed;
- u8 tlanRev;
- u8 tlanFullDuplex;
+ u8 tlan_rev;
+ u8 tlan_full_duplex;
spinlock_t lock;
u8 link;
u8 is_eisa;
struct work_struct tlan_tqueue;
u8 neg_be_verbose;
-} TLanPrivateInfo;
+};
@@ -247,7 +251,7 @@ typedef struct tlan_private_tag {
****************************************************************/
#define TLAN_HOST_CMD 0x00
-#define TLAN_HC_GO 0x80000000
+#define TLAN_HC_GO 0x80000000
#define TLAN_HC_STOP 0x40000000
#define TLAN_HC_ACK 0x20000000
#define TLAN_HC_CS_MASK 0x1FE00000
@@ -283,7 +287,7 @@ typedef struct tlan_private_tag {
#define TLAN_NET_CMD_TRFRAM 0x02
#define TLAN_NET_CMD_TXPACE 0x01
#define TLAN_NET_SIO 0x01
-#define TLAN_NET_SIO_MINTEN 0x80
+#define TLAN_NET_SIO_MINTEN 0x80
#define TLAN_NET_SIO_ECLOK 0x40
#define TLAN_NET_SIO_ETXEN 0x20
#define TLAN_NET_SIO_EDATA 0x10
@@ -304,7 +308,7 @@ typedef struct tlan_private_tag {
#define TLAN_NET_MASK_MASK4 0x10
#define TLAN_NET_MASK_RSRVD 0x0F
#define TLAN_NET_CONFIG 0x04
-#define TLAN_NET_CFG_RCLK 0x8000
+#define TLAN_NET_CFG_RCLK 0x8000
#define TLAN_NET_CFG_TCLK 0x4000
#define TLAN_NET_CFG_BIT 0x2000
#define TLAN_NET_CFG_RXCRC 0x1000
@@ -372,7 +376,7 @@ typedef struct tlan_private_tag {
/* Generic MII/PHY Registers */
#define MII_GEN_CTL 0x00
-#define MII_GC_RESET 0x8000
+#define MII_GC_RESET 0x8000
#define MII_GC_LOOPBK 0x4000
#define MII_GC_SPEEDSEL 0x2000
#define MII_GC_AUTOENB 0x1000
@@ -397,9 +401,9 @@ typedef struct tlan_private_tag {
#define MII_GS_EXTCAP 0x0001
#define MII_GEN_ID_HI 0x02
#define MII_GEN_ID_LO 0x03
-#define MII_GIL_OUI 0xFC00
-#define MII_GIL_MODEL 0x03F0
-#define MII_GIL_REVISION 0x000F
+#define MII_GIL_OUI 0xFC00
+#define MII_GIL_MODEL 0x03F0
+#define MII_GIL_REVISION 0x000F
#define MII_AN_ADV 0x04
#define MII_AN_LPA 0x05
#define MII_AN_EXP 0x06
@@ -408,7 +412,7 @@ typedef struct tlan_private_tag {
#define TLAN_TLPHY_ID 0x10
#define TLAN_TLPHY_CTL 0x11
-#define TLAN_TC_IGLINK 0x8000
+#define TLAN_TC_IGLINK 0x8000
#define TLAN_TC_SWAPOL 0x4000
#define TLAN_TC_AUISEL 0x2000
#define TLAN_TC_SQEEN 0x1000
@@ -435,41 +439,41 @@ typedef struct tlan_private_tag {
#define LEVEL1_ID1 0x7810
#define LEVEL1_ID2 0x0000
-#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0
+#define CIRC_INC(a, b) if (++a >= b) a = 0
/* Routines to access internal registers. */
-static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
+static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
-} /* TLan_DioRead8 */
+}
-static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
+static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
-} /* TLan_DioRead16 */
+}
-static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr)
+static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
return inl(base_addr + TLAN_DIO_DATA);
-} /* TLan_DioRead32 */
+}
-static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
+static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
@@ -479,7 +483,7 @@ static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
-static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
+static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
@@ -489,16 +493,16 @@ static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
-static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
+static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
}
-#define TLan_ClearBit( bit, port ) outb_p(inb_p(port) & ~bit, port)
-#define TLan_GetBit( bit, port ) ((int) (inb_p(port) & bit))
-#define TLan_SetBit( bit, port ) outb_p(inb_p(port) | bit, port)
+#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port)
+#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit))
+#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port)
/*
* given 6 bytes, view them as 8 6-bit numbers and return the XOR of those
@@ -506,37 +510,37 @@ static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
*
* The original code was:
*
- * u32 xor( u32 a, u32 b ) { return ( ( a && ! b ) || ( ! a && b ) ); }
+ * u32 xor(u32 a, u32 b) { return ((a && !b) || (!a && b)); }
*
- * #define XOR8( a, b, c, d, e, f, g, h ) \
- * xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) )
- * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) )
+ * #define XOR8(a, b, c, d, e, f, g, h) \
+ * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)))))))
+ * #define DA(a, bit) (((u8) a[bit/8]) & ((u8) (1 << bit%8)))
*
- * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
- * DA(a,30), DA(a,36), DA(a,42) );
- * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
- * DA(a,31), DA(a,37), DA(a,43) ) << 1;
- * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
- * DA(a,32), DA(a,38), DA(a,44) ) << 2;
- * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
- * DA(a,33), DA(a,39), DA(a,45) ) << 3;
- * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
- * DA(a,34), DA(a,40), DA(a,46) ) << 4;
- * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
- * DA(a,35), DA(a,41), DA(a,47) ) << 5;
+ * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
+ * DA(a,30), DA(a,36), DA(a,42));
+ * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
+ * DA(a,31), DA(a,37), DA(a,43)) << 1;
+ * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
+ * DA(a,32), DA(a,38), DA(a,44)) << 2;
+ * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
+ * DA(a,33), DA(a,39), DA(a,45)) << 3;
+ * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
+ * DA(a,34), DA(a,40), DA(a,46)) << 4;
+ * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
+ * DA(a,35), DA(a,41), DA(a,47)) << 5;
*
*/
-static inline u32 TLan_HashFunc( const u8 *a )
+static inline u32 tlan_hash_func(const u8 *a)
{
- u8 hash;
+ u8 hash;
- hash = (a[0]^a[3]); /* & 077 */
- hash ^= ((a[0]^a[3])>>6); /* & 003 */
- hash ^= ((a[1]^a[4])<<2); /* & 074 */
- hash ^= ((a[1]^a[4])>>4); /* & 017 */
- hash ^= ((a[2]^a[5])<<4); /* & 060 */
- hash ^= ((a[2]^a[5])>>2); /* & 077 */
+ hash = (a[0]^a[3]); /* & 077 */
+ hash ^= ((a[0]^a[3])>>6); /* & 003 */
+ hash ^= ((a[1]^a[4])<<2); /* & 074 */
+ hash ^= ((a[1]^a[4])>>4); /* & 017 */
+ hash ^= ((a[2]^a[5])<<4); /* & 060 */
+ hash ^= ((a[2]^a[5])>>2); /* & 077 */
- return hash & 077;
+ return hash & 077;
}
#endif
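The folded tlan_hash_func() above is just the XOR8/DA computation from the comment, with the 48 address bits paired byte-wise (byte 0 with 3, 1 with 4, 2 with 5). A minimal userspace sketch, not part of this patch and with purely illustrative names, that checks the two formulations against each other:

#include <stdio.h>
#include <stdint.h>

static unsigned int hash_folded(const uint8_t *a)
{
	uint8_t hash;

	hash  =  (a[0] ^ a[3]);		/* & 077 */
	hash ^= ((a[0] ^ a[3]) >> 6);	/* & 003 */
	hash ^= ((a[1] ^ a[4]) << 2);	/* & 074 */
	hash ^= ((a[1] ^ a[4]) >> 4);	/* & 017 */
	hash ^= ((a[2] ^ a[5]) << 4);	/* & 060 */
	hash ^= ((a[2] ^ a[5]) >> 2);	/* & 077 */

	return hash & 077;
}

static unsigned int hash_bitwise(const uint8_t *a)
{
	unsigned int hash = 0;
	int bit;

	/* View the 48 address bits as 8 groups of 6 and XOR the groups,
	 * which is exactly what the XOR8()/DA() macros spell out. */
	for (bit = 0; bit < 48; bit++)
		hash ^= ((a[bit / 8] >> (bit % 8)) & 1) << (bit % 6);

	return hash & 077;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x12, 0x34, 0x56 };

	printf("folded=%02o bitwise=%02o\n",
	       hash_folded(mac), hash_bitwise(mac));
	return 0;
}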
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 91e6c78271a3..4786497de03e 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -657,8 +657,9 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
#ifndef PCMCIA
/* finish figuring the shared RAM address */
if (cardpresent == TR_ISA) {
- static __u32 ram_bndry_mask[] =
- { 0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000 };
+ static const __u32 ram_bndry_mask[] = {
+ 0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000
+ };
__u32 new_base, rrr_32, chk_base, rbm;
rrr_32=readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index c78a50586c1d..b13c6b040be3 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -964,7 +964,7 @@ static void de_set_media (struct de_private *de)
dw32(MacMode, macmode);
}
-static void de_next_media (struct de_private *de, u32 *media,
+static void de_next_media (struct de_private *de, const u32 *media,
unsigned int n_media)
{
unsigned int i;
@@ -1008,10 +1008,10 @@ static void de21040_media_timer (unsigned long data)
return;
if (de->media_type == DE_MEDIA_AUI) {
- u32 next_state = DE_MEDIA_TP;
+ static const u32 next_state = DE_MEDIA_TP;
de_next_media(de, &next_state, 1);
} else {
- u32 next_state = DE_MEDIA_AUI;
+ static const u32 next_state = DE_MEDIA_AUI;
de_next_media(de, &next_state, 1);
}
@@ -1136,13 +1136,19 @@ static void de21041_media_timer (unsigned long data)
* simply resets the PHY and reloads the current media settings.
*/
if (de->media_type == DE_MEDIA_AUI) {
- u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
+ static const u32 next_states[] = {
+ DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
+ };
de_next_media(de, next_states, ARRAY_SIZE(next_states));
} else if (de->media_type == DE_MEDIA_BNC) {
- u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
+ static const u32 next_states[] = {
+ DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
+ };
de_next_media(de, next_states, ARRAY_SIZE(next_states));
} else {
- u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
+ static const u32 next_states[] = {
+ DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
+ };
de_next_media(de, next_states, ARRAY_SIZE(next_states));
}
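The de2104x hunks turn the per-call media tables into static const arrays: the table is emitted once into read-only data instead of being rebuilt on the stack each time the media timer fires, and it can be passed directly to de_next_media() now that the parameter is const. A trivial standalone sketch of the idiom (illustrative names, not from the driver):

#include <stdio.h>

static void walk_states(const unsigned int *states, unsigned int n)
{
	while (n--)
		printf("%u ", *states++);
	printf("\n");
}

static void media_timer(void)
{
	/* Emitted once into read-only data; nothing is rebuilt on the
	 * stack each time the timer runs. */
	static const unsigned int next_states[] = { 2, 1, 0 };

	walk_states(next_states, sizeof(next_states) / sizeof(next_states[0]));
}

int main(void)
{
	media_timer();
	return 0;
}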
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index a9f7d5d1a269..7064e035757a 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -688,9 +688,6 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
DMFE_DBUG(0, "dmfe_start_xmit", 0);
- /* Resource flag check */
- netif_stop_queue(dev);
-
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
pr_err("big packet = %d\n", (u16)skb->len);
@@ -698,6 +695,9 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ /* Resource flag check */
+ netif_stop_queue(dev);
+
spin_lock_irqsave(&db->lock, flags);
/* No Tx resource check, it never happens normally */
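The dmfe reordering matters because returning NETDEV_TX_OK after dropping an oversized packet while the queue is already stopped would leave the queue stopped with no TX completion left to wake it. A minimal kernel-style sketch of the resulting ordering (hypothetical driver: example_start_xmit and EXAMPLE_MAX_PACKET_SIZE are illustrative names, and the real dmfe re-wakes the queue from its descriptor accounting):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define EXAMPLE_MAX_PACKET_SIZE 1514	/* illustrative limit */

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/* Drop bad packets before touching the queue state: NETDEV_TX_OK
	 * means "consumed", so nothing below runs for this skb. */
	if (skb->len > EXAMPLE_MAX_PACKET_SIZE) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Only now stop the queue; the TX completion for the descriptor
	 * queued below is what eventually calls netif_wake_queue(). */
	netif_stop_queue(dev);

	/* ... fill a descriptor, kick the hardware ... */

	return NETDEV_TX_OK;
}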
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 2c39f2591216..5c01e260f1ba 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1302,17 +1302,18 @@ static const struct net_device_ops tulip_netdev_ops = {
#endif
};
+DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
+ { },
+};
+
static int __devinit tulip_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct tulip_private *tp;
/* See note below on the multiport cards. */
static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
- static struct pci_device_id early_486_chipsets[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
- { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
- { },
- };
static int last_irq;
static int multiport_cnt; /* For four-port boards w/one EEPROM */
int i, irq;
@@ -1682,7 +1683,9 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
tp->full_duplex_lock = 1;
if (tulip_media_cap[tp->default_port] & MediaIsMII) {
- u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
+ static const u16 media2advert[] = {
+ 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
+ };
tp->mii_advertise = media2advert[tp->default_port - 9];
tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 55f3a3e667a9..f5e9ac00a07b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -34,6 +34,8 @@
* Modifications for 2.3.99-pre5 kernel.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "tun"
#define DRV_VERSION "1.6"
#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
@@ -76,11 +78,27 @@
#ifdef TUN_DEBUG
static int debug;
-#define DBG if(tun->debug)printk
-#define DBG1 if(debug==2)printk
+#define tun_debug(level, tun, fmt, args...) \
+do { \
+ if (tun->debug) \
+ netdev_printk(level, tun->dev, fmt, ##args); \
+} while (0)
+#define DBG1(level, fmt, args...) \
+do { \
+ if (debug == 2) \
+ printk(level fmt, ##args); \
+} while (0)
#else
-#define DBG( a... )
-#define DBG1( a... )
+#define tun_debug(level, tun, fmt, args...) \
+do { \
+ if (0) \
+ netdev_printk(level, tun->dev, fmt, ##args); \
+} while (0)
+#define DBG1(level, fmt, args...) \
+do { \
+ if (0) \
+ printk(level fmt, ##args); \
+} while (0)
#endif
#define FLT_EXACT_COUNT 8
@@ -205,7 +223,7 @@ static void tun_put(struct tun_struct *tun)
tun_detach(tfile->tun);
}
-/* TAP filterting */
+/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
int n = ether_crc(ETH_ALEN, addr) >> 26;
@@ -360,7 +378,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
- DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len);
+ tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
/* Drop packet if interface is not attached */
if (!tun->tfile)
@@ -499,7 +517,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
sk = tun->socket.sk;
- DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
+ tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
poll_wait(file, &tun->wq.wait, wait);
@@ -690,7 +708,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
if (!tun)
return -EBADFD;
- DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);
+ tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
result = tun_get_user(tun, iv, iov_length(iv, count),
file->f_flags & O_NONBLOCK);
@@ -739,7 +757,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
else if (sinfo->gso_type & SKB_GSO_UDP)
gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
else {
- printk(KERN_ERR "tun: unexpected GSO type: "
+ pr_err("unexpected GSO type: "
"0x%x, gso_size %d, hdr_len %d\n",
sinfo->gso_type, gso.gso_size,
gso.hdr_len);
@@ -757,7 +775,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- gso.csum_start = skb->csum_start - skb_headroom(skb);
+ gso.csum_start = skb_checksum_start_offset(skb);
gso.csum_offset = skb->csum_offset;
} /* else everything is zero */
@@ -786,7 +804,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
struct sk_buff *skb;
ssize_t ret = 0;
- DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
+ tun_debug(KERN_INFO, tun, "tun_chr_read\n");
add_wait_queue(&tun->wq.wait, &wait);
while (len) {
@@ -1083,7 +1101,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
device_create_file(&tun->dev->dev, &dev_attr_owner) ||
device_create_file(&tun->dev->dev, &dev_attr_group))
- printk(KERN_ERR "Failed to create tun sysfs files\n");
+ pr_err("Failed to create tun sysfs files\n");
sk->sk_destruct = tun_sock_destruct;
@@ -1092,7 +1110,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
goto failed;
}
- DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name);
+ tun_debug(KERN_INFO, tun, "tun_set_iff\n");
if (ifr->ifr_flags & IFF_NO_PI)
tun->flags |= TUN_NO_PI;
@@ -1129,7 +1147,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
static int tun_get_iff(struct net *net, struct tun_struct *tun,
struct ifreq *ifr)
{
- DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name);
+ tun_debug(KERN_INFO, tun, "tun_get_iff\n");
strcpy(ifr->ifr_name, tun->dev->name);
@@ -1142,7 +1160,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
* privs required. */
static int set_offload(struct net_device *dev, unsigned long arg)
{
- unsigned int old_features, features;
+ u32 old_features, features;
old_features = dev->features;
/* Unset features, set them as we chew on the arg. */
@@ -1229,7 +1247,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
if (!tun)
goto unlock;
- DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
+ tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);
ret = 0;
switch (cmd) {
@@ -1249,8 +1267,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
else
tun->flags &= ~TUN_NOCHECKSUM;
- DBG(KERN_INFO "%s: checksum %s\n",
- tun->dev->name, arg ? "disabled" : "enabled");
+ tun_debug(KERN_INFO, tun, "checksum %s\n",
+ arg ? "disabled" : "enabled");
break;
case TUNSETPERSIST:
@@ -1260,33 +1278,34 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
else
tun->flags &= ~TUN_PERSIST;
- DBG(KERN_INFO "%s: persist %s\n",
- tun->dev->name, arg ? "enabled" : "disabled");
+ tun_debug(KERN_INFO, tun, "persist %s\n",
+ arg ? "enabled" : "disabled");
break;
case TUNSETOWNER:
/* Set owner of the device */
tun->owner = (uid_t) arg;
- DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner);
+ tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
break;
case TUNSETGROUP:
/* Set group of the device */
tun->group= (gid_t) arg;
- DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group);
+ tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
break;
case TUNSETLINK:
/* Only allow setting the type when the interface is down */
if (tun->dev->flags & IFF_UP) {
- DBG(KERN_INFO "%s: Linktype set failed because interface is up\n",
- tun->dev->name);
+ tun_debug(KERN_INFO, tun,
+ "Linktype set failed because interface is up\n");
ret = -EBUSY;
} else {
tun->dev->type = (int) arg;
- DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type);
+ tun_debug(KERN_INFO, tun, "linktype set to %d\n",
+ tun->dev->type);
ret = 0;
}
break;
@@ -1309,7 +1328,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
case SIOCGIFHWADDR:
- /* Get hw addres */
+ /* Get hw address */
memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
ifr.ifr_hwaddr.sa_family = tun->dev->type;
if (copy_to_user(argp, &ifr, ifreq_len))
@@ -1318,8 +1337,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case SIOCSIFHWADDR:
/* Set hw address */
- DBG(KERN_DEBUG "%s: set hw address: %pM\n",
- tun->dev->name, ifr.ifr_hwaddr.sa_data);
+ tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
+ ifr.ifr_hwaddr.sa_data);
ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
break;
@@ -1433,7 +1452,7 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
if (!tun)
return -EBADFD;
- DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on);
+ tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);
if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
goto out;
@@ -1455,7 +1474,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
{
struct tun_file *tfile;
- DBG1(KERN_INFO "tunX: tun_chr_open\n");
+ DBG1(KERN_INFO, "tunX: tun_chr_open\n");
tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
if (!tfile)
@@ -1476,7 +1495,7 @@ static int tun_chr_close(struct inode *inode, struct file *file)
if (tun) {
struct net_device *dev = tun->dev;
- DBG(KERN_INFO "%s: tun_chr_close\n", dev->name);
+ tun_debug(KERN_INFO, tun, "tun_chr_close\n");
__tun_detach(tun);
@@ -1607,18 +1626,18 @@ static int __init tun_init(void)
{
int ret = 0;
- printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
- printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT);
+ pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ pr_info("%s\n", DRV_COPYRIGHT);
ret = rtnl_link_register(&tun_link_ops);
if (ret) {
- printk(KERN_ERR "tun: Can't register link_ops\n");
+ pr_err("Can't register link_ops\n");
goto err_linkops;
}
ret = misc_register(&tun_miscdev);
if (ret) {
- printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
+ pr_err("Can't register misc device %d\n", TUN_MINOR);
goto err_misc;
}
return 0;
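The tun_debug()/DBG1() macros above replace the old "#define DBG if(tun->debug)printk" style; the useful detail is that the disabled variants keep the call inside an "if (0)", so format strings and arguments stay type-checked even when debugging is compiled out, while the compiler drops the dead call. A standalone sketch of the idiom (userspace, printf() instead of netdev_printk(); names are illustrative):

#include <stdio.h>

#define MY_DEBUG 0		/* flip to 1 to enable the messages */

#if MY_DEBUG
#define my_debug(fmt, args...)				\
do {							\
	printf("demo: " fmt, ##args);			\
} while (0)
#else
/* Arguments are still parsed and type-checked, then discarded. */
#define my_debug(fmt, args...)				\
do {							\
	if (0)						\
		printf("demo: " fmt, ##args);		\
} while (0)
#endif

int main(void)
{
	int value = 42;

	my_debug("value is %d\n", value);	/* compiled out when MY_DEBUG is 0 */
	return 0;
}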
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 5b83c3f35f47..7fa5ec2de942 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -123,12 +123,11 @@ static const int multicast_filter_limit = 32;
#include <linux/in6.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
-#include <generated/utsrelease.h>
#include "typhoon.h"
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
-MODULE_VERSION(UTS_RELEASE);
+MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
@@ -1004,7 +1003,6 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->version, UTS_RELEASE);
strcpy(info->bus_info, pci_name(pci_dev));
}
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index acbdab3d66ca..ef041057d9d3 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -28,6 +28,7 @@
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <asm/uaccess.h>
@@ -2031,7 +2032,7 @@ static void ucc_geth_set_multi(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
/* Only support group multicast for now.
*/
- if (!(ha->addr[0] & 1))
+ if (!is_multicast_ether_addr(ha->addr))
continue;
/* Ask CPM to run CRC and set bit in
@@ -3739,7 +3740,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
#endif
};
-static int ucc_geth_probe(struct platform_device* ofdev, const struct of_device_id *match)
+static int ucc_geth_probe(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
@@ -3985,7 +3986,7 @@ static struct of_device_id ucc_geth_match[] = {
MODULE_DEVICE_TABLE(of, ucc_geth_match);
-static struct of_platform_driver ucc_geth_driver = {
+static struct platform_driver ucc_geth_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
@@ -4007,14 +4008,14 @@ static int __init ucc_geth_init(void)
memcpy(&(ugeth_info[i]), &ugeth_primary_info,
sizeof(ugeth_primary_info));
- ret = of_register_platform_driver(&ucc_geth_driver);
+ ret = platform_driver_register(&ucc_geth_driver);
return ret;
}
static void __exit ucc_geth_exit(void)
{
- of_unregister_platform_driver(&ucc_geth_driver);
+ platform_driver_unregister(&ucc_geth_driver);
}
module_init(ucc_geth_init);
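The ucc_geth hunks are part of the of_platform_driver to platform_driver migration: probe() loses the of_device_id argument (match data is reachable through the device's of_node) and registration goes through platform_driver_register(). A skeleton of the target shape for a hypothetical device (all names and the compatible string are illustrative, not taken from ucc_geth):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* Resources and match data come from pdev->dev.of_node now. */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example-device", },	/* illustrative */
	{},
};
MODULE_DEVICE_TABLE(of, example_match);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.owner = THIS_MODULE,
		.of_match_table = example_match,
	},
	.probe = example_probe,
	.remove = example_remove,
};

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");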
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 52ffabe6db0e..6f600cced6e1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -196,6 +196,25 @@ config USB_NET_CDC_EEM
IEEE 802 "local assignment" bit is set in the address, a "usbX"
name is used instead.
+config USB_NET_CDC_NCM
+ tristate "CDC NCM support"
+ depends on USB_USBNET
+ default y
+ help
+ This driver provides support for CDC NCM (Network Control Model
+ Device USB Class Specification). The CDC NCM specification is
+ available from <http://www.usb.org/>.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module.
+
+ This driver should work with at least the following devices:
+ * ST-Ericsson M700 LTE FDD/TDD Mobile Broadband Modem (ref. design)
+ * ST-Ericsson M5730 HSPA+ Mobile Broadband Modem (reference design)
+ * ST-Ericsson M570 HSPA+ Mobile Broadband Modem (reference design)
+ * ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
+ * Ericsson F5521gw Mobile Broadband Module
+
config USB_NET_DM9601
tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index a19b0259ae16..cac170301187 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -26,4 +26,5 @@ obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
obj-$(CONFIG_USB_IPHETH) += ipheth.o
obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
+obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index aea4645be7f6..6140b56cce53 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1508,6 +1508,10 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x0b95, 0x1780),
.driver_info = (unsigned long) &ax88178_info,
}, {
+ // Logitec LAN-GTJ/U2A
+ USB_DEVICE (0x0789, 0x0160),
+ .driver_info = (unsigned long) &ax88178_info,
+}, {
// Linksys USB200M Rev 2
USB_DEVICE (0x13b1, 0x0018),
.driver_info = (unsigned long) &ax88772_info,
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 109751bad3bb..f967913e11bc 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -328,13 +328,13 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
static const char ifname[] = "usbpn%d";
const struct usb_cdc_union_desc *union_header = NULL;
- const struct usb_cdc_header_desc *phonet_header = NULL;
const struct usb_host_interface *data_desc;
struct usb_interface *data_intf;
struct usb_device *usbdev = interface_to_usbdev(intf);
struct net_device *dev;
struct usbpn_dev *pnd;
u8 *data;
+ int phonet = 0;
int len, err;
data = intf->altsetting->extra;
@@ -355,10 +355,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
(struct usb_cdc_union_desc *)data;
break;
case 0xAB:
- if (phonet_header || dlen < 5)
- break;
- phonet_header =
- (struct usb_cdc_header_desc *)data;
+ phonet = 1;
break;
}
}
@@ -366,7 +363,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
len -= dlen;
}
- if (!union_header || !phonet_header)
+ if (!union_header || !phonet)
return -EINVAL;
data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0);
@@ -392,7 +389,6 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
pnd = netdev_priv(dev);
SET_NETDEV_DEV(dev, &intf->dev);
- netif_stop_queue(dev);
pnd->dev = dev;
pnd->usb = usb_get_dev(usbdev);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index b3fe0de40469..9a60e415d76b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -99,9 +99,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
*/
buf = dev->udev->actconfig->extra;
len = dev->udev->actconfig->extralen;
- if (len)
- dev_dbg(&intf->dev,
- "CDC descriptors on config\n");
+ dev_dbg(&intf->dev, "CDC descriptors on config\n");
}
/* Maybe CDC descriptors are after the endpoint? This bug has
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
new file mode 100644
index 000000000000..7113168473cf
--- /dev/null
+++ b/drivers/net/usb/cdc_ncm.c
@@ -0,0 +1,1287 @@
+/*
+ * cdc_ncm.c
+ *
+ * Copyright (C) ST-Ericsson 2010-2011
+ * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
+ * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
+ *
+ * USB Host Driver for Network Control Model (NCM)
+ * http://www.usb.org/developers/devclass_docs/NCM10.zip
+ *
+ * The NCM encoding, decoding and initialization logic
+ * derives from FreeBSD 8.x if_cdce.c and if_cdcereg.h.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose this file to be licensed under the terms
+ * of the GNU General Public License (GPL) Version 2 or the 2-clause
+ * BSD license listed below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/ctype.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/usb.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/cdc.h>
+
+#define DRIVER_VERSION "7-Feb-2011"
+
+/* CDC NCM subclass 3.2.1 */
+#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
+
+/* Maximum NTB length */
+#define CDC_NCM_NTB_MAX_SIZE_TX 16384 /* bytes */
+#define CDC_NCM_NTB_MAX_SIZE_RX 16384 /* bytes */
+
+/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
+#define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */
+
+#define CDC_NCM_MIN_TX_PKT 512 /* bytes */
+
+/* Default value for MaxDatagramSize */
+#define CDC_NCM_MAX_DATAGRAM_SIZE 2048 /* bytes */
+
+/*
+ * Maximum amount of datagrams in NCM Datagram Pointer Table, not counting
+ * the last NULL entry. Any additional datagrams in NTB would be discarded.
+ */
+#define CDC_NCM_DPT_DATAGRAMS_MAX 32
+
+/* Maximum amount of IN datagrams in NTB */
+#define CDC_NCM_DPT_DATAGRAMS_IN_MAX 0 /* unlimited */
+
+/* Restart the timer if the number of datagrams is less than the given value */
+#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
+
+/* The following macro defines the minimum header space */
+#define CDC_NCM_MIN_HDR_SIZE \
+ (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
+ (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
+
+struct cdc_ncm_data {
+ struct usb_cdc_ncm_nth16 nth16;
+ struct usb_cdc_ncm_ndp16 ndp16;
+ struct usb_cdc_ncm_dpe16 dpe16[CDC_NCM_DPT_DATAGRAMS_MAX + 1];
+};
+
+struct cdc_ncm_ctx {
+ struct cdc_ncm_data rx_ncm;
+ struct cdc_ncm_data tx_ncm;
+ struct usb_cdc_ncm_ntb_parameters ncm_parm;
+ struct timer_list tx_timer;
+
+ const struct usb_cdc_ncm_desc *func_desc;
+ const struct usb_cdc_header_desc *header_desc;
+ const struct usb_cdc_union_desc *union_desc;
+ const struct usb_cdc_ether_desc *ether_desc;
+
+ struct net_device *netdev;
+ struct usb_device *udev;
+ struct usb_host_endpoint *in_ep;
+ struct usb_host_endpoint *out_ep;
+ struct usb_host_endpoint *status_ep;
+ struct usb_interface *intf;
+ struct usb_interface *control;
+ struct usb_interface *data;
+
+ struct sk_buff *tx_curr_skb;
+ struct sk_buff *tx_rem_skb;
+
+ spinlock_t mtx;
+
+ u32 tx_timer_pending;
+ u32 tx_curr_offset;
+ u32 tx_curr_last_offset;
+ u32 tx_curr_frame_num;
+ u32 rx_speed;
+ u32 tx_speed;
+ u32 rx_max;
+ u32 tx_max;
+ u32 max_datagram_size;
+ u16 tx_max_datagrams;
+ u16 tx_remainder;
+ u16 tx_modulus;
+ u16 tx_ndp_modulus;
+ u16 tx_seq;
+ u16 connected;
+ u8 data_claimed;
+ u8 control_claimed;
+};
+
+static void cdc_ncm_tx_timeout(unsigned long arg);
+static const struct driver_info cdc_ncm_info;
+static struct usb_driver cdc_ncm_driver;
+static struct ethtool_ops cdc_ncm_ethtool_ops;
+
+static const struct usb_device_id cdc_devs[] = {
+ { USB_INTERFACE_INFO(USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_ncm_info,
+ },
+ {
+ },
+};
+
+MODULE_DEVICE_TABLE(usb, cdc_devs);
+
+static void
+cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ strncpy(info->driver, dev->driver_name, sizeof(info->driver));
+ strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strncpy(info->fw_version, dev->driver_info->description,
+ sizeof(info->fw_version));
+ usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static int
+cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req,
+ void *data, u16 flags, u16 *actlen, u16 timeout)
+{
+ int err;
+
+ err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ?
+ usb_rcvctrlpipe(ctx->udev, 0) :
+ usb_sndctrlpipe(ctx->udev, 0),
+ req->bNotificationType, req->bmRequestType,
+ req->wValue,
+ req->wIndex, data,
+ req->wLength, timeout);
+
+ if (err < 0) {
+ if (actlen)
+ *actlen = 0;
+ return err;
+ }
+
+ if (actlen)
+ *actlen = err;
+
+ return 0;
+}
+
+static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
+{
+ struct usb_cdc_notification req;
+ u32 val;
+ u8 flags;
+ u8 iface_no;
+ int err;
+ u16 ntb_fmt_supported;
+
+ iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS;
+ req.wValue = 0;
+ req.wIndex = cpu_to_le16(iface_no);
+ req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm));
+
+ err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000);
+ if (err) {
+ pr_debug("failed GET_NTB_PARAMETERS\n");
+ return 1;
+ }
+
+ /* read correct set of parameters according to device mode */
+ ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
+ ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
+ ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
+ ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
+ ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+ /* devices prior to NCM Errata shall set this field to zero */
+ ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
+ ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
+
+ if (ctx->func_desc != NULL)
+ flags = ctx->func_desc->bmNetworkCapabilities;
+ else
+ flags = 0;
+
+ pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
+ "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
+ "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
+ ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
+ ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
+
+ /* max count of tx datagrams */
+ if ((ctx->tx_max_datagrams == 0) ||
+ (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
+ ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
+
+ /* verify maximum size of received NTB in bytes */
+ if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
+ pr_debug("Using min receive length=%d\n",
+ USB_CDC_NCM_NTB_MIN_IN_SIZE);
+ ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
+ }
+
+ if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
+ pr_debug("Using default maximum receive length=%d\n",
+ CDC_NCM_NTB_MAX_SIZE_RX);
+ ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
+ }
+
+ /* inform device about NTB input size changes */
+ if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+ USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
+ req.wValue = 0;
+ req.wIndex = cpu_to_le16(iface_no);
+
+ if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
+ struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
+
+ req.wLength = 8;
+ ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+ ndp_in_sz.wNtbInMaxDatagrams =
+ cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
+ ndp_in_sz.wReserved = 0;
+ err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
+ 1000);
+ } else {
+ __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+
+ req.wLength = 4;
+ err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
+ NULL, 1000);
+ }
+
+ if (err)
+ pr_debug("Setting NTB Input Size failed\n");
+ }
+
+ /* verify maximum size of transmitted NTB in bytes */
+ if ((ctx->tx_max <
+ (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
+ (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
+ pr_debug("Using default maximum transmit length=%d\n",
+ CDC_NCM_NTB_MAX_SIZE_TX);
+ ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+ }
+
+ /*
+ * verify that the structure alignment is:
+ * - power of two
+ * - not greater than the maximum transmit length
+ * - not less than four bytes
+ */
+ val = ctx->tx_ndp_modulus;
+
+ if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
+ (val != ((-val) & val)) || (val >= ctx->tx_max)) {
+ pr_debug("Using default alignment: 4 bytes\n");
+ ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
+ }
+
+ /*
+ * verify that the payload alignment is:
+ * - power of two
+ * - not greater than the maximum transmit length
+ * - not less than four bytes
+ */
+ val = ctx->tx_modulus;
+
+ if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
+ (val != ((-val) & val)) || (val >= ctx->tx_max)) {
+ pr_debug("Using default transmit modulus: 4 bytes\n");
+ ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
+ }
+
+ /* verify the payload remainder */
+ if (ctx->tx_remainder >= ctx->tx_modulus) {
+ pr_debug("Using default transmit remainder: 0 bytes\n");
+ ctx->tx_remainder = 0;
+ }
+
+ /* adjust TX-remainder according to NCM specification. */
+ ctx->tx_remainder = ((ctx->tx_remainder - ETH_HLEN) &
+ (ctx->tx_modulus - 1));
+
+ /* additional configuration */
+
+ /* set CRC Mode */
+ if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+ USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_SET_CRC_MODE;
+ req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
+ req.wIndex = cpu_to_le16(iface_no);
+ req.wLength = 0;
+
+ err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+ if (err)
+ pr_debug("Setting CRC mode off failed\n");
+ }
+
+ /* set NTB format, if both formats are supported */
+ if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+ USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
+ req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
+ req.wIndex = cpu_to_le16(iface_no);
+ req.wLength = 0;
+
+ err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+ if (err)
+ pr_debug("Setting NTB format to 16-bit failed\n");
+ }
+
+ ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
+
+ /* set Max Datagram Size (MTU) */
+ if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
+ __le16 max_datagram_size;
+ u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
+ USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
+ req.wValue = 0;
+ req.wIndex = cpu_to_le16(iface_no);
+ req.wLength = cpu_to_le16(2);
+
+ err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
+ 1000);
+ if (err) {
+ pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
+ CDC_NCM_MIN_DATAGRAM_SIZE);
+ } else {
+ ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+ /* Check Eth descriptor value */
+ if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
+ if (ctx->max_datagram_size > eth_max_sz)
+ ctx->max_datagram_size = eth_max_sz;
+ } else {
+ if (ctx->max_datagram_size >
+ CDC_NCM_MAX_DATAGRAM_SIZE)
+ ctx->max_datagram_size =
+ CDC_NCM_MAX_DATAGRAM_SIZE;
+ }
+
+ if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
+ ctx->max_datagram_size =
+ CDC_NCM_MIN_DATAGRAM_SIZE;
+
+ /* if value changed, update device */
+ req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+ USB_RECIP_INTERFACE;
+ req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
+ req.wValue = 0;
+ req.wIndex = cpu_to_le16(iface_no);
+ req.wLength = 2;
+ max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+
+ err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
+ 0, NULL, 1000);
+ if (err)
+ pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
+ }
+
+ }
+
+ if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
+ ctx->netdev->mtu = ctx->max_datagram_size - ETH_HLEN;
+
+ return 0;
+}
+
+static void
+cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
+{
+ struct usb_host_endpoint *e;
+ u8 ep;
+
+ for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
+
+ e = intf->cur_altsetting->endpoint + ep;
+ switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_INT:
+ if (usb_endpoint_dir_in(&e->desc)) {
+ if (ctx->status_ep == NULL)
+ ctx->status_ep = e;
+ }
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ if (usb_endpoint_dir_in(&e->desc)) {
+ if (ctx->in_ep == NULL)
+ ctx->in_ep = e;
+ } else {
+ if (ctx->out_ep == NULL)
+ ctx->out_ep = e;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
+{
+ if (ctx == NULL)
+ return;
+
+ del_timer_sync(&ctx->tx_timer);
+
+ if (ctx->data_claimed) {
+ usb_set_intfdata(ctx->data, NULL);
+ usb_driver_release_interface(driver_of(ctx->intf), ctx->data);
+ }
+
+ if (ctx->control_claimed) {
+ usb_set_intfdata(ctx->control, NULL);
+ usb_driver_release_interface(driver_of(ctx->intf),
+ ctx->control);
+ }
+
+ if (ctx->tx_rem_skb != NULL) {
+ dev_kfree_skb_any(ctx->tx_rem_skb);
+ ctx->tx_rem_skb = NULL;
+ }
+
+ if (ctx->tx_curr_skb != NULL) {
+ dev_kfree_skb_any(ctx->tx_curr_skb);
+ ctx->tx_curr_skb = NULL;
+ }
+
+ kfree(ctx);
+}
+
+static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct cdc_ncm_ctx *ctx;
+ struct usb_driver *driver;
+ u8 *buf;
+ int len;
+ int temp;
+ u8 iface_no;
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL)
+ goto error;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ init_timer(&ctx->tx_timer);
+ spin_lock_init(&ctx->mtx);
+ ctx->netdev = dev->net;
+
+ /* store ctx pointer in device data field */
+ dev->data[0] = (unsigned long)ctx;
+
+ /* get some pointers */
+ driver = driver_of(intf);
+ buf = intf->cur_altsetting->extra;
+ len = intf->cur_altsetting->extralen;
+
+ ctx->udev = dev->udev;
+ ctx->intf = intf;
+
+ /* parse through descriptors associated with control interface */
+ while ((len > 0) && (buf[0] > 2) && (buf[0] <= len)) {
+
+ if (buf[1] != USB_DT_CS_INTERFACE)
+ goto advance;
+
+ switch (buf[2]) {
+ case USB_CDC_UNION_TYPE:
+ if (buf[0] < sizeof(*(ctx->union_desc)))
+ break;
+
+ ctx->union_desc =
+ (const struct usb_cdc_union_desc *)buf;
+
+ ctx->control = usb_ifnum_to_if(dev->udev,
+ ctx->union_desc->bMasterInterface0);
+ ctx->data = usb_ifnum_to_if(dev->udev,
+ ctx->union_desc->bSlaveInterface0);
+ break;
+
+ case USB_CDC_ETHERNET_TYPE:
+ if (buf[0] < sizeof(*(ctx->ether_desc)))
+ break;
+
+ ctx->ether_desc =
+ (const struct usb_cdc_ether_desc *)buf;
+ dev->hard_mtu =
+ le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+
+ if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
+ dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE;
+ else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
+ dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE;
+ break;
+
+ case USB_CDC_NCM_TYPE:
+ if (buf[0] < sizeof(*(ctx->func_desc)))
+ break;
+
+ ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf;
+ break;
+
+ default:
+ break;
+ }
+advance:
+ /* advance to next descriptor */
+ temp = buf[0];
+ buf += temp;
+ len -= temp;
+ }
+
+ /* check if we got everything */
+ if ((ctx->control == NULL) || (ctx->data == NULL) ||
+ (ctx->ether_desc == NULL))
+ goto error;
+
+ /* claim interfaces, if any */
+ if (ctx->data != intf) {
+ temp = usb_driver_claim_interface(driver, ctx->data, dev);
+ if (temp)
+ goto error;
+ ctx->data_claimed = 1;
+ }
+
+ if (ctx->control != intf) {
+ temp = usb_driver_claim_interface(driver, ctx->control, dev);
+ if (temp)
+ goto error;
+ ctx->control_claimed = 1;
+ }
+
+ iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
+
+ /* reset data interface */
+ temp = usb_set_interface(dev->udev, iface_no, 0);
+ if (temp)
+ goto error;
+
+ /* initialize data interface */
+ if (cdc_ncm_setup(ctx))
+ goto error;
+
+ /* configure data interface */
+ temp = usb_set_interface(dev->udev, iface_no, 1);
+ if (temp)
+ goto error;
+
+ cdc_ncm_find_endpoints(ctx, ctx->data);
+ cdc_ncm_find_endpoints(ctx, ctx->control);
+
+ if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) ||
+ (ctx->status_ep == NULL))
+ goto error;
+
+ dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
+
+ usb_set_intfdata(ctx->data, dev);
+ usb_set_intfdata(ctx->control, dev);
+ usb_set_intfdata(ctx->intf, dev);
+
+ temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
+ if (temp)
+ goto error;
+
+ dev_info(&dev->udev->dev, "MAC-Address: "
+ "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ dev->net->dev_addr[0], dev->net->dev_addr[1],
+ dev->net->dev_addr[2], dev->net->dev_addr[3],
+ dev->net->dev_addr[4], dev->net->dev_addr[5]);
+
+ dev->in = usb_rcvbulkpipe(dev->udev,
+ ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ dev->out = usb_sndbulkpipe(dev->udev,
+ ctx->out_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ dev->status = ctx->status_ep;
+ dev->rx_urb_size = ctx->rx_max;
+
+ /*
+ * We should get an event when network connection is "connected" or
+ * "disconnected". Set network connection in "disconnected" state
+ * (carrier is OFF) during attach, so the IP network stack does not
+ * start IPv6 negotiation and more.
+ */
+ netif_carrier_off(dev->net);
+ ctx->tx_speed = ctx->rx_speed = 0;
+ return 0;
+
+error:
+ cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
+ dev->data[0] = 0;
+ dev_info(&dev->udev->dev, "Descriptor failure\n");
+ return -ENODEV;
+}
+
+static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ struct usb_driver *driver;
+
+ if (ctx == NULL)
+ return; /* no setup */
+
+ driver = driver_of(intf);
+
+ usb_set_intfdata(ctx->data, NULL);
+ usb_set_intfdata(ctx->control, NULL);
+ usb_set_intfdata(ctx->intf, NULL);
+
+ /* release interfaces, if any */
+ if (ctx->data_claimed) {
+ usb_driver_release_interface(driver, ctx->data);
+ ctx->data_claimed = 0;
+ }
+
+ if (ctx->control_claimed) {
+ usb_driver_release_interface(driver, ctx->control);
+ ctx->control_claimed = 0;
+ }
+
+ cdc_ncm_free(ctx);
+}
+
+static void cdc_ncm_zero_fill(u8 *ptr, u32 first, u32 end, u32 max)
+{
+ if (first >= max)
+ return;
+ if (first >= end)
+ return;
+ if (end > max)
+ end = max;
+ memset(ptr + first, 0, end - first);
+}
+
+static struct sk_buff *
+cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
+{
+ struct sk_buff *skb_out;
+ u32 rem;
+ u32 offset;
+ u32 last_offset;
+ u16 n = 0;
+ u8 ready2send = 0;
+
+ /* if there is a remaining skb, it gets priority */
+ if (skb != NULL)
+ swap(skb, ctx->tx_rem_skb);
+ else
+ ready2send = 1;
+
+ /*
+ * +----------------+
+ * | skb_out |
+ * +----------------+
+ * ^ offset
+ * ^ last_offset
+ */
+
+ /* check if we are resuming an OUT skb */
+ if (ctx->tx_curr_skb != NULL) {
+ /* pop variables */
+ skb_out = ctx->tx_curr_skb;
+ offset = ctx->tx_curr_offset;
+ last_offset = ctx->tx_curr_last_offset;
+ n = ctx->tx_curr_frame_num;
+
+ } else {
+ /* reset variables */
+ skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
+ if (skb_out == NULL) {
+ if (skb != NULL) {
+ dev_kfree_skb_any(skb);
+ ctx->netdev->stats.tx_dropped++;
+ }
+ goto exit_no_skb;
+ }
+
+ /* make room for NTH and NDP */
+ offset = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
+ ctx->tx_ndp_modulus) +
+ sizeof(struct usb_cdc_ncm_ndp16) +
+ (ctx->tx_max_datagrams + 1) *
+ sizeof(struct usb_cdc_ncm_dpe16);
+
+ /* store last valid offset before alignment */
+ last_offset = offset;
+ /* align first Datagram offset correctly */
+ offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
+ /* zero buffer till the first IP datagram */
+ cdc_ncm_zero_fill(skb_out->data, 0, offset, offset);
+ n = 0;
+ ctx->tx_curr_frame_num = 0;
+ }
+
+ for (; n < ctx->tx_max_datagrams; n++) {
+ /* check if end of transmit buffer is reached */
+ if (offset >= ctx->tx_max) {
+ ready2send = 1;
+ break;
+ }
+ /* compute maximum buffer size */
+ rem = ctx->tx_max - offset;
+
+ if (skb == NULL) {
+ skb = ctx->tx_rem_skb;
+ ctx->tx_rem_skb = NULL;
+
+ /* check for end of skb */
+ if (skb == NULL)
+ break;
+ }
+
+ if (skb->len > rem) {
+ if (n == 0) {
+ /* won't fit, MTU problem? */
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ ctx->netdev->stats.tx_dropped++;
+ } else {
+ /* no room for skb - store for later */
+ if (ctx->tx_rem_skb != NULL) {
+ dev_kfree_skb_any(ctx->tx_rem_skb);
+ ctx->netdev->stats.tx_dropped++;
+ }
+ ctx->tx_rem_skb = skb;
+ skb = NULL;
+ ready2send = 1;
+ }
+ break;
+ }
+
+ memcpy(((u8 *)skb_out->data) + offset, skb->data, skb->len);
+
+ ctx->tx_ncm.dpe16[n].wDatagramLength = cpu_to_le16(skb->len);
+ ctx->tx_ncm.dpe16[n].wDatagramIndex = cpu_to_le16(offset);
+
+ /* update offset */
+ offset += skb->len;
+
+ /* store last valid offset before alignment */
+ last_offset = offset;
+
+ /* align offset correctly */
+ offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
+
+ /* zero padding */
+ cdc_ncm_zero_fill(skb_out->data, last_offset, offset,
+ ctx->tx_max);
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
+
+ /* free up any dangling skb */
+ if (skb != NULL) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ ctx->netdev->stats.tx_dropped++;
+ }
+
+ ctx->tx_curr_frame_num = n;
+
+ if (n == 0) {
+ /* wait for more frames */
+ /* push variables */
+ ctx->tx_curr_skb = skb_out;
+ ctx->tx_curr_offset = offset;
+ ctx->tx_curr_last_offset = last_offset;
+ goto exit_no_skb;
+
+ } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
+ /* wait for more frames */
+ /* push variables */
+ ctx->tx_curr_skb = skb_out;
+ ctx->tx_curr_offset = offset;
+ ctx->tx_curr_last_offset = last_offset;
+ /* set the pending count */
+ if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
+ ctx->tx_timer_pending = 2;
+ goto exit_no_skb;
+
+ } else {
+ /* frame goes out */
+ /* variables will be reset at next call */
+ }
+
+ /* check for overflow */
+ if (last_offset > ctx->tx_max)
+ last_offset = ctx->tx_max;
+
+ /* revert offset */
+ offset = last_offset;
+
+ /*
+ * If the collected data size is less than or equal to CDC_NCM_MIN_TX_PKT
+ * bytes, send the buffer as is. Beyond that, it is more efficient for a
+ * USB high-speed mobile device with a DMA engine to receive a full-size
+ * NTB than to cancel the DMA transfer and receive a short packet.
+ */
+ if (offset > CDC_NCM_MIN_TX_PKT)
+ offset = ctx->tx_max;
+
+ /* final zero padding */
+ cdc_ncm_zero_fill(skb_out->data, last_offset, offset, ctx->tx_max);
+
+ /* store last offset */
+ last_offset = offset;
+
+ if ((last_offset < ctx->tx_max) && ((last_offset %
+ le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) {
+ /* force short packet */
+ *(((u8 *)skb_out->data) + last_offset) = 0;
+ last_offset++;
+ }
+
+ /* zero the rest of the DPEs plus the last NULL entry */
+ for (; n <= CDC_NCM_DPT_DATAGRAMS_MAX; n++) {
+ ctx->tx_ncm.dpe16[n].wDatagramLength = 0;
+ ctx->tx_ncm.dpe16[n].wDatagramIndex = 0;
+ }
+
+ /* fill out 16-bit NTB header */
+ ctx->tx_ncm.nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+ ctx->tx_ncm.nth16.wHeaderLength =
+ cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
+ ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
+ ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
+ ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
+ ctx->tx_ndp_modulus);
+
+ memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
+ ctx->tx_seq++;
+
+ /* fill out 16-bit NDP table */
+ ctx->tx_ncm.ndp16.dwSignature =
+ cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN);
+ rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
+ sizeof(struct usb_cdc_ncm_dpe16));
+ ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
+ ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
+
+ memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
+ &(ctx->tx_ncm.ndp16),
+ sizeof(ctx->tx_ncm.ndp16));
+
+ memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
+ sizeof(ctx->tx_ncm.ndp16),
+ &(ctx->tx_ncm.dpe16),
+ (ctx->tx_curr_frame_num + 1) *
+ sizeof(struct usb_cdc_ncm_dpe16));
+
+ /* set frame length */
+ skb_put(skb_out, last_offset);
+
+ /* return skb */
+ ctx->tx_curr_skb = NULL;
+ return skb_out;
+
+exit_no_skb:
+ return NULL;
+}
+
+static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
+{
+ /* start timer, if not already started */
+ if (timer_pending(&ctx->tx_timer) == 0) {
+ ctx->tx_timer.function = &cdc_ncm_tx_timeout;
+ ctx->tx_timer.data = (unsigned long)ctx;
+ ctx->tx_timer.expires = jiffies + ((HZ + 999) / 1000);
+ add_timer(&ctx->tx_timer);
+ }
+}
+
+static void cdc_ncm_tx_timeout(unsigned long arg)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)arg;
+ u8 restart;
+
+ spin_lock(&ctx->mtx);
+ if (ctx->tx_timer_pending != 0) {
+ ctx->tx_timer_pending--;
+ restart = 1;
+ } else {
+ restart = 0;
+ }
+
+ spin_unlock(&ctx->mtx);
+
+ if (restart) {
+ spin_lock(&ctx->mtx);
+ cdc_ncm_tx_timeout_start(ctx);
+ spin_unlock(&ctx->mtx);
+ } else if (ctx->netdev != NULL) {
+ usbnet_start_xmit(NULL, ctx->netdev);
+ }
+}
+
+static struct sk_buff *
+cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+ struct sk_buff *skb_out;
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ u8 need_timer = 0;
+
+ /*
+ * The Ethernet API we are using does not support transmitting
+ * multiple Ethernet frames in a single call. This driver will
+ * accumulate multiple Ethernet frames and send out a larger
+ * USB frame when the USB buffer is full or when a single jiffies
+ * timeout happens.
+ */
+ if (ctx == NULL)
+ goto error;
+
+ spin_lock(&ctx->mtx);
+ skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
+ if (ctx->tx_curr_skb != NULL)
+ need_timer = 1;
+
+ /* Start timer, if there is a remaining skb */
+ if (need_timer)
+ cdc_ncm_tx_timeout_start(ctx);
+
+ if (skb_out)
+ dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+
+ spin_unlock(&ctx->mtx);
+ return skb_out;
+
+error:
+ if (skb != NULL)
+ dev_kfree_skb_any(skb);
+
+ return NULL;
+}
+
+static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+{
+ struct sk_buff *skb;
+ struct cdc_ncm_ctx *ctx;
+ int sumlen;
+ int actlen;
+ int temp;
+ int nframes;
+ int x;
+ int offset;
+
+ ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ if (ctx == NULL)
+ goto error;
+
+ actlen = skb_in->len;
+ sumlen = CDC_NCM_NTB_MAX_SIZE_RX;
+
+ if (actlen < (sizeof(ctx->rx_ncm.nth16) + sizeof(ctx->rx_ncm.ndp16))) {
+ pr_debug("frame too short\n");
+ goto error;
+ }
+
+ memcpy(&(ctx->rx_ncm.nth16), ((u8 *)skb_in->data),
+ sizeof(ctx->rx_ncm.nth16));
+
+ if (le32_to_cpu(ctx->rx_ncm.nth16.dwSignature) !=
+ USB_CDC_NCM_NTH16_SIGN) {
+ pr_debug("invalid NTH16 signature <%u>\n",
+ le32_to_cpu(ctx->rx_ncm.nth16.dwSignature));
+ goto error;
+ }
+
+ temp = le16_to_cpu(ctx->rx_ncm.nth16.wBlockLength);
+ if (temp > sumlen) {
+ pr_debug("unsupported NTB block length %u/%u\n", temp, sumlen);
+ goto error;
+ }
+
+ temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex);
+ if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
+ pr_debug("invalid DPT16 index\n");
+ goto error;
+ }
+
+ memcpy(&(ctx->rx_ncm.ndp16), ((u8 *)skb_in->data) + temp,
+ sizeof(ctx->rx_ncm.ndp16));
+
+ if (le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature) !=
+ USB_CDC_NCM_NDP16_NOCRC_SIGN) {
+ pr_debug("invalid DPT16 signature <%u>\n",
+ le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature));
+ goto error;
+ }
+
+ if (le16_to_cpu(ctx->rx_ncm.ndp16.wLength) <
+ USB_CDC_NCM_NDP16_LENGTH_MIN) {
+ pr_debug("invalid DPT16 length <%u>\n",
+ le16_to_cpu(ctx->rx_ncm.ndp16.wLength));
+ goto error;
+ }
+
+ nframes = ((le16_to_cpu(ctx->rx_ncm.ndp16.wLength) -
+ sizeof(struct usb_cdc_ncm_ndp16)) /
+ sizeof(struct usb_cdc_ncm_dpe16));
+ nframes--; /* we process NDP entries except for the last one */
+
+ pr_debug("nframes = %u\n", nframes);
+
+ temp += sizeof(ctx->rx_ncm.ndp16);
+
+ if ((temp + nframes * (sizeof(struct usb_cdc_ncm_dpe16))) > actlen) {
+ pr_debug("Invalid nframes = %d\n", nframes);
+ goto error;
+ }
+
+ if (nframes > CDC_NCM_DPT_DATAGRAMS_MAX) {
+ pr_debug("Truncating number of frames from %u to %u\n",
+ nframes, CDC_NCM_DPT_DATAGRAMS_MAX);
+ nframes = CDC_NCM_DPT_DATAGRAMS_MAX;
+ }
+
+ memcpy(&(ctx->rx_ncm.dpe16), ((u8 *)skb_in->data) + temp,
+ nframes * (sizeof(struct usb_cdc_ncm_dpe16)));
+
+ for (x = 0; x < nframes; x++) {
+ offset = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramIndex);
+ temp = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramLength);
+
+ /*
+ * CDC NCM ch. 3.7
+ * All entries after first NULL entry are to be ignored
+ */
+ if ((offset == 0) || (temp == 0)) {
+ if (!x)
+ goto error; /* empty NTB */
+ break;
+ }
+
+ /* sanity checking */
+ if (((offset + temp) > actlen) ||
+ (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
+ pr_debug("invalid frame detected (ignored)"
+ "offset[%u]=%u, length=%u, skb=%p\n",
+ x, offset, temp, skb_in);
+ if (!x)
+ goto error;
+ break;
+
+ } else {
+ skb = skb_clone(skb_in, GFP_ATOMIC);
+ if (!skb)
+ goto error;
+ skb->len = temp;
+ skb->data = ((u8 *)skb_in->data) + offset;
+ skb_set_tail_pointer(skb, temp);
+ usbnet_skb_return(dev, skb);
+ }
+ }
+ return 1;
+error:
+ return 0;
+}
+
+static void
+cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
+ struct usb_cdc_speed_change *data)
+{
+ uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
+ uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
+
+ /*
+ * Currently the USB-NET API does not support reporting the actual
+ * device speed, so print it instead.
+ */
+ if ((tx_speed != ctx->tx_speed) || (rx_speed != ctx->rx_speed)) {
+ ctx->tx_speed = tx_speed;
+ ctx->rx_speed = rx_speed;
+
+ if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
+ printk(KERN_INFO KBUILD_MODNAME
+ ": %s: %u mbit/s downlink "
+ "%u mbit/s uplink\n",
+ ctx->netdev->name,
+ (unsigned int)(rx_speed / 1000000U),
+ (unsigned int)(tx_speed / 1000000U));
+ } else {
+ printk(KERN_INFO KBUILD_MODNAME
+ ": %s: %u kbit/s downlink "
+ "%u kbit/s uplink\n",
+ ctx->netdev->name,
+ (unsigned int)(rx_speed / 1000U),
+ (unsigned int)(tx_speed / 1000U));
+ }
+ }
+}
+
+static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
+{
+ struct cdc_ncm_ctx *ctx;
+ struct usb_cdc_notification *event;
+
+ ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ if (urb->actual_length < sizeof(*event))
+ return;
+
+ /* test for split data in 8-byte chunks */
+ if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
+ cdc_ncm_speed_change(ctx,
+ (struct usb_cdc_speed_change *)urb->transfer_buffer);
+ return;
+ }
+
+ event = urb->transfer_buffer;
+
+ switch (event->bNotificationType) {
+ case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+ /*
+ * According to the CDC NCM specification ch. 7.1, the
+ * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
+ * sent by the device after USB_CDC_NOTIFY_SPEED_CHANGE.
+ */
+ ctx->connected = event->wValue;
+
+ printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
+ " %sconnected\n",
+ ctx->netdev->name, ctx->connected ? "" : "dis");
+
+ if (ctx->connected)
+ netif_carrier_on(dev->net);
+ else {
+ netif_carrier_off(dev->net);
+ ctx->tx_speed = ctx->rx_speed = 0;
+ }
+ break;
+
+ case USB_CDC_NOTIFY_SPEED_CHANGE:
+ if (urb->actual_length < (sizeof(*event) +
+ sizeof(struct usb_cdc_speed_change)))
+ set_bit(EVENT_STS_SPLIT, &dev->flags);
+ else
+ cdc_ncm_speed_change(ctx,
+ (struct usb_cdc_speed_change *) &event[1]);
+ break;
+
+ default:
+ dev_err(&dev->udev->dev, "NCM: unexpected "
+ "notification 0x%02x!\n", event->bNotificationType);
+ break;
+ }
+}
+
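For reference, the notification handling above operates on the following on-wire layout; these structs are illustrative stand-ins for usb_cdc_notification and usb_cdc_speed_change. The EVENT_STS_SPLIT case arises when the interrupt endpoint is too small to carry the 8-byte header and the 8-byte speed payload in a single transfer.

#include <stdint.h>

struct cdc_notification {		/* 8-byte header, little-endian fields */
	uint8_t  bmRequestType;
	uint8_t  bNotificationType;	/* 0x00 NETWORK_CONNECTION, 0x2A SPEED_CHANGE */
	uint16_t wValue;		/* link state for NETWORK_CONNECTION */
	uint16_t wIndex;
	uint16_t wLength;		/* payload bytes that follow */
} __attribute__((packed));

struct cdc_speed_change {		/* SPEED_CHANGE payload, 8 bytes */
	uint32_t DLBitRRate;		/* downlink, bits per second */
	uint32_t ULBitRate;		/* uplink, bits per second */
} __attribute__((packed));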
+static int cdc_ncm_check_connect(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx;
+
+ ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ if (ctx == NULL)
+ return 1; /* disconnected */
+
+ return !ctx->connected;
+}
+
+static int
+cdc_ncm_probe(struct usb_interface *udev, const struct usb_device_id *prod)
+{
+ return usbnet_probe(udev, prod);
+}
+
+static void cdc_ncm_disconnect(struct usb_interface *intf)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+
+ if (dev == NULL)
+ return; /* already disconnected */
+
+ usbnet_disconnect(intf);
+}
+
+static int cdc_ncm_manage_power(struct usbnet *dev, int status)
+{
+ dev->intf->needs_remote_wakeup = status;
+ return 0;
+}
+
+static const struct driver_info cdc_ncm_info = {
+ .description = "CDC NCM",
+ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET,
+ .bind = cdc_ncm_bind,
+ .unbind = cdc_ncm_unbind,
+ .check_connect = cdc_ncm_check_connect,
+ .manage_power = cdc_ncm_manage_power,
+ .status = cdc_ncm_status,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+};
+
+static struct usb_driver cdc_ncm_driver = {
+ .name = "cdc_ncm",
+ .id_table = cdc_devs,
+ .probe = cdc_ncm_probe,
+ .disconnect = cdc_ncm_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .supports_autosuspend = 1,
+};
+
+static struct ethtool_ops cdc_ncm_ethtool_ops = {
+ .get_drvinfo = cdc_ncm_get_drvinfo,
+ .get_link = usbnet_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
+static int __init cdc_ncm_init(void)
+{
+ printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION "\n");
+ return usb_register(&cdc_ncm_driver);
+}
+
+module_init(cdc_ncm_init);
+
+static void __exit cdc_ncm_exit(void)
+{
+ usb_deregister(&cdc_ncm_driver);
+}
+
+module_exit(cdc_ncm_exit);
+
+MODULE_AUTHOR("Hans Petter Selasky");
+MODULE_DESCRIPTION("USB CDC NCM host driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 02b622e3b9fb..5002f5be47be 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -651,6 +651,10 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long)&dm9601_info,
},
{
+ USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
+ {
USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
.driver_info = (unsigned long)&dm9601_info,
},
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 62e9e8dc8190..387ca43f26f4 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -324,7 +324,7 @@ struct hso_device {
/* Prototypes */
/*****************************************************************************/
/* Serial driver functions */
-static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
+static int hso_serial_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static void ctrl_callback(struct urb *urb);
static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial);
@@ -958,10 +958,6 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
/* Packet is complete. Inject into stack. */
/* We have IP packet here */
odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP);
- /* don't check it */
- odev->skb_rx_buf->ip_summed =
- CHECKSUM_UNNECESSARY;
-
skb_reset_mac_header(odev->skb_rx_buf);
/* Ship it off to the kernel */
@@ -1001,6 +997,18 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
}
}
+static void fix_crc_bug(struct urb *urb, __le16 max_packet_size)
+{
+ static const u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
+ u32 rest = urb->actual_length % le16_to_cpu(max_packet_size);
+
+ if (((rest == 5) || (rest == 6)) &&
+ !memcmp(((u8 *)urb->transfer_buffer) + urb->actual_length - 4,
+ crc_check, 4)) {
+ urb->actual_length -= 4;
+ }
+}
+
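A standalone sketch of the workaround factored out above: on affected firmware the transfer ends 5 or 6 bytes past a wMaxPacketSize boundary and carries a 0xDE 0xAD 0xBE 0xEF marker in its last four bytes, which is stripped. Plain C stand-in, not the kernel helper.

#include <stdint.h>
#include <string.h>

static uint32_t trim_crc_marker(const uint8_t *buf, uint32_t len, uint16_t max_packet)
{
	static const uint8_t marker[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
	uint32_t rest = len % max_packet;

	if ((rest == 5 || rest == 6) && len >= 4 &&
	    !memcmp(buf + len - 4, marker, 4))
		len -= 4;			/* drop the bogus trailer */
	return len;
}

For example, a 69-byte URB on a 64-byte bulk endpoint (rest == 5) that ends with the marker is trimmed back to 65 bytes.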
/* Moving data from usb to kernel (in interrupt state) */
static void read_bulk_callback(struct urb *urb)
{
@@ -1029,17 +1037,8 @@ static void read_bulk_callback(struct urb *urb)
return;
}
- if (odev->parent->port_spec & HSO_INFO_CRC_BUG) {
- u32 rest;
- u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
- rest = urb->actual_length %
- le16_to_cpu(odev->in_endp->wMaxPacketSize);
- if (((rest == 5) || (rest == 6)) &&
- !memcmp(((u8 *) urb->transfer_buffer) +
- urb->actual_length - 4, crc_check, 4)) {
- urb->actual_length -= 4;
- }
- }
+ if (odev->parent->port_spec & HSO_INFO_CRC_BUG)
+ fix_crc_bug(urb, odev->in_endp->wMaxPacketSize);
/* do we even have a packet? */
if (urb->actual_length) {
@@ -1231,18 +1230,8 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
return;
if (status == 0) {
- if (serial->parent->port_spec & HSO_INFO_CRC_BUG) {
- u32 rest;
- u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
- rest =
- urb->actual_length %
- le16_to_cpu(serial->in_endp->wMaxPacketSize);
- if (((rest == 5) || (rest == 6)) &&
- !memcmp(((u8 *) urb->transfer_buffer) +
- urb->actual_length - 4, crc_check, 4)) {
- urb->actual_length -= 4;
- }
- }
+ if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
+ fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
/* Valid data, handle RX data */
spin_lock(&serial->serial_lock);
serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
@@ -1346,7 +1335,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
/* done */
if (result)
- hso_serial_tiocmset(tty, NULL, TIOCM_RTS | TIOCM_DTR, 0);
+ hso_serial_tiocmset(tty, TIOCM_RTS | TIOCM_DTR, 0);
err_out:
mutex_unlock(&serial->parent->mutex);
return result;
@@ -1667,7 +1656,7 @@ static int hso_get_count(struct tty_struct *tty,
}
-static int hso_serial_tiocmget(struct tty_struct *tty, struct file *file)
+static int hso_serial_tiocmget(struct tty_struct *tty)
{
int retval;
struct hso_serial *serial = get_serial_by_tty(tty);
@@ -1698,7 +1687,7 @@ static int hso_serial_tiocmget(struct tty_struct *tty, struct file *file)
return retval;
}
-static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
+static int hso_serial_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
int val = 0;
@@ -1741,11 +1730,10 @@ static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
USB_CTRL_SET_TIMEOUT);
}
-static int hso_serial_ioctl(struct tty_struct *tty, struct file *file,
+static int hso_serial_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct hso_serial *serial = get_serial_by_tty(tty);
- void __user *uarg = (void __user *)arg;
int ret = 0;
D4("IOCTL cmd: %d, arg: %ld", cmd, arg);
@@ -2640,15 +2628,15 @@ exit:
static void hso_free_tiomget(struct hso_serial *serial)
{
- struct hso_tiocmget *tiocmget = serial->tiocmget;
+ struct hso_tiocmget *tiocmget;
+ if (!serial)
+ return;
+ tiocmget = serial->tiocmget;
if (tiocmget) {
- if (tiocmget->urb) {
- usb_free_urb(tiocmget->urb);
- tiocmget->urb = NULL;
- }
+ usb_free_urb(tiocmget->urb);
+ tiocmget->urb = NULL;
serial->tiocmget = NULL;
kfree(tiocmget);
-
}
}
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index b2bcf99e6f08..7d42f9a2c068 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -363,7 +363,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
/* Paranoid */
if (skb->len > IPHETH_BUF_SIZE) {
- WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
+ WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
dev->net->stats.tx_dropped++;
dev_kfree_skb_irq(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 5e98643a4a21..7dc84971f26f 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -406,6 +406,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) {
err("Firmware too big: %zu", fw->size);
+ release_firmware(fw);
return -ENOSPC;
}
data_len = fw->size;
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index a6281e3987b5..2b791392e788 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -1,5 +1,5 @@
/*
- * MOSCHIP MCS7830 based USB 2.0 Ethernet Devices
+ * MOSCHIP MCS7830 based (7730/7830/7832) USB 2.0 Ethernet Devices
*
* based on usbnet.c, asix.c and the vendor provided mcs7830 driver
*
@@ -11,6 +11,9 @@
*
* Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!).
*
+ * 2010-12-19: add 7832 USB PID ("functionality same as MCS7830"),
+ * following notification from the manufacturer
+ *
* TODO:
* - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?)
* - implement ethtool_ops get_pauseparam/set_pauseparam
@@ -60,6 +63,7 @@
#define MCS7830_MAX_MCAST 64
#define MCS7830_VENDOR_ID 0x9710
+#define MCS7832_PRODUCT_ID 0x7832
#define MCS7830_PRODUCT_ID 0x7830
#define MCS7730_PRODUCT_ID 0x7730
@@ -351,7 +355,7 @@ static int mcs7830_set_autoneg(struct usbnet *dev, int ptrUserPhyMode)
if (!ret)
ret = mcs7830_write_phy(dev, MII_BMCR,
BMCR_ANENABLE | BMCR_ANRESTART );
- return ret < 0 ? : 0;
+ return ret;
}
@@ -626,7 +630,7 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
static const struct driver_info moschip_info = {
- .description = "MOSCHIP 7830/7730 usb-NET adapter",
+ .description = "MOSCHIP 7830/7832/7730 usb-NET adapter",
.bind = mcs7830_bind,
.rx_fixup = mcs7830_rx_fixup,
.flags = FLAG_ETHER,
@@ -645,6 +649,10 @@ static const struct driver_info sitecom_info = {
static const struct usb_device_id products[] = {
{
+ USB_DEVICE(MCS7830_VENDOR_ID, MCS7832_PRODUCT_ID),
+ .driver_info = (unsigned long) &moschip_info,
+ },
+ {
USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID),
.driver_info = (unsigned long) &moschip_info,
},
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6710f09346d6..ef3667690b12 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -359,7 +359,7 @@ fail:
static int mdio_read(struct net_device *dev, int phy_id, int loc)
{
- pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+ pegasus_t *pegasus = netdev_priv(dev);
u16 res;
read_mii_word(pegasus, phy_id, loc, &res);
@@ -397,7 +397,7 @@ fail:
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
{
- pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+ pegasus_t *pegasus = netdev_priv(dev);
write_mii_word(pegasus, phy_id, loc, val);
}
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index d1ac15c95faf..ed1b43210584 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -802,10 +802,9 @@ static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf)
dev_dbg(&dev->udev->dev, "%s", __func__);
- /* Kill the timer then flush the work queue */
+ /* kill the timer and work */
del_timer_sync(&priv->sync_timer);
-
- flush_scheduled_work();
+ cancel_work_sync(&priv->sierra_net_kevent);
/* tell modem we are going away */
status = sierra_net_send_cmd(dev, priv->shdwn_msg,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 65cb1abfbe57..bc86f4b6ecc2 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1163,9 +1163,8 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb)
{
- int len = skb->data - skb->head;
- u16 high_16 = (u16)(skb->csum_offset + skb->csum_start - len);
- u16 low_16 = (u16)(skb->csum_start - len);
+ u16 low_16 = (u16)skb_checksum_start_offset(skb);
+ u16 high_16 = low_16 + skb->csum_offset;
return (high_16 << 16) | low_16;
}
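The rewritten helper above packs two offsets into the TX checksum preamble. A hedged sketch of the same packing, with offsets measured from the start of the packet data (the helper name and example values are mine, not the driver's):

#include <stdint.h>

static uint32_t csum_preamble(uint16_t csum_start, uint16_t csum_offset)
{
	uint16_t low = csum_start;			/* hardware starts summing here */
	uint16_t high = csum_start + csum_offset;	/* hardware stores the result here */

	return ((uint32_t)high << 16) | low;
}

/* e.g. TCP over IPv4: csum_start = 34 (14 + 20), csum_offset = 16 -> 0x00320022 */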
@@ -1193,7 +1192,7 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
if (skb->len <= 45) {
/* workaround - hardware tx checksum does not work
* properly with extremely small packets */
- long csstart = skb->csum_start - skb_headroom(skb);
+ long csstart = skb_checksum_start_offset(skb);
__wsum calc = csum_partial(skb->data + csstart,
skb->len - csstart, 0);
*((__sum16 *)(skb->data + csstart
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index c04d49e31f81..95c41d56631c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -391,14 +391,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
goto error;
// else network stack removes extra byte if we forced a short packet
- if (skb->len)
- usbnet_skb_return (dev, skb);
- else {
- netif_dbg(dev, rx_err, dev->net, "drop\n");
-error:
- dev->net->stats.rx_errors++;
- skb_queue_tail (&dev->done, skb);
+ if (skb->len) {
+ /* all data was already cloned from skb inside the driver */
+ if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+ dev_kfree_skb_any(skb);
+ else
+ usbnet_skb_return(dev, skb);
+ return;
}
+
+ netif_dbg(dev, rx_err, dev->net, "drop\n");
+error:
+ dev->net->stats.rx_errors++;
+ skb_queue_tail(&dev->done, skb);
}
/*-------------------------------------------------------------------------*/
@@ -926,8 +931,10 @@ fail_halt:
if (urb != NULL) {
clear_bit (EVENT_RX_MEMORY, &dev->flags);
status = usb_autopm_get_interface(dev->intf);
- if (status < 0)
+ if (status < 0) {
+ usb_free_urb(urb);
goto fail_lowmem;
+ }
if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
resched = 0;
usb_autopm_put_interface(dev->intf);
@@ -971,7 +978,8 @@ static void tx_complete (struct urb *urb)
struct usbnet *dev = entry->dev;
if (urb->status == 0) {
- dev->net->stats.tx_packets++;
+ if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
+ dev->net->stats.tx_packets++;
dev->net->stats.tx_bytes += entry->length;
} else {
dev->net->stats.tx_errors++;
@@ -1044,8 +1052,13 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
if (info->tx_fixup) {
skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
if (!skb) {
- netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
- goto drop;
+ if (netif_msg_tx_err(dev)) {
+ netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+ goto drop;
+ } else {
+ /* cdc_ncm collected packet; waits for more */
+ goto not_drop;
+ }
}
}
length = skb->len;
@@ -1067,13 +1080,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
/* don't assume the hardware handles USB_ZERO_PACKET
* NOTE: strictly conforming cdc-ether devices should expect
* the ZLP here, but ignore the one-byte packet.
+ * NOTE2: the CDC NCM specification differs from CDC ECM in its
+ * handling of ZLP/short packets, so the cdc_ncm driver generates
+ * the short packet itself when needed.
*/
if (length % dev->maxpacket == 0) {
if (!(info->flags & FLAG_SEND_ZLP)) {
- urb->transfer_buffer_length++;
- if (skb_tailroom(skb)) {
- skb->data[skb->len] = 0;
- __skb_put(skb, 1);
+ if (!(info->flags & FLAG_MULTI_PACKET)) {
+ urb->transfer_buffer_length++;
+ if (skb_tailroom(skb)) {
+ skb->data[skb->len] = 0;
+ __skb_put(skb, 1);
+ }
}
} else
urb->transfer_flags |= URB_ZERO_PACKET;
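The note added above keeps the padding/ZLP logic out of FLAG_MULTI_PACKET drivers. The underlying rule it works around, sketched under the assumption that the device cannot otherwise detect the end of a bulk transfer whose length is an exact multiple of wMaxPacketSize:

#include <stdbool.h>

/* Returns the number of bytes to queue for a bulk-out submission. */
static int bytes_to_queue(int len, int maxpacket, bool send_real_zlp)
{
	if (len % maxpacket != 0)
		return len;		/* final short packet already terminates it */
	if (send_real_zlp)
		return len;		/* a zero-length packet follows (URB_ZERO_PACKET) */
	return len + 1;			/* otherwise pad with one extra byte */
}

cdc_ncm opts out of both paths and, per the NOTE2 above, generates the short packet itself when needed.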
@@ -1122,6 +1140,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
dev->net->stats.tx_dropped++;
+not_drop:
if (skb)
dev_kfree_skb_any (skb);
usb_free_urb (urb);
@@ -1231,8 +1250,7 @@ void usbnet_disconnect (struct usb_interface *intf)
net = dev->net;
unregister_netdev (net);
- /* we don't hold rtnl here ... */
- flush_scheduled_work ();
+ cancel_work_sync(&dev->kevent);
if (dev->driver_info->unbind)
dev->driver_info->unbind (dev, intf);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 0bbc0c323135..105d7f0630cc 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -166,7 +166,9 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
if (!(rcv->flags & IFF_UP))
goto tx_drop;
- if (dev->features & NETIF_F_NO_CSUM)
+ /* don't change ip_summed == CHECKSUM_PARTIAL, as that
+ will cause bad checksum on forwarded packets */
+ if (skb->ip_summed == CHECKSUM_NONE)
skb->ip_summed = rcv_priv->ip_summed;
length = skb->len + ETH_HLEN;
@@ -401,17 +403,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (tb[IFLA_ADDRESS] == NULL)
random_ether_addr(dev->dev_addr);
- if (tb[IFLA_IFNAME])
- nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
- else
- snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
-
- if (strchr(dev->name, '%')) {
- err = dev_alloc_name(dev, dev->name);
- if (err < 0)
- goto err_alloc_name;
- }
-
err = register_netdevice(dev);
if (err < 0)
goto err_register_dev;
@@ -431,7 +422,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
err_register_dev:
/* nothing to do */
-err_alloc_name:
err_configure_peer:
unregister_netdevice(peer);
return err;
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 4930f9dbc493..5e7f069eab53 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
*/
#define DRV_NAME "via-rhine"
-#define DRV_VERSION "1.4.3"
-#define DRV_RELDATE "2007-03-06"
+#define DRV_VERSION "1.5.0"
+#define DRV_RELDATE "2010-10-09"
/* A few user-configurable values.
@@ -100,6 +100,7 @@ static const int multicast_filter_limit = 32;
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
+#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
@@ -133,6 +134,9 @@ MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
+#define MCAM_SIZE 32
+#define VCAM_SIZE 32
+
/*
Theory of Operation
@@ -279,15 +283,16 @@ MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
/* Offsets to the device registers. */
enum register_offsets {
StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
- ChipCmd1=0x09,
+ ChipCmd1=0x09, TQWake=0x0A,
IntrStatus=0x0C, IntrEnable=0x0E,
MulticastFilter0=0x10, MulticastFilter1=0x14,
RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
- MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
+ MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
StickyHW=0x83, IntrStatus2=0x84,
+ CamMask=0x88, CamCon=0x92, CamAddr=0x93,
WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
WOLcrClr1=0xA6, WOLcgClr=0xA7,
PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
@@ -299,6 +304,40 @@ enum backoff_bits {
BackCaptureEffect=0x04, BackRandom=0x08
};
+/* Bits in the TxConfig (TCR) register */
+enum tcr_bits {
+ TCR_PQEN=0x01,
+ TCR_LB0=0x02, /* loopback[0] */
+ TCR_LB1=0x04, /* loopback[1] */
+ TCR_OFSET=0x08,
+ TCR_RTGOPT=0x10,
+ TCR_RTFT0=0x20,
+ TCR_RTFT1=0x40,
+ TCR_RTSF=0x80,
+};
+
+/* Bits in the CamCon (CAMC) register */
+enum camcon_bits {
+ CAMC_CAMEN=0x01,
+ CAMC_VCAMSL=0x02,
+ CAMC_CAMWR=0x04,
+ CAMC_CAMRD=0x08,
+};
+
+/* Bits in the PCIBusConfig1 (BCR1) register */
+enum bcr1_bits {
+ BCR1_POT0=0x01,
+ BCR1_POT1=0x02,
+ BCR1_POT2=0x04,
+ BCR1_CTFT0=0x08,
+ BCR1_CTFT1=0x10,
+ BCR1_CTSF=0x20,
+ BCR1_TXQNOBK=0x40, /* for VT6105 */
+ BCR1_VIDFR=0x80, /* for VT6105 */
+ BCR1_MED0=0x40, /* for VT6102 */
+ BCR1_MED1=0x80, /* for VT6102 */
+};
+
#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
@@ -356,6 +395,11 @@ enum desc_status_bits {
DescOwn=0x80000000
};
+/* Bits in *_desc.*_length */
+enum desc_length_bits {
+ DescTag=0x00010000
+};
+
/* Bits in ChipCmd. */
enum chip_cmd_bits {
CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
@@ -365,6 +409,9 @@ enum chip_cmd_bits {
};
struct rhine_private {
+ /* Bit mask for configured VLAN ids */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
/* Descriptor rings */
struct rx_desc *rx_ring;
struct tx_desc *tx_ring;
@@ -405,6 +452,23 @@ struct rhine_private {
void __iomem *base;
};
+#define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
+#define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
+#define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
+#define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
+#define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
+
+#define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
+#define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
+#define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
+#define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
+#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
+
+
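The macros above wrap a read-modify-write on an __iomem register. A plain-memory sketch of the same three operations (set, clear, masked update) with an assert-based usage example; illustrative only, it does not use the MMIO accessors:

#include <stdint.h>
#include <assert.h>

static inline void reg_bits_on(volatile uint8_t *reg, uint8_t bits)
{
	*reg |= bits;
}

static inline void reg_bits_off(volatile uint8_t *reg, uint8_t bits)
{
	*reg &= (uint8_t)~bits;
}

static inline void reg_bits_set(volatile uint8_t *reg, uint8_t bits, uint8_t mask)
{
	*reg = (uint8_t)((*reg & ~mask) | bits);	/* clear the mask, then set bits */
}

int main(void)
{
	volatile uint8_t reg = 0x03;

	reg_bits_on(&reg, 0x10);
	assert(reg == 0x13);
	reg_bits_set(&reg, 0x40, 0xC0);			/* replace the top two bits */
	assert(reg == 0x53);
	reg_bits_off(&reg, 0x01);
	assert(reg == 0x52);
	return 0;
}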
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
@@ -422,6 +486,14 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_init_cam_filter(struct net_device *dev);
+static void rhine_update_vcam(struct net_device *dev);
#define RHINE_WAIT_FOR(condition) do { \
int i=1024; \
@@ -629,6 +701,8 @@ static const struct net_device_ops rhine_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = rhine_tx_timeout,
+ .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = rhine_poll,
#endif
@@ -795,6 +869,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
+ if (pdev->revision >= VT6105M)
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
/* dev->name not defined before register_netdev()! */
rc = register_netdev(dev);
if (rc)
@@ -1040,6 +1118,167 @@ static void rhine_set_carrier(struct mii_if_info *mii)
netif_carrier_ok(mii->dev));
}
+/**
+ * rhine_set_cam - set CAM multicast filters
+ * @ioaddr: register block of this Rhine
+ * @idx: multicast CAM index [0..MCAM_SIZE-1]
+ * @addr: multicast address (6 bytes)
+ *
+ * Load addresses into multicast filters.
+ */
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+ int i;
+
+ iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+ wmb();
+
+ /* Paranoid -- idx out of range should never happen */
+ idx &= (MCAM_SIZE - 1);
+
+ iowrite8((u8) idx, ioaddr + CamAddr);
+
+ for (i = 0; i < 6; i++, addr++)
+ iowrite8(*addr, ioaddr + MulticastFilter0 + i);
+ udelay(10);
+ wmb();
+
+ iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+ udelay(10);
+
+ iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam - set CAM VLAN filters
+ * @ioaddr: register block of this Rhine
+ * @idx: VLAN CAM index [0..VCAM_SIZE-1]
+ * @addr: VLAN ID (2 bytes)
+ *
+ * Load addresses into VLAN filters.
+ */
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+ iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+ wmb();
+
+ /* Paranoid -- idx out of range should never happen */
+ idx &= (VCAM_SIZE - 1);
+
+ iowrite8((u8) idx, ioaddr + CamAddr);
+
+ iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
+ udelay(10);
+ wmb();
+
+ iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+ udelay(10);
+
+ iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_cam_mask - set multicast CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: multicast CAM mask
+ *
+ * Mask sets multicast filters active/inactive.
+ */
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+ iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+ wmb();
+
+ /* write mask */
+ iowrite32(mask, ioaddr + CamMask);
+
+ /* disable CAMEN */
+ iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam_mask - set VLAN CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: VLAN CAM mask
+ *
+ * Mask sets VLAN filters active/inactive.
+ */
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+ iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+ wmb();
+
+ /* write mask */
+ iowrite32(mask, ioaddr + CamMask);
+
+ /* disable CAMEN */
+ iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_init_cam_filter - initialize CAM filters
+ * @dev: network device
+ *
+ * Initialize (disable) hardware VLAN and multicast support on this
+ * Rhine.
+ */
+static void rhine_init_cam_filter(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+
+ /* Disable all CAMs */
+ rhine_set_vlan_cam_mask(ioaddr, 0);
+ rhine_set_cam_mask(ioaddr, 0);
+
+ /* disable hardware VLAN support */
+ BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
+ BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+}
+
+/**
+ * rhine_update_vcam - update VLAN CAM filters
+ * @dev: network device
+ *
+ * Update VLAN CAM filters to match configuration change.
+ */
+static void rhine_update_vcam(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ u16 vid;
+ u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
+ unsigned int i = 0;
+
+ for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
+ rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
+ vCAMmask |= 1 << i;
+ if (++i >= VCAM_SIZE)
+ break;
+ }
+ rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
+}
+
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ spin_lock_irq(&rp->lock);
+ set_bit(vid, rp->active_vlans);
+ rhine_update_vcam(dev);
+ spin_unlock_irq(&rp->lock);
+}
+
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+
+ spin_lock_irq(&rp->lock);
+ clear_bit(vid, rp->active_vlans);
+ rhine_update_vcam(dev);
+ spin_unlock_irq(&rp->lock);
+}
+
static void init_registers(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1061,6 +1300,9 @@ static void init_registers(struct net_device *dev)
rhine_set_rx_mode(dev);
+ if (rp->pdev->revision >= VT6105M)
+ rhine_init_cam_filter(dev);
+
napi_enable(&rp->napi);
/* Enable interrupts by setting the interrupt mask. */
@@ -1276,16 +1518,28 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
rp->tx_ring[entry].desc_length =
cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+ if (unlikely(vlan_tx_tag_present(skb))) {
+ rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
+ /* request tagging */
+ rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
+ } else {
+ rp->tx_ring[entry].tx_status = 0;
+ }
+
/* lock eth irq */
spin_lock_irqsave(&rp->lock, flags);
wmb();
- rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+ rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
wmb();
rp->cur_tx++;
/* Non-x86 Todo: explicitly flush cache lines here. */
+ if (vlan_tx_tag_present(skb))
+ /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+ BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
/* Wake the potentially-idle transmit channel */
iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
ioaddr + ChipCmd1);
@@ -1437,6 +1691,21 @@ static void rhine_tx(struct net_device *dev)
spin_unlock(&rp->lock);
}
+/**
+ * rhine_get_vlan_tci - extract TCI from Rx data buffer
+ * @skb: pointer to sk_buff
+ * @data_size: used data area of the buffer including CRC
+ *
+ * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
+ * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
+ * aligned following the CRC.
+ */
+static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
+{
+ u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
+ return ntohs(*(u16 *)trailer);
+}
+
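A worked example of the trailer arithmetic in rhine_get_vlan_tci() above, assuming data_size already includes the 4-byte CRC as the kernel-doc states:

#include <stdio.h>

int main(void)
{
	int data_size = 66;			/* received frame length including CRC */
	int tag_start = (data_size + 3) & ~3;	/* 68: TPID begins at the next 4-byte boundary */
	int tci_start = tag_start + 2;		/* 70: TCI follows the 2-byte TPID */

	printf("TPID at offset %d, TCI at offset %d\n", tag_start, tci_start);
	return 0;
}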
/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
@@ -1454,6 +1723,7 @@ static int rhine_rx(struct net_device *dev, int limit)
for (count = 0; count < limit; ++count) {
struct rx_desc *desc = rp->rx_head_desc;
u32 desc_status = le32_to_cpu(desc->rx_status);
+ u32 desc_length = le32_to_cpu(desc->desc_length);
int data_size = desc_status >> 16;
if (desc_status & DescOwn)
@@ -1498,6 +1768,7 @@ static int rhine_rx(struct net_device *dev, int limit)
struct sk_buff *skb = NULL;
/* Length should omit the CRC */
int pkt_len = data_size - 4;
+ u16 vlan_tci = 0;
/* Check if the packet is long enough to accept without
copying to a minimally-sized skbuff. */
@@ -1532,7 +1803,14 @@ static int rhine_rx(struct net_device *dev, int limit)
rp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
}
+
+ if (unlikely(desc_length & DescTag))
+ vlan_tci = rhine_get_vlan_tci(skb, data_size);
+
skb->protocol = eth_type_trans(skb, dev);
+
+ if (unlikely(desc_length & DescTag))
+ __vlan_hwaccel_put_tag(skb, vlan_tci);
netif_receive_skb(skb);
dev->stats.rx_bytes += pkt_len;
dev->stats.rx_packets++;
@@ -1596,6 +1874,11 @@ static void rhine_restart_tx(struct net_device *dev) {
iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
ioaddr + ChipCmd);
+
+ if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
+ /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+ BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
ioaddr + ChipCmd1);
IOSYNC;
@@ -1631,7 +1914,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
}
if (intr_status & IntrTxUnderrun) {
if (rp->tx_thresh < 0xE0)
- iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+ BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
if (debug > 1)
printk(KERN_INFO "%s: Transmitter underrun, Tx "
"threshold now %2.2x.\n",
@@ -1646,7 +1929,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
(intr_status & (IntrTxAborted |
IntrTxUnderrun | IntrTxDescRace)) == 0) {
if (rp->tx_thresh < 0xE0) {
- iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+ BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
}
if (debug > 1)
printk(KERN_INFO "%s: Unspecified error. Tx "
@@ -1688,7 +1971,8 @@ static void rhine_set_rx_mode(struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
u32 mc_filter[2]; /* Multicast hash filter */
- u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
+ u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */
+ struct netdev_hw_addr *ha;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
rx_mode = 0x1C;
@@ -1699,10 +1983,18 @@ static void rhine_set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
- rx_mode = 0x0C;
+ } else if (rp->pdev->revision >= VT6105M) {
+ int i = 0;
+ u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i == MCAM_SIZE)
+ break;
+ rhine_set_cam(ioaddr, i, ha->addr);
+ mCAMmask |= 1 << i;
+ i++;
+ }
+ rhine_set_cam_mask(ioaddr, mCAMmask);
} else {
- struct netdev_hw_addr *ha;
-
memset(mc_filter, 0, sizeof(mc_filter));
netdev_for_each_mc_addr(ha, dev) {
int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
@@ -1711,9 +2003,15 @@ static void rhine_set_rx_mode(struct net_device *dev)
}
iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
- rx_mode = 0x0C;
}
- iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
+ /* enable/disable VLAN receive filtering */
+ if (rp->pdev->revision >= VT6105M) {
+ if (dev->flags & IFF_PROMISC)
+ BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+ else
+ BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+ }
+ BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1966,7 +2264,7 @@ static int rhine_resume(struct pci_dev *pdev)
if (!netif_running(dev))
return 0;
- if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
+ if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
ret = pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index cab96ad49e60..0d6fec6b7d93 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -898,7 +898,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
set_mii_flow_control(vptr);
/*
- Check if new status is consisent with current status
+ Check if new status is consistent with current status
if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
(mii_status==curr_status)) {
vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
@@ -2923,6 +2923,7 @@ static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
static int velocity_set_wol(struct velocity_info *vptr)
{
struct mac_regs __iomem *regs = vptr->mac_regs;
+ enum speed_opt spd_dpx = vptr->options.spd_dpx;
static u8 buf[256];
int i;
@@ -2968,6 +2969,12 @@ static int velocity_set_wol(struct velocity_info *vptr)
writew(0x0FFF, &regs->WOLSRClr);
+ if (spd_dpx == SPD_DPX_1000_FULL)
+ goto mac_done;
+
+ if (spd_dpx != SPD_DPX_AUTO)
+ goto advertise_done;
+
if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
@@ -2978,6 +2985,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
if (vptr->mii_status & VELOCITY_SPEED_1000)
MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
+advertise_done:
BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
{
@@ -2987,6 +2995,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
writeb(GCR, &regs->CHIPGCR);
}
+mac_done:
BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
/* Turn on SWPTAG just before entering power mode */
BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index aa2e69b9ff61..d7227539484e 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -361,7 +361,7 @@ enum velocity_owner {
#define MAC_REG_CHIPGSR 0x9C
#define MAC_REG_TESTCFG 0x9D
#define MAC_REG_DEBUG 0x9E
-#define MAC_REG_CHIPGCR 0x9F
+#define MAC_REG_CHIPGCR 0x9F /* Chip Operation and Diagnostic Control */
#define MAC_REG_WOLCR0_SET 0xA0
#define MAC_REG_WOLCR1_SET 0xA1
#define MAC_REG_PWCFG_SET 0xA2
@@ -848,10 +848,10 @@ enum velocity_owner {
* Bits in CHIPGCR register
*/
-#define CHIPGCR_FCGMII 0x80 /* enable GMII mode */
-#define CHIPGCR_FCFDX 0x40
+#define CHIPGCR_FCGMII 0x80 /* force GMII (else MII only) */
+#define CHIPGCR_FCFDX 0x40 /* force full duplex */
#define CHIPGCR_FCRESV 0x20
-#define CHIPGCR_FCMODE 0x10
+#define CHIPGCR_FCMODE 0x10 /* enable MAC forced mode */
#define CHIPGCR_LPSOPT 0x08
#define CHIPGCR_TM1US 0x04
#define CHIPGCR_TM0US 0x02
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b6d402806ae6..82dba5aaf423 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq)
}
}
+static void virtnet_napi_enable(struct virtnet_info *vi)
+{
+ napi_enable(&vi->napi);
+
+ /* If all buffers were filled by other side before we napi_enabled, we
+ * won't get another interrupt, so process any outstanding packets
+ * now. virtnet_poll wants to re-enable the queue, so we disable here.
+ * We synchronize against interrupts via NAPI_STATE_SCHED */
+ if (napi_schedule_prep(&vi->napi)) {
+ virtqueue_disable_cb(vi->rvq);
+ __napi_schedule(&vi->napi);
+ }
+}
+
static void refill_work(struct work_struct *work)
{
struct virtnet_info *vi;
@@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work)
vi = container_of(work, struct virtnet_info, refill.work);
napi_disable(&vi->napi);
still_empty = !try_fill_recv(vi, GFP_KERNEL);
- napi_enable(&vi->napi);
+ virtnet_napi_enable(vi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
@@ -519,7 +533,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
if (skb->ip_summed == CHECKSUM_PARTIAL) {
hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
+ hdr->hdr.csum_start = skb_checksum_start_offset(skb);
hdr->hdr.csum_offset = skb->csum_offset;
} else {
hdr->hdr.flags = 0;
@@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
- napi_enable(&vi->napi);
-
- /* If all buffers were filled by other side before we napi_enabled, we
- * won't get another interrupt, so process any outstanding packets
- * now. virtnet_poll wants re-enable the queue, so we disable here.
- * We synchronize against interrupts via NAPI_STATE_SCHED */
- if (napi_schedule_prep(&vi->napi)) {
- virtqueue_disable_cb(vi->rvq);
- __napi_schedule(&vi->napi);
- }
+ virtnet_napi_enable(vi);
return 0;
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 21314e06e6d7..cc14b4a75048 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -44,6 +44,12 @@ MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
static atomic_t devices_found;
+#define VMXNET3_MAX_DEVICES 10
+static int enable_mq = 1;
+static int irq_share_mode;
+
+static void
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
/*
* Enable/Disable the given intr
@@ -99,7 +105,7 @@ vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
- return netif_queue_stopped(adapter->netdev);
+ return tq->stopped;
}
@@ -107,7 +113,7 @@ static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = false;
- netif_start_queue(adapter->netdev);
+ netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}
@@ -115,7 +121,7 @@ static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = false;
- netif_wake_queue(adapter->netdev);
+ netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
@@ -124,7 +130,7 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = true;
tq->num_stop++;
- netif_stop_queue(adapter->netdev);
+ netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
@@ -135,9 +141,14 @@ static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
u32 ret;
+ int i;
+ unsigned long flags;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
adapter->link_speed = ret >> 16;
if (ret & 1) { /* Link is up. */
printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
@@ -145,22 +156,28 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
if (!netif_carrier_ok(adapter->netdev))
netif_carrier_on(adapter->netdev);
- if (affectTxQueue)
- vmxnet3_tq_start(&adapter->tx_queue, adapter);
+ if (affectTxQueue) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_start(&adapter->tx_queue[i],
+ adapter);
+ }
} else {
printk(KERN_INFO "%s: NIC Link is Down\n",
adapter->netdev->name);
if (netif_carrier_ok(adapter->netdev))
netif_carrier_off(adapter->netdev);
- if (affectTxQueue)
- vmxnet3_tq_stop(&adapter->tx_queue, adapter);
+ if (affectTxQueue) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
+ }
}
}
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
+ int i;
u32 events = le32_to_cpu(adapter->shared->ecr);
if (!events)
return;
@@ -173,19 +190,23 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+ spin_lock(&adapter->cmd_lock);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_QUEUE_STATUS);
-
- if (adapter->tqd_start->status.stopped) {
- printk(KERN_ERR "%s: tq error 0x%x\n",
- adapter->netdev->name,
- le32_to_cpu(adapter->tqd_start->status.error));
- }
- if (adapter->rqd_start->status.stopped) {
- printk(KERN_ERR "%s: rq error 0x%x\n",
- adapter->netdev->name,
- adapter->rqd_start->status.error);
- }
+ spin_unlock(&adapter->cmd_lock);
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tqd_start[i].status.stopped)
+ dev_err(&adapter->netdev->dev,
+ "%s: tq[%d] error 0x%x\n",
+ adapter->netdev->name, i, le32_to_cpu(
+ adapter->tqd_start[i].status.error));
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rqd_start[i].status.stopped)
+ dev_err(&adapter->netdev->dev,
+ "%s: rq[%d] error 0x%x\n",
+ adapter->netdev->name, i,
+ adapter->rqd_start[i].status.error);
schedule_work(&adapter->work);
}
@@ -410,7 +431,7 @@ vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
}
-void
+static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
@@ -437,6 +458,17 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
}
+/* Destroy all tx queues */
+void
+vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
+}
+
+
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
@@ -518,6 +550,14 @@ err:
return -ENOMEM;
}
+static void
+vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
+}
/*
* starting from ring->next2fill, allocate rx buffers for the given ring
@@ -732,6 +772,17 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
}
+/* Init all tx queues */
+static void
+vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
+}
+
+
/*
* parse and copy relevant protocol headers:
* For a tso pkt, relevant headers are L2/3/4 including options
@@ -756,36 +807,31 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
{
struct Vmxnet3_TxDataDesc *tdd;
- if (ctx->mss) {
+ if (ctx->mss) { /* TSO */
ctx->eth_ip_hdr_size = skb_transport_offset(skb);
ctx->l4_hdr_size = ((struct tcphdr *)
skb_transport_header(skb))->doff * 4;
ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
} else {
- unsigned int pull_size;
-
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- ctx->eth_ip_hdr_size = skb_transport_offset(skb);
+ ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
if (ctx->ipv4) {
struct iphdr *iph = (struct iphdr *)
skb_network_header(skb);
- if (iph->protocol == IPPROTO_TCP) {
- pull_size = ctx->eth_ip_hdr_size +
- sizeof(struct tcphdr);
-
- if (unlikely(!pskb_may_pull(skb,
- pull_size))) {
- goto err;
- }
+ if (iph->protocol == IPPROTO_TCP)
ctx->l4_hdr_size = ((struct tcphdr *)
skb_transport_header(skb))->doff * 4;
- } else if (iph->protocol == IPPROTO_UDP) {
+ else if (iph->protocol == IPPROTO_UDP)
+ /*
+ * Use tcp header size so that bytes to
+ * be copied are more than required by
+ * the device.
+ */
ctx->l4_hdr_size =
- sizeof(struct udphdr);
- } else {
+ sizeof(struct tcphdr);
+ else
ctx->l4_hdr_size = 0;
- }
} else {
/* for simplicity, don't copy L4 headers */
ctx->l4_hdr_size = 0;
@@ -903,6 +949,21 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
}
}
+ spin_lock_irqsave(&tq->tx_lock, flags);
+
+ if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
+ tq->stats.tx_ring_full++;
+ dev_dbg(&adapter->netdev->dev,
+ "tx queue stopped on %s, next2comp %u"
+ " next2fill %u\n", adapter->netdev->name,
+ tq->tx_ring.next2comp, tq->tx_ring.next2fill);
+
+ vmxnet3_tq_stop(tq, adapter);
+ spin_unlock_irqrestore(&tq->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+
ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
if (ret >= 0) {
BUG_ON(ret <= 0 && ctx.copy_size != 0);
@@ -923,21 +984,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
}
} else {
tq->stats.drop_hdr_inspect_err++;
- goto drop_pkt;
- }
-
- spin_lock_irqsave(&tq->tx_lock, flags);
-
- if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
- tq->stats.tx_ring_full++;
- dev_dbg(&adapter->netdev->dev,
- "tx queue stopped on %s, next2comp %u"
- " next2fill %u\n", adapter->netdev->name,
- tq->tx_ring.next2comp, tq->tx_ring.next2fill);
-
- vmxnet3_tq_stop(tq, adapter);
- spin_unlock_irqrestore(&tq->tx_lock, flags);
- return NETDEV_TX_BUSY;
+ goto unlock_drop_pkt;
}
/* fill tx descs related to addr & len */
@@ -1000,7 +1047,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
if (le32_to_cpu(tq->shared->txNumDeferred) >=
le32_to_cpu(tq->shared->txThreshold)) {
tq->shared->txNumDeferred = 0;
- VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ VMXNET3_REG_TXPROD + tq->qid * 8,
tq->tx_ring.next2fill);
}
@@ -1008,6 +1056,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
hdr_too_big:
tq->stats.drop_oversized_hdr++;
+unlock_drop_pkt:
+ spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
tq->stats.drop_total++;
dev_kfree_skb(skb);
@@ -1020,7 +1070,10 @@ vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
+ BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
+ return vmxnet3_tq_xmit(skb,
+ &adapter->tx_queue[skb->queue_mapping],
+ adapter, netdev);
}
@@ -1082,7 +1135,9 @@ static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter, int quota)
{
- static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
+ static const u32 rxprod_reg[2] = {
+ VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
+ };
u32 num_rxd = 0;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
@@ -1106,9 +1161,9 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
break;
}
num_rxd++;
-
+ BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
idx = rcd->rxdIdx;
- ring_idx = rcd->rqID == rq->qid ? 0 : 1;
+ ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
&rxCmdDesc);
rbi = rq->buf_info[ring_idx] + idx;
@@ -1260,6 +1315,16 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
}
+static void
+vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
+}
+
+
void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter)
{
@@ -1351,6 +1416,25 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
static int
+vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
+ if (unlikely(err)) {
+ dev_err(&adapter->netdev->dev, "%s: failed to "
+ "initialize rx queue%i\n",
+ adapter->netdev->name, i);
+ break;
+ }
+ }
+ return err;
+
+}
+
+
+static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
int i;
@@ -1398,32 +1482,176 @@ err:
static int
+vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
+ if (unlikely(err)) {
+ dev_err(&adapter->netdev->dev,
+ "%s: failed to create rx queue%i\n",
+ adapter->netdev->name, i);
+ goto err_out;
+ }
+ }
+ return err;
+err_out:
+ vmxnet3_rq_destroy_all(adapter);
+ return err;
+
+}
+
+/* Multiple queue aware polling function for tx and rx */
+
+static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
+ int rcd_done = 0, i;
if (unlikely(adapter->shared->ecr))
vmxnet3_process_events(adapter);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
- vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
- return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
+ adapter, budget);
+ return rcd_done;
}
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
- struct vmxnet3_adapter *adapter = container_of(napi,
- struct vmxnet3_adapter, napi);
+ struct vmxnet3_rx_queue *rx_queue = container_of(napi,
+ struct vmxnet3_rx_queue, napi);
int rxd_done;
- rxd_done = vmxnet3_do_poll(adapter, budget);
+ rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
if (rxd_done < budget) {
napi_complete(napi);
- vmxnet3_enable_intr(adapter, 0);
+ vmxnet3_enable_all_intrs(rx_queue->adapter);
}
return rxd_done;
}
+/*
+ * NAPI polling function for MSI-X mode with multiple Rx queues
+ * Returns the number of NAPI credits consumed (# of rx descriptors processed)
+ */
+
+static int
+vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
+{
+ struct vmxnet3_rx_queue *rq = container_of(napi,
+ struct vmxnet3_rx_queue, napi);
+ struct vmxnet3_adapter *adapter = rq->adapter;
+ int rxd_done;
+
+ /* When sharing interrupt with corresponding tx queue, process
+ * tx completions in that queue as well
+ */
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
+ struct vmxnet3_tx_queue *tq =
+ &adapter->tx_queue[rq - adapter->rx_queue];
+ vmxnet3_tq_tx_complete(tq, adapter);
+ }
+
+ rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
+
+ if (rxd_done < budget) {
+ napi_complete(napi);
+ vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
+ }
+ return rxd_done;
+}
+
+
+#ifdef CONFIG_PCI_MSI
+
+/*
+ * Handle completion interrupts on tx queues
+ * Returns whether or not the intr is handled
+ */
+
+static irqreturn_t
+vmxnet3_msix_tx(int irq, void *data)
+{
+ struct vmxnet3_tx_queue *tq = data;
+ struct vmxnet3_adapter *adapter = tq->adapter;
+
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
+
+ /* Handle the case where only one irq is allocated for all tx queues */
+ if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
+ int i;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
+ vmxnet3_tq_tx_complete(txq, adapter);
+ }
+ } else {
+ vmxnet3_tq_tx_complete(tq, adapter);
+ }
+ vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
+
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * Handle completion interrupts on rx queues. Returns whether or not the
+ * intr is handled
+ */
+
+static irqreturn_t
+vmxnet3_msix_rx(int irq, void *data)
+{
+ struct vmxnet3_rx_queue *rq = data;
+ struct vmxnet3_adapter *adapter = rq->adapter;
+
+ /* disable intr if needed */
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
+ napi_schedule(&rq->napi);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ *
+ * vmxnet3_msix_event --
+ *
+ * vmxnet3 msix event intr handler
+ *
+ * Result:
+ * whether or not the intr is handled
+ *
+ *----------------------------------------------------------------------------
+ */
+
+static irqreturn_t
+vmxnet3_msix_event(int irq, void *data)
+{
+ struct net_device *dev = data;
+ struct vmxnet3_adapter *adapter = netdev_priv(dev);
+
+ /* disable intr if needed */
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
+
+ if (adapter->shared->ecr)
+ vmxnet3_process_events(adapter);
+
+ vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
+
+ return IRQ_HANDLED;
+}
+
+#endif /* CONFIG_PCI_MSI */
+
/* Interrupt handler for vmxnet3 */
static irqreturn_t
@@ -1432,7 +1660,7 @@ vmxnet3_intr(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct vmxnet3_adapter *adapter = netdev_priv(dev);
- if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
+ if (adapter->intr.type == VMXNET3_IT_INTX) {
u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
if (unlikely(icr == 0))
/* not ours */
@@ -1442,77 +1670,144 @@ vmxnet3_intr(int irq, void *dev_id)
/* disable intr if needed */
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
- vmxnet3_disable_intr(adapter, 0);
+ vmxnet3_disable_all_intrs(adapter);
- napi_schedule(&adapter->napi);
+ napi_schedule(&adapter->rx_queue[0].napi);
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-
/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- int irq;
-#ifdef CONFIG_PCI_MSI
- if (adapter->intr.type == VMXNET3_IT_MSIX)
- irq = adapter->intr.msix_entries[0].vector;
- else
-#endif
- irq = adapter->pdev->irq;
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_all_intrs(adapter);
+
+ vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
+ vmxnet3_enable_all_intrs(adapter);
- disable_irq(irq);
- vmxnet3_intr(irq, netdev);
- enable_irq(irq);
}
-#endif
+#endif /* CONFIG_NET_POLL_CONTROLLER */
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
- int err;
+ struct vmxnet3_intr *intr = &adapter->intr;
+ int err = 0, i;
+ int vector = 0;
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
- /* we only use 1 MSI-X vector */
- err = request_irq(adapter->intr.msix_entries[0].vector,
- vmxnet3_intr, 0, adapter->netdev->name,
- adapter->netdev);
- } else if (adapter->intr.type == VMXNET3_IT_MSI) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
+ sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
+ adapter->netdev->name, vector);
+ err = request_irq(
+ intr->msix_entries[vector].vector,
+ vmxnet3_msix_tx, 0,
+ adapter->tx_queue[i].name,
+ &adapter->tx_queue[i]);
+ } else {
+ sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
+ adapter->netdev->name, vector);
+ }
+ if (err) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to request irq for MSIX, %s, "
+ "error %d\n",
+ adapter->tx_queue[i].name, err);
+ return err;
+ }
+
+ /* Handle the case where only 1 MSIx was allocated for
+ * all tx queues */
+ if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
+ for (; i < adapter->num_tx_queues; i++)
+ adapter->tx_queue[i].comp_ring.intr_idx
+ = vector;
+ vector++;
+ break;
+ } else {
+ adapter->tx_queue[i].comp_ring.intr_idx
+ = vector++;
+ }
+ }
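+ /* In buddy-share mode each rx queue reuses the MSI-X vector of its
+ * corresponding tx queue, so restart vector numbering for the rx
+ * loop below.
+ */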
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
+ vector = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
+ sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
+ adapter->netdev->name, vector);
+ else
+ sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
+ adapter->netdev->name, vector);
+ err = request_irq(intr->msix_entries[vector].vector,
+ vmxnet3_msix_rx, 0,
+ adapter->rx_queue[i].name,
+ &(adapter->rx_queue[i]));
+ if (err) {
+ printk(KERN_ERR "Failed to request irq for MSIX"
+ ", %s, error %d\n",
+ adapter->rx_queue[i].name, err);
+ return err;
+ }
+
+ adapter->rx_queue[i].comp_ring.intr_idx = vector++;
+ }
+
+ sprintf(intr->event_msi_vector_name, "%s-event-%d",
+ adapter->netdev->name, vector);
+ err = request_irq(intr->msix_entries[vector].vector,
+ vmxnet3_msix_event, 0,
+ intr->event_msi_vector_name, adapter->netdev);
+ intr->event_intr_idx = vector;
+
+ } else if (intr->type == VMXNET3_IT_MSI) {
+ adapter->num_rx_queues = 1;
err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
adapter->netdev->name, adapter->netdev);
- } else
+ } else {
#endif
- {
+ adapter->num_rx_queues = 1;
err = request_irq(adapter->pdev->irq, vmxnet3_intr,
IRQF_SHARED, adapter->netdev->name,
adapter->netdev);
+#ifdef CONFIG_PCI_MSI
}
-
- if (err)
+#endif
+ intr->num_intrs = vector + 1;
+ if (err) {
printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
- ":%d\n", adapter->netdev->name, adapter->intr.type, err);
+ ":%d\n", adapter->netdev->name, intr->type, err);
+ } else {
+ /* Number of rx queues will not change after this */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+ rq->qid = i;
+ rq->qid2 = i + adapter->num_rx_queues;
+ }
- if (!err) {
- int i;
- /* init our intr settings */
- for (i = 0; i < adapter->intr.num_intrs; i++)
- adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
- /* next setup intr index for all intr sources */
- adapter->tx_queue.comp_ring.intr_idx = 0;
- adapter->rx_queue.comp_ring.intr_idx = 0;
- adapter->intr.event_intr_idx = 0;
+ /* init our intr settings */
+ for (i = 0; i < intr->num_intrs; i++)
+ intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
+ if (adapter->intr.type != VMXNET3_IT_MSIX) {
+ adapter->intr.event_intr_idx = 0;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_queue[i].comp_ring.intr_idx = 0;
+ adapter->rx_queue[0].comp_ring.intr_idx = 0;
+ }
printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
- "allocated\n", adapter->netdev->name, adapter->intr.type,
- adapter->intr.mask_mode, adapter->intr.num_intrs);
+ "allocated\n", adapter->netdev->name, intr->type,
+ intr->mask_mode, intr->num_intrs);
}
return err;
@@ -1522,18 +1817,32 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
- BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
- adapter->intr.num_intrs <= 0);
+ struct vmxnet3_intr *intr = &adapter->intr;
+ BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
- switch (adapter->intr.type) {
+ switch (intr->type) {
#ifdef CONFIG_PCI_MSI
case VMXNET3_IT_MSIX:
{
- int i;
+ int i, vector = 0;
+
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ free_irq(intr->msix_entries[vector++].vector,
+ &(adapter->tx_queue[i]));
+ if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
+ break;
+ }
+ }
- for (i = 0; i < adapter->intr.num_intrs; i++)
- free_irq(adapter->intr.msix_entries[i].vector,
- adapter->netdev);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ free_irq(intr->msix_entries[vector++].vector,
+ &(adapter->rx_queue[i]));
+ }
+
+ free_irq(intr->msix_entries[vector].vector,
+ adapter->netdev);
+ BUG_ON(vector >= intr->num_intrs);
break;
}
#endif
@@ -1554,18 +1863,14 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct Vmxnet3_DriverShared *shared = adapter->shared;
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
if (grp) {
/* add vlan rx stripping. */
if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
int i;
- struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
adapter->vlan_grp = grp;
- /* update FEATURES to device */
- devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_FEATURE);
/*
* Clear entire vfTable; then enable untagged pkts.
* Note: setting one entry in vfTable to non-zero turns
@@ -1575,8 +1880,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
vfTable[i] = 0;
VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
} else {
printk(KERN_ERR "%s: vlan_rx_register when device has "
"no NETIF_F_HW_VLAN_RX\n", netdev->name);
@@ -1595,13 +1902,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
*/
vfTable[i] = 0;
}
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
-
- /* update FEATURES to device */
- devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_FEATURE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
}
}
@@ -1634,10 +1938,13 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
@@ -1646,10 +1953,13 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
@@ -1680,6 +1990,7 @@ static void
vmxnet3_set_mc(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
struct Vmxnet3_RxFilterConf *rxConf =
&adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL;
@@ -1715,6 +2026,7 @@ vmxnet3_set_mc(struct net_device *netdev)
rxConf->mfTablePA = 0;
}
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
if (new_mode != rxConf->rxMode) {
rxConf->rxMode = cpu_to_le32(new_mode);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -1723,10 +2035,20 @@ vmxnet3_set_mc(struct net_device *netdev)
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_MAC_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
kfree(new_table);
}
+void
+vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
+}
+
/*
* Set up driver_shared based on settings in adapter.
@@ -1766,48 +2088,78 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
devRead->misc.uptFeatures |= UPT1_F_LRO;
devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
}
- if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
- adapter->vlan_grp) {
+ if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
- }
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
devRead->misc.queueDescLen = cpu_to_le32(
- sizeof(struct Vmxnet3_TxQueueDesc) +
- sizeof(struct Vmxnet3_RxQueueDesc));
+ adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
+ adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
/* tx queue settings */
- BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
-
- devRead->misc.numTxQueues = 1;
- tqc = &adapter->tqd_start->conf;
- tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
- tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
- tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
- tqc->ddPA = cpu_to_le64(virt_to_phys(
- adapter->tx_queue.buf_info));
- tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size);
- tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size);
- tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size);
- tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
- tqc->txRingSize);
- tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;
+ devRead->misc.numTxQueues = adapter->num_tx_queues;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
+ BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
+ tqc = &adapter->tqd_start[i].conf;
+ tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
+ tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
+ tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
+ tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
+ tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
+ tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
+ tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
+ tqc->ddLen = cpu_to_le32(
+ sizeof(struct vmxnet3_tx_buf_info) *
+ tqc->txRingSize);
+ tqc->intrIdx = tq->comp_ring.intr_idx;
+ }
/* rx queue settings */
- devRead->misc.numRxQueues = 1;
- rqc = &adapter->rqd_start->conf;
- rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
- rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
- rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
- rqc->ddPA = cpu_to_le64(virt_to_phys(
- adapter->rx_queue.buf_info));
- rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
- rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
- rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size);
- rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
- (rqc->rxRingSize[0] + rqc->rxRingSize[1]));
- rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;
+ devRead->misc.numRxQueues = adapter->num_rx_queues;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+ rqc = &adapter->rqd_start[i].conf;
+ rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
+ rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
+ rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
+ rqc->ddPA = cpu_to_le64(virt_to_phys(
+ rq->buf_info));
+ rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
+ rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
+ rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
+ rqc->ddLen = cpu_to_le32(
+ sizeof(struct vmxnet3_rx_buf_info) *
+ (rqc->rxRingSize[0] +
+ rqc->rxRingSize[1]));
+ rqc->intrIdx = rq->comp_ring.intr_idx;
+ }
+
+#ifdef VMXNET3_RSS
+ memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
+
+ if (adapter->rss) {
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+ devRead->misc.uptFeatures |= UPT1_F_RSS;
+ devRead->misc.numRxQueues = adapter->num_rx_queues;
+ rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
+ UPT1_RSS_HASH_TYPE_IPV4 |
+ UPT1_RSS_HASH_TYPE_TCP_IPV6 |
+ UPT1_RSS_HASH_TYPE_IPV6;
+ rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
+ rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
+ rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
+ get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
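+ /* Spread the indirection table entries round-robin across all rx queues */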
+ for (i = 0; i < rssConf->indTableSize; i++)
+ rssConf->indTable[i] = i % adapter->num_rx_queues;
+
+ devRead->rssConfDesc.confVer = 1;
+ devRead->rssConfDesc.confLen = sizeof(*rssConf);
+ devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
+ }
+
+#endif /* VMXNET3_RSS */
/* intr settings */
devRead->intrConf.autoMask = adapter->intr.mask_mode ==
@@ -1822,6 +2174,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
/* rx filter settings */
devRead->rxFilterConf.rxMode = 0;
vmxnet3_restore_vlan(adapter);
+ vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
+
/* the rest are already zeroed */
}
@@ -1829,18 +2183,19 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
- int err;
+ int err, i;
u32 ret;
+ unsigned long flags;
- dev_dbg(&adapter->netdev->dev,
- "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
- " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
- adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
- adapter->rx_queue.rx_ring[0].size,
- adapter->rx_queue.rx_ring[1].size);
-
- vmxnet3_tq_init(&adapter->tx_queue, adapter);
- err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
+ dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
+ " ring sizes %u %u %u\n", adapter->netdev->name,
+ adapter->skb_buf_size, adapter->rx_buf_per_pkt,
+ adapter->tx_queue[0].tx_ring.size,
+ adapter->rx_queue[0].rx_ring[0].size,
+ adapter->rx_queue[0].rx_ring[1].size);
+
+ vmxnet3_tq_init_all(adapter);
+ err = vmxnet3_rq_init_all(adapter);
if (err) {
printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
adapter->netdev->name, err);
@@ -1860,9 +2215,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
adapter->shared_pa));
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
adapter->shared_pa));
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_ACTIVATE_DEV);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (ret != 0) {
printk(KERN_ERR "Failed to activate dev %s: error %u\n",
@@ -1870,10 +2227,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
err = -EINVAL;
goto activate_err;
}
- VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
- adapter->rx_queue.rx_ring[0].next2fill);
- VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
- adapter->rx_queue.rx_ring[1].next2fill);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
+ adapter->rx_queue[i].rx_ring[0].next2fill);
+ VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
+ (i * VMXNET3_REG_ALIGN)),
+ adapter->rx_queue[i].rx_ring[1].next2fill);
+ }
 /* Apply the rx filter settings last. */
vmxnet3_set_mc(adapter->netdev);
@@ -1883,8 +2245,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
* tx queue if the link is up.
*/
vmxnet3_check_link(adapter, true);
-
- napi_enable(&adapter->napi);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_enable(&adapter->rx_queue[i].napi);
vmxnet3_enable_all_intrs(adapter);
clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
return 0;
@@ -1896,7 +2258,7 @@ activate_err:
irq_err:
rq_err:
/* free up buffers we allocated */
- vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
+ vmxnet3_rq_cleanup_all(adapter);
return err;
}
@@ -1904,28 +2266,36 @@ rq_err:
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
+ unsigned long flags;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
+ int i;
+ unsigned long flags;
if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
return 0;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_QUIESCE_DEV);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
vmxnet3_disable_all_intrs(adapter);
- napi_disable(&adapter->napi);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_disable(&adapter->rx_queue[i].napi);
netif_tx_disable(adapter->netdev);
adapter->link_speed = 0;
netif_carrier_off(adapter->netdev);
- vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
- vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
+ vmxnet3_tq_cleanup_all(adapter);
+ vmxnet3_rq_cleanup_all(adapter);
vmxnet3_free_irqs(adapter);
return 0;
}
@@ -2047,7 +2417,9 @@ vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
- size_t sz;
+ size_t sz, i, ring0_size, ring1_size, comp_size;
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
+
if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
VMXNET3_MAX_ETH_HDR_SIZE) {
@@ -2069,11 +2441,19 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
* rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
*/
sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
- adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
- sz - 1) / sz * sz;
- adapter->rx_queue.rx_ring[0].size = min_t(u32,
- adapter->rx_queue.rx_ring[0].size,
- VMXNET3_RX_RING_MAX_SIZE / sz * sz);
+ ring0_size = adapter->rx_queue[0].rx_ring[0].size;
+ ring0_size = (ring0_size + sz - 1) / sz * sz;
+ ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
+ sz * sz);
+ ring1_size = adapter->rx_queue[0].rx_ring[1].size;
+ comp_size = ring0_size + ring1_size;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rq = &adapter->rx_queue[i];
+ rq->rx_ring[0].size = ring0_size;
+ rq->rx_ring[1].size = ring1_size;
+ rq->comp_ring.size = comp_size;
+ }
}
@@ -2081,29 +2461,53 @@ int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
u32 rx_ring_size, u32 rx_ring2_size)
{
- int err;
-
- adapter->tx_queue.tx_ring.size = tx_ring_size;
- adapter->tx_queue.data_ring.size = tx_ring_size;
- adapter->tx_queue.comp_ring.size = tx_ring_size;
- adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
- adapter->tx_queue.stopped = true;
- err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
- if (err)
- return err;
+ int err = 0, i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
+ tq->tx_ring.size = tx_ring_size;
+ tq->data_ring.size = tx_ring_size;
+ tq->comp_ring.size = tx_ring_size;
+ tq->shared = &adapter->tqd_start[i].ctrl;
+ tq->stopped = true;
+ tq->adapter = adapter;
+ tq->qid = i;
+ err = vmxnet3_tq_create(tq, adapter);
+ /*
+ * Too late to change num_tx_queues. We cannot make do with
+ * fewer queues than what we asked for
+ */
+ if (err)
+ goto queue_err;
+ }
- adapter->rx_queue.rx_ring[0].size = rx_ring_size;
- adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
+ adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
+ adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
vmxnet3_adjust_rx_ring_size(adapter);
- adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size +
- adapter->rx_queue.rx_ring[1].size;
- adapter->rx_queue.qid = 0;
- adapter->rx_queue.qid2 = 1;
- adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
- err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
- if (err)
- vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
-
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+ /* qid and qid2 for rx queues will be assigned later when num
+ * of rx queues is finalized after allocating intrs */
+ rq->shared = &adapter->rqd_start[i].ctrl;
+ rq->adapter = adapter;
+ err = vmxnet3_rq_create(rq, adapter);
+ if (err) {
+ if (i == 0) {
+ printk(KERN_ERR "Could not allocate any rx "
+ "queues. Aborting.\n");
+ goto queue_err;
+ } else {
+ printk(KERN_INFO "Number of rx queues changed "
+ "to : %d.\n", i);
+ adapter->num_rx_queues = i;
+ err = 0;
+ break;
+ }
+ }
+ }
+ return err;
+queue_err:
+ vmxnet3_tq_destroy_all(adapter);
return err;
}
@@ -2111,11 +2515,12 @@ static int
vmxnet3_open(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter;
- int err;
+ int err, i;
adapter = netdev_priv(netdev);
- spin_lock_init(&adapter->tx_queue.tx_lock);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ spin_lock_init(&adapter->tx_queue[i].tx_lock);
err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
VMXNET3_DEF_RX_RING_SIZE,
@@ -2130,8 +2535,8 @@ vmxnet3_open(struct net_device *netdev)
return 0;
activate_err:
- vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
- vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
+ vmxnet3_rq_destroy_all(adapter);
+ vmxnet3_tq_destroy_all(adapter);
queue_err:
return err;
}
@@ -2151,8 +2556,8 @@ vmxnet3_close(struct net_device *netdev)
vmxnet3_quiesce_dev(adapter);
- vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
- vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
+ vmxnet3_rq_destroy_all(adapter);
+ vmxnet3_tq_destroy_all(adapter);
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
@@ -2164,6 +2569,8 @@ vmxnet3_close(struct net_device *netdev)
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
+ int i;
+
/*
* we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
* vmxnet3_close() will deadlock.
@@ -2171,7 +2578,8 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
/* we need to enable NAPI, otherwise dev_close will deadlock */
- napi_enable(&adapter->napi);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_enable(&adapter->rx_queue[i].napi);
dev_close(adapter->netdev);
}
@@ -2202,14 +2610,11 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
vmxnet3_reset_dev(adapter);
/* we need to re-create the rx queue based on the new mtu */
- vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
+ vmxnet3_rq_destroy_all(adapter);
vmxnet3_adjust_rx_ring_size(adapter);
- adapter->rx_queue.comp_ring.size =
- adapter->rx_queue.rx_ring[0].size +
- adapter->rx_queue.rx_ring[1].size;
- err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
+ err = vmxnet3_rq_create_all(adapter);
if (err) {
- printk(KERN_ERR "%s: failed to re-create rx queue,"
+ printk(KERN_ERR "%s: failed to re-create rx queues,"
" error %d. Closing it.\n", netdev->name, err);
goto out;
}
@@ -2274,6 +2679,55 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
mac[5] = (tmp >> 8) & 0xff;
}
+#ifdef CONFIG_PCI_MSI
+
+/*
+ * Enable MSIx vectors.
+ * Returns :
+ * 0 on successful enabling of required vectors,
+ * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
+ * vectors could be enabled.
+ * Otherwise, the number of vectors which can be enabled (this number is
+ * smaller than VMXNET3_LINUX_MIN_MSIX_VECT)
+ */
+
+static int
+vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
+ int vectors)
+{
+ int err = 0, vector_threshold;
+ vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
+
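+ /* A positive return from pci_enable_msix() is the number of vectors
+ * that could have been allocated, rather than the number requested.
+ */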
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
+ vectors);
+ if (!err) {
+ adapter->intr.num_intrs = vectors;
+ return 0;
+ } else if (err < 0) {
+ printk(KERN_ERR "Failed to enable MSI-X for %s, error"
+ " %d\n", adapter->netdev->name, err);
+ vectors = 0;
+ } else if (err < vector_threshold) {
+ break;
+ } else {
+ /* If it fails to enable the required number of MSI-X
+ * vectors, try enabling the minimum number required.
+ */
+ printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
+ " %d instead\n", vectors, adapter->netdev->name,
+ vector_threshold);
+ vectors = vector_threshold;
+ }
+ }
+
+ printk(KERN_INFO "Number of MSI-X interrupts which can be allocated"
+ " is lower than min threshold required.\n");
+ return err;
+}
+
+
+#endif /* CONFIG_PCI_MSI */
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
@@ -2281,9 +2735,11 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
u32 cfg;
/* intr settings */
+ spin_lock(&adapter->cmd_lock);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_CONF_INTR);
cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock(&adapter->cmd_lock);
adapter->intr.type = cfg & 0x3;
adapter->intr.mask_mode = (cfg >> 2) & 0x3;
@@ -2293,16 +2749,47 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
- int err;
-
- adapter->intr.msix_entries[0].entry = 0;
- err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
- VMXNET3_LINUX_MAX_MSIX_VECT);
- if (!err) {
- adapter->intr.num_intrs = 1;
- adapter->intr.type = VMXNET3_IT_MSIX;
+ int vector, err = 0;
+
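+ /* Vector budget: one per tx queue (or one shared vector with
+ * VMXNET3_INTR_TXSHARE), one per rx queue unless rx queues buddy-share
+ * vectors with their tx counterparts, plus one for events.
+ */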
+ adapter->intr.num_intrs = (adapter->share_intr ==
+ VMXNET3_INTR_TXSHARE) ? 1 :
+ adapter->num_tx_queues;
+ adapter->intr.num_intrs += (adapter->share_intr ==
+ VMXNET3_INTR_BUDDYSHARE) ? 0 :
+ adapter->num_rx_queues;
+ adapter->intr.num_intrs += 1; /* for link event */
+
+ adapter->intr.num_intrs = (adapter->intr.num_intrs >
+ VMXNET3_LINUX_MIN_MSIX_VECT
+ ? adapter->intr.num_intrs :
+ VMXNET3_LINUX_MIN_MSIX_VECT);
+
+ for (vector = 0; vector < adapter->intr.num_intrs; vector++)
+ adapter->intr.msix_entries[vector].entry = vector;
+
+ err = vmxnet3_acquire_msix_vectors(adapter,
+ adapter->intr.num_intrs);
+ /* If we cannot allocate one MSIx vector per queue
+ * then limit the number of rx queues to 1
+ */
+ if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
+ if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
+ || adapter->num_rx_queues != 1) {
+ adapter->share_intr = VMXNET3_INTR_TXSHARE;
+ printk(KERN_ERR "Number of rx queues : 1\n");
+ adapter->num_rx_queues = 1;
+ adapter->intr.num_intrs =
+ VMXNET3_LINUX_MIN_MSIX_VECT;
+ }
return;
}
+ if (!err)
+ return;
+
+ /* If we cannot allocate MSIx vectors use only one rx queue */
+ printk(KERN_INFO "Failed to enable MSI-X for %s, error %d."
+ " #rx queues : 1, try MSI\n", adapter->netdev->name, err);
+
adapter->intr.type = VMXNET3_IT_MSI;
}
@@ -2310,12 +2797,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
int err;
err = pci_enable_msi(adapter->pdev);
if (!err) {
+ adapter->num_rx_queues = 1;
adapter->intr.num_intrs = 1;
return;
}
}
#endif /* CONFIG_PCI_MSI */
+ adapter->num_rx_queues = 1;
+ printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
adapter->intr.type = VMXNET3_IT_INTX;
/* INT-X related setting */
@@ -2343,6 +2833,7 @@ vmxnet3_tx_timeout(struct net_device *netdev)
printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
schedule_work(&adapter->work);
+ netif_wake_queue(adapter->netdev);
}
@@ -2399,8 +2890,29 @@ vmxnet3_probe_device(struct pci_dev *pdev,
struct net_device *netdev;
struct vmxnet3_adapter *adapter;
u8 mac[ETH_ALEN];
+ int size;
+ int num_tx_queues;
+ int num_rx_queues;
+
+#ifdef VMXNET3_RSS
+ if (enable_mq)
+ num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
+ (int)num_online_cpus());
+ else
+#endif
+ num_rx_queues = 1;
+
+ if (enable_mq)
+ num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
+ (int)num_online_cpus());
+ else
+ num_tx_queues = 1;
+
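+ /* Allocate a netdev with enough queues for whichever of tx/rx is larger */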
+ netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
+ max(num_tx_queues, num_rx_queues));
+ printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
+ num_tx_queues, num_rx_queues);
- netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
if (!netdev) {
printk(KERN_ERR "Failed to alloc ethernet device for adapter "
"%s\n", pci_name(pdev));
@@ -2412,6 +2924,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->netdev = netdev;
adapter->pdev = pdev;
+ spin_lock_init(&adapter->cmd_lock);
adapter->shared = pci_alloc_consistent(adapter->pdev,
sizeof(struct Vmxnet3_DriverShared),
&adapter->shared_pa);
@@ -2422,9 +2935,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_shared;
}
- adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
- sizeof(struct Vmxnet3_TxQueueDesc) +
- sizeof(struct Vmxnet3_RxQueueDesc),
+ adapter->num_rx_queues = num_rx_queues;
+ adapter->num_tx_queues = num_tx_queues;
+
+ size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
+ size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
+ adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
&adapter->queue_desc_pa);
if (!adapter->tqd_start) {
@@ -2433,8 +2949,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
err = -ENOMEM;
goto err_alloc_queue_desc;
}
- adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
- + 1);
+ adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
+ adapter->num_tx_queues);
adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
if (adapter->pm_conf == NULL) {
@@ -2444,6 +2960,17 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_pm;
}
+#ifdef VMXNET3_RSS
+
+ adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
+ if (adapter->rss_conf == NULL) {
+ printk(KERN_ERR "Failed to allocate memory for %s\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto err_alloc_rss;
+ }
+#endif /* VMXNET3_RSS */
+
err = vmxnet3_alloc_pci_resources(adapter, &dma64);
if (err < 0)
goto err_alloc_pci;
@@ -2471,18 +2998,48 @@ vmxnet3_probe_device(struct pci_dev *pdev,
vmxnet3_declare_features(adapter, dma64);
adapter->dev_number = atomic_read(&devices_found);
+
+ adapter->share_intr = irq_share_mode;
+ if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
+ adapter->num_tx_queues != adapter->num_rx_queues)
+ adapter->share_intr = VMXNET3_INTR_DONTSHARE;
+
vmxnet3_alloc_intr_resources(adapter);
+#ifdef VMXNET3_RSS
+ if (adapter->num_rx_queues > 1 &&
+ adapter->intr.type == VMXNET3_IT_MSIX) {
+ adapter->rss = true;
+ printk(KERN_INFO "RSS is enabled.\n");
+ } else {
+ adapter->rss = false;
+ }
+#endif
+
vmxnet3_read_mac_addr(adapter, mac);
memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev->netdev_ops = &vmxnet3_netdev_ops;
- netdev->watchdog_timeo = 5 * HZ;
vmxnet3_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
INIT_WORK(&adapter->work, vmxnet3_reset_work);
- netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
+ if (adapter->intr.type == VMXNET3_IT_MSIX) {
+ int i;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ netif_napi_add(adapter->netdev,
+ &adapter->rx_queue[i].napi,
+ vmxnet3_poll_rx_only, 64);
+ }
+ } else {
+ netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
+ vmxnet3_poll, 64);
+ }
+
+ netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+ netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
+
SET_NETDEV_DEV(netdev, &pdev->dev);
err = register_netdev(netdev);
@@ -2502,11 +3059,14 @@ err_register:
err_ver:
vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
+#ifdef VMXNET3_RSS
+ kfree(adapter->rss_conf);
+err_alloc_rss:
+#endif
kfree(adapter->pm_conf);
err_alloc_pm:
- pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
- sizeof(struct Vmxnet3_RxQueueDesc),
- adapter->tqd_start, adapter->queue_desc_pa);
+ pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
+ adapter->queue_desc_pa);
err_alloc_queue_desc:
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
@@ -2522,17 +3082,32 @@ vmxnet3_remove_device(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int size = 0;
+ int num_rx_queues;
- flush_scheduled_work();
+#ifdef VMXNET3_RSS
+ if (enable_mq)
+ num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
+ (int)num_online_cpus());
+ else
+#endif
+ num_rx_queues = 1;
+
+ cancel_work_sync(&adapter->work);
unregister_netdev(netdev);
vmxnet3_free_intr_resources(adapter);
vmxnet3_free_pci_resources(adapter);
+#ifdef VMXNET3_RSS
+ kfree(adapter->rss_conf);
+#endif
kfree(adapter->pm_conf);
- pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
- sizeof(struct Vmxnet3_RxQueueDesc),
- adapter->tqd_start, adapter->queue_desc_pa);
+
+ size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
+ size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
+ pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
+ adapter->queue_desc_pa);
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
free_netdev(netdev);
@@ -2553,17 +3128,21 @@ vmxnet3_suspend(struct device *device)
u8 *arpreq;
struct in_device *in_dev;
struct in_ifaddr *ifa;
+ unsigned long flags;
int i = 0;
if (!netif_running(netdev))
return 0;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_disable(&adapter->rx_queue[i].napi);
+
vmxnet3_disable_all_intrs(adapter);
vmxnet3_free_irqs(adapter);
vmxnet3_free_intr_resources(adapter);
netif_device_detach(netdev);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
/* Create wake-up filters. */
pmConf = adapter->pm_conf;
@@ -2633,8 +3212,10 @@ skip_arp:
adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
pmConf));
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_PMCFG);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
pci_save_state(pdev);
pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
@@ -2649,7 +3230,8 @@ skip_arp:
static int
vmxnet3_resume(struct device *device)
{
- int err;
+ int err, i = 0;
+ unsigned long flags;
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -2677,10 +3259,14 @@ vmxnet3_resume(struct device *device)
pci_enable_wake(pdev, PCI_D0, 0);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_PMCFG);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
vmxnet3_alloc_intr_resources(adapter);
vmxnet3_request_irqs(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ napi_enable(&adapter->rx_queue[i].napi);
vmxnet3_enable_all_intrs(adapter);
return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index b79070bcc92e..81254be85b92 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -45,6 +45,7 @@ static int
vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
if (adapter->rxcsum != val) {
adapter->rxcsum = val;
@@ -56,8 +57,10 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_RXCSUM;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
}
return 0;
@@ -68,76 +71,78 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
static const struct vmxnet3_stat_desc
vmxnet3_tq_dev_stats[] = {
/* description, offset */
- { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
- { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
- { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
- { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
- { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
- { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
- { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
- { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
- { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
- { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
+ { "Tx Queue#", 0 },
+ { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
+ { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
+ { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
+ { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
+ { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
+ { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
+ { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
+ { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
+ { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
+ { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
};
/* per tq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_tq_driver_stats[] = {
/* description, offset */
- {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
- drop_total) },
- { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
- drop_too_many_frags) },
- { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
- drop_oversized_hdr) },
- { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
- drop_hdr_inspect_err) },
- { " tso", offsetof(struct vmxnet3_tq_driver_stats,
- drop_tso) },
- { "ring full", offsetof(struct vmxnet3_tq_driver_stats,
- tx_ring_full) },
- { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
- linearized) },
- { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
- copy_skb_header) },
- { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
- oversized_hdr) },
+ {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_total) },
+ { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_too_many_frags) },
+ { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_oversized_hdr) },
+ { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_hdr_inspect_err) },
+ { " tso", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_tso) },
+ { " ring full", offsetof(struct vmxnet3_tq_driver_stats,
+ tx_ring_full) },
+ { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
+ linearized) },
+ { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
+ copy_skb_header) },
+ { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
+ oversized_hdr) },
};
/* per rq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_rq_dev_stats[] = {
- { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
- { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
- { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
- { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
- { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
- { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
- { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
- { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
- { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
- { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
+ { "Rx Queue#", 0 },
+ { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
+ { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
+ { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
+ { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
+ { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
+ { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
+ { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
+ { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
+ { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
+ { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
};
/* per rq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_rq_driver_stats[] = {
/* description, offset */
- { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
- drop_total) },
- { " err", offsetof(struct vmxnet3_rq_driver_stats,
- drop_err) },
- { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
- drop_fcs) },
- { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
- rx_buf_alloc_failure) },
+ { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_total) },
+ { " err", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_err) },
+ { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_fcs) },
+ { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
+ rx_buf_alloc_failure) },
};
 /* global stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_global_stats[] = {
/* description, offset */
- { "tx timeout count", offsetof(struct vmxnet3_adapter,
+ { "tx timeout count", offsetof(struct vmxnet3_adapter,
tx_timeout_count) }
};
@@ -151,56 +156,60 @@ vmxnet3_get_stats(struct net_device *netdev)
struct UPT1_TxStats *devTxStats;
struct UPT1_RxStats *devRxStats;
struct net_device_stats *net_stats = &netdev->stats;
+ unsigned long flags;
+ int i;
adapter = netdev_priv(netdev);
/* Collect the dev stats into the shared area */
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
-
- /* Assuming that we have a single queue device */
- devTxStats = &adapter->tqd_start->stats;
- devRxStats = &adapter->rqd_start->stats;
-
- /* Get access to the driver stats per queue */
- drvTxStats = &adapter->tx_queue.stats;
- drvRxStats = &adapter->rx_queue.stats;
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
memset(net_stats, 0, sizeof(*net_stats));
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ devTxStats = &adapter->tqd_start[i].stats;
+ drvTxStats = &adapter->tx_queue[i].stats;
+ net_stats->tx_packets += devTxStats->ucastPktsTxOK +
+ devTxStats->mcastPktsTxOK +
+ devTxStats->bcastPktsTxOK;
+ net_stats->tx_bytes += devTxStats->ucastBytesTxOK +
+ devTxStats->mcastBytesTxOK +
+ devTxStats->bcastBytesTxOK;
+ net_stats->tx_errors += devTxStats->pktsTxError;
+ net_stats->tx_dropped += drvTxStats->drop_total;
+ }
- net_stats->rx_packets = devRxStats->ucastPktsRxOK +
- devRxStats->mcastPktsRxOK +
- devRxStats->bcastPktsRxOK;
-
- net_stats->tx_packets = devTxStats->ucastPktsTxOK +
- devTxStats->mcastPktsTxOK +
- devTxStats->bcastPktsTxOK;
-
- net_stats->rx_bytes = devRxStats->ucastBytesRxOK +
- devRxStats->mcastBytesRxOK +
- devRxStats->bcastBytesRxOK;
-
- net_stats->tx_bytes = devTxStats->ucastBytesTxOK +
- devTxStats->mcastBytesTxOK +
- devTxStats->bcastBytesTxOK;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ devRxStats = &adapter->rqd_start[i].stats;
+ drvRxStats = &adapter->rx_queue[i].stats;
+ net_stats->rx_packets += devRxStats->ucastPktsRxOK +
+ devRxStats->mcastPktsRxOK +
+ devRxStats->bcastPktsRxOK;
- net_stats->rx_errors = devRxStats->pktsRxError;
- net_stats->tx_errors = devTxStats->pktsTxError;
- net_stats->rx_dropped = drvRxStats->drop_total;
- net_stats->tx_dropped = drvTxStats->drop_total;
- net_stats->multicast = devRxStats->mcastPktsRxOK;
+ net_stats->rx_bytes += devRxStats->ucastBytesRxOK +
+ devRxStats->mcastBytesRxOK +
+ devRxStats->bcastBytesRxOK;
+ net_stats->rx_errors += devRxStats->pktsRxError;
+ net_stats->rx_dropped += drvRxStats->drop_total;
+ net_stats->multicast += devRxStats->mcastPktsRxOK;
+ }
return net_stats;
}
static int
vmxnet3_get_sset_count(struct net_device *netdev, int sset)
{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(vmxnet3_tq_dev_stats) +
- ARRAY_SIZE(vmxnet3_tq_driver_stats) +
- ARRAY_SIZE(vmxnet3_rq_dev_stats) +
- ARRAY_SIZE(vmxnet3_rq_driver_stats) +
+ return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
+ ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
+ adapter->num_tx_queues +
+ (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
+ ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
+ adapter->num_rx_queues +
ARRAY_SIZE(vmxnet3_global_stats);
default:
return -EOPNOTSUPP;
@@ -208,10 +217,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset)
}
+/* Should be multiple of 4 */
+#define NUM_TX_REGS 8
+#define NUM_RX_REGS 12
+
static int
vmxnet3_get_regs_len(struct net_device *netdev)
{
- return 20 * sizeof(u32);
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
+ adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
}
@@ -242,29 +257,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static void
vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (stringset == ETH_SS_STATS) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
- memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) {
- memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
- }
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
- memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
+ int i, j;
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
+ memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
+ i++) {
+ memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
}
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) {
- memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
- ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
+
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
+ memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
+ i++) {
+ memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
}
+
for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
memcpy(buf, vmxnet3_global_stats[i].desc,
ETH_GSTRING_LEN);
@@ -279,6 +302,7 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
+ unsigned long flags;
if (data & ~ETH_FLAG_LRO)
return -EOPNOTSUPP;
@@ -294,8 +318,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
else
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_LRO;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
return 0;
}
@@ -305,28 +331,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *buf)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
u8 *base;
int i;
+ int j = 0;
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
/* this does assume each counter is 64-bit wide */
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ base = (u8 *)&adapter->tqd_start[j].stats;
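+ /* The first descriptor in vmxnet3_tq_dev_stats is the queue number
+ * label, emitted here as the queue index; device stats start at 1.
+ */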
+ *buf++ = (u64)j;
+ for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_tq_dev_stats[i].offset);
+
+ base = (u8 *)&adapter->tx_queue[j].stats;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_tq_driver_stats[i].offset);
+ }
- base = (u8 *)&adapter->tqd_start->stats;
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
- *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
-
- base = (u8 *)&adapter->tx_queue.stats;
- for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
- *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
-
- base = (u8 *)&adapter->rqd_start->stats;
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
- *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
-
- base = (u8 *)&adapter->rx_queue.stats;
- for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
- *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ base = (u8 *)&adapter->rqd_start[j].stats;
+ *buf++ = (u64) j;
+ for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_rq_dev_stats[i].offset);
+
+ base = (u8 *)&adapter->rx_queue[j].stats;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
+ *buf++ = *(u64 *)(base +
+ vmxnet3_rq_driver_stats[i].offset);
+ }
base = (u8 *)adapter;
for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
@@ -339,6 +378,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *buf = p;
+ int i = 0, j = 0;
memset(p, 0, vmxnet3_get_regs_len(netdev));
@@ -347,30 +387,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
/* Update vmxnet3_get_regs_len if we want to dump more registers */
/* make each ring use multiple of 16 bytes */
- buf[0] = adapter->tx_queue.tx_ring.next2fill;
- buf[1] = adapter->tx_queue.tx_ring.next2comp;
- buf[2] = adapter->tx_queue.tx_ring.gen;
- buf[3] = 0;
-
- buf[4] = adapter->tx_queue.comp_ring.next2proc;
- buf[5] = adapter->tx_queue.comp_ring.gen;
- buf[6] = adapter->tx_queue.stopped;
- buf[7] = 0;
-
- buf[8] = adapter->rx_queue.rx_ring[0].next2fill;
- buf[9] = adapter->rx_queue.rx_ring[0].next2comp;
- buf[10] = adapter->rx_queue.rx_ring[0].gen;
- buf[11] = 0;
-
- buf[12] = adapter->rx_queue.rx_ring[1].next2fill;
- buf[13] = adapter->rx_queue.rx_ring[1].next2comp;
- buf[14] = adapter->rx_queue.rx_ring[1].gen;
- buf[15] = 0;
-
- buf[16] = adapter->rx_queue.comp_ring.next2proc;
- buf[17] = adapter->rx_queue.comp_ring.gen;
- buf[18] = 0;
- buf[19] = 0;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
+ buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
+ buf[j++] = adapter->tx_queue[i].tx_ring.gen;
+ buf[j++] = 0;
+
+ buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
+ buf[j++] = adapter->tx_queue[i].comp_ring.gen;
+ buf[j++] = adapter->tx_queue[i].stopped;
+ buf[j++] = 0;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
+ buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
+ buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
+ buf[j++] = 0;
+
+ buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
+ buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
+ buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
+ buf[j++] = 0;
+
+ buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
+ buf[j++] = adapter->rx_queue[i].comp_ring.gen;
+ buf[j++] = 0;
+ buf[j++] = 0;
+ }
+
}
@@ -435,8 +480,10 @@ vmxnet3_get_ringparam(struct net_device *netdev,
param->rx_mini_max_pending = 0;
param->rx_jumbo_max_pending = 0;
- param->rx_pending = adapter->rx_queue.rx_ring[0].size;
- param->tx_pending = adapter->tx_queue.tx_ring.size;
+ param->rx_pending = adapter->rx_queue[0].rx_ring[0].size *
+ adapter->num_rx_queues;
+ param->tx_pending = adapter->tx_queue[0].tx_ring.size *
+ adapter->num_tx_queues;
param->rx_mini_pending = 0;
param->rx_jumbo_pending = 0;
}
@@ -480,8 +527,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
sz) != 0)
return -EINVAL;
- if (new_tx_ring_size == adapter->tx_queue.tx_ring.size &&
- new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) {
+ if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
+ new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
return 0;
}
@@ -498,11 +545,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
/* recreate the rx queue and the tx queue based on the
* new sizes */
- vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
- vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
+ vmxnet3_tq_destroy_all(adapter);
+ vmxnet3_rq_destroy_all(adapter);
err = vmxnet3_create_queues(adapter, new_tx_ring_size,
new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
+
if (err) {
/* failed, most likely because of OOM, try default
* size */
@@ -535,6 +583,69 @@ out:
}
+static int
+vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
+ void *rules)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = adapter->num_rx_queues;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+#ifdef VMXNET3_RSS
+static int
+vmxnet3_get_rss_indir(struct net_device *netdev,
+ struct ethtool_rxfh_indir *p)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+ unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
+
+ p->size = rssConf->indTableSize;
+ while (n--)
+ p->ring_index[n] = rssConf->indTable[n];
+ return 0;
+
+}
+
+static int
+vmxnet3_set_rss_indir(struct net_device *netdev,
+ const struct ethtool_rxfh_indir *p)
+{
+ unsigned int i;
+ unsigned long flags;
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+
+ if (p->size != rssConf->indTableSize)
+ return -EINVAL;
+ for (i = 0; i < rssConf->indTableSize; i++) {
+ /*
+ * Return with error code if any of the queue indices
+ * is out of range
+ */
+ if (p->ring_index[i] < 0 ||
+ p->ring_index[i] >= adapter->num_rx_queues)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < rssConf->indTableSize; i++)
+ rssConf->indTable[i] = p->ring_index[i];
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_RSSIDT);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ return 0;
+
+}
+#endif
+
static struct ethtool_ops vmxnet3_ethtool_ops = {
.get_settings = vmxnet3_get_settings,
.get_drvinfo = vmxnet3_get_drvinfo,
@@ -558,6 +669,11 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
.get_ethtool_stats = vmxnet3_get_ethtool_stats,
.get_ringparam = vmxnet3_get_ringparam,
.set_ringparam = vmxnet3_set_ringparam,
+ .get_rxnfc = vmxnet3_get_rxnfc,
+#ifdef VMXNET3_RSS
+ .get_rxfh_indir = vmxnet3_get_rss_indir,
+ .set_rxfh_indir = vmxnet3_set_rss_indir,
+#endif
};
void vmxnet3_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index edf228843afc..fb5d245ac878 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,11 +68,15 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.0.14.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k"
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01000E00
+#define VMXNET3_DRIVER_VERSION_NUM 0x01001900
+#if defined(CONFIG_PCI_MSI)
+ /* RSS only makes sense if MSI-X is supported. */
+ #define VMXNET3_RSS
+#endif
/*
* Capabilities
@@ -218,16 +222,19 @@ struct vmxnet3_tx_ctx {
};
struct vmxnet3_tx_queue {
+ char name[IFNAMSIZ+8]; /* To identify interrupt */
+ struct vmxnet3_adapter *adapter;
spinlock_t tx_lock;
struct vmxnet3_cmd_ring tx_ring;
- struct vmxnet3_tx_buf_info *buf_info;
+ struct vmxnet3_tx_buf_info *buf_info;
struct vmxnet3_tx_data_ring data_ring;
struct vmxnet3_comp_ring comp_ring;
- struct Vmxnet3_TxQueueCtrl *shared;
+ struct Vmxnet3_TxQueueCtrl *shared;
struct vmxnet3_tq_driver_stats stats;
bool stopped;
int num_stop; /* # of times the queue is
* stopped */
+ int qid;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
enum vmxnet3_rx_buf_type {
@@ -259,6 +266,9 @@ struct vmxnet3_rq_driver_stats {
};
struct vmxnet3_rx_queue {
+ char name[IFNAMSIZ + 8]; /* To identify interrupt */
+ struct vmxnet3_adapter *adapter;
+ struct napi_struct napi;
struct vmxnet3_cmd_ring rx_ring[2];
struct vmxnet3_comp_ring comp_ring;
struct vmxnet3_rx_ctx rx_ctx;
@@ -271,7 +281,16 @@ struct vmxnet3_rx_queue {
struct vmxnet3_rq_driver_stats stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-#define VMXNET3_LINUX_MAX_MSIX_VECT 1
+#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
+#define VMXNET3_DEVICE_MAX_RX_QUEUES 8 /* Keep this value as a power of 2 */
+
+/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
+#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)
+
+#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
+ VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
+#define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */
+
struct vmxnet3_intr {
enum vmxnet3_intr_mask_mode mask_mode;
@@ -279,27 +298,33 @@ struct vmxnet3_intr {
u8 num_intrs; /* # of intr vectors */
u8 event_intr_idx; /* idx of the intr vector for event */
u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
+ char event_msi_vector_name[IFNAMSIZ+11];
#ifdef CONFIG_PCI_MSI
struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};
+/* Interrupt sharing schemes, share_intr */
+#define VMXNET3_INTR_BUDDYSHARE 0 /* Corresponding tx,rx queues share irq */
+#define VMXNET3_INTR_TXSHARE 1 /* All tx queues share one irq */
+#define VMXNET3_INTR_DONTSHARE 2 /* each queue has its own irq */
+
+
#define VMXNET3_STATE_BIT_RESETTING 0
#define VMXNET3_STATE_BIT_QUIESCED 1
struct vmxnet3_adapter {
- struct vmxnet3_tx_queue tx_queue;
- struct vmxnet3_rx_queue rx_queue;
- struct napi_struct napi;
- struct vlan_group *vlan_grp;
-
- struct vmxnet3_intr intr;
-
- struct Vmxnet3_DriverShared *shared;
- struct Vmxnet3_PMConf *pm_conf;
- struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */
- struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */
- struct net_device *netdev;
- struct pci_dev *pdev;
+ struct vmxnet3_tx_queue tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
+ struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
+ struct vlan_group *vlan_grp;
+ struct vmxnet3_intr intr;
+ spinlock_t cmd_lock;
+ struct Vmxnet3_DriverShared *shared;
+ struct Vmxnet3_PMConf *pm_conf;
+ struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
+ struct Vmxnet3_RxQueueDesc *rqd_start; /* all rx queue desc */
+ struct net_device *netdev;
+ struct net_device_stats net_stats;
+ struct pci_dev *pdev;
u8 __iomem *hw_addr0; /* for BAR 0 */
u8 __iomem *hw_addr1; /* for BAR 1 */
@@ -308,6 +333,12 @@ struct vmxnet3_adapter {
bool rxcsum;
bool lro;
bool jumbo_frame;
+#ifdef VMXNET3_RSS
+ struct UPT1_RSSConf *rss_conf;
+ bool rss;
+#endif
+ u32 num_rx_queues;
+ u32 num_tx_queues;
/* rx buffer related */
unsigned skb_buf_size;
@@ -327,6 +358,7 @@ struct vmxnet3_adapter {
unsigned long state; /* VMXNET3_STATE_BIT_xxx */
int dev_number;
+ int share_intr;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
@@ -366,12 +398,10 @@ void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);
void
-vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
- struct vmxnet3_adapter *adapter);
+vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);
void
-vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
- struct vmxnet3_adapter *adapter);
+vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 906a3ca3676b..e74e4b42592d 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -19,109 +19,128 @@
#include "vxge-traffic.h"
#include "vxge-config.h"
-
-static enum vxge_hw_status
-__vxge_hw_fifo_create(
- struct __vxge_hw_vpath_handle *vpath_handle,
- struct vxge_hw_fifo_attr *attr);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_abort(
- struct __vxge_hw_fifo *fifoh);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_reset(
- struct __vxge_hw_fifo *ringh);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_delete(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-static struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
- u32 size);
+#include "vxge-main.h"
+
+#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
+ status = __vxge_hw_vpath_stats_access(vpath, \
+ VXGE_HW_STATS_OP_READ, \
+ offset, \
+ &val64); \
+ if (status != VXGE_HW_OK) \
+ return status; \
+}
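+/* The macro above relies on 'vpath', 'status' and 'val64' being declared in
+ * the caller, and returns from the calling function if the read fails.
+ */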
static void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool_entry *entry);
-
-static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle);
-
-static enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool *blockpool,
- u32 pool_size,
- u32 pool_max);
+vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
+{
+ u64 val64;
-static void
-__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
+ val64 = readq(&vp_reg->rxmac_vcfg0);
+ val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
+ writeq(val64, &vp_reg->rxmac_vcfg0);
+ val64 = readq(&vp_reg->rxmac_vcfg0);
+}
-static void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
- u32 size,
- struct vxge_hw_mempool_dma *dma_object);
+/*
+ * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
+ */
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct __vxge_hw_virtualpath *vpath;
+ u64 val64, rxd_count, rxd_spat;
+ int count = 0, total_count = 0;
-static void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
- void *memblock,
- u32 size,
- struct vxge_hw_mempool_dma *dma_object);
+ vpath = &hldev->virtual_paths[vp_id];
+ vp_reg = vpath->vp_reg;
+ vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
-static struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
- enum __vxge_hw_channel_type type, u32 length,
- u32 per_dtr_space, void *userdata);
+ /* Check that the ring controller for this vpath has enough free RxDs
+ * to send frames to the host. This is done by reading the
+ * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
+ * RXD_SPAT value for the vpath.
+ */
+ val64 = readq(&vp_reg->prc_cfg6);
+ rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
+ /* Use a factor of 2 when comparing rxd_count against rxd_spat for some
+ * leg room.
+ */
+ rxd_spat *= 2;
-static void
-__vxge_hw_channel_free(
- struct __vxge_hw_channel *channel);
+ do {
+ mdelay(1);
-static enum vxge_hw_status
-__vxge_hw_channel_initialize(
- struct __vxge_hw_channel *channel);
+ rxd_count = readq(&vp_reg->prc_rxd_doorbell);
-static enum vxge_hw_status
-__vxge_hw_channel_reset(
- struct __vxge_hw_channel *channel);
+ /* Check that the ring controller for this vpath does
+ * not have any frame in its pipeline.
+ */
+ val64 = readq(&vp_reg->frm_in_progress_cnt);
+ if ((rxd_count <= rxd_spat) || (val64 > 0))
+ count = 0;
+ else
+ count++;
+ total_count++;
+ } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
+ (total_count < VXGE_HW_MAX_POLLING_COUNT));
-static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
+ if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+ printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
+ __func__);
-static enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
+ return total_count;
+}
-static enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
+/* vxge_hw_device_wait_receive_idle - This function waits until all frames
+ * stored in the frame buffer for each vpath assigned to the given
+ * function (hldev) have been sent to the host.
+ */
+void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
+{
+ int i, total_count = 0;
-static void
-__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+ continue;
-static void
-__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
+ total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
+ if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+ break;
+ }
+}
+/*
+ * __vxge_hw_device_register_poll
+ * Will poll certain register for specified amount of time.
+ * Will poll until masked bit is not cleared.
+ */
static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
+__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
+{
+ u64 val64;
+ u32 i = 0;
+ enum vxge_hw_status ret = VXGE_HW_FAIL;
-static enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
+ udelay(10);
-static void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
+ do {
+ val64 = readq(reg);
+ if (!(val64 & mask))
+ return VXGE_HW_OK;
+ udelay(100);
+ } while (++i <= 9);
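+ /* Fast path: roughly 1 ms of 100 us polls before falling back to 1 ms delays. */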
-static enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
+ i = 0;
+ do {
+ val64 = readq(reg);
+ if (!(val64 & mask))
+ return VXGE_HW_OK;
+ mdelay(1);
+ } while (++i <= max_millis);
-static enum vxge_hw_status
-__vxge_hw_device_register_poll(
- void __iomem *reg,
- u64 mask, u32 max_millis);
+ return ret;
+}
static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
@@ -129,139 +148,258 @@ __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
{
__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
wmb();
-
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
wmb();
- return __vxge_hw_device_register_poll(addr, mask, max_millis);
+ return __vxge_hw_device_register_poll(addr, mask, max_millis);
}
-static struct vxge_hw_mempool*
-__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
- u32 item_size, u32 private_size, u32 items_initial,
- u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
- void *userdata);
-static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
-
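+/*
+ * vxge_hw_vpath_fw_api - issue a firmware request through the RTS access
+ * steering registers: write data0/data1, strobe the control register, poll
+ * for the strobe to clear and read back the results on success.
+ */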
static enum vxge_hw_status
-__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats);
+vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
+ u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
+ u64 *steer_ctrl)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ enum vxge_hw_status status;
+ u64 val64;
+ u32 retry = 0, max_retry = 100;
-static enum vxge_hw_status
-vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
+ vp_reg = vpath->vp_reg;
-static enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
+ if (vpath->vp_open) {
+ max_retry = 3;
+ spin_lock(&vpath->lock);
+ }
-static u64
-__vxge_hw_vpath_pci_func_mode_get(u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg);
+ writeq(*data0, &vp_reg->rts_access_steer_data0);
+ writeq(*data1, &vp_reg->rts_access_steer_data1);
+ wmb();
-static u32
-__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+ val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
+ *steer_ctrl;
-static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
- u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
+ status = __vxge_hw_pio_mem_write64(val64,
+ &vp_reg->rts_access_steer_ctrl,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+ VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+
+ /* The __vxge_hw_device_register_poll can udelay for a significant
+ * amount of time, blocking other processes from the CPU. If it delays
+ * for ~5secs, an NMI error can occur. A way around this is to give up
+ * the processor via msleep, but this is not allowed while under lock.
+ * So, only allow it to sleep for ~4secs if open. Otherwise, delay for
+ * 1sec and sleep for 10ms until the firmware operation has completed
+ * or timed-out.
+ */
+ while ((status != VXGE_HW_OK) && retry++ < max_retry) {
+ if (!vpath->vp_open)
+ msleep(20);
+ status = __vxge_hw_device_register_poll(
+ &vp_reg->rts_access_steer_ctrl,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+ VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+ }
-static enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+ if (status != VXGE_HW_OK)
+ goto out;
+ val64 = readq(&vp_reg->rts_access_steer_ctrl);
+ if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+ *data0 = readq(&vp_reg->rts_access_steer_data0);
+ *data1 = readq(&vp_reg->rts_access_steer_data1);
+ *steer_ctrl = val64;
+ } else
+ status = VXGE_HW_FAIL;
-static enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
+out:
+ if (vpath->vp_open)
+ spin_unlock(&vpath->lock);
+ return status;
+}
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+ u32 *minor, u32 *build)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
-static enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
-static void
-__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_READ,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
- u32 operation, u32 offset, u64 *stat);
+ *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+ *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+ *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
+ return status;
+}
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+ u32 ret;
-/*
- * __vxge_hw_channel_allocate - Allocate memory for channel
- * This function allocates required memory for the channel and various arrays
- * in the channel
- */
-struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
- enum __vxge_hw_channel_type type,
- u32 length, u32 per_dtr_space, void *userdata)
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
+ goto exit;
+ }
+
+ ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
+ if (ret != 1) {
+ vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
+ __func__, ret);
+ status = VXGE_HW_FAIL;
+ }
+
+exit:
+ return status;
+}
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
{
- struct __vxge_hw_channel *channel;
- struct __vxge_hw_device *hldev;
- int size = 0;
- u32 vp_id;
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+ int ret_code, sec_code;
- hldev = vph->vpath->hldev;
- vp_id = vph->vpath->vp_id;
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
- switch (type) {
- case VXGE_HW_CHANNEL_TYPE_FIFO:
- size = sizeof(struct __vxge_hw_fifo);
- break;
- case VXGE_HW_CHANNEL_TYPE_RING:
- size = sizeof(struct __vxge_hw_ring);
- break;
- default:
- break;
+ /* send upgrade start command */
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_START,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
+ __func__);
+ return status;
}
- channel = kzalloc(size, GFP_KERNEL);
- if (channel == NULL)
- goto exit0;
- INIT_LIST_HEAD(&channel->item);
+ /* Transfer fw image to adapter 16 bytes at a time */
+ for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
+ steer_ctrl = 0;
- channel->common_reg = hldev->common_reg;
- channel->first_vp_id = hldev->first_vp_id;
- channel->type = type;
- channel->devh = hldev;
- channel->vph = vph;
- channel->userdata = userdata;
- channel->per_dtr_space = per_dtr_space;
- channel->length = length;
- channel->vp_id = vp_id;
+ /* The next 128bits of fwdata to be loaded onto the adapter */
+ data0 = *((u64 *)fwdata);
+ data1 = *((u64 *)fwdata + 1);
- channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
- if (channel->work_arr == NULL)
- goto exit1;
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_UPGRADE_ACTION,
+ VXGE_HW_FW_UPGRADE_MEMO,
+ VXGE_HW_FW_UPGRADE_OFFSET_SEND,
+ &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
+ __func__);
+ goto out;
+ }
- channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
- if (channel->free_arr == NULL)
- goto exit1;
- channel->free_ptr = length;
+ ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
+ switch (ret_code) {
+ case VXGE_HW_FW_UPGRADE_OK:
+ /* All OK, send next 16 bytes. */
+ break;
+ case VXGE_FW_UPGRADE_BYTES2SKIP:
+ /* skip bytes in the stream */
+ fwdata += (data0 >> 8) & 0xFFFFFFFF;
+ break;
+ case VXGE_HW_FW_UPGRADE_DONE:
+ goto out;
+ case VXGE_HW_FW_UPGRADE_ERR:
+ sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
+ switch (sec_code) {
+ case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
+ case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
+ printk(KERN_ERR
+ "corrupted data from .ncf file\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
+ case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
+ printk(KERN_ERR "invalid .ncf file\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
+ printk(KERN_ERR "buffer overflow\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
+ printk(KERN_ERR "failed to flash the image\n");
+ break;
+ case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
+ printk(KERN_ERR
+ "generic error. Unknown error type\n");
+ break;
+ default:
+ printk(KERN_ERR "Unknown error of type %d\n",
+ sec_code);
+ break;
+ }
+ status = VXGE_HW_FAIL;
+ goto out;
+ default:
+ printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
+ status = VXGE_HW_FAIL;
+ goto out;
+ }
+ /* point to next 16 bytes */
+ fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
+ }
+out:
+ return status;
+}
- channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
- if (channel->reserve_arr == NULL)
- goto exit1;
- channel->reserve_ptr = length;
- channel->reserve_top = 0;
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+ struct eprom_image *img)
+{
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+ struct __vxge_hw_virtualpath *vpath;
+ enum vxge_hw_status status;
+ int i;
- channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
- if (channel->orig_arr == NULL)
- goto exit1;
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
- return channel;
-exit1:
- __vxge_hw_channel_free(channel);
+ for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+ data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
+ data1 = steer_ctrl = 0;
-exit0:
- return NULL;
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_API_GET_EPROM_REV,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ break;
+
+ img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
+ img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
+ img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
+ img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
+ }
+
+ return status;
}
/*
@@ -269,7 +407,7 @@ exit0:
* This function deallocates memory from the channel and various arrays
* in the channel
*/
-void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
+static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
kfree(channel->work_arr);
kfree(channel->free_arr);
@@ -283,7 +421,7 @@ void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
* This function initializes a channel by properly setting the
* various references
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
u32 i;
@@ -318,7 +456,7 @@ __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
* __vxge_hw_channel_reset - Resets a channel
* This function resets a channel by properly setting the various references
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
u32 i;
@@ -345,8 +483,7 @@ __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
* Initialize certain PCI/PCI-X configuration registers
* with recommended values. Save config space for future hw resets.
*/
-void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
+static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
u16 cmd = 0;
@@ -358,39 +495,7 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
pci_save_state(hldev->pdev);
}
-/*
- * __vxge_hw_device_register_poll
- * Will poll certain register for specified amount of time.
- * Will poll until masked bit is not cleared.
- */
-static enum vxge_hw_status
-__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
-{
- u64 val64;
- u32 i = 0;
- enum vxge_hw_status ret = VXGE_HW_FAIL;
-
- udelay(10);
-
- do {
- val64 = readq(reg);
- if (!(val64 & mask))
- return VXGE_HW_OK;
- udelay(100);
- } while (++i <= 9);
-
- i = 0;
- do {
- val64 = readq(reg);
- if (!(val64 & mask))
- return VXGE_HW_OK;
- mdelay(1);
- } while (++i <= max_millis);
-
- return ret;
-}
-
- /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
+/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
* in progress
* This routine checks the vpath reset in progress register is turned zero
*/
@@ -405,6 +510,60 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
}
/*
+ * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
+ * Set the swapper bits appropriately for the legacy section.
+ */
+static enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ val64 = readq(&legacy_reg->toc_swapper_fb);
+
+ wmb();
+
+ switch (val64) {
+ case VXGE_HW_SWAPPER_INITIAL_VALUE:
+ return status;
+
+ case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
+ writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_rd_swap_en);
+ writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_rd_flip_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_wr_swap_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_wr_flip_en);
+ break;
+
+ case VXGE_HW_SWAPPER_BYTE_SWAPPED:
+ writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_rd_swap_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+ &legacy_reg->pifm_wr_swap_en);
+ break;
+
+ case VXGE_HW_SWAPPER_BIT_FLIPPED:
+ writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_rd_flip_en);
+ writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
+ &legacy_reg->pifm_wr_flip_en);
+ break;
+ }
+
+ wmb();
+
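+ /* Re-read to verify that the swapper configuration took effect. */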
+ val64 = readq(&legacy_reg->toc_swapper_fb);
+
+ if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
+ status = VXGE_HW_ERR_SWAPPER_CTRL;
+
+ return status;
+}
+
+/*
* __vxge_hw_device_toc_get
* This routine sets the swapper and reads the toc pointer and returns the
* memory mapped address of the toc
@@ -435,7 +594,7 @@ exit:
* register location pointers in the device object. It waits until the ric is
* completed initializing registers.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
u64 val64;
@@ -496,26 +655,6 @@ exit:
}
/*
- * __vxge_hw_device_id_get
- * This routine returns sets the device id and revision numbers into the device
- * structure
- */
-void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
-{
- u64 val64;
-
- val64 = readq(&hldev->common_reg->titan_asic_id);
- hldev->device_id =
- (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
-
- hldev->major_revision =
- (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
-
- hldev->minor_revision =
- (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
-}
-
-/*
* __vxge_hw_device_access_rights_get: Get Access Rights of the driver
* This routine returns the Access Rights of the driver
*/
@@ -568,10 +707,25 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
}
/*
+ * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
+ * Returns the function number of the vpath.
+ */
+static u32
+__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
+{
+ u64 val64;
+
+ val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
+
+ return
+ (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
+}
+
+/*
* __vxge_hw_device_host_info_get
* This routine returns the host type assignments
*/
-void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
+static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
u64 val64;
u32 i;
@@ -584,16 +738,18 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!(hldev->vpath_assignments & vxge_mBIT(i)))
continue;
hldev->func_id =
- __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);
+ __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
hldev->access_rights = __vxge_hw_device_access_rights_get(
hldev->host_type, hldev->func_id);
+ hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
+ hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
+
hldev->first_vp_id = i;
break;
}
@@ -634,7 +790,8 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
* __vxge_hw_device_initialize
* Initialize Titan-V hardware.
*/
-enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
+static enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -650,6 +807,196 @@ exit:
return status;
}
+/*
+ * __vxge_hw_vpath_fw_ver_get - Get the fw version
+ * Returns FW Version
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_device_hw_info *hw_info)
+{
+ struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
+ struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
+ struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
+ struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ fw_date->day =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
+ fw_date->month =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
+ fw_date->year =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
+
+ snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+ fw_date->month, fw_date->day, fw_date->year);
+
+ fw_version->major =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+ fw_version->minor =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+ fw_version->build =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
+
+ snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+ fw_version->major, fw_version->minor, fw_version->build);
+
+ flash_date->day =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
+ flash_date->month =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
+ flash_date->year =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
+
+ snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+ flash_date->month, flash_date->day, flash_date->year);
+
+ flash_version->major =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
+ flash_version->minor =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
+ flash_version->build =
+ (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
+
+ snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+ flash_version->major, flash_version->minor,
+ flash_version->build);
+
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_card_info_get - Get the serial numbers,
+ * part number and product description.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_device_hw_info *hw_info)
+{
+ enum vxge_hw_status status;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ u8 *serial_number = hw_info->serial_number;
+ u8 *part_number = hw_info->part_number;
+ u8 *product_desc = hw_info->product_desc;
+ u32 i, j = 0;
+
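+ /* Each memo read returns 16 bytes of the string in data0/data1,
+ * big-endian, hence the be64_to_cpu() conversions below.
+ */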
+ data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ ((u64 *)serial_number)[0] = be64_to_cpu(data0);
+ ((u64 *)serial_number)[1] = be64_to_cpu(data1);
+
+ data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
+ data1 = steer_ctrl = 0;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ ((u64 *)part_number)[0] = be64_to_cpu(data0);
+ ((u64 *)part_number)[1] = be64_to_cpu(data1);
+
+ for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
+ i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
+ data0 = i;
+ data1 = steer_ctrl = 0;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
+ ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
+ }
+
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
+ * Returns pci function mode
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_device_hw_info *hw_info)
+{
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+
+ data0 = 0;
+
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_FW_API_GET_FUNC_MODE,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
+ * from MAC address table.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
+ u8 *macaddr, u8 *macaddr_mask)
+{
+ u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+ data0 = 0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
+ int i;
+
+ do {
+ status = vxge_hw_vpath_fw_api(vpath, action,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+ 0, &data0, &data1, &steer_ctrl);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
+ data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
+ data1);
+
+ for (i = ETH_ALEN; i > 0; i--) {
+ macaddr[i - 1] = (u8) (data0 & 0xFF);
+ data0 >>= 8;
+
+ macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
+ data1 >>= 8;
+ }
+
+ action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
+ data0 = 0, data1 = 0, steer_ctrl = 0;
+
+ } while (!is_valid_ether_addr(macaddr));
+exit:
+ return status;
+}
+
/**
* vxge_hw_device_hw_info_get - Get the hw information
* Returns the vpath mask that has the bits set for each vpath allocated
@@ -665,9 +1012,9 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
struct vxge_hw_toc_reg __iomem *toc;
struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
struct vxge_hw_common_reg __iomem *common_reg;
- struct vxge_hw_vpath_reg __iomem *vpath_reg;
struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
enum vxge_hw_status status;
+ struct __vxge_hw_virtualpath vpath;
memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
@@ -693,7 +1040,6 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
(u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
continue;
@@ -702,7 +1048,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
(bar0 + val64);
- hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
+ hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
if (__vxge_hw_device_access_rights_get(hw_info->host_type,
hw_info->func_id) &
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
@@ -718,16 +1064,19 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
val64 = readq(&toc->toc_vpath_pointer[i]);
- vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
+ vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
+ (bar0 + val64);
+ vpath.vp_open = 0;
- hw_info->function_mode =
- __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);
+ status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
+ if (status != VXGE_HW_OK)
+ goto exit;
- status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
+ status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
if (status != VXGE_HW_OK)
goto exit;
- status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
+ status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
if (status != VXGE_HW_OK)
goto exit;
@@ -735,14 +1084,15 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
}
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
continue;
val64 = readq(&toc->toc_vpath_pointer[i]);
- vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
+ vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
+ (bar0 + val64);
+ vpath.vp_open = 0;
- status = __vxge_hw_vpath_addr_get(i, vpath_reg,
+ status = __vxge_hw_vpath_addr_get(&vpath,
hw_info->mac_addrs[i],
hw_info->mac_addr_masks[i]);
if (status != VXGE_HW_OK)
@@ -753,6 +1103,218 @@ exit:
}
/*
+ * __vxge_hw_blockpool_destroy - Deallocates the block pool
+ */
+static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
+{
+ struct __vxge_hw_device *hldev;
+ struct list_head *p, *n;
+ u16 ret;
+
+ if (blockpool == NULL) {
+ ret = 1;
+ goto exit;
+ }
+
+ hldev = blockpool->hldev;
+
+ list_for_each_safe(p, n, &blockpool->free_block_list) {
+ pci_unmap_single(hldev->pdev,
+ ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+ ((struct __vxge_hw_blockpool_entry *)p)->length,
+ PCI_DMA_BIDIRECTIONAL);
+
+ vxge_os_dma_free(hldev->pdev,
+ ((struct __vxge_hw_blockpool_entry *)p)->memblock,
+ &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
+
+ list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+ kfree(p);
+ blockpool->pool_size--;
+ }
+
+ list_for_each_safe(p, n, &blockpool->free_entry_list) {
+ list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+ kfree((void *)p);
+ }
+ ret = 0;
+exit:
+ return;
+}
+
+/*
+ * __vxge_hw_blockpool_create - Create block pool
+ */
+static enum vxge_hw_status
+__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
+ struct __vxge_hw_blockpool *blockpool,
+ u32 pool_size,
+ u32 pool_max)
+{
+ u32 i;
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ void *memblock;
+ dma_addr_t dma_addr;
+ struct pci_dev *dma_handle;
+ struct pci_dev *acc_handle;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ if (blockpool == NULL) {
+ status = VXGE_HW_FAIL;
+ goto blockpool_create_exit;
+ }
+
+ blockpool->hldev = hldev;
+ blockpool->block_size = VXGE_HW_BLOCK_SIZE;
+ blockpool->pool_size = 0;
+ blockpool->pool_max = pool_max;
+ blockpool->req_out = 0;
+
+ INIT_LIST_HEAD(&blockpool->free_block_list);
+ INIT_LIST_HEAD(&blockpool->free_entry_list);
+
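+ /* Pre-allocate pool_size + pool_max entry descriptors up front. */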
+ for (i = 0; i < pool_size + pool_max; i++) {
+ entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
+ GFP_KERNEL);
+ if (entry == NULL) {
+ __vxge_hw_blockpool_destroy(blockpool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto blockpool_create_exit;
+ }
+ list_add(&entry->item, &blockpool->free_entry_list);
+ }
+
+ for (i = 0; i < pool_size; i++) {
+ memblock = vxge_os_dma_malloc(
+ hldev->pdev,
+ VXGE_HW_BLOCK_SIZE,
+ &dma_handle,
+ &acc_handle);
+ if (memblock == NULL) {
+ __vxge_hw_blockpool_destroy(blockpool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto blockpool_create_exit;
+ }
+
+ dma_addr = pci_map_single(hldev->pdev, memblock,
+ VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(pci_dma_mapping_error(hldev->pdev,
+ dma_addr))) {
+ vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
+ __vxge_hw_blockpool_destroy(blockpool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto blockpool_create_exit;
+ }
+
+ if (!list_empty(&blockpool->free_entry_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_entry_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
+
+ if (entry == NULL)
+ entry =
+ kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
+ GFP_KERNEL);
+ if (entry != NULL) {
+ list_del(&entry->item);
+ entry->length = VXGE_HW_BLOCK_SIZE;
+ entry->memblock = memblock;
+ entry->dma_addr = dma_addr;
+ entry->acc_handle = acc_handle;
+ entry->dma_handle = dma_handle;
+ list_add(&entry->item,
+ &blockpool->free_block_list);
+ blockpool->pool_size++;
+ } else {
+ __vxge_hw_blockpool_destroy(blockpool);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto blockpool_create_exit;
+ }
+ }
+
+blockpool_create_exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_device_fifo_config_check - Check fifo configuration.
+ * Check the fifo configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
+{
+ if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
+ (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
+ return VXGE_HW_BADCFG_FIFO_BLOCKS;
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_vpath_config_check - Check vpath configuration.
+ * Check the vpath configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
+{
+ enum vxge_hw_status status;
+
+ if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
+ (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
+ return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
+
+ status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
+ if (status != VXGE_HW_OK)
+ return status;
+
+ if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
+ ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
+ (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
+ return VXGE_HW_BADCFG_VPATH_MTU;
+
+ if ((vp_config->rpa_strip_vlan_tag !=
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
+ (vp_config->rpa_strip_vlan_tag !=
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
+ (vp_config->rpa_strip_vlan_tag !=
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
+ return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_config_check - Check device configuration.
+ * Check the device configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
+{
+ u32 i;
+ enum vxge_hw_status status;
+
+ if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
+ (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
+ (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
+ (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
+ return VXGE_HW_BADCFG_INTR_MODE;
+
+ if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
+ (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
+ return VXGE_HW_BADCFG_RTS_MAC_EN;
+
+ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+ status = __vxge_hw_device_vpath_config_check(
+ &new_config->vp_config[i]);
+ if (status != VXGE_HW_OK)
+ return status;
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
* vxge_hw_device_initialize - Initialize Titan device.
* Initialize Titan device. Note that all the arguments of this public API
* are 'IN', including @hldev. Driver cooperates with
@@ -776,14 +1338,12 @@ vxge_hw_device_initialize(
if (status != VXGE_HW_OK)
goto exit;
- hldev = (struct __vxge_hw_device *)
- vmalloc(sizeof(struct __vxge_hw_device));
+ hldev = vzalloc(sizeof(struct __vxge_hw_device));
if (hldev == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
- memset(hldev, 0, sizeof(struct __vxge_hw_device));
hldev->magic = VXGE_HW_DEVICE_MAGIC;
vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
@@ -806,7 +1366,6 @@ vxge_hw_device_initialize(
vfree(hldev);
goto exit;
}
- __vxge_hw_device_id_get(hldev);
__vxge_hw_device_host_info_get(hldev);
@@ -814,7 +1373,6 @@ vxge_hw_device_initialize(
nblocks++;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!(hldev->vpath_assignments & vxge_mBIT(i)))
continue;
@@ -839,7 +1397,6 @@ vxge_hw_device_initialize(
}
status = __vxge_hw_device_initialize(hldev);
-
if (status != VXGE_HW_OK) {
vxge_hw_device_terminate(hldev);
goto exit;
@@ -865,6 +1422,242 @@ vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
}
/*
+ * __vxge_hw_vpath_stats_access - Get the statistics from the given location
+ * and offset and perform an operation
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
+ u32 operation, u32 offset, u64 *stat)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto vpath_stats_access_exit;
+ }
+
+ vp_reg = vpath->vp_reg;
+
+ val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
+ VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
+ VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
+
+ status = __vxge_hw_pio_mem_write64(val64,
+ &vp_reg->xmac_stats_access_cmd,
+ VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
+ vpath->hldev->config.device_poll_millis);
+ if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
+ *stat = readq(&vp_reg->xmac_stats_access_data);
+ else
+ *stat = 0;
+
+vpath_stats_access_exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
+{
+ u64 *val64;
+ int i;
+ u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ val64 = (u64 *)vpath_tx_stats;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+
+ for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
+ status = __vxge_hw_vpath_stats_access(vpath,
+ VXGE_HW_STATS_OP_READ,
+ offset, val64);
+ if (status != VXGE_HW_OK)
+ goto exit;
+ offset++;
+ val64++;
+ }
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
+{
+ u64 *val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ int i;
+ u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
+ val64 = (u64 *) vpath_rx_stats;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
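+ /* RX stat offsets are kept in bytes; they are converted to a 64-bit word index below. */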
+ for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
+ status = __vxge_hw_vpath_stats_access(vpath,
+ VXGE_HW_STATS_OP_READ,
+ offset >> 3, val64);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ offset += 8;
+ val64++;
+ }
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_vpath_stats_hw_info *hw_stats)
+{
+ u64 val64;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+ vp_reg = vpath->vp_reg;
+
+ val64 = readq(&vp_reg->vpath_debug_stats0);
+ hw_stats->ini_num_mwr_sent =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats1);
+ hw_stats->ini_num_mrd_sent =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats2);
+ hw_stats->ini_num_cpl_rcvd =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats3);
+ hw_stats->ini_num_mwr_byte_sent =
+ VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats4);
+ hw_stats->ini_num_cpl_byte_rcvd =
+ VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats5);
+ hw_stats->wrcrdtarb_xoff =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
+
+ val64 = readq(&vp_reg->vpath_debug_stats6);
+ hw_stats->rdcrdtarb_xoff =
+ (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count01);
+ hw_stats->vpath_genstats_count0 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count01);
+ hw_stats->vpath_genstats_count1 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count23);
+ hw_stats->vpath_genstats_count2 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count01);
+ hw_stats->vpath_genstats_count3 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count4);
+ hw_stats->vpath_genstats_count4 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
+ val64);
+
+ val64 = readq(&vp_reg->vpath_genstats_count5);
+ hw_stats->vpath_genstats_count5 =
+ (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
+ val64);
+
+ status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ VXGE_HW_VPATH_STATS_PIO_READ(
+ VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
+
+ hw_stats->prog_event_vnum0 =
+ (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
+
+ hw_stats->prog_event_vnum1 =
+ (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
+
+ VXGE_HW_VPATH_STATS_PIO_READ(
+ VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
+
+ hw_stats->prog_event_vnum2 =
+ (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
+
+ hw_stats->prog_event_vnum3 =
+ (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
+
+ val64 = readq(&vp_reg->rx_multi_cast_stats);
+ hw_stats->rx_multi_cast_frame_discard =
+ (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
+
+ val64 = readq(&vp_reg->rx_frm_transferred);
+ hw_stats->rx_frm_transferred =
+ (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
+
+ val64 = readq(&vp_reg->rxd_returned);
+ hw_stats->rxd_returned =
+ (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
+
+ val64 = readq(&vp_reg->dbg_stats_rx_mpa);
+ hw_stats->rx_mpa_len_fail_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
+ hw_stats->rx_mpa_mrk_fail_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
+ hw_stats->rx_mpa_crc_fail_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
+
+ val64 = readq(&vp_reg->dbg_stats_rx_fau);
+ hw_stats->rx_permitted_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
+ hw_stats->rx_vp_reset_discarded_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
+ hw_stats->rx_wol_frms =
+ (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
+
+ val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
+ hw_stats->tx_vp_reset_discarded_frms =
+ (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
+ val64);
+exit:
+ return status;
+}
+
+/*
* vxge_hw_device_stats_get - Get the device hw statistics.
* Returns the vpath h/w stats for the device.
*/
@@ -876,7 +1669,6 @@ vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
enum vxge_hw_status status = VXGE_HW_OK;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
(hldev->virtual_paths[i].vp_open ==
VXGE_HW_VP_NOT_OPEN))
@@ -1031,7 +1823,6 @@ vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
status = vxge_hw_device_xmac_aggr_stats_get(hldev,
0, &xmac_stats->aggr_stats[0]);
-
if (status != VXGE_HW_OK)
goto exit;
@@ -1165,7 +1956,6 @@ exit:
* It can be used to set or reset Pause frame generation or reception
* support of the NIC.
*/
-
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
u32 port, u32 tx, u32 rx)
{
@@ -1407,190 +2197,359 @@ exit:
}
/*
- * __vxge_hw_ring_create - Create a Ring
- * This function creates Ring and initializes it.
- *
+ * __vxge_hw_channel_allocate - Allocate memory for channel
+ * This function allocates required memory for the channel and various arrays
+ * in the channel
*/
-static enum vxge_hw_status
-__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
- struct vxge_hw_ring_attr *attr)
+static struct __vxge_hw_channel *
+__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
+ enum __vxge_hw_channel_type type,
+ u32 length, u32 per_dtr_space,
+ void *userdata)
{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_ring *ring;
- u32 ring_length;
- struct vxge_hw_ring_config *config;
+ struct __vxge_hw_channel *channel;
struct __vxge_hw_device *hldev;
+ int size = 0;
u32 vp_id;
- struct vxge_hw_mempool_cbs ring_mp_callback;
- if ((vp == NULL) || (attr == NULL)) {
+ hldev = vph->vpath->hldev;
+ vp_id = vph->vpath->vp_id;
+
+ switch (type) {
+ case VXGE_HW_CHANNEL_TYPE_FIFO:
+ size = sizeof(struct __vxge_hw_fifo);
+ break;
+ case VXGE_HW_CHANNEL_TYPE_RING:
+ size = sizeof(struct __vxge_hw_ring);
+ break;
+ default:
+ break;
+ }
+
+ channel = kzalloc(size, GFP_KERNEL);
+ if (channel == NULL)
+ goto exit0;
+ INIT_LIST_HEAD(&channel->item);
+
+ channel->common_reg = hldev->common_reg;
+ channel->first_vp_id = hldev->first_vp_id;
+ channel->type = type;
+ channel->devh = hldev;
+ channel->vph = vph;
+ channel->userdata = userdata;
+ channel->per_dtr_space = per_dtr_space;
+ channel->length = length;
+ channel->vp_id = vp_id;
+
+ channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+ if (channel->work_arr == NULL)
+ goto exit1;
+
+ channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+ if (channel->free_arr == NULL)
+ goto exit1;
+ channel->free_ptr = length;
+
+ channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+ if (channel->reserve_arr == NULL)
+ goto exit1;
+ channel->reserve_ptr = length;
+ channel->reserve_top = 0;
+
+ channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
+ if (channel->orig_arr == NULL)
+ goto exit1;
+
+ return channel;
+exit1:
+ __vxge_hw_channel_free(channel);
+
+exit0:
+ return NULL;
+}
+
+/*
+ * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
+ * Adds a block to block pool
+ */
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+ void *block_addr,
+ u32 length,
+ struct pci_dev *dma_h,
+ struct pci_dev *acc_handle)
+{
+ struct __vxge_hw_blockpool *blockpool;
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ dma_addr_t dma_addr;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ u32 req_out;
+
+ blockpool = &devh->block_pool;
+
+ if (block_addr == NULL) {
+ blockpool->req_out--;
status = VXGE_HW_FAIL;
goto exit;
}
- hldev = vp->vpath->hldev;
- vp_id = vp->vpath->vp_id;
+ dma_addr = pci_map_single(devh->pdev, block_addr, length,
+ PCI_DMA_BIDIRECTIONAL);
- config = &hldev->config.vp_config[vp_id].ring;
+ if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
+ vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
+ blockpool->req_out--;
+ status = VXGE_HW_FAIL;
+ goto exit;
+ }
- ring_length = config->ring_blocks *
- vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+ if (!list_empty(&blockpool->free_entry_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_entry_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
- ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
- VXGE_HW_CHANNEL_TYPE_RING,
- ring_length,
- attr->per_rxd_space,
- attr->userdata);
+ if (entry == NULL)
+ entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
+ else
+ list_del(&entry->item);
- if (ring == NULL) {
+ if (entry != NULL) {
+ entry->length = length;
+ entry->memblock = block_addr;
+ entry->dma_addr = dma_addr;
+ entry->acc_handle = acc_handle;
+ entry->dma_handle = dma_h;
+ list_add(&entry->item, &blockpool->free_block_list);
+ blockpool->pool_size++;
+ status = VXGE_HW_OK;
+ } else
status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
- vp->vpath->ringh = ring;
- ring->vp_id = vp_id;
- ring->vp_reg = vp->vpath->vp_reg;
- ring->common_reg = hldev->common_reg;
- ring->stats = &vp->vpath->sw_stats->ring_stats;
- ring->config = config;
- ring->callback = attr->callback;
- ring->rxd_init = attr->rxd_init;
- ring->rxd_term = attr->rxd_term;
- ring->buffer_mode = config->buffer_mode;
- ring->rxds_limit = config->rxds_limit;
+ blockpool->req_out--;
- ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
- ring->rxd_priv_size =
- sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
- ring->per_rxd_space = attr->per_rxd_space;
+ req_out = blockpool->req_out;
+exit:
+ return;
+}
- ring->rxd_priv_size =
- ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
- VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
+static inline void
+vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
+{
+ gfp_t flags;
+ void *vaddr;
- /* how many RxDs can fit into one block. Depends on configured
- * buffer_mode. */
- ring->rxds_per_block =
- vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+ if (in_interrupt())
+ flags = GFP_ATOMIC | GFP_DMA;
+ else
+ flags = GFP_KERNEL | GFP_DMA;
- /* calculate actual RxD block private size */
- ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
- ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
- ring->mempool = __vxge_hw_mempool_create(hldev,
- VXGE_HW_BLOCK_SIZE,
- VXGE_HW_BLOCK_SIZE,
- ring->rxdblock_priv_size,
- ring->config->ring_blocks,
- ring->config->ring_blocks,
- &ring_mp_callback,
- ring);
+ vaddr = kmalloc((size), flags);
- if (ring->mempool == NULL) {
- __vxge_hw_ring_delete(vp);
- return VXGE_HW_ERR_OUT_OF_MEMORY;
- }
+ vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
+}
- status = __vxge_hw_channel_initialize(&ring->channel);
- if (status != VXGE_HW_OK) {
- __vxge_hw_ring_delete(vp);
- goto exit;
+/*
+ * __vxge_hw_blockpool_blocks_add - Request additional blocks
+ */
+static
+void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
+{
+ u32 nreq = 0, i;
+
+ if ((blockpool->pool_size + blockpool->req_out) <
+ VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
+ nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
+ blockpool->req_out += nreq;
}
- /* Note:
- * Specifying rxd_init callback means two things:
- * 1) rxds need to be initialized by driver at channel-open time;
- * 2) rxds need to be posted at channel-open time
- * (that's what the initial_replenish() below does)
- * Currently we don't have a case when the 1) is done without the 2).
- */
- if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring);
- if (status != VXGE_HW_OK) {
- __vxge_hw_ring_delete(vp);
+ for (i = 0; i < nreq; i++)
+ vxge_os_dma_malloc_async(
+ ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ blockpool->hldev, VXGE_HW_BLOCK_SIZE);
+}
+
+/*
+ * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
+ * Allocates a block of memory of given size, either from block pool
+ * or by calling vxge_os_dma_malloc()
+ */
+static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
+ struct vxge_hw_mempool_dma *dma_object)
+{
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ struct __vxge_hw_blockpool *blockpool;
+ void *memblock = NULL;
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ blockpool = &devh->block_pool;
+
+ if (size != blockpool->block_size) {
+
+ memblock = vxge_os_dma_malloc(devh->pdev, size,
+ &dma_object->handle,
+ &dma_object->acc_handle);
+
+ if (memblock == NULL) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
- }
- /* initial replenish will increment the counter in its post() routine,
- * we have to reset it */
- ring->stats->common_stats.usage_cnt = 0;
+ dma_object->addr = pci_map_single(devh->pdev, memblock, size,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (unlikely(pci_dma_mapping_error(devh->pdev,
+ dma_object->addr))) {
+ vxge_os_dma_free(devh->pdev, memblock,
+ &dma_object->acc_handle);
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
+
+ } else {
+
+ if (!list_empty(&blockpool->free_block_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_block_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
+
+ if (entry != NULL) {
+ list_del(&entry->item);
+ dma_object->addr = entry->dma_addr;
+ dma_object->handle = entry->dma_handle;
+ dma_object->acc_handle = entry->acc_handle;
+ memblock = entry->memblock;
+
+ list_add(&entry->item,
+ &blockpool->free_entry_list);
+ blockpool->pool_size--;
+ }
+
+ if (memblock != NULL)
+ __vxge_hw_blockpool_blocks_add(blockpool);
+ }
exit:
- return status;
+ return memblock;
}
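
__vxge_hw_blockpool_malloc serves requests that match the pool's block size from the free list, where blocks are already DMA-mapped, and falls back to a fresh allocation plus pci_map_single() for any other size, freeing the buffer again if the mapping fails. A compact sketch of that pool-or-fallback decision, assuming hypothetical demo_* helpers instead of the driver's structures:

	#include <linux/pci.h>
	#include <linux/slab.h>

	struct demo_pool { u32 block_size; /* ... free list, etc. ... */ };
	void *demo_pool_take(struct demo_pool *pool, dma_addr_t *dma);

	/* Sketch only: size-matched requests come from the pool, others are mapped fresh. */
	static void *demo_alloc(struct demo_pool *pool, struct pci_dev *pdev,
				u32 size, dma_addr_t *dma)
	{
		void *buf;

		if (size == pool->block_size)
			return demo_pool_take(pool, dma);	/* pre-mapped block */

		buf = kmalloc(size, GFP_KERNEL);
		if (!buf)
			return NULL;

		*dma = pci_map_single(pdev, buf, size, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(pdev, *dma)) {
			kfree(buf);
			return NULL;
		}
		return buf;
	}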
/*
- * __vxge_hw_ring_abort - Returns the RxD
- * This function terminates the RxDs of ring
+ * __vxge_hw_blockpool_blocks_remove - Free additional blocks
*/
-static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
+static void
+__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
{
- void *rxdh;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
+ struct list_head *p, *n;
- for (;;) {
- vxge_hw_channel_dtr_try_complete(channel, &rxdh);
+ list_for_each_safe(p, n, &blockpool->free_block_list) {
- if (rxdh == NULL)
+ if (blockpool->pool_size < blockpool->pool_max)
break;
- vxge_hw_channel_dtr_complete(channel);
+ pci_unmap_single(
+ ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+ ((struct __vxge_hw_blockpool_entry *)p)->length,
+ PCI_DMA_BIDIRECTIONAL);
- if (ring->rxd_term)
- ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
- channel->userdata);
+ vxge_os_dma_free(
+ ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ ((struct __vxge_hw_blockpool_entry *)p)->memblock,
+ &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
- vxge_hw_channel_dtr_free(channel, rxdh);
- }
+ list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
- return VXGE_HW_OK;
+ list_add(p, &blockpool->free_entry_list);
+
+ blockpool->pool_size--;
+
+ }
}
/*
- * __vxge_hw_ring_reset - Resets the ring
- * This function resets the ring during vpath reset operation
- * __vxge_hw_blockpool_free - Frees the memory allocated with
+ * __vxge_hw_blockpool_malloc
*/
-static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
+static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
+ void *memblock, u32 size,
+ struct vxge_hw_mempool_dma *dma_object)
{
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ struct __vxge_hw_blockpool *blockpool;
enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_channel *channel;
- channel = &ring->channel;
+ blockpool = &devh->block_pool;
- __vxge_hw_ring_abort(ring);
+ if (size != blockpool->block_size) {
+ pci_unmap_single(devh->pdev, dma_object->addr, size,
+ PCI_DMA_BIDIRECTIONAL);
+ vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
+ } else {
- status = __vxge_hw_channel_reset(channel);
+ if (!list_empty(&blockpool->free_entry_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_entry_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
- if (status != VXGE_HW_OK)
- goto exit;
+ if (entry == NULL)
+ entry = vmalloc(sizeof(
+ struct __vxge_hw_blockpool_entry));
+ else
+ list_del(&entry->item);
- if (ring->rxd_init) {
- status = vxge_hw_ring_replenish(ring);
- if (status != VXGE_HW_OK)
- goto exit;
+ if (entry != NULL) {
+ entry->length = size;
+ entry->memblock = memblock;
+ entry->dma_addr = dma_object->addr;
+ entry->acc_handle = dma_object->acc_handle;
+ entry->dma_handle = dma_object->handle;
+ list_add(&entry->item,
+ &blockpool->free_block_list);
+ blockpool->pool_size++;
+ status = VXGE_HW_OK;
+ } else
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+
+ if (status == VXGE_HW_OK)
+ __vxge_hw_blockpool_blocks_remove(blockpool);
}
-exit:
- return status;
}
/*
- * __vxge_hw_ring_delete - Removes the ring
- * This function freeup the memory pool and removes the ring
+ * vxge_hw_mempool_destroy
*/
-static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
- struct __vxge_hw_ring *ring = vp->vpath->ringh;
+ u32 i, j;
+ struct __vxge_hw_device *devh = mempool->devh;
- __vxge_hw_ring_abort(ring);
+ for (i = 0; i < mempool->memblocks_allocated; i++) {
+ struct vxge_hw_mempool_dma *dma_object;
- if (ring->mempool)
- __vxge_hw_mempool_destroy(ring->mempool);
+ vxge_assert(mempool->memblocks_arr[i]);
+ vxge_assert(mempool->memblocks_dma_arr + i);
- vp->vpath->ringh = NULL;
- __vxge_hw_channel_free(&ring->channel);
+ dma_object = mempool->memblocks_dma_arr + i;
- return VXGE_HW_OK;
+ for (j = 0; j < mempool->items_per_memblock; j++) {
+ u32 index = i * mempool->items_per_memblock + j;
+
+ /* to skip the last partially filled (if any) memblock */
+ if (index >= mempool->items_current)
+ break;
+ }
+
+ vfree(mempool->memblocks_priv_arr[i]);
+
+ __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
+ mempool->memblock_size, dma_object);
+ }
+
+ vfree(mempool->items_arr);
+ vfree(mempool->memblocks_dma_arr);
+ vfree(mempool->memblocks_priv_arr);
+ vfree(mempool->memblocks_arr);
+ vfree(mempool);
}
/*
@@ -1627,15 +2586,12 @@ __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
* allocate new memblock and its private part at once.
* This helps to minimize memory usage a lot. */
mempool->memblocks_priv_arr[i] =
- vmalloc(mempool->items_priv_size * n_items);
+ vzalloc(mempool->items_priv_size * n_items);
if (mempool->memblocks_priv_arr[i] == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
- memset(mempool->memblocks_priv_arr[i], 0,
- mempool->items_priv_size * n_items);
-
/* allocate DMA-capable memblock */
mempool->memblocks_arr[i] =
__vxge_hw_blockpool_malloc(mempool->devh,
@@ -1686,16 +2642,15 @@ exit:
* with size enough to hold %items_initial number of items. Memory is
* DMA-able but client must map/unmap before interoperating with the device.
*/
-static struct vxge_hw_mempool*
-__vxge_hw_mempool_create(
- struct __vxge_hw_device *devh,
- u32 memblock_size,
- u32 item_size,
- u32 items_priv_size,
- u32 items_initial,
- u32 items_max,
- struct vxge_hw_mempool_cbs *mp_callback,
- void *userdata)
+static struct vxge_hw_mempool *
+__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
+ u32 memblock_size,
+ u32 item_size,
+ u32 items_priv_size,
+ u32 items_initial,
+ u32 items_max,
+ struct vxge_hw_mempool_cbs *mp_callback,
+ void *userdata)
{
enum vxge_hw_status status = VXGE_HW_OK;
u32 memblocks_to_allocate;
@@ -1707,13 +2662,11 @@ __vxge_hw_mempool_create(
goto exit;
}
- mempool = (struct vxge_hw_mempool *)
- vmalloc(sizeof(struct vxge_hw_mempool));
+ mempool = vzalloc(sizeof(struct vxge_hw_mempool));
if (mempool == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto exit;
}
- memset(mempool, 0, sizeof(struct vxge_hw_mempool));
mempool->devh = devh;
mempool->memblock_size = memblock_size;
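
A pattern repeated in this hunk and the surrounding ones is replacing a vmalloc() immediately followed by memset() with vzalloc(), which returns virtually contiguous, already-zeroed memory. Ignoring error handling, the two forms are equivalent:

	/* before */
	p = vmalloc(len);
	memset(p, 0, len);

	/* after */
	p = vzalloc(len);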
@@ -1733,53 +2686,43 @@ __vxge_hw_mempool_create(
/* allocate array of memblocks */
mempool->memblocks_arr =
- (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
+ vzalloc(sizeof(void *) * mempool->memblocks_max);
if (mempool->memblocks_arr == NULL) {
__vxge_hw_mempool_destroy(mempool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
mempool = NULL;
goto exit;
}
- memset(mempool->memblocks_arr, 0,
- sizeof(void *) * mempool->memblocks_max);
/* allocate array of private parts of items per memblocks */
mempool->memblocks_priv_arr =
- (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
+ vzalloc(sizeof(void *) * mempool->memblocks_max);
if (mempool->memblocks_priv_arr == NULL) {
__vxge_hw_mempool_destroy(mempool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
mempool = NULL;
goto exit;
}
- memset(mempool->memblocks_priv_arr, 0,
- sizeof(void *) * mempool->memblocks_max);
/* allocate array of memblocks DMA objects */
- mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
- vmalloc(sizeof(struct vxge_hw_mempool_dma) *
+ mempool->memblocks_dma_arr =
+ vzalloc(sizeof(struct vxge_hw_mempool_dma) *
mempool->memblocks_max);
-
if (mempool->memblocks_dma_arr == NULL) {
__vxge_hw_mempool_destroy(mempool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
mempool = NULL;
goto exit;
}
- memset(mempool->memblocks_dma_arr, 0,
- sizeof(struct vxge_hw_mempool_dma) *
- mempool->memblocks_max);
/* allocate hash array of items */
- mempool->items_arr =
- (void **) vmalloc(sizeof(void *) * mempool->items_max);
+ mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
if (mempool->items_arr == NULL) {
__vxge_hw_mempool_destroy(mempool);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
mempool = NULL;
goto exit;
}
- memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
/* calculate initial number of memblocks */
memblocks_to_allocate = (mempool->items_initial +
@@ -1801,122 +2744,190 @@ exit:
}
/*
- * vxge_hw_mempool_destroy
+ * __vxge_hw_ring_abort - Returns the RxD
+ * This function terminates the RxDs of ring
*/
-static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
+static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
- u32 i, j;
- struct __vxge_hw_device *devh = mempool->devh;
-
- for (i = 0; i < mempool->memblocks_allocated; i++) {
- struct vxge_hw_mempool_dma *dma_object;
+ void *rxdh;
+ struct __vxge_hw_channel *channel;
- vxge_assert(mempool->memblocks_arr[i]);
- vxge_assert(mempool->memblocks_dma_arr + i);
+ channel = &ring->channel;
- dma_object = mempool->memblocks_dma_arr + i;
+ for (;;) {
+ vxge_hw_channel_dtr_try_complete(channel, &rxdh);
- for (j = 0; j < mempool->items_per_memblock; j++) {
- u32 index = i * mempool->items_per_memblock + j;
+ if (rxdh == NULL)
+ break;
- /* to skip last partially filled(if any) memblock */
- if (index >= mempool->items_current)
- break;
- }
+ vxge_hw_channel_dtr_complete(channel);
- vfree(mempool->memblocks_priv_arr[i]);
+ if (ring->rxd_term)
+ ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
+ channel->userdata);
- __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
- mempool->memblock_size, dma_object);
+ vxge_hw_channel_dtr_free(channel, rxdh);
}
- vfree(mempool->items_arr);
+ return VXGE_HW_OK;
+}
- vfree(mempool->memblocks_dma_arr);
+/*
+ * __vxge_hw_ring_reset - Resets the ring
+ * This function resets the ring during vpath reset operation
+ */
+static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_channel *channel;
- vfree(mempool->memblocks_priv_arr);
+ channel = &ring->channel;
- vfree(mempool->memblocks_arr);
+ __vxge_hw_ring_abort(ring);
- vfree(mempool);
+ status = __vxge_hw_channel_reset(channel);
+
+ if (status != VXGE_HW_OK)
+ goto exit;
+
+ if (ring->rxd_init) {
+ status = vxge_hw_ring_replenish(ring);
+ if (status != VXGE_HW_OK)
+ goto exit;
+ }
+exit:
+ return status;
}
/*
- * __vxge_hw_device_fifo_config_check - Check fifo configuration.
- * Check the fifo configuration
+ * __vxge_hw_ring_delete - Removes the ring
+ * This function frees up the memory pool and removes the ring
*/
-enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
+static enum vxge_hw_status
+__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
- if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
- (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
- return VXGE_HW_BADCFG_FIFO_BLOCKS;
+ struct __vxge_hw_ring *ring = vp->vpath->ringh;
+
+ __vxge_hw_ring_abort(ring);
+
+ if (ring->mempool)
+ __vxge_hw_mempool_destroy(ring->mempool);
+
+ vp->vpath->ringh = NULL;
+ __vxge_hw_channel_free(&ring->channel);
return VXGE_HW_OK;
}
/*
- * __vxge_hw_device_vpath_config_check - Check vpath configuration.
- * Check the vpath configuration
+ * __vxge_hw_ring_create - Create a Ring
+ * This function creates Ring and initializes it.
*/
static enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
+__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
+ struct vxge_hw_ring_attr *attr)
{
- enum vxge_hw_status status;
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_ring *ring;
+ u32 ring_length;
+ struct vxge_hw_ring_config *config;
+ struct __vxge_hw_device *hldev;
+ u32 vp_id;
+ struct vxge_hw_mempool_cbs ring_mp_callback;
- if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
- (vp_config->min_bandwidth >
- VXGE_HW_VPATH_BANDWIDTH_MAX))
- return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
+ if ((vp == NULL) || (attr == NULL)) {
+ status = VXGE_HW_FAIL;
+ goto exit;
+ }
- status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
- if (status != VXGE_HW_OK)
- return status;
+ hldev = vp->vpath->hldev;
+ vp_id = vp->vpath->vp_id;
- if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
- ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
- (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
- return VXGE_HW_BADCFG_VPATH_MTU;
+ config = &hldev->config.vp_config[vp_id].ring;
- if ((vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
- (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
- (vp_config->rpa_strip_vlan_tag !=
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
- return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
+ ring_length = config->ring_blocks *
+ vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
- return VXGE_HW_OK;
-}
+ ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
+ VXGE_HW_CHANNEL_TYPE_RING,
+ ring_length,
+ attr->per_rxd_space,
+ attr->userdata);
+ if (ring == NULL) {
+ status = VXGE_HW_ERR_OUT_OF_MEMORY;
+ goto exit;
+ }
-/*
- * __vxge_hw_device_config_check - Check device configuration.
- * Check the device configuration
- */
-enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
-{
- u32 i;
- enum vxge_hw_status status;
+ vp->vpath->ringh = ring;
+ ring->vp_id = vp_id;
+ ring->vp_reg = vp->vpath->vp_reg;
+ ring->common_reg = hldev->common_reg;
+ ring->stats = &vp->vpath->sw_stats->ring_stats;
+ ring->config = config;
+ ring->callback = attr->callback;
+ ring->rxd_init = attr->rxd_init;
+ ring->rxd_term = attr->rxd_term;
+ ring->buffer_mode = config->buffer_mode;
+ ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
+ ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
+ ring->rxds_limit = config->rxds_limit;
- if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
- (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
- return VXGE_HW_BADCFG_INTR_MODE;
+ ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
+ ring->rxd_priv_size =
+ sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
+ ring->per_rxd_space = attr->per_rxd_space;
- if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
- (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
- return VXGE_HW_BADCFG_RTS_MAC_EN;
+ ring->rxd_priv_size =
+ ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
+ VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
- for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
- status = __vxge_hw_device_vpath_config_check(
- &new_config->vp_config[i]);
- if (status != VXGE_HW_OK)
- return status;
+ /* how many RxDs can fit into one block. Depends on configured
+ * buffer_mode. */
+ ring->rxds_per_block =
+ vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+
+ /* calculate actual RxD block private size */
+ ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
+ ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
+ ring->mempool = __vxge_hw_mempool_create(hldev,
+ VXGE_HW_BLOCK_SIZE,
+ VXGE_HW_BLOCK_SIZE,
+ ring->rxdblock_priv_size,
+ ring->config->ring_blocks,
+ ring->config->ring_blocks,
+ &ring_mp_callback,
+ ring);
+ if (ring->mempool == NULL) {
+ __vxge_hw_ring_delete(vp);
+ return VXGE_HW_ERR_OUT_OF_MEMORY;
}
- return VXGE_HW_OK;
+ status = __vxge_hw_channel_initialize(&ring->channel);
+ if (status != VXGE_HW_OK) {
+ __vxge_hw_ring_delete(vp);
+ goto exit;
+ }
+
+ /* Note:
+ * Specifying rxd_init callback means two things:
+ * 1) rxds need to be initialized by driver at channel-open time;
+ * 2) rxds need to be posted at channel-open time
+ * (that's what the initial_replenish() below does)
+ * Currently we don't have a case where 1) is done without 2).
+ */
+ if (ring->rxd_init) {
+ status = vxge_hw_ring_replenish(ring);
+ if (status != VXGE_HW_OK) {
+ __vxge_hw_ring_delete(vp);
+ goto exit;
+ }
+ }
+
+ /* initial replenish will increment the counter in its post() routine,
+ * we have to reset it */
+ ring->stats->common_stats.usage_cnt = 0;
+exit:
+ return status;
}
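
The rounding of rxd_priv_size a few lines above is the usual round-up-by-division idiom, padding the per-RxD private area to a whole number of cache lines. Assuming, purely for illustration, a 128-byte cache line, a 96-byte struct __vxge_hw_ring_rxd_priv and 40 bytes of per_rxd_space:

	/* 96 + 40 = 136 bytes requested
	 * ((136 + 128 - 1) / 128) * 128 = (263 / 128) * 128 = 2 * 128 = 256
	 * i.e. each RxD gets two full cache lines of private space.
	 */
	size = ((size + VXGE_CACHE_LINE_SIZE - 1) / VXGE_CACHE_LINE_SIZE) *
			VXGE_CACHE_LINE_SIZE;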
/*
@@ -1938,7 +2949,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
device_config->vp_config[i].vp_id = i;
device_config->vp_config[i].min_bandwidth =
@@ -2078,61 +3088,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
}
/*
- * _hw_legacy_swapper_set - Set the swapper bits for the legacy secion.
- * Set the swapper bits appropriately for the lagacy section.
- */
-static enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = readq(&legacy_reg->toc_swapper_fb);
-
- wmb();
-
- switch (val64) {
-
- case VXGE_HW_SWAPPER_INITIAL_VALUE:
- return status;
-
- case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
- writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_rd_swap_en);
- writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_rd_flip_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_wr_swap_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_wr_flip_en);
- break;
-
- case VXGE_HW_SWAPPER_BYTE_SWAPPED:
- writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_rd_swap_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
- &legacy_reg->pifm_wr_swap_en);
- break;
-
- case VXGE_HW_SWAPPER_BIT_FLIPPED:
- writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_rd_flip_en);
- writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
- &legacy_reg->pifm_wr_flip_en);
- break;
- }
-
- wmb();
-
- val64 = readq(&legacy_reg->toc_swapper_fb);
-
- if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
- status = VXGE_HW_ERR_SWAPPER_CTRL;
-
- return status;
-}
-
-/*
* __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
* Set the swapper bits appropriately for the vpath.
*/
@@ -2156,9 +3111,8 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
* Set the swapper bits appropriately for the vpath.
*/
static enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(
- struct vxge_hw_legacy_reg __iomem *legacy_reg,
- struct vxge_hw_vpath_reg __iomem *vpath_reg)
+__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
u64 val64;
@@ -2408,6 +3362,69 @@ exit:
}
/*
+ * __vxge_hw_fifo_abort - Returns the TxD
+ * This function terminates the TxDs of fifo
+ */
+static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
+{
+ void *txdlh;
+
+ for (;;) {
+ vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
+
+ if (txdlh == NULL)
+ break;
+
+ vxge_hw_channel_dtr_complete(&fifo->channel);
+
+ if (fifo->txdl_term) {
+ fifo->txdl_term(txdlh,
+ VXGE_HW_TXDL_STATE_POSTED,
+ fifo->channel.userdata);
+ }
+
+ vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
+ }
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_fifo_reset - Resets the fifo
+ * This function resets the fifo during vpath reset operation
+ */
+static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+
+ __vxge_hw_fifo_abort(fifo);
+ status = __vxge_hw_channel_reset(&fifo->channel);
+
+ return status;
+}
+
+/*
+ * __vxge_hw_fifo_delete - Removes the FIFO
+ * This function frees up the memory pool and removes the FIFO
+ */
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
+{
+ struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
+
+ __vxge_hw_fifo_abort(fifo);
+
+ if (fifo->mempool)
+ __vxge_hw_mempool_destroy(fifo->mempool);
+
+ vp->vpath->fifoh = NULL;
+
+ __vxge_hw_channel_free(&fifo->channel);
+
+ return VXGE_HW_OK;
+}
+
+/*
* __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
* list callback
* This function is callback passed to __vxge_hw_mempool_create to create memory
@@ -2453,7 +3470,7 @@ __vxge_hw_fifo_mempool_item_alloc(
* __vxge_hw_fifo_create - Create a FIFO
* This function creates FIFO and initializes it.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
struct vxge_hw_fifo_attr *attr)
{
@@ -2496,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
/* apply "interrupts per txdl" attribute */
fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
+ fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
+ fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
if (fifo->config->intr)
fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
@@ -2572,68 +3591,6 @@ exit:
}
/*
- * __vxge_hw_fifo_abort - Returns the TxD
- * This function terminates the TxDs of fifo
- */
-static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
-{
- void *txdlh;
-
- for (;;) {
- vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
-
- if (txdlh == NULL)
- break;
-
- vxge_hw_channel_dtr_complete(&fifo->channel);
-
- if (fifo->txdl_term) {
- fifo->txdl_term(txdlh,
- VXGE_HW_TXDL_STATE_POSTED,
- fifo->channel.userdata);
- }
-
- vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
- }
-
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_reset - Resets the fifo
- * This function resets the fifo during vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- __vxge_hw_fifo_abort(fifo);
- status = __vxge_hw_channel_reset(&fifo->channel);
-
- return status;
-}
-
-/*
- * __vxge_hw_fifo_delete - Removes the FIFO
- * This function freeup the memory pool and removes the FIFO
- */
-enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
-{
- struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
-
- __vxge_hw_fifo_abort(fifo);
-
- if (fifo->mempool)
- __vxge_hw_mempool_destroy(fifo->mempool);
-
- vp->vpath->fifoh = NULL;
-
- __vxge_hw_channel_free(&fifo->channel);
-
- return VXGE_HW_OK;
-}
-
-/*
* __vxge_hw_vpath_pci_read - Read the content of given address
* in pci config space.
* Read from the vpath pci config space.
@@ -2675,297 +3632,6 @@ exit:
return status;
}
-/*
- * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
- * Returns the function number of the vpath.
- */
-static u32
-__vxge_hw_vpath_func_id_get(u32 vp_id,
- struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
-{
- u64 val64;
-
- val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
-
- return
- (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
-}
-
-/*
- * __vxge_hw_read_rts_ds - Program RTS steering critieria
- */
-static inline void
-__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
- u64 dta_struct_sel)
-{
- writeq(0, &vpath_reg->rts_access_steer_ctrl);
- wmb();
- writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
- writeq(0, &vpath_reg->rts_access_steer_data1);
- wmb();
-}
-
-
-/*
- * __vxge_hw_vpath_card_info_get - Get the serial numbers,
- * part number and product description.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u32 i, j;
- u64 val64;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
- u8 *serial_number = hw_info->serial_number;
- u8 *part_number = hw_info->part_number;
- u8 *product_desc = hw_info->product_desc;
-
- __vxge_hw_read_rts_ds(vpath_reg,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- return status;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- ((u64 *)serial_number)[0] = be64_to_cpu(data1);
-
- data2 = readq(&vpath_reg->rts_access_steer_data1);
- ((u64 *)serial_number)[1] = be64_to_cpu(data2);
- status = VXGE_HW_OK;
- } else
- *serial_number = 0;
-
- __vxge_hw_read_rts_ds(vpath_reg,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- return status;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- ((u64 *)part_number)[0] = be64_to_cpu(data1);
-
- data2 = readq(&vpath_reg->rts_access_steer_data1);
- ((u64 *)part_number)[1] = be64_to_cpu(data2);
-
- status = VXGE_HW_OK;
-
- } else
- *part_number = 0;
-
- j = 0;
-
- for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
- i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
-
- __vxge_hw_read_rts_ds(vpath_reg, i);
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- return status;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
-
- data2 = readq(&vpath_reg->rts_access_steer_data1);
- ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
-
- status = VXGE_HW_OK;
- } else
- *product_desc = 0;
- }
-
- return status;
-}
-
-/*
- * __vxge_hw_vpath_fw_ver_get - Get the fw version
- * Returns FW Version
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info)
-{
- u64 val64;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
- struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
- struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
- struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- data2 = readq(&vpath_reg->rts_access_steer_data1);
-
- fw_date->day =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
- data1);
- fw_date->month =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
- data1);
- fw_date->year =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
- data1);
-
- snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
- fw_date->month, fw_date->day, fw_date->year);
-
- fw_version->major =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
- fw_version->minor =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
- fw_version->build =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
-
- snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- fw_version->major, fw_version->minor, fw_version->build);
-
- flash_date->day =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
- flash_date->month =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
- flash_date->year =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
-
- snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
- "%2.2d/%2.2d/%4.4d",
- flash_date->month, flash_date->day, flash_date->year);
-
- flash_version->major =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
- flash_version->minor =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
- flash_version->build =
- (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
-
- snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
- flash_version->major, flash_version->minor,
- flash_version->build);
-
- status = VXGE_HW_OK;
-
- } else
- status = VXGE_HW_FAIL;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
- * Returns pci function mode
- */
-static u64
-__vxge_hw_vpath_pci_func_mode_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg)
-{
- u64 val64;
- u64 data1 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- __vxge_hw_read_rts_ds(vpath_reg,
- VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- status = VXGE_HW_OK;
- } else {
- data1 = 0;
- status = VXGE_HW_FAIL;
- }
-exit:
- return data1;
-}
-
/**
* vxge_hw_device_flick_link_led - Flick (blink) link LED.
* @hldev: HW device.
@@ -2974,37 +3640,24 @@ exit:
* Flicker the link LED.
*/
enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
- u64 on_off)
+vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct __vxge_hw_virtualpath *vpath;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
if (hldev == NULL) {
status = VXGE_HW_ERR_INVALID_DEVICE;
goto exit;
}
- vp_reg = hldev->vpath_reg[hldev->first_vp_id];
-
- writeq(0, &vp_reg->rts_access_steer_ctrl);
- wmb();
- writeq(on_off, &vp_reg->rts_access_steer_data0);
- writeq(0, &vp_reg->rts_access_steer_data1);
- wmb();
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
+ vpath = &hldev->virtual_paths[hldev->first_vp_id];
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+ data0 = on_off;
+ status = vxge_hw_vpath_fw_api(vpath,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+ 0, &data0, &data1, &steer_ctrl);
exit:
return status;
}
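
From here on the patch rewrites callers of the RTS steering registers (the LED control above, the rts_table get/set below, and the removed card-info, firmware-version and MAC-address readers) in terms of a single vxge_hw_vpath_fw_api() helper rather than open-coded writeq/strobe/readq sequences. Based only on the call sites visible in this diff, the usage shape is roughly:

	u64 data0 = 0, data1 = 0, steer_ctrl = 0;

	data0 = on_off;		/* inputs go in via data0/data1 */
	status = vxge_hw_vpath_fw_api(vpath, action, struct_sel, offset,
				      &data0, &data1, &steer_ctrl);
	/* on VXGE_HW_OK, any results come back through the same pointers */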
@@ -3013,63 +3666,38 @@ exit:
* __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
*/
enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(
- struct __vxge_hw_vpath_handle *vp,
- u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
+__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
+ u32 action, u32 rts_table, u32 offset,
+ u64 *data0, u64 *data1)
{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- enum vxge_hw_status status = VXGE_HW_OK;
+ enum vxge_hw_status status;
+ u64 steer_ctrl = 0;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
- vpath = vp->vpath;
- vp_reg = vpath->vp_reg;
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
-
if ((rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
(rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
(rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
(rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
- val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
+ steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
}
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- vpath->hldev->config.device_poll_millis);
-
+ status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+ data0, data1, &steer_ctrl);
if (status != VXGE_HW_OK)
goto exit;
- val64 = readq(&vp_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
- *data1 = readq(&vp_reg->rts_access_steer_data0);
-
- if ((rts_table ==
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
- (rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
- *data2 = readq(&vp_reg->rts_access_steer_data1);
- }
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_FAIL;
+ if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
+ (rts_table !=
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+ *data1 = 0;
exit:
return status;
}
@@ -3078,107 +3706,27 @@ exit:
* __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
*/
enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(
- struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
- u32 offset, u64 data1, u64 data2)
+__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
+ u32 rts_table, u32 offset, u64 steer_data0,
+ u64 steer_data1)
{
- u64 val64;
- struct __vxge_hw_virtualpath *vpath;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
+ u64 data0, data1 = 0, steer_ctrl = 0;
+ enum vxge_hw_status status;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
- vpath = vp->vpath;
- vp_reg = vpath->vp_reg;
-
- writeq(data1, &vp_reg->rts_access_steer_data0);
- wmb();
+ data0 = steer_data0;
if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
(rts_table ==
- VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
- writeq(data2, &vp_reg->rts_access_steer_data1);
- wmb();
- }
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- vpath->hldev->config.device_poll_millis);
-
- if (status != VXGE_HW_OK)
- goto exit;
+ VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+ data1 = steer_data1;
- val64 = readq(&vp_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
- status = VXGE_HW_OK;
- else
- status = VXGE_HW_FAIL;
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
- * from MAC address table.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(
- u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
- u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
-{
- u32 i;
- u64 val64;
- u64 data1 = 0ULL;
- u64 data2 = 0ULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
- VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vpath_reg->rts_access_steer_ctrl,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
- VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
- if (status != VXGE_HW_OK)
- goto exit;
-
- val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
- if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
- data1 = readq(&vpath_reg->rts_access_steer_data0);
- data2 = readq(&vpath_reg->rts_access_steer_data1);
-
- data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
- data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
- data2);
-
- for (i = ETH_ALEN; i > 0; i--) {
- macaddr[i-1] = (u8)(data1 & 0xFF);
- data1 >>= 8;
-
- macaddr_mask[i-1] = (u8)(data2 & 0xFF);
- data2 >>= 8;
- }
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_FAIL;
+ status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+ &data0, &data1, &steer_ctrl);
exit:
return status;
}
@@ -3204,6 +3752,8 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
0, &data0, &data1);
+ if (status != VXGE_HW_OK)
+ goto exit;
data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
@@ -3771,10 +4321,10 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
vp_reg = vpath->vp_reg;
config = vpath->vp_config;
- writeq((u64)0, &vp_reg->tim_dest_addr);
- writeq((u64)0, &vp_reg->tim_vpath_map);
- writeq((u64)0, &vp_reg->tim_bitmap);
- writeq((u64)0, &vp_reg->tim_remap);
+ writeq(0, &vp_reg->tim_dest_addr);
+ writeq(0, &vp_reg->tim_vpath_map);
+ writeq(0, &vp_reg->tim_bitmap);
+ writeq(0, &vp_reg->tim_remap);
if (config->ring.enable == VXGE_HW_RING_ENABLE)
writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
@@ -3831,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ vpath->tim_tti_cfg1_saved = val64;
+
val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -3876,8 +4428,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
- config->tti.util_sel);
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
}
if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -3888,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+ vpath->tim_tti_cfg3_saved = val64;
}
if (config->ring.enable == VXGE_HW_RING_ENABLE) {
@@ -3936,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+ vpath->tim_rti_cfg1_saved = val64;
+
val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -3981,8 +4535,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
- val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
- config->rti.util_sel);
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
}
if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -3993,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+ vpath->tim_rti_cfg3_saved = val64;
}
val64 = 0;
@@ -4003,32 +4557,14 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
+ val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
+ val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
+ val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
+ writeq(val64, &vp_reg->tim_wrkld_clc);
+
return status;
}
-void
-vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vp_config *config;
- u64 val64;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- config = vpath->vp_config;
-
- if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-
- if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
- config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- writeq(val64,
- &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- }
- }
-}
/*
* __vxge_hw_vpath_initialize
* This routine is the final phase of init which initializes the
@@ -4052,22 +4588,18 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
vp_reg = vpath->vp_reg;
status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
-
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
-
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
-
if (status != VXGE_HW_OK)
goto exit;
status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
-
if (status != VXGE_HW_OK)
goto exit;
@@ -4075,7 +4607,6 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
/* Get MRRS value from device control */
status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
-
if (status == VXGE_HW_OK) {
val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
val64 &=
@@ -4099,6 +4630,28 @@ exit:
}
/*
+ * __vxge_hw_vp_terminate - Terminate Virtual Path structure
+ * This routine closes all channels it opened and frees up memory
+ */
+static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ struct __vxge_hw_virtualpath *vpath;
+
+ vpath = &hldev->virtual_paths[vp_id];
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
+ goto exit;
+
+ VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
+ vpath->hldev->tim_int_mask1, vpath->vp_id);
+ hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
+
+ memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
+exit:
+ return;
+}
+
+/*
* __vxge_hw_vp_initialize - Initialize Virtual Path structure
* This routine is the initial phase of init which resets the vpath and
* initializes the software support structures.
@@ -4117,6 +4670,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
vpath = &hldev->virtual_paths[vp_id];
+ spin_lock_init(&hldev->virtual_paths[vp_id].lock);
vpath->vp_id = vp_id;
vpath->vp_open = VXGE_HW_VP_OPEN;
vpath->hldev = hldev;
@@ -4127,14 +4681,12 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
__vxge_hw_vpath_reset(hldev, vp_id);
status = __vxge_hw_vpath_reset_check(vpath);
-
if (status != VXGE_HW_OK) {
memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
goto exit;
}
status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
-
if (status != VXGE_HW_OK) {
memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
goto exit;
@@ -4148,7 +4700,6 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
hldev->tim_int_mask1, vp_id);
status = __vxge_hw_vpath_initialize(hldev, vp_id);
-
if (status != VXGE_HW_OK)
__vxge_hw_vp_terminate(hldev, vp_id);
exit:
@@ -4156,29 +4707,6 @@ exit:
}
/*
- * __vxge_hw_vp_terminate - Terminate Virtual Path structure
- * This routine closes all channels it opened and freeup memory
- */
-static void
-__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = &hldev->virtual_paths[vp_id];
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
- goto exit;
-
- VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
- vpath->hldev->tim_int_mask1, vpath->vp_id);
- hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
-
- memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
-exit:
- return;
-}
-
-/*
* vxge_hw_vpath_mtu_set - Set MTU.
* Set new MTU value. Example, to use jumbo frames:
* vxge_hw_vpath_mtu_set(my_device, 9600);
@@ -4215,6 +4743,64 @@ exit:
}
/*
+ * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
+ * Enable the DMA vpath statistics. The function is to be called to re-enable
+ * the adapter to update stats into the host memory
+ */
+static enum vxge_hw_status
+vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct __vxge_hw_virtualpath *vpath;
+
+ vpath = vp->vpath;
+
+ if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+ status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+ goto exit;
+ }
+
+ memcpy(vpath->hw_stats_sav, vpath->hw_stats,
+ sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+ status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
+exit:
+ return status;
+}
+
+/*
+ * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
+ * This function allocates a block from block pool or from the system
+ */
+static struct __vxge_hw_blockpool_entry *
+__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
+{
+ struct __vxge_hw_blockpool_entry *entry = NULL;
+ struct __vxge_hw_blockpool *blockpool;
+
+ blockpool = &devh->block_pool;
+
+ if (size == blockpool->block_size) {
+
+ if (!list_empty(&blockpool->free_block_list))
+ entry = (struct __vxge_hw_blockpool_entry *)
+ list_first_entry(&blockpool->free_block_list,
+ struct __vxge_hw_blockpool_entry,
+ item);
+
+ if (entry != NULL) {
+ list_del(&entry->item);
+ blockpool->pool_size--;
+ }
+ }
+
+ if (entry != NULL)
+ __vxge_hw_blockpool_blocks_add(blockpool);
+
+ return entry;
+}
+
+/*
* vxge_hw_vpath_open - Open a virtual path on a given adapter
* This function is used to open access to virtual path of an
* adapter for offload, GRO operations. This function returns
@@ -4238,19 +4824,15 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
&hldev->config.vp_config[attr->vp_id]);
-
if (status != VXGE_HW_OK)
goto vpath_open_exit1;
- vp = (struct __vxge_hw_vpath_handle *)
- vmalloc(sizeof(struct __vxge_hw_vpath_handle));
+ vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
if (vp == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto vpath_open_exit2;
}
- memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
-
vp->vpath = vpath;
if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
@@ -4273,7 +4855,6 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
VXGE_HW_BLOCK_SIZE);
-
if (vpath->stats_block == NULL) {
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto vpath_open_exit8;
@@ -4332,19 +4913,20 @@ vpath_open_exit1:
* This function is used to close access to virtual path opened
* earlier.
*/
-void
-vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
+void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
{
- struct __vxge_hw_virtualpath *vpath = NULL;
+ struct __vxge_hw_virtualpath *vpath = vp->vpath;
+ struct __vxge_hw_ring *ring = vpath->ringh;
+ struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
u64 new_count, val64, val164;
- struct __vxge_hw_ring *ring;
- vpath = vp->vpath;
- ring = vpath->ringh;
+ if (vdev->titan1) {
+ new_count = readq(&vpath->vp_reg->rxdmem_size);
+ new_count &= 0x1fff;
+ } else
+ new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
- new_count = readq(&vpath->vp_reg->rxdmem_size);
- new_count &= 0x1fff;
- val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
+ val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
&vpath->vp_reg->prc_rxd_doorbell);
@@ -4367,6 +4949,29 @@ vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
}
/*
+ * __vxge_hw_blockpool_block_free - Frees a block from block pool
+ * @devh: Hal device
+ * @entry: Entry of block to be freed
+ *
+ * This function frees a block from block pool
+ */
+static void
+__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
+ struct __vxge_hw_blockpool_entry *entry)
+{
+ struct __vxge_hw_blockpool *blockpool;
+
+ blockpool = &devh->block_pool;
+
+ if (entry->length == blockpool->block_size) {
+ list_add(&entry->item, &blockpool->free_block_list);
+ blockpool->pool_size++;
+ }
+
+ __vxge_hw_blockpool_blocks_remove(blockpool);
+}
+
+/*
* vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
* This function is used to close access to virtual path opened
* earlier.
@@ -4414,7 +5019,9 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
__vxge_hw_vp_terminate(devh, vp_id);
+ spin_lock(&vpath->lock);
vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
+ spin_unlock(&vpath->lock);
vpath_close_exit:
return status;
@@ -4515,730 +5122,3 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->cmn_rsthdlr_cfg1);
}
-
-/*
- * vxge_hw_vpath_stats_enable - Enable vpath h/wstatistics.
- * Enable the DMA vpath statistics. The function is to be called to re-enable
- * the adapter to update stats into the host memory
- */
-static enum vxge_hw_status
-vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct __vxge_hw_virtualpath *vpath;
-
- vpath = vp->vpath;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- memcpy(vpath->hw_stats_sav, vpath->hw_stats,
- sizeof(struct vxge_hw_vpath_stats_hw_info));
-
- status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_stats_access - Get the statistics from the given location
- * and offset and perform an operation
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
- u32 operation, u32 offset, u64 *stat)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto vpath_stats_access_exit;
- }
-
- vp_reg = vpath->vp_reg;
-
- val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
- VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
- VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
-
- status = __vxge_hw_pio_mem_write64(val64,
- &vp_reg->xmac_stats_access_cmd,
- VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
- vpath->hldev->config.device_poll_millis);
-
- if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
- *stat = readq(&vp_reg->xmac_stats_access_data);
- else
- *stat = 0;
-
-vpath_stats_access_exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
-{
- u64 *val64;
- int i;
- u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- val64 = (u64 *) vpath_tx_stats;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
-
- for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
- status = __vxge_hw_vpath_stats_access(vpath,
- VXGE_HW_STATS_OP_READ,
- offset, val64);
- if (status != VXGE_HW_OK)
- goto exit;
- offset++;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
-{
- u64 *val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- int i;
- u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
- val64 = (u64 *) vpath_rx_stats;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
- status = __vxge_hw_vpath_stats_access(vpath,
- VXGE_HW_STATS_OP_READ,
- offset >> 3, val64);
- if (status != VXGE_HW_OK)
- goto exit;
-
- offset += 8;
- val64++;
- }
-exit:
- return status;
-}
-
-/*
- * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats)
-{
- u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
- status = VXGE_HW_ERR_VPATH_NOT_OPEN;
- goto exit;
- }
- vp_reg = vpath->vp_reg;
-
- val64 = readq(&vp_reg->vpath_debug_stats0);
- hw_stats->ini_num_mwr_sent =
- (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats1);
- hw_stats->ini_num_mrd_sent =
- (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats2);
- hw_stats->ini_num_cpl_rcvd =
- (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats3);
- hw_stats->ini_num_mwr_byte_sent =
- VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats4);
- hw_stats->ini_num_cpl_byte_rcvd =
- VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats5);
- hw_stats->wrcrdtarb_xoff =
- (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
-
- val64 = readq(&vp_reg->vpath_debug_stats6);
- hw_stats->rdcrdtarb_xoff =
- (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count0 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count1 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count23);
- hw_stats->vpath_genstats_count2 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count01);
- hw_stats->vpath_genstats_count3 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count4);
- hw_stats->vpath_genstats_count4 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
- val64);
-
- val64 = readq(&vp_reg->vpath_genstats_count5);
- hw_stats->vpath_genstats_count5 =
- (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
- val64);
-
- status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
- if (status != VXGE_HW_OK)
- goto exit;
-
- status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
- if (status != VXGE_HW_OK)
- goto exit;
-
- VXGE_HW_VPATH_STATS_PIO_READ(
- VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
-
- hw_stats->prog_event_vnum0 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
-
- hw_stats->prog_event_vnum1 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
-
- VXGE_HW_VPATH_STATS_PIO_READ(
- VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
-
- hw_stats->prog_event_vnum2 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
-
- hw_stats->prog_event_vnum3 =
- (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
-
- val64 = readq(&vp_reg->rx_multi_cast_stats);
- hw_stats->rx_multi_cast_frame_discard =
- (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
-
- val64 = readq(&vp_reg->rx_frm_transferred);
- hw_stats->rx_frm_transferred =
- (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
-
- val64 = readq(&vp_reg->rxd_returned);
- hw_stats->rxd_returned =
- (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
-
- val64 = readq(&vp_reg->dbg_stats_rx_mpa);
- hw_stats->rx_mpa_len_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
- hw_stats->rx_mpa_mrk_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
- hw_stats->rx_mpa_crc_fail_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
-
- val64 = readq(&vp_reg->dbg_stats_rx_fau);
- hw_stats->rx_permitted_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
- hw_stats->rx_vp_reset_discarded_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
- hw_stats->rx_wol_frms =
- (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
-
- val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
- hw_stats->tx_vp_reset_discarded_frms =
- (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
- val64);
-exit:
- return status;
-}
-
-
-static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
- unsigned long size)
-{
- gfp_t flags;
- void *vaddr;
-
- if (in_interrupt())
- flags = GFP_ATOMIC | GFP_DMA;
- else
- flags = GFP_KERNEL | GFP_DMA;
-
- vaddr = kmalloc((size), flags);
-
- vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
-
-static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
- struct pci_dev **p_dma_acch)
-{
- unsigned long misaligned = *(unsigned long *)p_dma_acch;
- u8 *tmp = (u8 *)vaddr;
- tmp -= misaligned;
- kfree((void *)tmp);
-}
-
-/*
- * __vxge_hw_blockpool_create - Create block pool
- */
-
-enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool *blockpool,
- u32 pool_size,
- u32 pool_max)
-{
- u32 i;
- struct __vxge_hw_blockpool_entry *entry = NULL;
- void *memblock;
- dma_addr_t dma_addr;
- struct pci_dev *dma_handle;
- struct pci_dev *acc_handle;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (blockpool == NULL) {
- status = VXGE_HW_FAIL;
- goto blockpool_create_exit;
- }
-
- blockpool->hldev = hldev;
- blockpool->block_size = VXGE_HW_BLOCK_SIZE;
- blockpool->pool_size = 0;
- blockpool->pool_max = pool_max;
- blockpool->req_out = 0;
-
- INIT_LIST_HEAD(&blockpool->free_block_list);
- INIT_LIST_HEAD(&blockpool->free_entry_list);
-
- for (i = 0; i < pool_size + pool_max; i++) {
- entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
- GFP_KERNEL);
- if (entry == NULL) {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
- list_add(&entry->item, &blockpool->free_entry_list);
- }
-
- for (i = 0; i < pool_size; i++) {
-
- memblock = vxge_os_dma_malloc(
- hldev->pdev,
- VXGE_HW_BLOCK_SIZE,
- &dma_handle,
- &acc_handle);
-
- if (memblock == NULL) {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
-
- dma_addr = pci_map_single(hldev->pdev, memblock,
- VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
-
- if (unlikely(pci_dma_mapping_error(hldev->pdev,
- dma_addr))) {
-
- vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry =
- kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
- GFP_KERNEL);
- if (entry != NULL) {
- list_del(&entry->item);
- entry->length = VXGE_HW_BLOCK_SIZE;
- entry->memblock = memblock;
- entry->dma_addr = dma_addr;
- entry->acc_handle = acc_handle;
- entry->dma_handle = dma_handle;
- list_add(&entry->item,
- &blockpool->free_block_list);
- blockpool->pool_size++;
- } else {
- __vxge_hw_blockpool_destroy(blockpool);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto blockpool_create_exit;
- }
- }
-
-blockpool_create_exit:
- return status;
-}
-
-/*
- * __vxge_hw_blockpool_destroy - Deallocates the block pool
- */
-
-void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
-{
-
- struct __vxge_hw_device *hldev;
- struct list_head *p, *n;
- u16 ret;
-
- if (blockpool == NULL) {
- ret = 1;
- goto exit;
- }
-
- hldev = blockpool->hldev;
-
- list_for_each_safe(p, n, &blockpool->free_block_list) {
-
- pci_unmap_single(hldev->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
- ((struct __vxge_hw_blockpool_entry *)p)->length,
- PCI_DMA_BIDIRECTIONAL);
-
- vxge_os_dma_free(hldev->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->memblock,
- &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
-
- list_del(
- &((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree(p);
- blockpool->pool_size--;
- }
-
- list_for_each_safe(p, n, &blockpool->free_entry_list) {
- list_del(
- &((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree((void *)p);
- }
- ret = 0;
-exit:
- return;
-}
-
-/*
- * __vxge_hw_blockpool_blocks_add - Request additional blocks
- */
-static
-void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
-{
- u32 nreq = 0, i;
-
- if ((blockpool->pool_size + blockpool->req_out) <
- VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
- nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
- blockpool->req_out += nreq;
- }
-
- for (i = 0; i < nreq; i++)
- vxge_os_dma_malloc_async(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
- blockpool->hldev, VXGE_HW_BLOCK_SIZE);
-}
-
-/*
- * __vxge_hw_blockpool_blocks_remove - Free additional blocks
- */
-static
-void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
-{
- struct list_head *p, *n;
-
- list_for_each_safe(p, n, &blockpool->free_block_list) {
-
- if (blockpool->pool_size < blockpool->pool_max)
- break;
-
- pci_unmap_single(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
- ((struct __vxge_hw_blockpool_entry *)p)->length,
- PCI_DMA_BIDIRECTIONAL);
-
- vxge_os_dma_free(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
- ((struct __vxge_hw_blockpool_entry *)p)->memblock,
- &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
-
- list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
-
- list_add(p, &blockpool->free_entry_list);
-
- blockpool->pool_size--;
-
- }
-}
-
-/*
- * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
- * Adds a block to block pool
- */
-static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle)
-{
- struct __vxge_hw_blockpool *blockpool;
- struct __vxge_hw_blockpool_entry *entry = NULL;
- dma_addr_t dma_addr;
- enum vxge_hw_status status = VXGE_HW_OK;
- u32 req_out;
-
- blockpool = &devh->block_pool;
-
- if (block_addr == NULL) {
- blockpool->req_out--;
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
- dma_addr = pci_map_single(devh->pdev, block_addr, length,
- PCI_DMA_BIDIRECTIONAL);
-
- if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
-
- vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
- blockpool->req_out--;
- status = VXGE_HW_FAIL;
- goto exit;
- }
-
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry = (struct __vxge_hw_blockpool_entry *)
- vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
- else
- list_del(&entry->item);
-
- if (entry != NULL) {
- entry->length = length;
- entry->memblock = block_addr;
- entry->dma_addr = dma_addr;
- entry->acc_handle = acc_handle;
- entry->dma_handle = dma_h;
- list_add(&entry->item, &blockpool->free_block_list);
- blockpool->pool_size++;
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
-
- blockpool->req_out--;
-
- req_out = blockpool->req_out;
-exit:
- return;
-}
-
-/*
- * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
- * Allocates a block of memory of given size, either from block pool
- * or by calling vxge_os_dma_malloc()
- */
-void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
- struct vxge_hw_mempool_dma *dma_object)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
- void *memblock = NULL;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- blockpool = &devh->block_pool;
-
- if (size != blockpool->block_size) {
-
- memblock = vxge_os_dma_malloc(devh->pdev, size,
- &dma_object->handle,
- &dma_object->acc_handle);
-
- if (memblock == NULL) {
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- dma_object->addr = pci_map_single(devh->pdev, memblock, size,
- PCI_DMA_BIDIRECTIONAL);
-
- if (unlikely(pci_dma_mapping_error(devh->pdev,
- dma_object->addr))) {
- vxge_os_dma_free(devh->pdev, memblock,
- &dma_object->acc_handle);
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
- goto exit;
- }
-
- } else {
-
- if (!list_empty(&blockpool->free_block_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_block_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry != NULL) {
- list_del(&entry->item);
- dma_object->addr = entry->dma_addr;
- dma_object->handle = entry->dma_handle;
- dma_object->acc_handle = entry->acc_handle;
- memblock = entry->memblock;
-
- list_add(&entry->item,
- &blockpool->free_entry_list);
- blockpool->pool_size--;
- }
-
- if (memblock != NULL)
- __vxge_hw_blockpool_blocks_add(blockpool);
- }
-exit:
- return memblock;
-}
-
-/*
- * __vxge_hw_blockpool_free - Frees the memory allcoated with
- __vxge_hw_blockpool_malloc
- */
-void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
- void *memblock, u32 size,
- struct vxge_hw_mempool_dma *dma_object)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- blockpool = &devh->block_pool;
-
- if (size != blockpool->block_size) {
- pci_unmap_single(devh->pdev, dma_object->addr, size,
- PCI_DMA_BIDIRECTIONAL);
- vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
- } else {
-
- if (!list_empty(&blockpool->free_entry_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_entry_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry == NULL)
- entry = (struct __vxge_hw_blockpool_entry *)
- vmalloc(sizeof(
- struct __vxge_hw_blockpool_entry));
- else
- list_del(&entry->item);
-
- if (entry != NULL) {
- entry->length = size;
- entry->memblock = memblock;
- entry->dma_addr = dma_object->addr;
- entry->acc_handle = dma_object->acc_handle;
- entry->dma_handle = dma_object->handle;
- list_add(&entry->item,
- &blockpool->free_block_list);
- blockpool->pool_size++;
- status = VXGE_HW_OK;
- } else
- status = VXGE_HW_ERR_OUT_OF_MEMORY;
-
- if (status == VXGE_HW_OK)
- __vxge_hw_blockpool_blocks_remove(blockpool);
- }
-}
-
-/*
- * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
- * This function allocates a block from block pool or from the system
- */
-struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
-{
- struct __vxge_hw_blockpool_entry *entry = NULL;
- struct __vxge_hw_blockpool *blockpool;
-
- blockpool = &devh->block_pool;
-
- if (size == blockpool->block_size) {
-
- if (!list_empty(&blockpool->free_block_list))
- entry = (struct __vxge_hw_blockpool_entry *)
- list_first_entry(&blockpool->free_block_list,
- struct __vxge_hw_blockpool_entry,
- item);
-
- if (entry != NULL) {
- list_del(&entry->item);
- blockpool->pool_size--;
- }
- }
-
- if (entry != NULL)
- __vxge_hw_blockpool_blocks_add(blockpool);
-
- return entry;
-}
-
-/*
- * __vxge_hw_blockpool_block_free - Frees a block from block pool
- * @devh: Hal device
- * @entry: Entry of block to be freed
- *
- * This function frees a block from block pool
- */
-void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
- struct __vxge_hw_blockpool_entry *entry)
-{
- struct __vxge_hw_blockpool *blockpool;
-
- blockpool = &devh->block_pool;
-
- if (entry->length == blockpool->block_size) {
- list_add(&entry->item, &blockpool->free_block_list);
- blockpool->pool_size++;
- }
-
- __vxge_hw_blockpool_blocks_remove(blockpool);
-}
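
The block-pool helpers removed above all revolve around one idea: pre-allocate a set of PCI-mapped, block-sized buffers, keep them on a free list, and fall back to an on-demand allocation only when the requested size differs from the pool's block size. A minimal, self-contained sketch of that free-list pattern follows; the names and types here are illustrative only, not the driver's actual API.

	#include <linux/list.h>
	#include <linux/types.h>

	struct blk_entry {
		struct list_head item;
		void *mem;      /* CPU address of the block        */
		dma_addr_t dma;  /* bus address from pci_map_single */
	};

	struct blk_pool {
		struct list_head free_blocks;  /* mapped blocks ready for use  */
		struct list_head free_entries; /* spare bookkeeping structures */
		u32 pool_size;                 /* blocks currently on the free list */
	};

	/* Hand out a block: unlink the first free entry, remember its DMA
	 * address, and park the bookkeeping struct on the spare list so it
	 * can be reused when the block is returned to the pool. */
	static void *blk_pool_get(struct blk_pool *p, dma_addr_t *dma)
	{
		struct blk_entry *e;

		if (list_empty(&p->free_blocks))
			return NULL;

		e = list_first_entry(&p->free_blocks, struct blk_entry, item);
		list_del(&e->item);
		*dma = e->dma;
		p->pool_size--;
		list_add(&e->item, &p->free_entries);

		return e->mem;
	}
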
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 5c00861b6c2c..3c53aa732c9d 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -20,13 +20,6 @@
#define VXGE_CACHE_LINE_SIZE 128
#endif
-#define vxge_os_vaprintf(level, mask, fmt, ...) { \
- char buff[255]; \
- snprintf(buff, 255, fmt, __VA_ARGS__); \
- printk(buff); \
- printk("\n"); \
-}
-
#ifndef VXGE_ALIGN
#define VXGE_ALIGN(adrs, size) \
(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
@@ -36,8 +29,16 @@
#define VXGE_HW_MAX_MTU 9600
#define VXGE_HW_DEFAULT_MTU 1500
-#ifdef VXGE_DEBUG_ASSERT
+#define VXGE_HW_MAX_ROM_IMAGES 8
+struct eprom_image {
+ u8 is_valid:1;
+ u8 index;
+ u8 type;
+ u16 version;
+};
+
+#ifdef VXGE_DEBUG_ASSERT
/**
* vxge_assert
* @test: C-condition to check
@@ -48,16 +49,13 @@
* compilation
* time.
*/
-#define vxge_assert(test) { \
- if (!(test)) \
- vxge_os_bug("bad cond: "#test" at %s:%d\n", \
- __FILE__, __LINE__); }
+#define vxge_assert(test) BUG_ON(!(test))
#else
#define vxge_assert(test)
#endif /* end of VXGE_DEBUG_ASSERT */
/**
- * enum enum vxge_debug_level
+ * enum vxge_debug_level
* @VXGE_NONE: debug disabled
* @VXGE_ERR: all errors going to be logged out
* @VXGE_TRACE: all errors plus all kind of verbose tracing print outs
@@ -159,6 +157,47 @@ enum vxge_hw_device_link_state {
};
/**
+ * enum enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
+ * @VXGE_HW_FW_UPGRADE_OK: All OK send next 16 bytes
+ * @VXGE_HW_FW_UPGRADE_DONE: upload completed
+ * @VXGE_HW_FW_UPGRADE_ERR: upload error
+ * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
+ *
+ */
+enum vxge_hw_fw_upgrade_code {
+ VXGE_HW_FW_UPGRADE_OK = 0,
+ VXGE_HW_FW_UPGRADE_DONE = 1,
+ VXGE_HW_FW_UPGRADE_ERR = 2,
+ VXGE_FW_UPGRADE_BYTES2SKIP = 3
+};
+
+/**
+ * enum enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error unknown type
+ * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash image check failed
+ */
+enum vxge_hw_fw_upgrade_err_code {
+ VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
+ VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
+ VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
+ VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
+ VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
+ VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
+};
+
+/**
* struct vxge_hw_device_date - Date Format
* @day: Day
* @month: Month
@@ -275,9 +314,9 @@ struct vxge_hw_ring_config {
#define VXGE_HW_RING_DEFAULT 1
u32 ring_blocks;
-#define VXGE_HW_MIN_RING_BLOCKS 1
-#define VXGE_HW_MAX_RING_BLOCKS 128
-#define VXGE_HW_DEF_RING_BLOCKS 2
+#define VXGE_HW_MIN_RING_BLOCKS 1
+#define VXGE_HW_MAX_RING_BLOCKS 128
+#define VXGE_HW_DEF_RING_BLOCKS 2
u32 buffer_mode;
#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
@@ -465,7 +504,6 @@ struct vxge_hw_device_config {
* See also: vxge_hw_driver_initialize().
*/
struct vxge_hw_uld_cbs {
-
void (*link_up)(struct __vxge_hw_device *devh);
void (*link_down)(struct __vxge_hw_device *devh);
void (*crit_err)(struct __vxge_hw_device *devh,
@@ -644,6 +682,10 @@ struct __vxge_hw_virtualpath {
u32 vsport_number;
u32 max_kdfc_db;
u32 max_nofl_db;
+ u64 tim_tti_cfg1_saved;
+ u64 tim_tti_cfg3_saved;
+ u64 tim_rti_cfg1_saved;
+ u64 tim_rti_cfg3_saved;
struct __vxge_hw_ring *____cacheline_aligned ringh;
struct __vxge_hw_fifo *____cacheline_aligned fifoh;
@@ -652,6 +694,7 @@ struct __vxge_hw_virtualpath {
struct vxge_hw_vpath_stats_hw_info *hw_stats;
struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
struct vxge_hw_vpath_stats_sw_info *sw_stats;
+ spinlock_t lock;
};
/*
@@ -661,7 +704,7 @@ struct __vxge_hw_virtualpath {
*
* This structure is used to store the callback information.
*/
-struct __vxge_hw_vpath_handle{
+struct __vxge_hw_vpath_handle {
struct list_head item;
struct __vxge_hw_virtualpath *vpath;
};
@@ -674,9 +717,6 @@ struct __vxge_hw_vpath_handle{
/**
* struct __vxge_hw_device - Hal device object
* @magic: Magic Number
- * @device_id: PCI Device Id of the adapter
- * @major_revision: PCI Device major revision
- * @minor_revision: PCI Device minor revision
* @bar0: BAR0 virtual address.
* @pdev: Physical device handle
* @config: Confguration passed by the LL driver at initialization
@@ -688,9 +728,6 @@ struct __vxge_hw_device {
u32 magic;
#define VXGE_HW_DEVICE_MAGIC 0x12345678
#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
- u16 device_id;
- u8 major_revision;
- u8 minor_revision;
void __iomem *bar0;
struct pci_dev *pdev;
struct net_device *ndev;
@@ -731,6 +768,7 @@ struct __vxge_hw_device {
u32 debug_level;
u32 level_err;
u32 level_trace;
+ u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
};
#define VXGE_HW_INFO_LEN 64
@@ -781,8 +819,8 @@ struct vxge_hw_device_hw_info {
u8 serial_number[VXGE_HW_INFO_LEN];
u8 part_number[VXGE_HW_INFO_LEN];
u8 product_desc[VXGE_HW_INFO_LEN];
- u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
- u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+ u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+ u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};
/**
@@ -829,20 +867,10 @@ struct vxge_hw_device_attr {
loc, \
offset, \
&val64); \
- \
if (status != VXGE_HW_OK) \
return status; \
}
-#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
- status = __vxge_hw_vpath_stats_access(vpath, \
- VXGE_HW_STATS_OP_READ, \
- offset, \
- &val64); \
- if (status != VXGE_HW_OK) \
- return status; \
-}
-
/*
* struct __vxge_hw_ring - Ring channel.
* @channel: Channel "base" of this ring, the common part of all HW
@@ -897,6 +925,9 @@ struct __vxge_hw_ring {
u32 doorbell_cnt;
u32 total_db_cnt;
u64 rxds_limit;
+ u32 rtimer;
+ u64 tim_rti_cfg1_saved;
+ u64 tim_rti_cfg3_saved;
enum vxge_hw_status (*callback)(
struct __vxge_hw_ring *ringh,
@@ -976,6 +1007,9 @@ struct __vxge_hw_fifo {
u32 per_txdl_space;
u32 vp_id;
u32 tx_intr_num;
+ u32 rtimer;
+ u64 tim_tti_cfg1_saved;
+ u64 tim_tti_cfg3_saved;
enum vxge_hw_status (*callback)(
struct __vxge_hw_fifo *fifo_handle,
@@ -1114,7 +1148,7 @@ struct __vxge_hw_non_offload_db_wrapper {
* lookup to determine the transmit port.
* 01: Send on physical Port1.
* 10: Send on physical Port0.
- * 11: Send on both ports.
+ * 11: Send on both ports.
* Bits 18 to 21 - Reserved
* Bits 22 to 23 - Gather_Code. This field is set by the host and
* is used to describe how individual buffers comprise a frame.
@@ -1413,12 +1447,12 @@ enum vxge_hw_rth_algoritms {
* See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
*/
struct vxge_hw_rth_hash_types {
- u8 hash_type_tcpipv4_en;
- u8 hash_type_ipv4_en;
- u8 hash_type_tcpipv6_en;
- u8 hash_type_ipv6_en;
- u8 hash_type_tcpipv6ex_en;
- u8 hash_type_ipv6ex_en;
+ u8 hash_type_tcpipv4_en:1,
+ hash_type_ipv4_en:1,
+ hash_type_tcpipv6_en:1,
+ hash_type_ipv6_en:1,
+ hash_type_tcpipv6ex_en:1,
+ hash_type_ipv6ex_en:1;
};
void vxge_hw_device_debug_set(
@@ -1893,6 +1927,15 @@ out:
return vaddr;
}
+static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
+ struct pci_dev **p_dma_acch)
+{
+ unsigned long misaligned = *(unsigned long *)p_dma_acch;
+ u8 *tmp = (u8 *)vaddr;
+ tmp -= misaligned;
+ kfree((void *)tmp);
+}
+
/*
* __vxge_hw_mempool_item_priv - will return pointer on per item private space
*/
@@ -1962,7 +2005,6 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
-
#ifndef readq
static inline u64 readq(void __iomem *addr)
{
@@ -2000,7 +2042,7 @@ enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
/**
- * vxge_debug
+ * vxge_debug_ll
* @level: level of debug verbosity.
* @mask: mask for the debug
* @buf: Circular buffer for tracing
@@ -2012,26 +2054,13 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
* may be compiled out if DEBUG macro was never defined.
* See also: enum vxge_debug_level{}.
*/
-
-#define vxge_trace_aux(level, mask, fmt, ...) \
-{\
- vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
-}
-
-#define vxge_debug(module, level, mask, fmt, ...) { \
-if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
- (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
- if ((mask & VXGE_DEBUG_MASK) == mask)\
- vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
-} \
-}
-
#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
-#define vxge_debug_ll(level, mask, fmt, ...) \
-{\
- vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\
-}
-
+#define vxge_debug_ll(level, mask, fmt, ...) do { \
+ if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
+ (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
+ if ((mask & VXGE_DEBUG_MASK) == mask) \
+ printk(fmt "\n", __VA_ARGS__); \
+} while (0)
#else
#define vxge_debug_ll(level, mask, fmt, ...)
#endif
@@ -2051,4 +2080,26 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
+
+#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
+#define VXGE_HW_MAX_POLLING_COUNT 100
+
+void
+vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+ u32 *minor, u32 *build);
+
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
+ int size);
+
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+ struct eprom_image *eprom_image_data);
+
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
#endif
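
One detail worth noting in the debug-macro hunk above: the replacement vxge_debug_ll wraps its body in do { ... } while (0), the usual idiom for making a multi-statement macro behave like a single statement. A short generic illustration of why that matters (not driver code):

	#include <stdio.h>

	#define log_two(a, b) do { puts(a); puts(b); } while (0)

	/* Without the do { } while (0) wrapper, either the second puts()
	 * would fall outside the if-branch, or the trailing ';' after the
	 * braced macro body would terminate the if and orphan the else. */
	void report(int err)
	{
		if (err)
			log_two("error", "aborting");
		else
			log_two("ok", "continuing");
	}
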
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index b67746eef923..1dd3a21b3a43 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -11,7 +11,7 @@
* Virtualized Server Adapter.
* Copyright(c) 2002-2010 Exar Corp.
******************************************************************************/
-#include<linux/ethtool.h>
+#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
@@ -29,7 +29,6 @@
* Return value:
* 0 on success.
*/
-
static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
{
/* We currently only support 10Gb/FULL */
@@ -79,10 +78,9 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
* Returns driver specefic information like name, version etc.. to ethtool.
*/
static void vxge_ethtool_gdrvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+ struct ethtool_drvinfo *info)
{
- struct vxgedev *vdev;
- vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
@@ -104,15 +102,14 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
* buffer area.
*/
static void vxge_ethtool_gregs(struct net_device *dev,
- struct ethtool_regs *regs, void *space)
+ struct ethtool_regs *regs, void *space)
{
int index, offset;
enum vxge_hw_status status;
u64 reg;
- u64 *reg_space = (u64 *) space;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ u64 *reg_space = (u64 *)space;
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
regs->version = vdev->pdev->subsystem_device;
@@ -147,9 +144,8 @@ static void vxge_ethtool_gregs(struct net_device *dev,
*/
static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
@@ -168,11 +164,10 @@ static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
* void
*/
static void vxge_ethtool_getpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
+ struct ethtool_pauseparam *ep)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
}
@@ -188,11 +183,10 @@ static void vxge_ethtool_getpause_data(struct net_device *dev,
* int, returns 0 on Success
*/
static int vxge_ethtool_setpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
+ struct ethtool_pauseparam *ep)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
@@ -209,9 +203,8 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
enum vxge_hw_status status;
enum vxge_hw_status swstatus;
struct vxge_vpath *vpath = NULL;
-
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
- struct __vxge_hw_device *hldev = vdev->devh;
+ struct vxgedev *vdev = netdev_priv(dev);
+ struct __vxge_hw_device *hldev = vdev->devh;
struct vxge_hw_xmac_stats *xmac_stats;
struct vxge_hw_device_stats_sw_info *sw_stats;
struct vxge_hw_device_stats_hw_info *hw_stats;
@@ -574,12 +567,12 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
kfree(hw_stats);
}
-static void vxge_ethtool_get_strings(struct net_device *dev,
- u32 stringset, u8 *data)
+static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
{
int stat_size = 0;
int i, j;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
switch (stringset) {
case ETH_SS_STATS:
vxge_add_string("VPATH STATISTICS%s\t\t\t",
@@ -1066,21 +1059,21 @@ static void vxge_ethtool_get_strings(struct net_device *dev,
static int vxge_ethtool_get_regs_len(struct net_device *dev)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
}
static u32 vxge_get_rx_csum(struct net_device *dev)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
return vdev->rx_csum;
}
static int vxge_set_rx_csum(struct net_device *dev, u32 data)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
if (data)
vdev->rx_csum = 1;
@@ -1102,7 +1095,7 @@ static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data)
static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
{
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
switch (sset) {
case ETH_SS_STATS:
@@ -1119,6 +1112,59 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
}
}
+static int vxge_set_flags(struct net_device *dev, u32 data)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ enum vxge_hw_status status;
+
+ if (data & ~ETH_FLAG_RXHASH)
+ return -EOPNOTSUPP;
+
+ if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
+ return 0;
+
+ if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
+ return -EINVAL;
+
+ vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
+
+ /* Enabling RTH requires some of the logic in vxge_device_register and a
+ * vpath reset. Due to these restrictions, only allow modification
+ * while the interface is down.
+ */
+ status = vxge_reset_all_vpaths(vdev);
+ if (status != VXGE_HW_OK) {
+ vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
+ return -EFAULT;
+ }
+
+ if (vdev->devh->config.rth_en)
+ dev->features |= NETIF_F_RXHASH;
+ else
+ dev->features &= ~NETIF_F_RXHASH;
+
+ return 0;
+}
+
+static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+
+ if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
+ printk(KERN_INFO "Single Function Mode is required to flash the"
+ " firmware\n");
+ return -EINVAL;
+ }
+
+ if (netif_running(dev)) {
+ printk(KERN_INFO "Interface %s must be down to flash the "
+ "firmware\n", dev->name);
+ return -EBUSY;
+ }
+
+ return vxge_fw_upgrade(vdev, parms->data, 1);
+}
+
static const struct ethtool_ops vxge_ethtool_ops = {
.get_settings = vxge_ethtool_gset,
.set_settings = vxge_ethtool_sset,
@@ -1131,7 +1177,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
.get_rx_csum = vxge_get_rx_csum,
.set_rx_csum = vxge_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
+ .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
@@ -1140,6 +1186,8 @@ static const struct ethtool_ops vxge_ethtool_ops = {
.phys_id = vxge_ethtool_idnic,
.get_sset_count = vxge_ethtool_get_sset_count,
.get_ethtool_stats = vxge_get_ethtool_stats,
+ .set_flags = vxge_set_flags,
+ .flash_device = vxge_fw_flash,
};
void vxge_initialize_ethtool_ops(struct net_device *ndev)
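
The new vxge_set_flags handler added above follows the usual shape for ethtool_ops::set_flags on kernels of this vintage: reject any flag the driver does not handle, short-circuit when nothing changes, and roll back if applying the change fails. A condensed, hypothetical skeleton of that shape (vxge's real handler additionally requires the interface to be down and resets all vpaths):

	#include <linux/netdevice.h>
	#include <linux/ethtool.h>

	/* Hypothetical handler: only RX hashing is toggled here. */
	static int example_set_flags(struct net_device *dev, u32 data)
	{
		bool want = !!(data & ETH_FLAG_RXHASH);

		if (data & ~ETH_FLAG_RXHASH)	/* unsupported flag requested */
			return -EOPNOTSUPP;

		if (want == !!(dev->features & NETIF_F_RXHASH))
			return 0;		/* no change requested */

		if (want)
			dev->features |= NETIF_F_RXHASH;
		else
			dev->features &= ~NETIF_F_RXHASH;

		return 0;
	}

The companion .flash_device hook registered in the same hunk is what lets "ethtool -f <dev> <file>" push a firmware image to the adapter from userspace.
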
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 813829f3d024..395423aeec00 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -50,6 +50,8 @@
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/net_tstamp.h>
#include "vxge-main.h"
#include "vxge-reg.h"
@@ -82,16 +84,6 @@ module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config;
-static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
-static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
-static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
-static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
-
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -148,11 +140,10 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
* This function is called during interrupt context to notify link up state
* change.
*/
-static void
-vxge_callback_link_up(struct __vxge_hw_device *hldev)
+static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
vdev->ndev->name, __func__, __LINE__);
@@ -172,11 +163,10 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
* This function is called during interrupt context to notify link down state
* change.
*/
-static void
-vxge_callback_link_down(struct __vxge_hw_device *hldev)
+static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
@@ -195,7 +185,7 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev)
*
* Allocate SKB.
*/
-static struct sk_buff*
+static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
struct net_device *dev;
@@ -369,7 +359,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
u8 t_code, void *userdata)
{
struct vxge_ring *ring = (struct vxge_ring *)userdata;
- struct net_device *dev = ring->ndev;
+ struct net_device *dev = ring->ndev;
unsigned int dma_sizes;
void *first_dtr = NULL;
int dtr_cnt = 0;
@@ -381,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
struct vxge_hw_ring_rxd_info ext_info;
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
ring->ndev->name, __func__, __LINE__);
- ring->pkts_processed = 0;
-
- vxge_hw_ring_replenish(ringh);
do {
prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -413,7 +400,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
prefetch((char *)skb + L1_CACHE_BYTES);
if (unlikely(t_code)) {
-
if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
VXGE_HW_OK) {
@@ -436,9 +422,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
}
if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
-
if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
-
if (!vxge_rx_map(dtr, ring)) {
skb_put(skb, pkt_length);
@@ -513,6 +497,23 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
else
skb_checksum_none_assert(skb);
+
+ if (ring->rx_hwts) {
+ struct skb_shared_hwtstamps *skb_hwts;
+ u32 ns = *(u32 *)(skb->head + pkt_length);
+
+ skb_hwts = skb_hwtstamps(skb);
+ skb_hwts->hwtstamp = ns_to_ktime(ns);
+ skb_hwts->syststamp.tv64 = 0;
+ }
+
+ /* rth_hash_type and rth_it_hit are non-zero regardless of
+ * whether rss is enabled. Only the rth_value is zero/non-zero
+ * if rss is disabled/enabled, so key off of that.
+ */
+ if (ext_info.rth_value)
+ skb->rxhash = ext_info.rth_value;
+
vxge_rx_complete(ring, skb, ext_info.vlan,
pkt_length, &ext_info);
@@ -660,6 +661,65 @@ static enum vxge_hw_status vxge_search_mac_addr_in_list(
return FALSE;
}
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+ struct vxge_mac_addrs *new_mac_entry;
+ u8 *mac_address = NULL;
+
+ if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
+ return TRUE;
+
+ new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
+ if (!new_mac_entry) {
+ vxge_debug_mem(VXGE_ERR,
+ "%s: memory allocation failed",
+ VXGE_DRIVER_NAME);
+ return FALSE;
+ }
+
+ list_add(&new_mac_entry->item, &vpath->mac_addr_list);
+
+ /* Copy the new mac address to the list */
+ mac_address = (u8 *)&new_mac_entry->macaddr;
+ memcpy(mac_address, mac->macaddr, ETH_ALEN);
+
+ new_mac_entry->state = mac->state;
+ vpath->mac_addr_cnt++;
+
+ /* Is this a multicast address */
+ if (0x01 & mac->macaddr[0])
+ vpath->mcast_addr_cnt++;
+
+ return TRUE;
+}
+
+/* Add a mac address to DA table */
+static enum vxge_hw_status
+vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_vpath *vpath;
+ enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
+
+ if (0x01 & mac->macaddr[0]) /* multicast address */
+ duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
+ else
+ duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
+
+ vpath = &vdev->vpaths[mac->vpath_no];
+ status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
+ mac->macmask, duplicate_mode);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "DA config add entry failed for vpath:%d",
+ vpath->device_id);
+ } else
+ if (FALSE == vxge_mac_list_add(vpath, mac))
+ status = -EPERM;
+
+ return status;
+}
+
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
struct macInfo mac_info;
@@ -670,7 +730,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
struct vxge_vpath *vpath = NULL;
struct __vxge_hw_device *hldev;
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+ hldev = pci_get_drvdata(vdev->pdev);
mac_address = (u8 *)&mac_addr;
memcpy(mac_address, mac_header, ETH_ALEN);
@@ -769,7 +829,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
if (unlikely(!is_vxge_card_up(vdev))) {
vxge_debug_tx(VXGE_ERR,
@@ -1005,6 +1065,50 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
"%s:%d Exiting...", __func__, __LINE__);
}
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+ struct list_head *entry, *next;
+ u64 del_mac = 0;
+ u8 *mac_address = (u8 *) (&del_mac);
+
+ /* Copy the mac address to delete from the list */
+ memcpy(mac_address, mac->macaddr, ETH_ALEN);
+
+ list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+ if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
+ list_del(entry);
+ kfree((struct vxge_mac_addrs *)entry);
+ vpath->mac_addr_cnt--;
+
+ /* Is this a multicast address */
+ if (0x01 & mac->macaddr[0])
+ vpath->mcast_addr_cnt--;
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+/* delete a mac address from DA table */
+static enum vxge_hw_status
+vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxge_vpath *vpath;
+
+ vpath = &vdev->vpaths[mac->vpath_no];
+ status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
+ mac->macmask);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "DA config delete entry failed for vpath:%d",
+ vpath->device_id);
+ } else
+ vxge_mac_list_del(vpath, mac);
+ return status;
+}
+
/**
* vxge_set_multicast
* @dev: pointer to the device structure
@@ -1034,7 +1138,7 @@ static void vxge_set_multicast(struct net_device *dev)
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
hldev = (struct __vxge_hw_device *)vdev->devh;
if (unlikely(!is_vxge_card_up(vdev)))
@@ -1094,7 +1198,7 @@ static void vxge_set_multicast(struct net_device *dev)
/* Delete previous MC's */
for (i = 0; i < mcast_cnt; i++) {
list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *) entry;
+ mac_entry = (struct vxge_mac_addrs *)entry;
/* Copy the mac address to delete */
mac_address = (u8 *)&mac_entry->macaddr;
memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1137,7 +1241,7 @@ _set_all_mcast:
/* Delete previous MC's */
for (i = 0; i < mcast_cnt; i++) {
list_for_each_safe(entry, next, list_head) {
- mac_entry = (struct vxge_mac_addrs *) entry;
+ mac_entry = (struct vxge_mac_addrs *)entry;
/* Copy the mac address to delete */
mac_address = (u8 *)&mac_entry->macaddr;
memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1184,14 +1288,14 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct vxgedev *vdev;
- struct __vxge_hw_device *hldev;
+ struct __vxge_hw_device *hldev;
enum vxge_hw_status status = VXGE_HW_OK;
struct macInfo mac_info_new, mac_info_old;
int vpath_idx = 0;
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
hldev = vdev->devh;
if (!is_valid_ether_addr(addr->sa_data))
@@ -1292,8 +1396,13 @@ static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
+ struct __vxge_hw_device *hldev;
int msix_id;
+ hldev = pci_get_drvdata(vdev->pdev);
+
+ vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
+
vxge_hw_vpath_intr_disable(vpath->handle);
if (vdev->config.intr_type == INTA)
@@ -1310,6 +1419,95 @@ static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
}
}
+/* list all mac addresses from DA table */
+static enum vxge_hw_status
+vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ unsigned char macmask[ETH_ALEN];
+ unsigned char macaddr[ETH_ALEN];
+
+ status = vxge_hw_vpath_mac_addr_get(vpath->handle,
+ macaddr, macmask);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "DA config list entry failed for vpath:%d",
+ vpath->device_id);
+ return status;
+ }
+
+ while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
+ status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
+ macaddr, macmask);
+ if (status != VXGE_HW_OK)
+ break;
+ }
+
+ return status;
+}
+
+/* Store all mac addresses from the list to the DA table */
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct macInfo mac_info;
+ u8 *mac_address = NULL;
+ struct list_head *entry, *next;
+
+ memset(&mac_info, 0, sizeof(struct macInfo));
+
+ if (vpath->is_open) {
+ list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+ mac_address =
+ (u8 *)&
+ ((struct vxge_mac_addrs *)entry)->macaddr;
+ memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
+ ((struct vxge_mac_addrs *)entry)->state =
+ VXGE_LL_MAC_ADDR_IN_DA_TABLE;
+ /* does this mac address already exist in da table? */
+ status = vxge_search_mac_addr_in_da_table(vpath,
+ &mac_info);
+ if (status != VXGE_HW_OK) {
+ /* Add this mac address to the DA table */
+ status = vxge_hw_vpath_mac_addr_add(
+ vpath->handle, mac_info.macaddr,
+ mac_info.macmask,
+ VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "DA add entry failed for vpath:%d",
+ vpath->device_id);
+ ((struct vxge_mac_addrs *)entry)->state
+ = VXGE_LL_MAC_ADDR_IN_LIST;
+ }
+ }
+ }
+ }
+
+ return status;
+}
+
+/* Store all vlan ids from the list to the vid table */
+static enum vxge_hw_status
+vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+{
+ enum vxge_hw_status status = VXGE_HW_OK;
+ struct vxgedev *vdev = vpath->vdev;
+ u16 vid;
+
+ if (vdev->vlgrp && vpath->is_open) {
+
+ for (vid = 0; vid < VLAN_N_VID; vid++) {
+ if (!vlan_group_get_device(vdev->vlgrp, vid))
+ continue;
+ /* Add these vlan to the vid table */
+ status = vxge_hw_vpath_vid_add(vpath->handle, vid);
+ }
+ }
+
+ return status;
+}
+
/*
* vxge_reset_vpath
* @vdev: pointer to vdev
@@ -1387,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
return ret;
}
+/* Configure CI */
+static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
+{
+ int i = 0;
+
+ /* Enable CI for RTI */
+ if (vdev->config.intr_type == MSI_X) {
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ struct __vxge_hw_ring *hw_ring;
+
+ hw_ring = vdev->vpaths[i].ring.handle;
+ vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
+ }
+ }
+
+ /* Enable CI for TTI */
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
+ vxge_hw_vpath_tti_ci_set(hw_fifo);
+ /*
+ * For Inta (with or without napi), Set CI ON for only one
+ * vpath. (Have only one free running timer).
+ */
+ if ((vdev->config.intr_type == INTA) && (i == 0))
+ break;
+ }
+
+ return;
+}
+
static int do_vxge_reset(struct vxgedev *vdev, int event)
{
enum vxge_hw_status status;
@@ -1405,12 +1633,16 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
}
if (event == VXGE_LL_FULL_RESET) {
+ netif_carrier_off(vdev->ndev);
+
/* wait for all the vpath reset to complete */
for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
while (test_bit(vp_id, &vdev->vp_reset))
msleep(50);
}
+ netif_carrier_on(vdev->ndev);
+
/* if execution mode is set to debug, don't reset the adapter */
if (unlikely(vdev->exec_mode)) {
vxge_debug_init(VXGE_ERR,
@@ -1423,6 +1655,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
}
if (event == VXGE_LL_FULL_RESET) {
+ vxge_hw_device_wait_receive_idle(vdev->devh);
vxge_hw_device_intr_disable(vdev->devh);
switch (vdev->cric_err_event) {
@@ -1547,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
netif_tx_wake_all_queues(vdev->ndev);
}
+ /* configure CI */
+ vxge_config_ci_for_tti_rti(vdev);
+
out:
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
@@ -1563,9 +1799,14 @@ out:
*
* driver may reset the chip on events of serr, eccerr, etc
*/
-static int vxge_reset(struct vxgedev *vdev)
+static void vxge_reset(struct work_struct *work)
{
- return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
+ struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
+
+ if (!netif_running(vdev->ndev))
+ return;
+
+ do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}
/**
@@ -1582,22 +1823,29 @@ static int vxge_reset(struct vxgedev *vdev)
*/
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
- struct vxge_ring *ring =
- container_of(napi, struct vxge_ring, napi);
+ struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
+ int pkts_processed;
int budget_org = budget;
- ring->budget = budget;
+ ring->budget = budget;
+ ring->pkts_processed = 0;
vxge_hw_vpath_poll_rx(ring->handle);
+ pkts_processed = ring->pkts_processed;
if (ring->pkts_processed < budget_org) {
napi_complete(napi);
+
/* Re enable the Rx interrupts for the vpath */
vxge_hw_channel_msix_unmask(
(struct __vxge_hw_channel *)ring->handle,
ring->rx_vector_no);
+ mmiowb();
}
- return ring->pkts_processed;
+ /* We are copying and returning the local variable, in case if after
+ * clearing the msix interrupt above, if the interrupt fires right
+ * away which can preempt this NAPI thread */
+ return pkts_processed;
}
static int vxge_poll_inta(struct napi_struct *napi, int budget)
@@ -1608,12 +1856,12 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
int budget_org = budget;
struct vxge_ring *ring;
- struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
- pci_get_drvdata(vdev->pdev);
+ struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
for (i = 0; i < vdev->no_of_vpath; i++) {
ring = &vdev->vpaths[i].ring;
ring->budget = budget;
+ ring->pkts_processed = 0;
vxge_hw_vpath_poll_rx(ring->handle);
pkts_processed += ring->pkts_processed;
budget -= ring->pkts_processed;
@@ -1645,11 +1893,11 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
*/
static void vxge_netpoll(struct net_device *dev)
{
- struct __vxge_hw_device *hldev;
+ struct __vxge_hw_device *hldev;
struct vxgedev *vdev;
- vdev = (struct vxgedev *)netdev_priv(dev);
- hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+ vdev = netdev_priv(dev);
+ hldev = pci_get_drvdata(vdev->pdev);
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
@@ -1689,15 +1937,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
mtable[index] = index % vdev->no_of_vpath;
}
- /* Fill RTH hash types */
- hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
- hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
- hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
- hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
- hash_types.hash_type_tcpipv6ex_en =
- vdev->config.rth_hash_type_tcpipv6ex;
- hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
-
/* set indirection table, bucket-to-vpath mapping */
status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
vdev->no_of_vpath,
@@ -1710,19 +1949,27 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
return status;
}
+ /* Fill RTH hash types */
+ hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
+ hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
+ hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
+ hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
+ hash_types.hash_type_tcpipv6ex_en =
+ vdev->config.rth_hash_type_tcpipv6ex;
+ hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
+
/*
- * Because the itable_set() method uses the active_table field
- * for the target virtual path the RTH config should be updated
- * for all VPATHs. The h/w only uses the lowest numbered VPATH
- * when steering frames.
- */
+ * Because the itable_set() method uses the active_table field
+ * for the target virtual path the RTH config should be updated
+ * for all VPATHs. The h/w only uses the lowest numbered VPATH
+ * when steering frames.
+ */
for (index = 0; index < vdev->no_of_vpath; index++) {
status = vxge_hw_vpath_rts_rth_set(
vdev->vpaths[index].handle,
vdev->config.rth_algorithm,
&hash_types,
vdev->config.rth_bkt_sz);
-
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"RTH configuration failed for vpath:%d",
@@ -1734,201 +1981,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
return status;
}
-static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- struct vxge_mac_addrs *new_mac_entry;
- u8 *mac_address = NULL;
-
- if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
- return TRUE;
-
- new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
- if (!new_mac_entry) {
- vxge_debug_mem(VXGE_ERR,
- "%s: memory allocation failed",
- VXGE_DRIVER_NAME);
- return FALSE;
- }
-
- list_add(&new_mac_entry->item, &vpath->mac_addr_list);
-
- /* Copy the new mac address to the list */
- mac_address = (u8 *)&new_mac_entry->macaddr;
- memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
- new_mac_entry->state = mac->state;
- vpath->mac_addr_cnt++;
-
- /* Is this a multicast address */
- if (0x01 & mac->macaddr[0])
- vpath->mcast_addr_cnt++;
-
- return TRUE;
-}
-
-/* Add a mac address to DA table */
-static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
- enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
-
- if (0x01 & mac->macaddr[0]) /* multicast address */
- duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
- else
- duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
-
- vpath = &vdev->vpaths[mac->vpath_no];
- status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
- mac->macmask, duplicate_mode);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config add entry failed for vpath:%d",
- vpath->device_id);
- } else
- if (FALSE == vxge_mac_list_add(vpath, mac))
- status = -EPERM;
-
- return status;
-}
-
-static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
-{
- struct list_head *entry, *next;
- u64 del_mac = 0;
- u8 *mac_address = (u8 *) (&del_mac);
-
- /* Copy the mac address to delete from the list */
- memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
- list_del(entry);
- kfree((struct vxge_mac_addrs *)entry);
- vpath->mac_addr_cnt--;
-
- /* Is this a multicast address */
- if (0x01 & mac->macaddr[0])
- vpath->mcast_addr_cnt--;
- return TRUE;
- }
- }
-
- return FALSE;
-}
-/* delete a mac address from DA table */
-static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxge_vpath *vpath;
-
- vpath = &vdev->vpaths[mac->vpath_no];
- status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
- mac->macmask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config delete entry failed for vpath:%d",
- vpath->device_id);
- } else
- vxge_mac_list_del(vpath, mac);
- return status;
-}
-
-/* list all mac addresses from DA table */
-enum vxge_hw_status
-static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
- struct macInfo *mac)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- unsigned char macmask[ETH_ALEN];
- unsigned char macaddr[ETH_ALEN];
-
- status = vxge_hw_vpath_mac_addr_get(vpath->handle,
- macaddr, macmask);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA config list entry failed for vpath:%d",
- vpath->device_id);
- return status;
- }
-
- while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
-
- status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
- macaddr, macmask);
- if (status != VXGE_HW_OK)
- break;
- }
-
- return status;
-}
-
-/* Store all vlan ids from the list to the vid table */
-static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct vxgedev *vdev = vpath->vdev;
- u16 vid;
-
- if (vdev->vlgrp && vpath->is_open) {
-
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- if (!vlan_group_get_device(vdev->vlgrp, vid))
- continue;
- /* Add these vlan to the vid table */
- status = vxge_hw_vpath_vid_add(vpath->handle, vid);
- }
- }
-
- return status;
-}
-
-/* Store all mac addresses from the list to the DA table */
-static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
- struct macInfo mac_info;
- u8 *mac_address = NULL;
- struct list_head *entry, *next;
-
- memset(&mac_info, 0, sizeof(struct macInfo));
-
- if (vpath->is_open) {
-
- list_for_each_safe(entry, next, &vpath->mac_addr_list) {
- mac_address =
- (u8 *)&
- ((struct vxge_mac_addrs *)entry)->macaddr;
- memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
- ((struct vxge_mac_addrs *)entry)->state =
- VXGE_LL_MAC_ADDR_IN_DA_TABLE;
- /* does this mac address already exist in da table? */
- status = vxge_search_mac_addr_in_da_table(vpath,
- &mac_info);
- if (status != VXGE_HW_OK) {
- /* Add this mac address to the DA table */
- status = vxge_hw_vpath_mac_addr_add(
- vpath->handle, mac_info.macaddr,
- mac_info.macmask,
- VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "DA add entry failed for vpath:%d",
- vpath->device_id);
- ((struct vxge_mac_addrs *)entry)->state
- = VXGE_LL_MAC_ADDR_IN_LIST;
- }
- }
- }
- }
-
- return status;
-}
-
/* reset vpaths */
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1988,8 +2042,23 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
for (i = 0; i < vdev->no_of_vpath; i++) {
vpath = &vdev->vpaths[i];
-
vxge_assert(vpath->is_configured);
+
+ if (!vdev->titan1) {
+ struct vxge_hw_vp_config *vcfg;
+ vcfg = &vdev->devh->config.vp_config[vpath->device_id];
+
+ vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
+ vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
+ vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
+ vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
+ vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
+ vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
+ vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
+ vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
+ vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
+ }
+
attr.vp_id = vpath->device_id;
attr.fifo_attr.callback = vxge_xmit_compl;
attr.fifo_attr.txdl_term = vxge_tx_term;
@@ -2004,6 +2073,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
vpath->ring.ndev = vdev->ndev;
vpath->ring.pdev = vdev->pdev;
+
status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
if (status == VXGE_HW_OK) {
vpath->fifo.handle =
@@ -2022,8 +2092,10 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
netdev_get_tx_queue(vdev->ndev, 0);
vpath->fifo.indicate_max_pkts =
vdev->config.fifo_indicate_max_pkts;
+ vpath->fifo.tx_vector_no = 0;
vpath->ring.rx_vector_no = 0;
vpath->ring.rx_csum = vdev->rx_csum;
+ vpath->ring.rx_hwts = vdev->rx_hwts;
vpath->is_open = 1;
vdev->vp_handles[i] = vpath->handle;
vpath->ring.gro_enable = vdev->config.gro_enable;
@@ -2031,11 +2103,10 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
vdev->stats.vpaths_open++;
} else {
vdev->stats.vpath_open_fail++;
- vxge_debug_init(VXGE_ERR,
- "%s: vpath: %d failed to open "
- "with status: %d",
- vdev->ndev->name, vpath->device_id,
- status);
+ vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
+ "open with status: %d",
+ vdev->ndev->name, vpath->device_id,
+ status);
vxge_close_vpaths(vdev, 0);
return -EPERM;
}
@@ -2043,9 +2114,65 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
vp_id = vpath->handle->vpath->vp_id;
vdev->vpaths_deployed |= vxge_mBIT(vp_id);
}
+
return VXGE_HW_OK;
}
+/**
+ * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @fifo: pointer to transmit fifo structure
+ * Description: The function changes the boundary timer and restriction timer
+ * values depending on the traffic
+ * Return Value: None
+ */
+static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
+{
+ fifo->interrupt_count++;
+ if (jiffies > fifo->jiffies + HZ / 100) {
+ struct __vxge_hw_fifo *hw_fifo = fifo->handle;
+
+ fifo->jiffies = jiffies;
+ if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
+ hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
+ hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
+ vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+ } else if (hw_fifo->rtimer != 0) {
+ hw_fifo->rtimer = 0;
+ vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+ }
+ fifo->interrupt_count = 0;
+ }
+}
+
+/**
+ * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @ring: pointer to receive ring structure
+ * Description: The function increases or decreases the packet counts within
+ * the ranges of traffic utilization, if the interrupts due to this ring are
+ * not within a fixed range.
+ * Return Value: Nothing
+ */
+static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
+{
+ ring->interrupt_count++;
+ if (jiffies > ring->jiffies + HZ / 100) {
+ struct __vxge_hw_ring *hw_ring = ring->handle;
+
+ ring->jiffies = jiffies;
+ if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
+ hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
+ hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
+ vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+ } else if (hw_ring->rtimer != 0) {
+ hw_ring->rtimer = 0;
+ vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+ }
+ ring->interrupt_count = 0;
+ }
+}
+
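As a quick sanity check on the window used above: HZ / 100 jiffies is roughly 10 ms, so the Titan 1A limits introduced later in this patch (VXGE_T1A_MAX_TX_INTERRUPT_COUNT = 200 and VXGE_T1A_MAX_INTERRUPT_COUNT = 100 per window) correspond to roughly 20,000 TX and 10,000 RX interrupts per second per vector before the restriction timer is raised to its adaptive value; it is dropped back to zero once the rate falls below the limit again.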
/*
* vxge_isr_napi
* @irq: the irq of the device.
@@ -2062,21 +2189,20 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
struct __vxge_hw_device *hldev;
u64 reason;
enum vxge_hw_status status;
- struct vxgedev *vdev = (struct vxgedev *) dev_id;;
+ struct vxgedev *vdev = (struct vxgedev *)dev_id;
vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
dev = vdev->ndev;
- hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+ hldev = pci_get_drvdata(vdev->pdev);
if (pci_channel_offline(vdev->pdev))
return IRQ_NONE;
if (unlikely(!is_vxge_card_up(vdev)))
- return IRQ_NONE;
+ return IRQ_HANDLED;
- status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
- &reason);
+ status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
if (status == VXGE_HW_OK) {
vxge_hw_device_mask_all(hldev);
@@ -2107,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
#ifdef CONFIG_PCI_MSI
-static irqreturn_t
-vxge_tx_msix_handle(int irq, void *dev_id)
+static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
{
struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
+ adaptive_coalesce_tx_interrupts(fifo);
+
+ vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
+ vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
VXGE_COMPLETE_VPATH_TX(fifo);
+ vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
+ mmiowb();
+
return IRQ_HANDLED;
}
-static irqreturn_t
-vxge_rx_msix_napi_handle(int irq, void *dev_id)
+static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
{
struct vxge_ring *ring = (struct vxge_ring *)dev_id;
- /* MSIX_IDX for Rx is 1 */
+ adaptive_coalesce_rx_interrupts(ring);
+
vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
+ ring->rx_vector_no);
+
+ vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
+ ring->rx_vector_no);
napi_schedule(&ring->napi);
return IRQ_HANDLED;
@@ -2141,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
for (i = 0; i < vdev->no_of_vpath; i++) {
+ /* Reduce the chance of losing alarm interrupts by masking
+ * the vector. A pending bit will be set if an alarm is
+ * generated and on unmask the interrupt will be fired.
+ */
vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
+ vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
+ mmiowb();
status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
vdev->exec_mode);
if (status == VXGE_HW_OK) {
-
vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
- msix_id);
+ msix_id);
+ mmiowb();
continue;
}
vxge_debug_intr(VXGE_ERR,
@@ -2267,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev)
vpath->ring.rx_vector_no = (vpath->device_id *
VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
+ vpath->fifo.tx_vector_no = (vpath->device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE);
+
vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
VXGE_ALARM_MSIX_ID);
}
@@ -2301,8 +2451,8 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
static void vxge_rem_isr(struct vxgedev *vdev)
{
- struct __vxge_hw_device *hldev;
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+ struct __vxge_hw_device *hldev;
+ hldev = pci_get_drvdata(vdev->pdev);
#ifdef CONFIG_PCI_MSI
if (vdev->config.intr_type == MSI_X) {
@@ -2442,8 +2592,9 @@ INTA_MODE:
"%s:vxge:INTA", vdev->ndev->name);
vxge_hw_device_set_intr_type(vdev->devh,
VXGE_HW_INTR_MODE_IRQLINE);
- vxge_hw_vpath_tti_ci_set(vdev->devh,
- vdev->vpaths[0].device_id);
+
+ vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
+
ret = request_irq((int) vdev->pdev->irq,
vxge_isr_napi,
IRQF_SHARED, vdev->desc[0], vdev);
@@ -2529,8 +2680,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
-static int
-vxge_open(struct net_device *dev)
+static int vxge_open(struct net_device *dev)
{
enum vxge_hw_status status;
struct vxgedev *vdev;
@@ -2539,11 +2689,12 @@ vxge_open(struct net_device *dev)
int ret = 0;
int i;
u64 val64, function_mode;
+
vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d", dev->name, __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+ vdev = netdev_priv(dev);
+ hldev = pci_get_drvdata(vdev->pdev);
function_mode = vdev->config.device_hw_info.function_mode;
/* make sure you have link off by default every time Nic is
@@ -2598,6 +2749,8 @@ vxge_open(struct net_device *dev)
goto out2;
}
}
+ printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
+ hldev->config.rth_en ? "enabled" : "disabled");
for (i = 0; i < vdev->no_of_vpath; i++) {
vpath = &vdev->vpaths[i];
@@ -2683,9 +2836,10 @@ vxge_open(struct net_device *dev)
vxge_os_timer(vdev->vp_reset_timer,
vxge_poll_vp_reset, vdev, (HZ/2));
- if (vdev->vp_lockup_timer.function == NULL)
- vxge_os_timer(vdev->vp_lockup_timer,
- vxge_poll_vp_lockup, vdev, (HZ/2));
+ /* There is no need to check for RxD leak and RxD lockup on Titan1A */
+ if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
+ vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
+ HZ / 2);
set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -2710,6 +2864,10 @@ vxge_open(struct net_device *dev)
}
netif_tx_start_all_queues(vdev->ndev);
+
+ /* configure CI */
+ vxge_config_ci_for_tti_rti(vdev);
+
goto out0;
out2:
@@ -2767,8 +2925,8 @@ static int do_vxge_close(struct net_device *dev, int do_io)
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
dev->name, __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+ vdev = netdev_priv(dev);
+ hldev = pci_get_drvdata(vdev->pdev);
if (unlikely(!is_vxge_card_up(vdev)))
return 0;
@@ -2778,7 +2936,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
msleep(50);
- clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
if (do_io) {
/* Put the vpath back in normal mode */
vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
@@ -2789,7 +2946,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
struct vxge_hw_mrpcim_reg,
rts_mgr_cbasin_cfg),
&val64);
-
if (status == VXGE_HW_OK) {
val64 &= ~vpath_vector;
status = vxge_hw_mgmt_reg_write(vdev->devh,
@@ -2818,10 +2974,17 @@ static int do_vxge_close(struct net_device *dev, int do_io)
smp_wmb();
}
- del_timer_sync(&vdev->vp_lockup_timer);
+
+ if (vdev->titan1)
+ del_timer_sync(&vdev->vp_lockup_timer);
del_timer_sync(&vdev->vp_reset_timer);
+ if (do_io)
+ vxge_hw_device_wait_receive_idle(hldev);
+
+ clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+
/* Disable napi */
if (vdev->config.intr_type != MSI_X)
napi_disable(&vdev->napi);
@@ -2838,8 +3001,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
if (do_io)
vxge_hw_device_intr_disable(vdev->devh);
- mdelay(1000);
-
vxge_rem_isr(vdev);
vxge_napi_del_all(vdev);
@@ -2868,8 +3029,7 @@ static int do_vxge_close(struct net_device *dev, int do_io)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
-static int
-vxge_close(struct net_device *dev)
+static int vxge_close(struct net_device *dev)
{
do_vxge_close(dev, 1);
return 0;
@@ -2943,9 +3103,7 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
- net_stats->rx_dropped +=
- vdev->vpaths[k].ring.stats.rx_dropped;
-
+ net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped;
net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
@@ -2954,6 +3112,101 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
return net_stats;
}
+static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
+ int enable)
+{
+ enum vxge_hw_status status;
+ u64 val64;
+
+ /* Timestamps are passed to the driver via the FCS, therefore FCS
+ * stripping by the adapter must be disabled. Since disabled FCS
+ * stripping is already required for the driver to load (due to a
+ * hardware bug), there is no need to do anything special here.
+ */
+ if (enable)
+ val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
+ VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
+ VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
+ else
+ val64 = 0;
+
+ status = vxge_hw_mgmt_reg_write(vdev->devh,
+ vxge_hw_mgmt_reg_type_mrpcim,
+ 0,
+ offsetof(struct vxge_hw_mrpcim_reg,
+ xmac_timestamp),
+ val64);
+ vxge_hw_device_flush_io(vdev->devh);
+ return status;
+}
+
+static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
+{
+ struct hwtstamp_config config;
+ enum vxge_hw_status status;
+ int i;
+
+ if (copy_from_user(&config, data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ /* Transmit HW Timestamp not supported */
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ status = vxge_timestamp_config(vdev, 0);
+ if (status != VXGE_HW_OK)
+ return -EFAULT;
+
+ vdev->rx_hwts = 0;
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ status = vxge_timestamp_config(vdev, 1);
+ if (status != VXGE_HW_OK)
+ return -EFAULT;
+
+ vdev->rx_hwts = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ for (i = 0; i < vdev->no_of_vpath; i++)
+ vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
+
+ if (copy_to_user(data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
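As a usage illustration of the new ioctl path, a minimal userspace sketch follows; the interface name "eth0", the UDP socket, and the error handling are illustrative assumptions and are not part of this patch:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_OFF;		/* Titan has no TX timestamping */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* PTP filters are coerced to ALL */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("rx_filter granted: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}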
/**
* vxge_ioctl
* @dev: Device pointer.
@@ -2966,7 +3219,20 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
*/
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- return -EOPNOTSUPP;
+ struct vxgedev *vdev = netdev_priv(dev);
+ int ret;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
/**
@@ -2977,18 +3243,17 @@ static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
* This function is triggered if the Tx Queue is stopped
* for a pre-defined amount of time when the Interface is still up.
*/
-static void
-vxge_tx_watchdog(struct net_device *dev)
+static void vxge_tx_watchdog(struct net_device *dev)
{
struct vxgedev *vdev;
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
- vxge_reset(vdev);
+ schedule_work(&vdev->reset_task);
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
}
@@ -3012,7 +3277,7 @@ vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
vpath = &vdev->vpaths[0];
if ((NULL == grp) && (vpath->is_open)) {
@@ -3061,7 +3326,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct vxge_vpath *vpath;
int vp_id;
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
/* Add these vlan to the vid table */
for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
@@ -3088,7 +3353,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- vdev = (struct vxgedev *)netdev_priv(dev);
+ vdev = netdev_priv(dev);
vlan_group_set_device(vdev->vlgrp, vid, NULL);
@@ -3110,15 +3375,12 @@ static const struct net_device_ops vxge_netdev_ops = {
.ndo_start_xmit = vxge_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = vxge_set_multicast,
-
.ndo_do_ioctl = vxge_ioctl,
-
.ndo_set_mac_address = vxge_set_mac_addr,
.ndo_change_mtu = vxge_change_mtu,
.ndo_vlan_rx_register = vxge_vlan_rx_register,
.ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
.ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
-
.ndo_tx_timeout = vxge_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = vxge_netpoll,
@@ -3163,6 +3425,8 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
vdev->pdev = hldev->pdev;
memcpy(&vdev->config, config, sizeof(struct vxge_config));
vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
+ vdev->rx_hwts = 0;
+ vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
@@ -3175,9 +3439,15 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
ndev->netdev_ops = &vxge_netdev_ops;
ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
+ INIT_WORK(&vdev->reset_task, vxge_reset);
vxge_initialize_ethtool_ops(ndev);
+ if (vdev->config.rth_steering != NO_STEERING) {
+ ndev->features |= NETIF_F_RXHASH;
+ hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
+ }
+
/* Allocate memory for vpath */
vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
no_of_vpath, GFP_KERNEL);
@@ -3185,13 +3455,13 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
vxge_debug_init(VXGE_ERR,
"%s: vpath memory allocation failed",
vdev->ndev->name);
- ret = -ENODEV;
+ ret = -ENOMEM;
goto _out1;
}
ndev->features |= NETIF_F_SG;
- ndev->features |= NETIF_F_HW_CSUM;
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
"%s : checksuming enabled", __func__);
@@ -3206,11 +3476,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
if (vdev->config.gro_enable)
ndev->features |= NETIF_F_GRO;
- if (register_netdev(ndev)) {
+ ret = register_netdev(ndev);
+ if (ret) {
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
"%s: %s : device registration failed!",
ndev->name, __func__);
- ret = -ENODEV;
goto _out2;
}
@@ -3227,6 +3497,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
"%s: Ethernet device registered",
ndev->name);
+ hldev->ndev = ndev;
*vdev_out = vdev;
/* Resetting the Device stats */
@@ -3261,36 +3532,34 @@ _out0:
*
* This function will unregister and free network device
*/
-static void
-vxge_device_unregister(struct __vxge_hw_device *hldev)
+static void vxge_device_unregister(struct __vxge_hw_device *hldev)
{
struct vxgedev *vdev;
struct net_device *dev;
char buf[IFNAMSIZ];
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
- (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
- u32 level_trace;
-#endif
dev = hldev->ndev;
vdev = netdev_priv(dev);
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
- (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
- level_trace = vdev->level_trace;
-#endif
- vxge_debug_entryexit(level_trace,
- "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
- memcpy(buf, vdev->ndev->name, IFNAMSIZ);
+ vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
+ __func__, __LINE__);
+
+ strncpy(buf, dev->name, IFNAMSIZ);
+
+ flush_work_sync(&vdev->reset_task);
/* in 2.6 will call stop() if device is up */
unregister_netdev(dev);
- flush_scheduled_work();
+ kfree(vdev->vpaths);
- vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
- vxge_debug_entryexit(level_trace,
- "%s: %s:%d Exiting...", buf, __func__, __LINE__);
+ /* we are safe to free it now */
+ free_netdev(dev);
+
+ vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
+ buf);
+ vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
+ __func__, __LINE__);
}
/*
@@ -3304,7 +3573,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
enum vxge_hw_event type, u64 vp_id)
{
struct net_device *dev = hldev->ndev;
- struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+ struct vxgedev *vdev = netdev_priv(dev);
struct vxge_vpath *vpath = NULL;
int vpath_idx;
@@ -3527,9 +3796,9 @@ static int __devinit vxge_config_vpaths(
device_config->vp_config[i].tti.timer_ac_en =
VXGE_HW_TIM_TIMER_AC_ENABLE;
- /* For msi-x with napi (each vector
- has a handler of its own) -
- Set CI to OFF for all vpaths */
+ /* For msi-x with napi (each vector has a handler of its own) -
+ * Set CI to OFF for all vpaths
+ */
device_config->vp_config[i].tti.timer_ci_en =
VXGE_HW_TIM_TIMER_CI_DISABLE;
@@ -3559,10 +3828,13 @@ static int __devinit vxge_config_vpaths(
device_config->vp_config[i].ring.ring_blocks =
VXGE_HW_DEF_RING_BLOCKS;
+
device_config->vp_config[i].ring.buffer_mode =
VXGE_HW_RING_RXD_BUFFER_MODE_1;
+
device_config->vp_config[i].ring.rxds_limit =
VXGE_HW_DEF_RING_RXDS_LIMIT;
+
device_config->vp_config[i].ring.scatter_mode =
VXGE_HW_RING_SCATTER_MODE_A;
@@ -3639,9 +3911,10 @@ static void __devinit vxge_device_config_init(
break;
case MSI_X:
- device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
+ device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
break;
}
+
/* Timer period between device poll */
device_config->device_poll_millis = VXGE_TIMER_DELAY;
@@ -3653,16 +3926,10 @@ static void __devinit vxge_device_config_init(
vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
__func__);
- vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
- device_config->dma_blockpool_initial);
- vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
- device_config->dma_blockpool_max);
vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
device_config->intr_mode);
vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
device_config->device_poll_millis);
- vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
- device_config->rts_mac_en);
vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
device_config->rth_en);
vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
@@ -3751,9 +4018,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
vxge_debug_init(VXGE_TRACE,
"%s: MAC Address learning enabled", vdev->ndev->name);
- vxge_debug_init(VXGE_TRACE,
- "%s: Rx doorbell mode enabled", vdev->ndev->name);
-
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
if (!vxge_bVALn(vpath_mask, i, 1))
continue;
@@ -3766,14 +4030,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
((struct __vxge_hw_device *)(vdev->devh))->
config.vp_config[i].rpa_strip_vlan_tag
? "Enabled" : "Disabled");
- vxge_debug_init(VXGE_TRACE,
- "%s: Ring blocks : %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
- config.vp_config[i].ring.ring_blocks);
- vxge_debug_init(VXGE_TRACE,
- "%s: Fifo blocks : %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
- config.vp_config[i].fifo.fifo_blocks);
vxge_debug_ll_config(VXGE_TRACE,
"%s: Max frags : %d", vdev->ndev->name,
((struct __vxge_hw_device *)(vdev->devh))->
@@ -3813,8 +4069,7 @@ static int vxge_pm_resume(struct pci_dev *pdev)
static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
- struct __vxge_hw_device *hldev =
- (struct __vxge_hw_device *) pci_get_drvdata(pdev);
+ struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
struct net_device *netdev = hldev->ndev;
netif_device_detach(netdev);
@@ -3843,8 +4098,7 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
{
- struct __vxge_hw_device *hldev =
- (struct __vxge_hw_device *) pci_get_drvdata(pdev);
+ struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
struct net_device *netdev = hldev->ndev;
struct vxgedev *vdev = netdev_priv(netdev);
@@ -3855,7 +4109,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
}
pci_set_master(pdev);
- vxge_reset(vdev);
+ do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -3869,8 +4123,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
*/
static void vxge_io_resume(struct pci_dev *pdev)
{
- struct __vxge_hw_device *hldev =
- (struct __vxge_hw_device *) pci_get_drvdata(pdev);
+ struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
struct net_device *netdev = hldev->ndev;
if (netif_running(netdev)) {
@@ -3914,6 +4167,157 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
return num_functions;
}
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
+{
+ struct __vxge_hw_device *hldev = vdev->devh;
+ u32 maj, min, bld, cmaj, cmin, cbld;
+ enum vxge_hw_status status;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
+ if (ret) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
+ VXGE_DRIVER_NAME, fw_name);
+ goto out;
+ }
+
+ /* Load the new firmware onto the adapter */
+ status = vxge_update_fw_image(hldev, fw->data, fw->size);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: FW image download to adapter failed '%s'.",
+ VXGE_DRIVER_NAME, fw_name);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Read the version of the new firmware */
+ status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR,
+ "%s: Upgrade read version failed '%s'.",
+ VXGE_DRIVER_NAME, fw_name);
+ ret = -EIO;
+ goto out;
+ }
+
+ cmaj = vdev->config.device_hw_info.fw_version.major;
+ cmin = vdev->config.device_hw_info.fw_version.minor;
+ cbld = vdev->config.device_hw_info.fw_version.build;
+ /* It's possible the version in /lib/firmware is not the latest version.
+ * If so, we could get into a loop of trying to upgrade to the latest
+ * and flashing the older version.
+ */
+ if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
+ !override) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
+ maj, min, bld);
+
+ /* Flash the adapter with the new firmware */
+ status = vxge_hw_flash_fw(hldev);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
+ VXGE_DRIVER_NAME, fw_name);
+ ret = -EIO;
+ goto out;
+ }
+
+ printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
+ "hard reset before using, thus requiring a system reboot or a "
+ "hotplug event.\n");
+
+out:
+ release_firmware(fw);
+ return ret;
+}
+
+static int vxge_probe_fw_update(struct vxgedev *vdev)
+{
+ u32 maj, min, bld;
+ int ret, gpxe = 0;
+ char *fw_name;
+
+ maj = vdev->config.device_hw_info.fw_version.major;
+ min = vdev->config.device_hw_info.fw_version.minor;
+ bld = vdev->config.device_hw_info.fw_version.build;
+
+ if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
+ return 0;
+
+ /* Ignore the build number when determining if the current firmware is
+ * "too new" to load the driver
+ */
+ if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
+ "version, unable to load driver\n",
+ VXGE_DRIVER_NAME);
+ return -EINVAL;
+ }
+
+ /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
+ * work with this driver.
+ */
+ if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
+ "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
+ return -EINVAL;
+ }
+
+ /* If file not specified, determine gPXE or not */
+ if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
+ int i;
+ for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
+ if (vdev->devh->eprom_versions[i]) {
+ gpxe = 1;
+ break;
+ }
+ }
+ if (gpxe)
+ fw_name = "vxge/X3fw-pxe.ncf";
+ else
+ fw_name = "vxge/X3fw.ncf";
+
+ ret = vxge_fw_upgrade(vdev, fw_name, 0);
+ /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
+ * probe, so ignore them
+ */
+ if (ret != -EINVAL && ret != -ENOENT)
+ return -EIO;
+ else
+ ret = 0;
+
+ if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
+ VXGE_FW_VER(maj, min, 0)) {
+ vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
+ " be used with this driver.\n"
+ "Please get the latest version from "
+ "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
+ VXGE_DRIVER_NAME, maj, min, bld);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int __devinit is_sriov_initialized(struct pci_dev *pdev)
+{
+ int pos;
+ u16 ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
+ if (ctrl & PCI_SRIOV_CTRL_VFE)
+ return 1;
+ }
+ return 0;
+}
+
/**
* vxge_probe
* @pdev : structure containing the PCI related information of the device.
@@ -3928,7 +4332,7 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
static int __devinit
vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
{
- struct __vxge_hw_device *hldev;
+ struct __vxge_hw_device *hldev;
enum vxge_hw_status status;
int ret;
int high_dma = 0;
@@ -3951,9 +4355,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
attr.pdev = pdev;
/* In SRIOV-17 mode, functions of the same adapter
- * can be deployed on different buses */
- if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
- (device != PCI_SLOT(pdev->devfn))))
+ * can be deployed on different buses
+ */
+ if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
+ !pdev->is_virtfn)
new_device = 1;
bus = pdev->bus->number;
@@ -3971,6 +4376,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
driver_config->config_dev_cnt = 0;
driver_config->total_dev_cnt = 0;
}
+
/* Now making the CPU based no of vpath calculation
* applicable for individual functions as well.
*/
@@ -3993,11 +4399,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit0;
}
- ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL);
+ ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
if (!ll_config) {
ret = -ENOMEM;
vxge_debug_init(VXGE_ERR,
- "ll_config : malloc failed %s %d",
+ "device_config : malloc failed %s %d",
__FILE__, __LINE__);
goto _exit0;
}
@@ -4041,10 +4447,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit1;
}
- if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) {
+ ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
+ if (ret) {
vxge_debug_init(VXGE_ERR,
"%s : request regions failed", __func__);
- ret = -ENODEV;
goto _exit1;
}
@@ -4072,16 +4478,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit3;
}
- if (ll_config->device_hw_info.fw_version.major !=
- VXGE_DRIVER_FW_VERSION_MAJOR) {
- vxge_debug_init(VXGE_ERR,
- "%s: Incorrect firmware version."
- "Please upgrade the firmware to version 1.x.x",
- VXGE_DRIVER_NAME);
- ret = -EINVAL;
- goto _exit3;
- }
-
vpath_mask = ll_config->device_hw_info.vpath_mask;
if (vpath_mask == 0) {
vxge_debug_ll_config(VXGE_TRACE,
@@ -4110,14 +4506,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
num_vfs = vxge_get_num_vfs(function_mode) - 1;
/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
- if (is_sriov(function_mode) && (max_config_dev > 1) &&
- (ll_config->intr_type != INTA) &&
- (is_privileged == VXGE_HW_OK)) {
- ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
- ? (max_config_dev - 1) : num_vfs);
+ if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
+ (ll_config->intr_type != INTA)) {
+ ret = pci_enable_sriov(pdev, num_vfs);
if (ret)
vxge_debug_ll_config(VXGE_ERR,
"Failed in enabling SRIOV mode: %d\n", ret);
+ /* No need to fail out, as an error here is non-fatal */
}
/*
@@ -4145,11 +4540,37 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit3;
}
+ if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
+ ll_config->device_hw_info.fw_version.minor,
+ ll_config->device_hw_info.fw_version.build) >=
+ VXGE_EPROM_FW_VER) {
+ struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
+
+ status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
+ VXGE_DRIVER_NAME);
+ /* This is a non-fatal error, continue */
+ }
+
+ for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+ hldev->eprom_versions[i] = img[i].version;
+ if (!img[i].is_valid)
+ break;
+ vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
+ "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
+ VXGE_EPROM_IMG_MAJOR(img[i].version),
+ VXGE_EPROM_IMG_MINOR(img[i].version),
+ VXGE_EPROM_IMG_FIX(img[i].version),
+ VXGE_EPROM_IMG_BUILD(img[i].version));
+ }
+ }
+
/* if FCS stripping is not disabled in MAC fail driver load */
- if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "%s: FCS stripping is not disabled in MAC"
- " failing driver load", VXGE_DRIVER_NAME);
+ status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
+ " failing driver load", VXGE_DRIVER_NAME);
ret = -EINVAL;
goto _exit4;
}
@@ -4163,28 +4584,32 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
ll_config->addr_learn_en = addr_learn_en;
ll_config->rth_algorithm = RTH_ALG_JENKINS;
- ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
- ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
- ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
+ ll_config->rth_hash_type_tcpipv4 = 1;
+ ll_config->rth_hash_type_ipv4 = 0;
+ ll_config->rth_hash_type_tcpipv6 = 0;
+ ll_config->rth_hash_type_ipv6 = 0;
+ ll_config->rth_hash_type_tcpipv6ex = 0;
+ ll_config->rth_hash_type_ipv6ex = 0;
ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
- if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
- &vdev)) {
+ ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
+ &vdev);
+ if (ret) {
ret = -EINVAL;
goto _exit4;
}
+ ret = vxge_probe_fw_update(vdev);
+ if (ret)
+ goto _exit5;
+
vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
vxge_hw_device_trace_level_get(hldev));
/* set private HW device info */
- hldev->ndev = vdev->ndev;
vdev->mtu = VXGE_HW_DEFAULT_MTU;
vdev->bar0 = attr.bar0;
vdev->max_vpath_supported = max_vpath_supported;
@@ -4278,15 +4703,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Copy the station mac address to the list */
for (i = 0; i < vdev->no_of_vpath; i++) {
- entry = (struct vxge_mac_addrs *)
- kzalloc(sizeof(struct vxge_mac_addrs),
- GFP_KERNEL);
+ entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
if (NULL == entry) {
vxge_debug_init(VXGE_ERR,
"%s: mac_addr_list : memory allocation failed",
vdev->ndev->name);
ret = -EPERM;
- goto _exit5;
+ goto _exit6;
}
macaddr = (u8 *)&entry->macaddr;
memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4326,25 +4749,26 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
kfree(ll_config);
return 0;
-_exit5:
+_exit6:
for (i = 0; i < vdev->no_of_vpath; i++)
vxge_free_mac_add_list(&vdev->vpaths[i]);
-
+_exit5:
vxge_device_unregister(hldev);
_exit4:
- pci_disable_sriov(pdev);
+ pci_set_drvdata(pdev, NULL);
vxge_hw_device_terminate(hldev);
+ pci_disable_sriov(pdev);
_exit3:
iounmap(attr.bar0);
_exit2:
- pci_release_regions(pdev);
+ pci_release_region(pdev, 0);
_exit1:
pci_disable_device(pdev);
_exit0:
kfree(ll_config);
kfree(device_config);
driver_config->config_dev_cnt--;
- pci_set_drvdata(pdev, NULL);
+ driver_config->total_dev_cnt--;
return ret;
}
@@ -4354,61 +4778,39 @@ _exit0:
* Description: This function is called by the Pci subsystem to release a
* PCI device and free up all resource held up by the device.
*/
-static void __devexit
-vxge_remove(struct pci_dev *pdev)
+static void __devexit vxge_remove(struct pci_dev *pdev)
{
- struct __vxge_hw_device *hldev;
- struct vxgedev *vdev = NULL;
- struct net_device *dev;
- int i = 0;
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
- (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
- u32 level_trace;
-#endif
-
- hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
+ struct __vxge_hw_device *hldev;
+ struct vxgedev *vdev;
+ int i;
+ hldev = pci_get_drvdata(pdev);
if (hldev == NULL)
return;
- dev = hldev->ndev;
- vdev = netdev_priv(dev);
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
- (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
- level_trace = vdev->level_trace;
-#endif
- vxge_debug_entryexit(level_trace,
- "%s:%d", __func__, __LINE__);
+ vdev = netdev_priv(hldev->ndev);
- vxge_debug_init(level_trace,
- "%s : removing PCI device...", __func__);
- vxge_device_unregister(hldev);
+ vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
+ vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
+ __func__);
- for (i = 0; i < vdev->no_of_vpath; i++) {
+ for (i = 0; i < vdev->no_of_vpath; i++)
vxge_free_mac_add_list(&vdev->vpaths[i]);
- vdev->vpaths[i].mcast_addr_cnt = 0;
- vdev->vpaths[i].mac_addr_cnt = 0;
- }
-
- kfree(vdev->vpaths);
-
- iounmap(vdev->bar0);
-
- pci_disable_sriov(pdev);
-
- /* we are safe to free it now */
- free_netdev(dev);
-
- vxge_debug_init(level_trace,
- "%s:%d Device unregistered", __func__, __LINE__);
+ vxge_device_unregister(hldev);
+ pci_set_drvdata(pdev, NULL);
+ /* Do not call pci_disable_sriov here, as it will break child devices */
vxge_hw_device_terminate(hldev);
-
+ iounmap(vdev->bar0);
+ pci_release_region(pdev, 0);
pci_disable_device(pdev);
- pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
- vxge_debug_entryexit(level_trace,
- "%s:%d Exiting...", __func__, __LINE__);
+ driver_config->config_dev_cnt--;
+ driver_config->total_dev_cnt--;
+
+ vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
+ __func__, __LINE__);
+ vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
+ __LINE__);
}
static struct pci_error_handlers vxge_err_handler = {
@@ -4444,6 +4846,10 @@ vxge_starter(void)
return -ENOMEM;
ret = pci_register_driver(&vxge_driver);
+ if (ret) {
+ kfree(driver_config);
+ goto err;
+ }
if (driver_config->config_dev_cnt &&
(driver_config->config_dev_cnt != driver_config->total_dev_cnt))
@@ -4451,10 +4857,7 @@ vxge_starter(void)
"%s: Configured %d of %d devices",
VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
driver_config->total_dev_cnt);
-
- if (ret)
- kfree(driver_config);
-
+err:
return ret;
}
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index de64536cb7d0..40474f0da576 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -29,6 +29,9 @@
#define PCI_DEVICE_ID_TITAN_WIN 0x5733
#define PCI_DEVICE_ID_TITAN_UNI 0x5833
+#define VXGE_HW_TITAN1_PCI_REVISION 1
+#define VXGE_HW_TITAN1A_PCI_REVISION 2
+
#define VXGE_USE_DEFAULT 0xffffffff
#define VXGE_HW_VPATH_MSIX_ACTIVE 4
#define VXGE_ALARM_MSIX_ID 2
@@ -53,12 +56,16 @@
#define VXGE_TTI_BTIMER_VAL 250000
-#define VXGE_TTI_LTIMER_VAL 1000
-#define VXGE_TTI_RTIMER_VAL 0
-#define VXGE_RTI_BTIMER_VAL 250
-#define VXGE_RTI_LTIMER_VAL 100
-#define VXGE_RTI_RTIMER_VAL 0
-#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
+#define VXGE_TTI_LTIMER_VAL 1000
+#define VXGE_T1A_TTI_LTIMER_VAL 80
+#define VXGE_TTI_RTIMER_VAL 0
+#define VXGE_TTI_RTIMER_ADAPT_VAL 10
+#define VXGE_T1A_TTI_RTIMER_VAL 400
+#define VXGE_RTI_BTIMER_VAL 250
+#define VXGE_RTI_LTIMER_VAL 100
+#define VXGE_RTI_RTIMER_VAL 0
+#define VXGE_RTI_RTIMER_ADAPT_VAL 15
+#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
#define VXGE_ISR_POLLING_CNT 8
#define VXGE_MAX_CONFIG_DEV 0xFF
#define VXGE_EXEC_MODE_DISABLE 0
@@ -76,14 +83,40 @@
#define TTI_TX_UFC_B 40
#define TTI_TX_UFC_C 60
#define TTI_TX_UFC_D 100
-
-#define RTI_RX_URANGE_A 5
-#define RTI_RX_URANGE_B 15
-#define RTI_RX_URANGE_C 40
-#define RTI_RX_UFC_A 1
-#define RTI_RX_UFC_B 5
-#define RTI_RX_UFC_C 10
-#define RTI_RX_UFC_D 15
+#define TTI_T1A_TX_UFC_A 30
+#define TTI_T1A_TX_UFC_B 80
+/* Slope = (max_mtu - min_mtu) / (max_mtu_ufc - min_mtu_ufc) = 93;
+ * UFC_C is 60 at a 9k MTU and 140 at a 1.5k MTU.
+ */
+#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
+
+/* Slope = 37; UFC_D is 100 at a 9k MTU and 300 at a 1.5k MTU. */
+#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
+
+
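Plugging the endpoints from the comments above into the macros (and taking the 9k figure as the maximum MTU): at a 1.5k MTU, TTI_T1A_TX_UFC_C = 60 + 7500 / 93 ≈ 140 and TTI_T1A_TX_UFC_D = 100 + 7500 / 37 ≈ 300, while at the maximum MTU both fall back to their base values of 60 and 100.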
+#define RTI_RX_URANGE_A 5
+#define RTI_RX_URANGE_B 15
+#define RTI_RX_URANGE_C 40
+#define RTI_T1A_RX_URANGE_A 1
+#define RTI_T1A_RX_URANGE_B 20
+#define RTI_T1A_RX_URANGE_C 50
+#define RTI_RX_UFC_A 1
+#define RTI_RX_UFC_B 5
+#define RTI_RX_UFC_C 10
+#define RTI_RX_UFC_D 15
+#define RTI_T1A_RX_UFC_B 20
+#define RTI_T1A_RX_UFC_C 50
+#define RTI_T1A_RX_UFC_D 60
+
+/*
+ * With the moderation parameters the interrupt rate is maintained at about
+ * 3k per second for most, but not all, traffic. The defines below give the
+ * maximum interrupt count allowed per function with INTA, or per vector in
+ * the case of MSI-X, in a 10 millisecond time period. Enabled only for
+ * Titan 1A.
+ */
+#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
+#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
/* Milli secs timer period */
#define VXGE_TIMER_DELAY 10000
@@ -145,15 +178,15 @@ struct vxge_config {
int addr_learn_en;
- int rth_steering;
- int rth_algorithm;
- int rth_hash_type_tcpipv4;
- int rth_hash_type_ipv4;
- int rth_hash_type_tcpipv6;
- int rth_hash_type_ipv6;
- int rth_hash_type_tcpipv6ex;
- int rth_hash_type_ipv6ex;
- int rth_bkt_sz;
+ u32 rth_steering:2,
+ rth_algorithm:2,
+ rth_hash_type_tcpipv4:1,
+ rth_hash_type_ipv4:1,
+ rth_hash_type_tcpipv6:1,
+ rth_hash_type_ipv6:1,
+ rth_hash_type_tcpipv6ex:1,
+ rth_hash_type_ipv6ex:1,
+ rth_bkt_sz:8;
int rth_jhash_golden_ratio;
int tx_steering_type;
int fifo_indicate_max_pkts;
@@ -224,6 +257,11 @@ struct vxge_fifo {
int tx_steering_type;
int indicate_max_pkts;
+ /* Adaptive interrupt moderation parameters used in T1A */
+ unsigned long interrupt_count;
+ unsigned long jiffies;
+
+ u32 tx_vector_no;
/* Tx stats */
struct vxge_fifo_stats stats;
} ____cacheline_aligned;
@@ -248,8 +286,13 @@ struct vxge_ring {
*/
int driver_id;
- /* copy of the flag indicating whether rx_csum is to be used */
- u32 rx_csum;
+ /* Adaptive interrupt moderation parameters used in T1A */
+ unsigned long interrupt_count;
+ unsigned long jiffies;
+
+ /* copy of the flag indicating whether rx_csum is to be used */
+ u32 rx_csum:1,
+ rx_hwts:1;
int pkts_processed;
int budget;
@@ -262,7 +305,7 @@ struct vxge_ring {
int vlan_tag_strip;
struct vlan_group *vlgrp;
- int rx_vector_no;
+ u32 rx_vector_no;
enum vxge_hw_status last_status;
/* Rx stats */
@@ -281,8 +324,8 @@ struct vxge_vpath {
int is_configured;
int is_open;
struct vxgedev *vdev;
- u8 (macaddr)[ETH_ALEN];
- u8 (macmask)[ETH_ALEN];
+ u8 macaddr[ETH_ALEN];
+ u8 macmask[ETH_ALEN];
#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048
/* mac addresses currently programmed into NIC */
@@ -327,7 +370,9 @@ struct vxgedev {
u16 all_multi_flg;
/* A flag indicating whether rx_csum is to be used or not. */
- u32 rx_csum;
+ u32 rx_csum:1,
+ rx_hwts:1,
+ titan1:1;
struct vxge_msix_entry *vxge_entries;
struct msix_entry *entries;
@@ -369,6 +414,7 @@ struct vxgedev {
u32 level_err;
u32 level_trace;
char fw_version[VXGE_HW_FW_STRLEN];
+ struct work_struct reset_task;
};
struct vxge_rx_priv {
@@ -387,8 +433,6 @@ struct vxge_tx_priv {
static int p = val; \
module_param(p, int, 0)
-#define vxge_os_bug(fmt...) { printk(fmt); BUG(); }
-
#define vxge_os_timer(timer, handle, arg, exp) do { \
init_timer(&timer); \
timer.function = handle; \
@@ -396,7 +440,10 @@ struct vxge_tx_priv {
mod_timer(&timer, (jiffies + exp)); \
} while (0);
-extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
+void vxge_initialize_ethtool_ops(struct net_device *ndev);
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
+
/**
* #define VXGE_DEBUG_INIT: debug for initialization functions
* #define VXGE_DEBUG_TX : debug transmit related functions
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
index 3dd5c9615ef9..3e658b175947 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/vxge/vxge-reg.h
@@ -49,6 +49,33 @@
#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
+#define VXGE_HW_FW_API_GET_EPROM_REV 31
+
+#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
+#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
+#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
+#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
+
+#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
+#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
+
+#define VXGE_HW_FW_API_GET_FUNC_MODE 29
+#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
+
+#define VXGE_HW_FW_UPGRADE_MEMO 13
+#define VXGE_HW_FW_UPGRADE_ACTION 16
+#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
+#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
+#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
+#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
+
+#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
+#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
+#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
+
#define VXGE_HW_ASIC_MODE_RESERVED 0
#define VXGE_HW_ASIC_MODE_NO_IOV 1
#define VXGE_HW_ASIC_MODE_SR_IOV 2
@@ -165,13 +192,13 @@
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
+#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
-#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
+#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
@@ -437,6 +464,7 @@
#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
vxge_bVALn(bits, 48, 16)
#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
vxge_bVALn(bits, 0, 18)
@@ -3998,6 +4026,7 @@ struct vxge_hw_vpath_reg {
#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
+#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
/*0x00a78*/ u64 prc_cfg7;
#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4bdb611a6842..8674f331311c 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -17,13 +17,6 @@
#include "vxge-config.h"
#include "vxge-main.h"
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
- u32 vp_id, enum vxge_hw_event type);
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms);
-
/*
* vxge_hw_vpath_intr_enable - Enable vpath interrupts.
* @vp: Virtual Path handle.
@@ -225,6 +218,68 @@ exit:
return status;
}
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct vxge_hw_vp_config *config;
+ u64 val64;
+
+ if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
+ return;
+
+ vp_reg = fifo->vp_reg;
+ config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
+
+ if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
+ config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
+ val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ fifo->tim_tti_cfg1_saved = val64;
+ writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ }
+}
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
+{
+ u64 val64 = ring->tim_rti_cfg1_saved;
+
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ ring->tim_rti_cfg1_saved = val64;
+ writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+}
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
+{
+ u64 val64 = fifo->tim_tti_cfg3_saved;
+ u64 timer = (fifo->rtimer * 1000) / 272;
+
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+ if (timer)
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+ VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
+
+ writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+ /* tti_cfg3_saved is not updated again because it is
+ * initialized at one place only - init time.
+ */
+}
+
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
+{
+ u64 val64 = ring->tim_rti_cfg3_saved;
+ u64 timer = (ring->rtimer * 1000) / 272;
+
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+ if (timer)
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+ VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
+
+ writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+ /* rti_cfg3_saved is not updated again because it is
+ * initialized at one place only - init time.
+ */
+}
+
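One reading of the scaling in the two helpers above: multiplying the rtimer value by 1000 and dividing by 272 is consistent with the driver-side values (for example VXGE_TTI_RTIMER_ADAPT_VAL = 10 and VXGE_RTI_RTIMER_ADAPT_VAL = 15 in vxge-main.h) being expressed in microseconds and the TIM_CFG3 RTIMER field counting hardware ticks of roughly 272 ns; this is an inference from the conversion itself, not a statement from the hardware documentation.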
/**
* vxge_hw_channel_msix_mask - Mask MSIX Vector.
* @channeh: Channel for rx or tx handle
@@ -261,6 +316,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
}
/**
+ * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
+ * @channel: Channel for rx or tx handle
+ * @msix_id: MSI ID
+ *
+ * The function clears the msix one shot interrupt for the given msix_id
+ * when the channel is configured in MSIX one shot mode
+ *
+ * Returns: None
+ */
+void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
+{
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+ &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+}
+
+/**
* vxge_hw_device_set_intr_type - Updates the configuration
* with new interrupt type.
* @hldev: HW device handle.
@@ -419,6 +491,384 @@ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
}
/**
+ * __vxge_hw_device_handle_error - Handle error
+ * @hldev: HW device
+ * @vp_id: Vpath Id
+ * @type: Error type. Please see enum vxge_hw_event{}
+ *
+ * Handle error.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
+ enum vxge_hw_event type)
+{
+ switch (type) {
+ case VXGE_HW_EVENT_UNKNOWN:
+ break;
+ case VXGE_HW_EVENT_RESET_START:
+ case VXGE_HW_EVENT_RESET_COMPLETE:
+ case VXGE_HW_EVENT_LINK_DOWN:
+ case VXGE_HW_EVENT_LINK_UP:
+ goto out;
+ case VXGE_HW_EVENT_ALARM_CLEARED:
+ goto out;
+ case VXGE_HW_EVENT_ECCERR:
+ case VXGE_HW_EVENT_MRPCIM_ECCERR:
+ goto out;
+ case VXGE_HW_EVENT_FIFO_ERR:
+ case VXGE_HW_EVENT_VPATH_ERR:
+ case VXGE_HW_EVENT_CRITICAL_ERR:
+ case VXGE_HW_EVENT_SERR:
+ break;
+ case VXGE_HW_EVENT_SRPCIM_SERR:
+ case VXGE_HW_EVENT_MRPCIM_SERR:
+ goto out;
+ case VXGE_HW_EVENT_SLOT_FREEZE:
+ break;
+ default:
+ vxge_assert(0);
+ goto out;
+ }
+
+ /* notify driver */
+ if (hldev->uld_callbacks.crit_err)
+ hldev->uld_callbacks.crit_err(
+ (struct __vxge_hw_device *)hldev,
+ type, vp_id);
+out:
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_down_ind
+ * @hldev: HW device handle.
+ *
+ * Link down indication handler. The function is invoked by HW when
+ * Titan indicates that the link is down.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
+{
+ /*
+ * If the link state is already down, return.
+ */
+ if (hldev->link_state == VXGE_HW_LINK_DOWN)
+ goto exit;
+
+ hldev->link_state = VXGE_HW_LINK_DOWN;
+
+ /* notify driver */
+ if (hldev->uld_callbacks.link_down)
+ hldev->uld_callbacks.link_down(hldev);
+exit:
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_up_ind
+ * @hldev: HW device handle.
+ *
+ * Link up indication handler. The function is invoked by HW when
+ * Titan indicates that the link is up for a programmable amount of time.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
+{
+ /*
+ * If the link state is already up, return.
+ */
+ if (hldev->link_state == VXGE_HW_LINK_UP)
+ goto exit;
+
+ hldev->link_state = VXGE_HW_LINK_UP;
+
+ /* notify driver */
+ if (hldev->uld_callbacks.link_up)
+ hldev->uld_callbacks.link_up(hldev);
+exit:
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vpath: Virtual Path.
+ * @skip_alarms: Do not clear the alarms
+ *
+ * Process vpath alarms.
+ *
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+ u32 skip_alarms)
+{
+ u64 val64;
+ u64 alarm_status;
+ u64 pic_status;
+ struct __vxge_hw_device *hldev = NULL;
+ enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
+ u64 mask64;
+ struct vxge_hw_vpath_stats_sw_info *sw_stats;
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+ if (vpath == NULL) {
+ alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+ alarm_event);
+ goto out2;
+ }
+
+ hldev = vpath->hldev;
+ vp_reg = vpath->vp_reg;
+ alarm_status = readq(&vp_reg->vpath_general_int_status);
+
+ if (alarm_status == VXGE_HW_ALL_FOXES) {
+ alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
+ alarm_event);
+ goto out;
+ }
+
+ sw_stats = vpath->sw_stats;
+
+ if (alarm_status & ~(
+ VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
+ VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
+ VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
+ VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
+ sw_stats->error_stats.unknown_alarms++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+ alarm_event);
+ goto out;
+ }
+
+ if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
+
+ val64 = readq(&vp_reg->xgmac_vp_int_status);
+
+ if (val64 &
+ VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
+
+ val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
+
+ if (((val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
+ (!(val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
+ ((val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
+ (!(val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+ ))) {
+ sw_stats->error_stats.network_sustained_fault++;
+
+ writeq(
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
+ &vp_reg->asic_ntwk_vp_err_mask);
+
+ __vxge_hw_device_handle_link_down_ind(hldev);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_LINK_DOWN, alarm_event);
+ }
+
+ if (((val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
+ (!(val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
+ ((val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
+ (!(val64 &
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+ ))) {
+
+ sw_stats->error_stats.network_sustained_ok++;
+
+ writeq(
+ VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
+ &vp_reg->asic_ntwk_vp_err_mask);
+
+ __vxge_hw_device_handle_link_up_ind(hldev);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_LINK_UP, alarm_event);
+ }
+
+ writeq(VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->asic_ntwk_vp_err_reg);
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
+
+ if (skip_alarms)
+ return VXGE_HW_OK;
+ }
+ }
+
+ if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
+
+ pic_status = readq(&vp_reg->vpath_ppif_int_status);
+
+ if (pic_status &
+ VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
+
+ val64 = readq(&vp_reg->general_errors_reg);
+ mask64 = readq(&vp_reg->general_errors_mask);
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
+ ~mask64) {
+ sw_stats->error_stats.ini_serr_det++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_SERR, alarm_event);
+ }
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
+ ~mask64) {
+ sw_stats->error_stats.dblgen_fifo0_overflow++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_FIFO_ERR, alarm_event);
+ }
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
+ ~mask64)
+ sw_stats->error_stats.statsb_pif_chain_error++;
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
+ ~mask64)
+ sw_stats->error_stats.statsb_drop_timeout++;
+
+ if ((val64 &
+ VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
+ ~mask64)
+ sw_stats->error_stats.target_illegal_access++;
+
+ if (!skip_alarms) {
+ writeq(VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->general_errors_reg);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_ALARM_CLEARED,
+ alarm_event);
+ }
+ }
+
+ if (pic_status &
+ VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
+
+ val64 = readq(&vp_reg->kdfcctl_errors_reg);
+ mask64 = readq(&vp_reg->kdfcctl_errors_mask);
+
+ if ((val64 &
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
+ ~mask64) {
+ sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_FIFO_ERR,
+ alarm_event);
+ }
+
+ if ((val64 &
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
+ ~mask64) {
+ sw_stats->error_stats.kdfcctl_fifo0_poison++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_FIFO_ERR,
+ alarm_event);
+ }
+
+ if ((val64 &
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
+ ~mask64) {
+ sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_FIFO_ERR,
+ alarm_event);
+ }
+
+ if (!skip_alarms) {
+ writeq(VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->kdfcctl_errors_reg);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_ALARM_CLEARED,
+ alarm_event);
+ }
+ }
+
+ }
+
+ if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
+
+ val64 = readq(&vp_reg->wrdma_alarm_status);
+
+ if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
+
+ val64 = readq(&vp_reg->prc_alarm_reg);
+ mask64 = readq(&vp_reg->prc_alarm_mask);
+
+ if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
+ ~mask64)
+ sw_stats->error_stats.prc_ring_bumps++;
+
+ if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
+ ~mask64) {
+ sw_stats->error_stats.prc_rxdcm_sc_err++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_VPATH_ERR,
+ alarm_event);
+ }
+
+ if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
+ & ~mask64) {
+ sw_stats->error_stats.prc_rxdcm_sc_abort++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_VPATH_ERR,
+ alarm_event);
+ }
+
+ if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
+ & ~mask64) {
+ sw_stats->error_stats.prc_quanta_size_err++;
+
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_VPATH_ERR,
+ alarm_event);
+ }
+
+ if (!skip_alarms) {
+ writeq(VXGE_HW_INTR_MASK_ALL,
+ &vp_reg->prc_alarm_reg);
+ alarm_event = VXGE_HW_SET_LEVEL(
+ VXGE_HW_EVENT_ALARM_CLEARED,
+ alarm_event);
+ }
+ }
+ }
+out:
+ hldev->stats.sw_dev_err_stats.vpath_alarms++;
+out2:
+ if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
+ (alarm_event == VXGE_HW_EVENT_UNKNOWN))
+ return VXGE_HW_OK;
+
+ __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
+
+ if (alarm_event == VXGE_HW_EVENT_SERR)
+ return VXGE_HW_ERR_CRITICAL;
+
+ return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
+ VXGE_HW_ERR_SLOT_FREEZE :
+ (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
+ VXGE_HW_ERR_VPATH;
+}
+
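The alarm-processing code moved in above applies one recurring test: a software error counter is bumped (and an event level raised) only when the alarm bit is set in the status register and clear in the corresponding mask register, i.e. (val64 & BIT) & ~mask64. A minimal stand-alone sketch of that check, using purely illustrative bit values rather than the real Titan register layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit definition -- not the real Titan register layout. */
#define EXAMPLE_ALARM_FIFO_OVERFLOW	(1ULL << 3)

/* An alarm is acted on only when its bit is set in the status register
 * and clear in the mask register: (status & bit) & ~mask. */
static int example_alarm_active(uint64_t status, uint64_t mask, uint64_t bit)
{
	return ((status & bit) & ~mask) != 0;
}

int main(void)
{
	uint64_t status = EXAMPLE_ALARM_FIFO_OVERFLOW;

	/* Unmasked: counted.  Masked: ignored even though the bit is set. */
	printf("unmasked: %d\n",
	       example_alarm_active(status, 0, EXAMPLE_ALARM_FIFO_OVERFLOW));
	printf("masked:   %d\n",
	       example_alarm_active(status, EXAMPLE_ALARM_FIFO_OVERFLOW,
				    EXAMPLE_ALARM_FIFO_OVERFLOW));
	return 0;
}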
+/**
* vxge_hw_device_begin_irq - Begin IRQ processing.
* @hldev: HW device handle.
* @skip_alarms: Do not clear the alarms
@@ -513,108 +963,6 @@ exit:
return ret;
}
-/*
- * __vxge_hw_device_handle_link_up_ind
- * @hldev: HW device handle.
- *
- * Link up indication handler. The function is invoked by HW when
- * Titan indicates that the link is up for programmable amount of time.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
-{
- /*
- * If the previous link state is not down, return.
- */
- if (hldev->link_state == VXGE_HW_LINK_UP)
- goto exit;
-
- hldev->link_state = VXGE_HW_LINK_UP;
-
- /* notify driver */
- if (hldev->uld_callbacks.link_up)
- hldev->uld_callbacks.link_up(hldev);
-exit:
- return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_down_ind
- * @hldev: HW device handle.
- *
- * Link down indication handler. The function is invoked by HW when
- * Titan indicates that the link is down.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
-{
- /*
- * If the previous link state is not down, return.
- */
- if (hldev->link_state == VXGE_HW_LINK_DOWN)
- goto exit;
-
- hldev->link_state = VXGE_HW_LINK_DOWN;
-
- /* notify driver */
- if (hldev->uld_callbacks.link_down)
- hldev->uld_callbacks.link_down(hldev);
-exit:
- return VXGE_HW_OK;
-}
-
-/**
- * __vxge_hw_device_handle_error - Handle error
- * @hldev: HW device
- * @vp_id: Vpath Id
- * @type: Error type. Please see enum vxge_hw_event{}
- *
- * Handle error.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(
- struct __vxge_hw_device *hldev,
- u32 vp_id,
- enum vxge_hw_event type)
-{
- switch (type) {
- case VXGE_HW_EVENT_UNKNOWN:
- break;
- case VXGE_HW_EVENT_RESET_START:
- case VXGE_HW_EVENT_RESET_COMPLETE:
- case VXGE_HW_EVENT_LINK_DOWN:
- case VXGE_HW_EVENT_LINK_UP:
- goto out;
- case VXGE_HW_EVENT_ALARM_CLEARED:
- goto out;
- case VXGE_HW_EVENT_ECCERR:
- case VXGE_HW_EVENT_MRPCIM_ECCERR:
- goto out;
- case VXGE_HW_EVENT_FIFO_ERR:
- case VXGE_HW_EVENT_VPATH_ERR:
- case VXGE_HW_EVENT_CRITICAL_ERR:
- case VXGE_HW_EVENT_SERR:
- break;
- case VXGE_HW_EVENT_SRPCIM_SERR:
- case VXGE_HW_EVENT_MRPCIM_SERR:
- goto out;
- case VXGE_HW_EVENT_SLOT_FREEZE:
- break;
- default:
- vxge_assert(0);
- goto out;
- }
-
- /* notify driver */
- if (hldev->uld_callbacks.crit_err)
- hldev->uld_callbacks.crit_err(
- (struct __vxge_hw_device *)hldev,
- type, vp_id);
-out:
-
- return VXGE_HW_OK;
-}
-
/**
* vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
* condition that has caused the Tx and RX interrupt.
@@ -699,8 +1047,8 @@ _alloc_after_swap:
* Posts a dtr to work array.
*
*/
-static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
- void *dtrh)
+static void
+vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
vxge_assert(channel->work_arr[channel->post_index] == NULL);
@@ -911,10 +1259,6 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
*/
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
-
wmb();
vxge_hw_ring_rxd_post_post(ring, rxdh);
}
@@ -975,7 +1319,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
/* check whether it is not the end */
- if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
+ if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
0);
@@ -1868,284 +2212,6 @@ exit:
}
/*
- * __vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms)
-{
- u64 val64;
- u64 alarm_status;
- u64 pic_status;
- struct __vxge_hw_device *hldev = NULL;
- enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
- u64 mask64;
- struct vxge_hw_vpath_stats_sw_info *sw_stats;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
-
- if (vpath == NULL) {
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
- alarm_event);
- goto out2;
- }
-
- hldev = vpath->hldev;
- vp_reg = vpath->vp_reg;
- alarm_status = readq(&vp_reg->vpath_general_int_status);
-
- if (alarm_status == VXGE_HW_ALL_FOXES) {
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
- alarm_event);
- goto out;
- }
-
- sw_stats = vpath->sw_stats;
-
- if (alarm_status & ~(
- VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
- VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
- sw_stats->error_stats.unknown_alarms++;
-
- alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
- alarm_event);
- goto out;
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
-
- val64 = readq(&vp_reg->xgmac_vp_int_status);
-
- if (val64 &
- VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
-
- val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
-
- if (((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
- ((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
- ))) {
- sw_stats->error_stats.network_sustained_fault++;
-
- writeq(
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_device_handle_link_down_ind(hldev);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_LINK_DOWN, alarm_event);
- }
-
- if (((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
- ((val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
- (!(val64 &
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
- ))) {
-
- sw_stats->error_stats.network_sustained_ok++;
-
- writeq(
- VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
- &vp_reg->asic_ntwk_vp_err_mask);
-
- __vxge_hw_device_handle_link_up_ind(hldev);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_LINK_UP, alarm_event);
- }
-
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->asic_ntwk_vp_err_reg);
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
-
- if (skip_alarms)
- return VXGE_HW_OK;
- }
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
-
- pic_status = readq(&vp_reg->vpath_ppif_int_status);
-
- if (pic_status &
- VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
-
- val64 = readq(&vp_reg->general_errors_reg);
- mask64 = readq(&vp_reg->general_errors_mask);
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
- ~mask64) {
- sw_stats->error_stats.ini_serr_det++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_SERR, alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
- ~mask64) {
- sw_stats->error_stats.dblgen_fifo0_overflow++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR, alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
- ~mask64)
- sw_stats->error_stats.statsb_pif_chain_error++;
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
- ~mask64)
- sw_stats->error_stats.statsb_drop_timeout++;
-
- if ((val64 &
- VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
- ~mask64)
- sw_stats->error_stats.target_illegal_access++;
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->general_errors_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
-
- if (pic_status &
- VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
-
- val64 = readq(&vp_reg->kdfcctl_errors_reg);
- mask64 = readq(&vp_reg->kdfcctl_errors_mask);
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_poison++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if ((val64 &
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
- ~mask64) {
- sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_FIFO_ERR,
- alarm_event);
- }
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->kdfcctl_errors_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
-
- }
-
- if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
-
- val64 = readq(&vp_reg->wrdma_alarm_status);
-
- if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
-
- val64 = readq(&vp_reg->prc_alarm_reg);
- mask64 = readq(&vp_reg->prc_alarm_mask);
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
- ~mask64)
- sw_stats->error_stats.prc_ring_bumps++;
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
- ~mask64) {
- sw_stats->error_stats.prc_rxdcm_sc_err++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
- & ~mask64) {
- sw_stats->error_stats.prc_rxdcm_sc_abort++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
- & ~mask64) {
- sw_stats->error_stats.prc_quanta_size_err++;
-
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_VPATH_ERR,
- alarm_event);
- }
-
- if (!skip_alarms) {
- writeq(VXGE_HW_INTR_MASK_ALL,
- &vp_reg->prc_alarm_reg);
- alarm_event = VXGE_HW_SET_LEVEL(
- VXGE_HW_EVENT_ALARM_CLEARED,
- alarm_event);
- }
- }
- }
-out:
- hldev->stats.sw_dev_err_stats.vpath_alarms++;
-out2:
- if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
- (alarm_event == VXGE_HW_EVENT_UNKNOWN))
- return VXGE_HW_OK;
-
- __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
-
- if (alarm_event == VXGE_HW_EVENT_SERR)
- return VXGE_HW_ERR_CRITICAL;
-
- return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
- VXGE_HW_ERR_SLOT_FREEZE :
- (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
- VXGE_HW_ERR_VPATH;
-}
-
-/*
* vxge_hw_vpath_alarm_process - Process Alarms.
* @vpath: Virtual Path.
* @skip_alarms: Do not clear the alarms
@@ -2204,19 +2270,14 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
if (vpath->hldev->config.intr_mode ==
VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+ VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
+ 0, 32), &vp_reg->one_shot_vect0_en);
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
0, 32), &vp_reg->one_shot_vect1_en);
- }
-
- if (vpath->hldev->config.intr_mode ==
- VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
0, 32), &vp_reg->one_shot_vect2_en);
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
- 0, 32), &vp_reg->one_shot_vect3_en);
}
}
@@ -2242,6 +2303,32 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
}
/**
+ * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id: MSI ID
+ *
+ * The function clears the MSIX interrupt for the given msix_id.
+ */
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+ struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+ if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+ &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+ else
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+ &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
+}
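The kernel-doc above describes the new helper; a rough sketch of how a per-vpath MSI-X handler might use it follows. The context struct and handler name are hypothetical (only vxge_hw_vpath_msix_clear() comes from this patch), and the fragment assumes the usual <linux/interrupt.h> and vxge-traffic.h context rather than being a complete driver.

/* Hypothetical per-vpath interrupt context -- not part of the driver. */
struct example_vpath_irq_ctx {
	struct __vxge_hw_vpath_handle *vp_handle;
	int msix_id;
};

static irqreturn_t example_vpath_msix_handler(int irq, void *dev_id)
{
	struct example_vpath_irq_ctx *ctx = dev_id;

	/* ... process Tx/Rx completions for this vpath ... */

	/* In one-shot mode the vector auto-masks when it fires; clearing it
	 * re-arms the vector (in other modes this just unmasks it). */
	vxge_hw_vpath_msix_clear(ctx->vp_handle, ctx->msix_id);

	return IRQ_HANDLED;
}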
+
+/**
* vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
* @vp: Virtual Path handle.
* @msix_id: MSI ID
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 9890d4d596d0..9d9dfda4c7ab 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1695,7 +1695,7 @@ struct vxge_hw_device_stats_sw_err {
* struct vxge_hw_device_stats - Contains HW per-device statistics,
* including hw.
* @devh: HW device handle.
- * @dma_addr: DMA addres of the %hw_info. Given to device to fill-in the stats.
+ * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats.
* @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
* space.
* @hw_info_dma_acch: One more DMA handle used subsequently to free the
@@ -1904,34 +1904,6 @@ enum vxge_hw_ring_tcode {
VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
};
-/**
- * enum enum vxge_hw_ring_hash_type - RTH hash types
- * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
- * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
- * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
- *
- * RTH hash types
- */
-enum vxge_hw_ring_hash_type {
- VXGE_HW_RING_HASH_TYPE_NONE = 0x0,
- VXGE_HW_RING_HASH_TYPE_TCP_IPV4 = 0x1,
- VXGE_HW_RING_HASH_TYPE_UDP_IPV4 = 0x2,
- VXGE_HW_RING_HASH_TYPE_IPV4 = 0x3,
- VXGE_HW_RING_HASH_TYPE_TCP_IPV6 = 0x4,
- VXGE_HW_RING_HASH_TYPE_UDP_IPV6 = 0x5,
- VXGE_HW_RING_HASH_TYPE_IPV6 = 0x6,
- VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX = 0x7,
- VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX = 0x8,
- VXGE_HW_RING_HASH_TYPE_IPV6_EX = 0x9
-};
-
enum vxge_hw_status vxge_hw_ring_rxd_reserve(
struct __vxge_hw_ring *ring_handle,
void **rxdh);
@@ -2109,10 +2081,6 @@ struct __vxge_hw_ring_rxd_priv {
#endif
};
-/* ========================= FIFO PRIVATE API ============================= */
-
-struct vxge_hw_fifo_attr;
-
struct vxge_hw_mempool_cbs {
void (*item_func_alloc)(
struct vxge_hw_mempool *mempoolh,
@@ -2174,6 +2142,10 @@ void vxge_hw_device_clear_tx_rx(
* Virtual Paths
*/
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
+
u32 vxge_hw_vpath_id(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2186,27 +2158,27 @@ enum vxge_hw_vpath_mac_addr_add_mode {
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
struct __vxge_hw_vpath_handle *vpath_handle,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN],
+ u8 *macaddr,
+ u8 *macaddr_mask,
enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
struct __vxge_hw_vpath_handle *vpath_handle,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN]);
+ u8 *macaddr,
+ u8 *macaddr_mask);
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
struct __vxge_hw_vpath_handle *vpath_handle,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN]);
+ u8 *macaddr,
+ u8 *macaddr_mask);
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
struct __vxge_hw_vpath_handle *vpath_handle,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN]);
+ u8 *macaddr,
+ u8 *macaddr_mask);
enum vxge_hw_status
vxge_hw_vpath_vid_add(
@@ -2277,6 +2249,8 @@ void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
int msix_id);
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
+
void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
void
@@ -2302,6 +2276,9 @@ void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
void
+vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
+
+void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
void **dtrh);
@@ -2313,7 +2290,9 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
int
vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
-void
-vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
+
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
#endif
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 53fefe137368..581e21525e85 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -15,8 +15,35 @@
#define VXGE_VERSION_H
#define VXGE_VERSION_MAJOR "2"
-#define VXGE_VERSION_MINOR "0"
-#define VXGE_VERSION_FIX "9"
-#define VXGE_VERSION_BUILD "20840"
+#define VXGE_VERSION_MINOR "5"
+#define VXGE_VERSION_FIX "2"
+#define VXGE_VERSION_BUILD "22259"
#define VXGE_VERSION_FOR "k"
+
+#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
+
+#define VXGE_DEAD_FW_VER_MAJOR 1
+#define VXGE_DEAD_FW_VER_MINOR 4
+#define VXGE_DEAD_FW_VER_BUILD 4
+
+#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
+ VXGE_DEAD_FW_VER_MINOR, \
+ VXGE_DEAD_FW_VER_BUILD)
+
+#define VXGE_EPROM_FW_VER_MAJOR 1
+#define VXGE_EPROM_FW_VER_MINOR 6
+#define VXGE_EPROM_FW_VER_BUILD 1
+
+#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
+ VXGE_EPROM_FW_VER_MINOR, \
+ VXGE_EPROM_FW_VER_BUILD)
+
+#define VXGE_CERT_FW_VER_MAJOR 1
+#define VXGE_CERT_FW_VER_MINOR 8
+#define VXGE_CERT_FW_VER_BUILD 1
+
+#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
+ VXGE_CERT_FW_VER_MINOR, \
+ VXGE_CERT_FW_VER_BUILD)
+
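VXGE_FW_VER() packs major/minor/build into a single integer (major in bits 16 and up, minor in bits 8-15, build in bits 0-7), so firmware versions can be compared numerically against VXGE_CERT_FW_VER and friends. A small user-space check of the arithmetic, under the assumption that minor and build stay below 256:

#include <stdio.h>

#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))

int main(void)
{
	/* 1.8.1 packs to 0x010801 = 67585 */
	printf("1.8.1 -> 0x%06x (%d)\n",
	       VXGE_FW_VER(1, 8, 1), VXGE_FW_VER(1, 8, 1));

	/* Ordering follows the version number as long as minor/build < 256. */
	printf("1.6.1 < 1.8.1: %d\n",
	       VXGE_FW_VER(1, 6, 1) < VXGE_FW_VER(1, 8, 1));
	return 0;
}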
#endif
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index d45b08d1dbc9..4578e5b4b411 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -125,7 +125,7 @@ static u32 dscc4_pci_config_store[16];
/* Module parameters */
MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
-MODULE_DESCRIPTION("Siemens PEB20534 PCI Controler");
+MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
MODULE_LICENSE("GPL");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug,"Enable/disable extra messages");
@@ -1358,7 +1358,7 @@ static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return ret;
}
-static int dscc4_match(struct thingie *p, int value)
+static int dscc4_match(const struct thingie *p, int value)
{
int i;
@@ -1403,7 +1403,7 @@ done:
static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
struct net_device *dev)
{
- struct thingie encoding[] = {
+ static const struct thingie encoding[] = {
{ ENCODING_NRZ, 0x00000000 },
{ ENCODING_NRZI, 0x00200000 },
{ ENCODING_FM_MARK, 0x00400000 },
@@ -1442,7 +1442,7 @@ static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
struct net_device *dev)
{
- struct thingie crc[] = {
+ static const struct thingie crc[] = {
{ PARITY_CRC16_PR0_CCITT, 0x00000010 },
{ PARITY_CRC16_PR1_CCITT, 0x00000000 },
{ PARITY_CRC32_PR0_CCITT, 0x00000011 },
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index ea476cbd38b5..e305274f83fb 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -293,6 +293,7 @@ static inline void sca_tx_done(port_t *port)
struct net_device *dev = port->netdev;
card_t* card = port->card;
u8 stat;
+ unsigned count = 0;
spin_lock(&port->lock);
@@ -316,10 +317,12 @@ static inline void sca_tx_done(port_t *port)
dev->stats.tx_bytes += readw(&desc->len);
}
writeb(0, &desc->stat); /* Free descriptor */
+ count++;
port->txlast = (port->txlast + 1) % card->tx_ring_buffers;
}
- netif_wake_queue(dev);
+ if (count)
+ netif_wake_queue(dev);
spin_unlock(&port->lock);
}
diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile
index dabdcfed4efd..609710d64eb5 100644
--- a/drivers/net/wan/lmc/Makefile
+++ b/drivers/net/wan/lmc/Makefile
@@ -14,4 +14,4 @@ lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o
# -DDEBUG \
# -DLMC_PACKET_LOG
-EXTRA_CFLAGS += -I. $(DBGDEF)
+ccflags-y := -I. $(DBGDEF)
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 515d9b8af01e..1c65d1c33873 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -131,9 +131,8 @@ static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char);
-static int pc300_tiocmset(struct tty_struct *, struct file *,
- unsigned int, unsigned int);
-static int pc300_tiocmget(struct tty_struct *, struct file *);
+static int pc300_tiocmset(struct tty_struct *, unsigned int, unsigned int);
+static int pc300_tiocmget(struct tty_struct *);
/* functions called by PC300 driver */
void cpc_tty_init(pc300dev_t *dev);
@@ -543,7 +542,7 @@ static int cpc_tty_chars_in_buffer(struct tty_struct *tty)
return 0;
}
-static int pc300_tiocmset(struct tty_struct *tty, struct file *file,
+static int pc300_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
st_cpc_tty_area *cpc_tty;
@@ -570,7 +569,7 @@ static int pc300_tiocmset(struct tty_struct *tty, struct file *file,
return 0;
}
-static int pc300_tiocmget(struct tty_struct *tty, struct file *file)
+static int pc300_tiocmget(struct tty_struct *tty)
{
unsigned int result;
unsigned char status;
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index f1549fff0edc..8831a3393ecf 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -275,7 +275,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
dev->base_addr = ioaddr+WD_NIC_OFFSET;
if (dev->irq < 2) {
- int irqmap[] = {9,3,5,7,10,11,15,4};
+ static const int irqmap[] = {9, 3, 5, 7, 10, 11, 15, 4};
int reg1 = inb(ioaddr+1);
int reg4 = inb(ioaddr+4);
if (ancient || reg1 == 0xff) { /* Ack!! No way to read the IRQ! */
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index cdedab46ba21..65bc334ed57b 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -92,54 +92,6 @@ MODULE_PARM_DESC(barkers,
"signal; values are appended to a list--setting one value "
"as zero cleans the existing list and starts a new one.");
-static
-struct i2400m_work *__i2400m_work_setup(
- struct i2400m *i2400m, void (*fn)(struct work_struct *),
- gfp_t gfp_flags, const void *pl, size_t pl_size)
-{
- struct i2400m_work *iw;
-
- iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags);
- if (iw == NULL)
- return NULL;
- iw->i2400m = i2400m_get(i2400m);
- iw->pl_size = pl_size;
- memcpy(iw->pl, pl, pl_size);
- INIT_WORK(&iw->ws, fn);
- return iw;
-}
-
-
-/*
- * Schedule i2400m's specific work on the system's queue.
- *
- * Used for a few cases where we really need it; otherwise, identical
- * to i2400m_queue_work().
- *
- * Returns < 0 errno code on error, 1 if ok.
- *
- * If it returns zero, something really bad happened, as it means the
- * works struct was already queued, but we have just allocated it, so
- * it should not happen.
- */
-static int i2400m_schedule_work(struct i2400m *i2400m,
- void (*fn)(struct work_struct *), gfp_t gfp_flags,
- const void *pl, size_t pl_size)
-{
- int result;
- struct i2400m_work *iw;
-
- result = -ENOMEM;
- iw = __i2400m_work_setup(i2400m, fn, gfp_flags, pl, pl_size);
- if (iw != NULL) {
- result = schedule_work(&iw->ws);
- if (WARN_ON(result == 0))
- result = -ENXIO;
- }
- return result;
-}
-
-
/*
* WiMAX stack operation: relay a message from user space
*
@@ -280,7 +232,7 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
result);
goto error;
}
- /* Extract MAC addresss */
+ /* Extract MAC address */
ddi = (void *) skb->data;
BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
@@ -648,17 +600,11 @@ EXPORT_SYMBOL_GPL(i2400m_post_reset);
static
void __i2400m_dev_reset_handle(struct work_struct *ws)
{
- int result;
- struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
- const char *reason;
- struct i2400m *i2400m = iw->i2400m;
+ struct i2400m *i2400m = container_of(ws, struct i2400m, reset_ws);
+ const char *reason = i2400m->reset_reason;
struct device *dev = i2400m_dev(i2400m);
struct i2400m_reset_ctx *ctx = i2400m->reset_ctx;
-
- if (WARN_ON(iw->pl_size != sizeof(reason)))
- reason = "SW BUG: reason n/a";
- else
- memcpy(&reason, iw->pl, sizeof(reason));
+ int result;
d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
@@ -733,8 +679,6 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
}
}
out:
- i2400m_put(i2400m);
- kfree(iw);
d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
ws, i2400m, reason);
}
@@ -754,8 +698,8 @@ out:
*/
int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
{
- return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
- GFP_ATOMIC, &reason, sizeof(reason));
+ i2400m->reset_reason = reason;
+ return schedule_work(&i2400m->reset_ws);
}
EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
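The reset path above replaces the dynamically allocated i2400m_work wrapper with a work_struct embedded in struct i2400m, recovered in the handler via container_of(), so no per-reset allocation (and no i2400m_get/put pair) is needed. A stand-alone sketch of that pattern under illustrative names; the stub work_struct and the direct call below stand in for the real workqueue API:

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel types/macros, for illustration only. */
struct work_struct { void (*func)(struct work_struct *); };
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_dev {
	const char *reset_reason;
	struct work_struct reset_ws;	/* embedded, so no kmalloc/kfree */
};

static void example_reset_handler(struct work_struct *ws)
{
	struct example_dev *dev = container_of(ws, struct example_dev, reset_ws);

	printf("resetting: %s\n", dev->reset_reason);
}

int main(void)
{
	struct example_dev dev = { .reset_reason = "bus error" };

	dev.reset_ws.func = example_reset_handler;	/* INIT_WORK() in the kernel */
	dev.reset_ws.func(&dev.reset_ws);		/* schedule_work() would run this */
	return 0;
}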
@@ -768,14 +712,9 @@ EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
static
void __i2400m_error_recovery(struct work_struct *ws)
{
- struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
- struct i2400m *i2400m = iw->i2400m;
+ struct i2400m *i2400m = container_of(ws, struct i2400m, recovery_ws);
i2400m_reset(i2400m, I2400M_RT_BUS);
-
- i2400m_put(i2400m);
- kfree(iw);
- return;
}
/*
@@ -805,18 +744,10 @@ void __i2400m_error_recovery(struct work_struct *ws)
*/
void i2400m_error_recovery(struct i2400m *i2400m)
{
- struct device *dev = i2400m_dev(i2400m);
-
- if (atomic_add_return(1, &i2400m->error_recovery) == 1) {
- if (i2400m_schedule_work(i2400m, __i2400m_error_recovery,
- GFP_ATOMIC, NULL, 0) < 0) {
- dev_err(dev, "run out of memory for "
- "scheduling an error recovery ?\n");
- atomic_dec(&i2400m->error_recovery);
- }
- } else
+ if (atomic_add_return(1, &i2400m->error_recovery) == 1)
+ schedule_work(&i2400m->recovery_ws);
+ else
atomic_dec(&i2400m->error_recovery);
- return;
}
EXPORT_SYMBOL_GPL(i2400m_error_recovery);
@@ -886,6 +817,10 @@ void i2400m_init(struct i2400m *i2400m)
mutex_init(&i2400m->init_mutex);
/* wake_tx_ws is initialized in i2400m_tx_setup() */
+
+ INIT_WORK(&i2400m->reset_ws, __i2400m_dev_reset_handle);
+ INIT_WORK(&i2400m->recovery_ws, __i2400m_error_recovery);
+
atomic_set(&i2400m->bus_reset_retries, 0);
i2400m->alive = 0;
@@ -1040,6 +975,9 @@ void i2400m_release(struct i2400m *i2400m)
i2400m_dev_stop(i2400m);
+ cancel_work_sync(&i2400m->reset_ws);
+ cancel_work_sync(&i2400m->recovery_ws);
+
i2400m_debugfs_rm(i2400m);
sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
&i2400m_dev_attr_group);
@@ -1083,8 +1021,6 @@ module_init(i2400m_driver_init);
static
void __exit i2400m_driver_exit(void)
{
- /* for scheds i2400m_dev_reset_handle() */
- flush_scheduled_work();
i2400m_barker_db_exit();
}
module_exit(i2400m_driver_exit);
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 59ac7705e76e..030cbfd31704 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -186,7 +186,7 @@ enum {
* struct i2400m_poke_table - Hardware poke table for the Intel 2400m
*
* This structure will be used to create a device specific poke table
- * to put the device in a consistant state at boot time.
+ * to put the device in a consistent state at boot time.
*
* @address: The device address to poke
*
@@ -632,6 +632,11 @@ struct i2400m {
struct work_struct wake_tx_ws;
struct sk_buff *wake_tx_skb;
+ struct work_struct reset_ws;
+ const char *reset_reason;
+
+ struct work_struct recovery_ws;
+
struct dentry *debugfs_dentry;
const char *fw_name; /* name of the current firmware image */
unsigned long fw_version; /* version of the firmware interface */
@@ -698,7 +703,7 @@ enum i2400m_bm_cmd_flags {
* @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot
* rom after reading the MAC address. This is quite a dirty hack,
* if you ask me -- the device requires the bootrom to be
- * intialized after reading the MAC address.
+ * initialized after reading the MAC address.
*/
enum i2400m_bri {
I2400M_BRI_SOFT = 1 << 1,
@@ -896,20 +901,6 @@ struct device *i2400m_dev(struct i2400m *i2400m)
return i2400m->wimax_dev.net_dev->dev.parent;
}
-/*
- * Helper for scheduling simple work functions
- *
- * This struct can get any kind of payload attached (normally in the
- * form of a struct where you pack the stuff you want to pass to the
- * _work function).
- */
-struct i2400m_work {
- struct work_struct ws;
- struct i2400m *i2400m;
- size_t pl_size;
- u8 pl[0];
-};
-
extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
char *, size_t);
extern int i2400m_msg_size_check(struct i2400m *,
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 9bfc26e1bc6b..be428cae28d8 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -590,7 +590,6 @@ module_init(i2400ms_driver_init);
static
void __exit i2400ms_driver_exit(void)
{
- flush_scheduled_work(); /* for the stuff we schedule */
sdio_unregister_driver(&i2400m_sdio_driver);
}
module_exit(i2400ms_driver_exit);
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index d3365ac85dde..298f2b0b6311 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -514,7 +514,7 @@ int i2400mu_probe(struct usb_interface *iface,
#ifdef CONFIG_PM
iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */
device_init_wakeup(dev, 1);
- usb_dev->autosuspend_delay = 15 * HZ;
+ pm_runtime_set_autosuspend_delay(&usb_dev->dev, 15000);
usb_enable_autosuspend(usb_dev);
#endif
@@ -780,7 +780,6 @@ module_init(i2400mu_driver_init);
static
void __exit i2400mu_driver_exit(void)
{
- flush_scheduled_work(); /* for the stuff we schedule from sysfs.c */
usb_deregister(&i2400mu_driver);
}
module_exit(i2400mu_driver_exit);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 4de4410cd38e..7aeb113cbb90 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -274,11 +274,13 @@ source "drivers/net/wireless/b43legacy/Kconfig"
source "drivers/net/wireless/hostap/Kconfig"
source "drivers/net/wireless/ipw2x00/Kconfig"
source "drivers/net/wireless/iwlwifi/Kconfig"
+source "drivers/net/wireless/iwlegacy/Kconfig"
source "drivers/net/wireless/iwmc3200wifi/Kconfig"
source "drivers/net/wireless/libertas/Kconfig"
source "drivers/net/wireless/orinoco/Kconfig"
source "drivers/net/wireless/p54/Kconfig"
source "drivers/net/wireless/rt2x00/Kconfig"
+source "drivers/net/wireless/rtlwifi/Kconfig"
source "drivers/net/wireless/wl1251/Kconfig"
source "drivers/net/wireless/wl12xx/Kconfig"
source "drivers/net/wireless/zd1211rw/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 06f8ca26c5c1..ddd3fb6ba1d3 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_B43LEGACY) += b43legacy/
obj-$(CONFIG_ZD1211RW) += zd1211rw/
obj-$(CONFIG_RTL8180) += rtl818x/
obj-$(CONFIG_RTL8187) += rtl818x/
+obj-$(CONFIG_RTLWIFI) += rtlwifi/
# 16-bit wireless PCMCIA client drivers
obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
@@ -40,7 +41,8 @@ obj-$(CONFIG_ADM8211) += adm8211.o
obj-$(CONFIG_MWL8K) += mwl8k.o
-obj-$(CONFIG_IWLWIFI) += iwlwifi/
+obj-$(CONFIG_IWLAGN) += iwlwifi/
+obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/
obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_P54_COMMON) += p54/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f9aa1bc0a947..afe2cbc6cb24 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1658,7 +1658,7 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
}
/* Put adm8211_tx_hdr on skb and transmit */
-static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct adm8211_tx_hdr *txhdr;
size_t payload_len, hdrlen;
@@ -1707,8 +1707,6 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
txhdr->retry_limit = info->control.rates[0].count;
adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
-
- return NETDEV_TX_OK;
}
static int adm8211_alloc_rings(struct ieee80211_hw *dev)
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a36e7870b03e..57a79b0475f6 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -4652,24 +4652,18 @@ static ssize_t proc_write( struct file *file,
size_t len,
loff_t *offset )
{
- loff_t pos = *offset;
+ ssize_t ret;
struct proc_data *priv = file->private_data;
if (!priv->wbuffer)
return -EINVAL;
- if (pos < 0)
- return -EINVAL;
- if (pos >= priv->maxwritelen)
- return 0;
- if (len > priv->maxwritelen - pos)
- len = priv->maxwritelen - pos;
- if (copy_from_user(priv->wbuffer + pos, buffer, len))
- return -EFAULT;
- if ( pos + len > priv->writelen )
- priv->writelen = len + file->f_pos;
- *offset = pos + len;
- return len;
+ ret = simple_write_to_buffer(priv->wbuffer, priv->maxwritelen, offset,
+ buffer, len);
+ if (ret > 0)
+ priv->writelen = max_t(int, priv->writelen, *offset);
+
+ return ret;
}
static int proc_status_open(struct inode *inode, struct file *file)
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 1476314afa8a..298601436ee2 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1728,7 +1728,7 @@ static void at76_mac80211_tx_callback(struct urb *urb)
ieee80211_wake_queues(priv->hw);
}
-static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct at76_priv *priv = hw->priv;
struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
@@ -1741,7 +1741,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (priv->tx_urb->status == -EINPROGRESS) {
wiphy_err(priv->hw->wiphy,
"%s called while tx urb is pending\n", __func__);
- return NETDEV_TX_BUSY;
+ dev_kfree_skb_any(skb);
+ return;
}
/* The following code lines are important when the device is going to
@@ -1755,7 +1756,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (compare_ether_addr(priv->bssid, mgmt->bssid)) {
memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
ieee80211_queue_work(hw, &priv->work_join_bssid);
- return NETDEV_TX_BUSY;
+ dev_kfree_skb_any(skb);
+ return;
}
}
@@ -1795,8 +1797,6 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
priv->tx_urb,
priv->tx_urb->hcpriv, priv->tx_urb->complete);
}
-
- return 0;
}
static int at76_mac80211_start(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index 4a37447dfc01..f14a65473fe8 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -290,7 +290,7 @@ struct mib_mac_mgmt {
u8 res;
u8 multi_domain_capability_implemented;
u8 multi_domain_capability_enabled;
- u8 country_string[3];
+ u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
u8 reserved[3];
} __packed;
diff --git a/drivers/net/wireless/ath/ar9170/Kconfig b/drivers/net/wireless/ath/ar9170/Kconfig
index d7a4799d20fb..7b9672b0d090 100644
--- a/drivers/net/wireless/ath/ar9170/Kconfig
+++ b/drivers/net/wireless/ath/ar9170/Kconfig
@@ -1,8 +1,10 @@
config AR9170_USB
- tristate "Atheros AR9170 802.11n USB support"
+ tristate "Atheros AR9170 802.11n USB support (OBSOLETE)"
depends on USB && MAC80211
select FW_LOADER
help
+ This driver is going to get replaced by carl9170.
+
This is a driver for the Atheros "otus" 802.11n USB devices.
These devices require additional firmware (2 files).
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 4f845f80c098..371e4ce49528 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -224,7 +224,7 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
int ar9170_nag_limiter(struct ar9170 *ar);
/* MAC */
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
int ar9170_init_mac(struct ar9170 *ar);
int ar9170_set_qos(struct ar9170 *ar);
int ar9170_update_multicast(struct ar9170 *ar, const u64 mc_hast);
diff --git a/drivers/net/wireless/ath/ar9170/cmd.c b/drivers/net/wireless/ath/ar9170/cmd.c
index 4604de09a8b2..6452c5055a63 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.c
+++ b/drivers/net/wireless/ath/ar9170/cmd.c
@@ -54,7 +54,7 @@ int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len)
int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
{
- __le32 buf[2] = {
+ const __le32 buf[2] = {
cpu_to_le32(reg),
cpu_to_le32(val),
};
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index 32bf79e6a320..b761fec0d721 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -1475,7 +1475,7 @@ static void ar9170_tx(struct ar9170 *ar)
msecs_to_jiffies(AR9170_JANITOR_DELAY));
}
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ar9170 *ar = hw->priv;
struct ieee80211_tx_info *info;
@@ -1493,11 +1493,10 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
skb_queue_tail(&ar->tx_pending[queue], skb);
ar9170_tx(ar);
- return NETDEV_TX_OK;
+ return;
err_free:
dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
}
static int ar9170_op_add_interface(struct ieee80211_hw *hw,
@@ -1945,7 +1944,8 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
static int ar9170_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
switch (action) {
case IEEE80211_AMPDU_RX_START:
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index 5dbb5361fd51..d3be6f9816b5 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -161,8 +161,7 @@ static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
static void ar9170_usb_tx_urb_complete_frame(struct urb *urb)
{
struct sk_buff *skb = urb->context;
- struct ar9170_usb *aru = (struct ar9170_usb *)
- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ struct ar9170_usb *aru = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
if (unlikely(!aru)) {
dev_kfree_skb_irq(skb);
@@ -219,8 +218,7 @@ free:
static void ar9170_usb_rx_completed(struct urb *urb)
{
struct sk_buff *skb = urb->context;
- struct ar9170_usb *aru = (struct ar9170_usb *)
- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ struct ar9170_usb *aru = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int err;
if (!aru)
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 501050c0296f..a6c6a466000f 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -108,12 +108,14 @@ enum ath_cipher {
* struct ath_ops - Register read/write operations
*
* @read: Register read
+ * @multi_read: Multiple register read
* @write: Register write
* @enable_write_buffer: Enable multiple register writes
* @write_flush: flush buffered register writes and disable buffering
*/
struct ath_ops {
unsigned int (*read)(void *, u32 reg_offset);
+ void (*multi_read)(void *, u32 *addr, u32 *val, u16 count);
void (*write)(void *, u32 val, u32 reg_offset);
void (*enable_write_buffer)(void *);
void (*write_flush) (void *);
@@ -126,6 +128,7 @@ struct ath_bus_ops {
void (*read_cachesize)(struct ath_common *common, int *csz);
bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
void (*bt_coex_prep)(struct ath_common *common);
+ void (*extn_synch_en)(struct ath_common *common);
};
struct ath_common {
@@ -162,6 +165,8 @@ struct ath_common {
struct ath_regulatory regulatory;
const struct ath_ops *ops;
const struct ath_bus_ops *bus_ops;
+
+ bool btcoex_enabled;
};
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
@@ -178,4 +183,112 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry);
void ath_hw_cycle_counters_update(struct ath_common *common);
int32_t ath_hw_get_listen_time(struct ath_common *common);
+extern __attribute__ ((format (printf, 3, 4))) int
+ath_printk(const char *level, struct ath_common *common, const char *fmt, ...);
+
+#define ath_emerg(common, fmt, ...) \
+ ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
+#define ath_alert(common, fmt, ...) \
+ ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
+#define ath_crit(common, fmt, ...) \
+ ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
+#define ath_err(common, fmt, ...) \
+ ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
+#define ath_warn(common, fmt, ...) \
+ ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
+#define ath_notice(common, fmt, ...) \
+ ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
+#define ath_info(common, fmt, ...) \
+ ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
+
+/**
+ * enum ath_debug_level - atheros wireless debug level
+ *
+ * @ATH_DBG_RESET: reset processing
+ * @ATH_DBG_QUEUE: hardware queue management
+ * @ATH_DBG_EEPROM: eeprom processing
+ * @ATH_DBG_CALIBRATE: periodic calibration
+ * @ATH_DBG_INTERRUPT: interrupt processing
+ * @ATH_DBG_REGULATORY: regulatory processing
+ * @ATH_DBG_ANI: adaptive noise immunity processing
+ * @ATH_DBG_XMIT: basic xmit operation
+ * @ATH_DBG_BEACON: beacon handling
+ * @ATH_DBG_CONFIG: configuration of the hardware
+ * @ATH_DBG_FATAL: fatal errors, this is the default, DBG_DEFAULT
+ * @ATH_DBG_PS: power save processing
+ * @ATH_DBG_HWTIMER: hardware timer handling
+ * @ATH_DBG_BTCOEX: bluetooth coexistence
+ * @ATH_DBG_BSTUCK: stuck beacons
+ * @ATH_DBG_ANY: enable all debugging
+ *
+ * The debug level is used to control the amount and type of debugging output
+ * we want to see. Each driver has its own method for enabling debugging and
+ * modifying debug level states -- but this is typically done through a
+ * module parameter 'debug' along with a respective 'debug' debugfs file
+ * entry.
+ */
+enum ATH_DEBUG {
+ ATH_DBG_RESET = 0x00000001,
+ ATH_DBG_QUEUE = 0x00000002,
+ ATH_DBG_EEPROM = 0x00000004,
+ ATH_DBG_CALIBRATE = 0x00000008,
+ ATH_DBG_INTERRUPT = 0x00000010,
+ ATH_DBG_REGULATORY = 0x00000020,
+ ATH_DBG_ANI = 0x00000040,
+ ATH_DBG_XMIT = 0x00000080,
+ ATH_DBG_BEACON = 0x00000100,
+ ATH_DBG_CONFIG = 0x00000200,
+ ATH_DBG_FATAL = 0x00000400,
+ ATH_DBG_PS = 0x00000800,
+ ATH_DBG_HWTIMER = 0x00001000,
+ ATH_DBG_BTCOEX = 0x00002000,
+ ATH_DBG_WMI = 0x00004000,
+ ATH_DBG_BSTUCK = 0x00008000,
+ ATH_DBG_ANY = 0xffffffff
+};
+
+#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
+
+#ifdef CONFIG_ATH_DEBUG
+
+#define ath_dbg(common, dbg_mask, fmt, ...) \
+({ \
+ int rtn; \
+ if ((common)->debug_mask & dbg_mask) \
+ rtn = ath_printk(KERN_DEBUG, common, fmt, \
+ ##__VA_ARGS__); \
+ else \
+ rtn = 0; \
+ \
+ rtn; \
+})
+#define ATH_DBG_WARN(foo, arg...) WARN(foo, arg)
+#define ATH_DBG_WARN_ON_ONCE(foo) WARN_ON_ONCE(foo)
+
+#else
+
+static inline __attribute__ ((format (printf, 3, 4))) int
+ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
+ const char *fmt, ...)
+{
+ return 0;
+}
+#define ATH_DBG_WARN(foo, arg...) do {} while (0)
+#define ATH_DBG_WARN_ON_ONCE(foo) ({ \
+ int __ret_warn_once = !!(foo); \
+ unlikely(__ret_warn_once); \
+})
+
+#endif /* CONFIG_ATH_DEBUG */
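As the comment block above describes, each message carries one of the ATH_DBG_* bits and is emitted only when that bit is set in the driver's debug mask, typically loaded from a 'debug' module parameter. A tiny user-space sketch of that filtering; the mask values mirror the enum above, while debug_mask, dbg_print() and the messages are illustrative:

#include <stdio.h>

#define ATH_DBG_RESET		0x00000001
#define ATH_DBG_INTERRUPT	0x00000010
#define ATH_DBG_ANY		0xffffffff

static unsigned int debug_mask = ATH_DBG_RESET;	/* e.g. modprobe ath5k debug=0x1 */

#define dbg_print(mask, fmt, ...)			\
	do {						\
		if (debug_mask & (mask))		\
			printf(fmt, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	dbg_print(ATH_DBG_RESET, "chip reset, mode %d\n", 1);	  /* printed */
	dbg_print(ATH_DBG_INTERRUPT, "irq status 0x%x\n", 0x10); /* filtered out */
	return 0;
}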
+
+/** Returns string describing opmode, or NULL if unknown mode. */
+#ifdef CONFIG_ATH_DEBUG
+const char *ath_opmode_to_string(enum nl80211_iftype opmode);
+#else
+static inline const char *ath_opmode_to_string(enum nl80211_iftype opmode)
+{
+ return "UNKNOWN";
+}
+#endif
+
#endif /* ATH_H */
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index eb83b7b4d0e3..e18a9aa7b6ca 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -1,9 +1,12 @@
config ATH5K
tristate "Atheros 5xxx wireless cards support"
- depends on PCI && MAC80211
+ depends on (PCI || ATHEROS_AR231X) && MAC80211
select MAC80211_LEDS
select LEDS_CLASS
select NEW_LEDS
+ select AVERAGE
+ select ATH5K_AHB if (ATHEROS_AR231X && !PCI)
+ select ATH5K_PCI if (!ATHEROS_AR231X && PCI)
---help---
This module adds support for wireless adapters based on
Atheros 5xxx chipset.
@@ -37,3 +40,27 @@ config ATH5K_DEBUG
modprobe ath5k debug=0x00000400
+config ATH5K_TRACER
+ bool "Atheros 5xxx tracer"
+ depends on ATH5K
+ depends on EVENT_TRACING
+ ---help---
+ Say Y here to enable tracepoints for the ath5k driver
+ using the kernel tracing infrastructure. Select this
+ option if you are interested in debugging the driver.
+
+ If unsure, say N.
+
+config ATH5K_AHB
+ bool "Atheros 5xxx AHB bus support"
+ depends on (ATHEROS_AR231X && !PCI)
+ ---help---
+ This adds support for WiSoC type chipsets of the 5xxx Atheros
+ family.
+
+config ATH5K_PCI
+ bool "Atheros 5xxx PCI bus support"
+ depends on (!ATHEROS_AR231X && PCI)
+ ---help---
+ This adds support for PCI type chipsets of the 5xxx Atheros
+ family.
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index 2242a140e4fe..f60b3899afc4 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -14,5 +14,8 @@ ath5k-y += led.o
ath5k-y += rfkill.o
ath5k-y += ani.o
ath5k-y += sysfs.o
+ath5k-y += mac80211-ops.o
ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
+ath5k-$(CONFIG_ATH5K_AHB) += ahb.o
+ath5k-$(CONFIG_ATH5K_PCI) += pci.o
obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
new file mode 100644
index 000000000000..82324e98efef
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/nl80211.h>
+#include <linux/platform_device.h>
+#include <ar231x_platform.h>
+#include "ath5k.h"
+#include "debug.h"
+#include "base.h"
+#include "reg.h"
+#include "debug.h"
+
+/* return bus cachesize in 4B word units */
+static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
+{
+ *csz = L1_CACHE_BYTES >> 2;
+}
+
+static bool
+ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+{
+ struct ath5k_softc *sc = common->priv;
+ struct platform_device *pdev = to_platform_device(sc->dev);
+ struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ u16 *eeprom, *eeprom_end;
+
+
+ eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
+
+ eeprom += off;
+ if (eeprom > eeprom_end)
+ return false;
+
+ *data = *eeprom;
+ return true;
+}
+
+int ath5k_hw_read_srev(struct ath5k_hw *ah)
+{
+ struct ath5k_softc *sc = ah->ah_sc;
+ struct platform_device *pdev = to_platform_device(sc->dev);
+ struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ ah->ah_mac_srev = bcfg->devid;
+ return 0;
+}
+
+static const struct ath_bus_ops ath_ahb_bus_ops = {
+ .ath_bus_type = ATH_AHB,
+ .read_cachesize = ath5k_ahb_read_cachesize,
+ .eeprom_read = ath5k_ahb_eeprom_read,
+};
+
+/* Initialization */
+static int ath_ahb_probe(struct platform_device *pdev)
+{
+ struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ath5k_softc *sc;
+ struct ieee80211_hw *hw;
+ struct resource *res;
+ void __iomem *mem;
+ int irq;
+ int ret = 0;
+ u32 reg;
+
+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "no platform data specified\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource found\n");
+ ret = -ENXIO;
+ goto err_out;
+ }
+
+ mem = ioremap_nocache(res->start, resource_size(res));
+ if (mem == NULL) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no IRQ resource found\n");
+ ret = -ENXIO;
+ goto err_out;
+ }
+
+ irq = res->start;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath5k_softc), &ath5k_hw_ops);
+ if (hw == NULL) {
+ dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ sc = hw->priv;
+ sc->hw = hw;
+ sc->dev = &pdev->dev;
+ sc->iobase = mem;
+ sc->irq = irq;
+ sc->devid = bcfg->devid;
+
+ if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
+ /* Enable WMAC AHB arbitration */
+ reg = __raw_readl((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+ reg |= AR5K_AR2315_AHB_ARB_CTL_WLAN;
+ __raw_writel(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+
+ /* Enable global WMAC swapping */
+ reg = __raw_readl((void __iomem *) AR5K_AR2315_BYTESWAP);
+ reg |= AR5K_AR2315_BYTESWAP_WMAC;
+ __raw_writel(reg, (void __iomem *) AR5K_AR2315_BYTESWAP);
+ } else {
+		/* Enable WMAC DMA access (assuming 5312 or 231x) */
+ /* TODO: check other platforms */
+ reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE);
+ if (to_platform_device(sc->dev)->id == 0)
+ reg |= AR5K_AR5312_ENABLE_WLAN0;
+ else
+ reg |= AR5K_AR5312_ENABLE_WLAN1;
+ __raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE);
+ }
+
+ ret = ath5k_init_softc(sc, &ath_ahb_bus_ops);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret);
+ ret = -ENODEV;
+ goto err_free_hw;
+ }
+
+ platform_set_drvdata(pdev, hw);
+
+ return 0;
+
+ err_free_hw:
+ ieee80211_free_hw(hw);
+ platform_set_drvdata(pdev, NULL);
+ err_out:
+ return ret;
+}
+
+static int ath_ahb_remove(struct platform_device *pdev)
+{
+ struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+ struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+ struct ath5k_softc *sc;
+ u32 reg;
+
+ if (!hw)
+ return 0;
+
+ sc = hw->priv;
+
+ if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
+ /* Disable WMAC AHB arbitration */
+ reg = __raw_readl((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+ reg &= ~AR5K_AR2315_AHB_ARB_CTL_WLAN;
+ __raw_writel(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
+ } else {
+		/* Stop DMA access */
+ reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE);
+ if (to_platform_device(sc->dev)->id == 0)
+ reg &= ~AR5K_AR5312_ENABLE_WLAN0;
+ else
+ reg &= ~AR5K_AR5312_ENABLE_WLAN1;
+ __raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE);
+ }
+
+ ath5k_deinit_softc(sc);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ath_ahb_driver = {
+ .probe = ath_ahb_probe,
+ .remove = ath_ahb_remove,
+ .driver = {
+ .name = "ar231x-wmac",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init
+ath5k_ahb_init(void)
+{
+ return platform_driver_register(&ath_ahb_driver);
+}
+
+static void __exit
+ath5k_ahb_exit(void)
+{
+ platform_driver_unregister(&ath_ahb_driver);
+}
+
+module_init(ath5k_ahb_init);
+module_exit(ath5k_ahb_exit);
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index f1419198a479..f915f404302d 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -58,20 +58,20 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
{
/* TODO:
* ANI documents suggest the following five levels to use, but the HAL
- * and ath9k use only use the last two levels, making this
+ * and ath9k use only the last two levels, making this
* essentially an on/off option. There *may* be a reason for this (???),
* so i stick with the HAL version for now...
*/
#if 0
- const s8 hi[] = { -18, -18, -16, -14, -12 };
- const s8 lo[] = { -52, -56, -60, -64, -70 };
- const s8 sz[] = { -34, -41, -48, -55, -62 };
- const s8 fr[] = { -70, -72, -75, -78, -80 };
+ static const s8 lo[] = { -52, -56, -60, -64, -70 };
+ static const s8 hi[] = { -18, -18, -16, -14, -12 };
+ static const s8 sz[] = { -34, -41, -48, -55, -62 };
+ static const s8 fr[] = { -70, -72, -75, -78, -80 };
#else
- const s8 sz[] = { -55, -62 };
- const s8 lo[] = { -64, -70 };
- const s8 hi[] = { -14, -12 };
- const s8 fr[] = { -78, -80 };
+ static const s8 lo[] = { -64, -70 };
+ static const s8 hi[] = { -14, -12 };
+ static const s8 sz[] = { -55, -62 };
+ static const s8 fr[] = { -78, -80 };
#endif
if (level < 0 || level >= ARRAY_SIZE(sz)) {
ATH5K_ERR(ah->ah_sc, "noise immuniy level %d out of range",
@@ -102,7 +102,7 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
void
ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
{
- const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+ static const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
if (level < 0 || level >= ARRAY_SIZE(val) ||
level > ah->ah_sc->ani_state.max_spur_level) {
@@ -127,7 +127,7 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
void
ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
{
- const int val[] = { 0, 4, 8 };
+ static const int val[] = { 0, 4, 8 };
if (level < 0 || level >= ARRAY_SIZE(val)) {
ATH5K_ERR(ah->ah_sc, "firstep level %d out of range", level);
@@ -151,12 +151,12 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
void
ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
{
- const int m1l[] = { 127, 50 };
- const int m2l[] = { 127, 40 };
- const int m1[] = { 127, 0x4d };
- const int m2[] = { 127, 0x40 };
- const int m2cnt[] = { 31, 16 };
- const int m2lcnt[] = { 63, 48 };
+ static const int m1l[] = { 127, 50 };
+ static const int m2l[] = { 127, 40 };
+ static const int m1[] = { 127, 0x4d };
+ static const int m2[] = { 127, 0x40 };
+ static const int m2cnt[] = { 31, 16 };
+ static const int m2lcnt[] = { 63, 48 };
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]);
@@ -192,7 +192,7 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
void
ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
{
- const int val[] = { 8, 6 };
+ static const int val[] = { 8, 6 };
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
ah->ah_sc->ani_state.cck_weak_sig = on;
@@ -216,7 +216,7 @@ static void
ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
bool ofdm_trigger)
{
- int rssi = ah->ah_beacon_rssi_avg.avg;
+ int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)",
 ofdm_trigger ? "OFDM" : "CCK");
@@ -301,7 +301,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
static void
ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
- int rssi = ah->ah_beacon_rssi_avg.avg;
+ int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity");
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 308b79e1ff08..8a06dbd39629 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -25,6 +25,7 @@
#include <linux/io.h>
#include <linux/types.h>
+#include <linux/average.h>
#include <net/mac80211.h>
/* RX/TX descriptor hw structs
@@ -153,19 +154,6 @@
udelay(1); \
} while (0)
-/* Register dumps are done per operation mode */
-#define AR5K_INI_RFGAIN_5GHZ 0
-#define AR5K_INI_RFGAIN_2GHZ 1
-
-/* TODO: Clean this up */
-#define AR5K_INI_VAL_11A 0
-#define AR5K_INI_VAL_11A_TURBO 1
-#define AR5K_INI_VAL_11B 2
-#define AR5K_INI_VAL_11G 3
-#define AR5K_INI_VAL_11G_TURBO 4
-#define AR5K_INI_VAL_XR 0
-#define AR5K_INI_VAL_MAX 5
-
/*
* Some tuneable values (these should be changeable by the user)
* TODO: Make use of them and add more options OR use debug/configfs
@@ -221,42 +209,61 @@
/* Initial values */
#define AR5K_INIT_CYCRSSI_THR1 2
-#define AR5K_INIT_TX_LATENCY 502
-#define AR5K_INIT_USEC 39
-#define AR5K_INIT_USEC_TURBO 79
-#define AR5K_INIT_USEC_32 31
-#define AR5K_INIT_SLOT_TIME 396
-#define AR5K_INIT_SLOT_TIME_TURBO 480
-#define AR5K_INIT_ACK_CTS_TIMEOUT 1024
-#define AR5K_INIT_ACK_CTS_TIMEOUT_TURBO 0x08000800
-#define AR5K_INIT_PROG_IFS 920
-#define AR5K_INIT_PROG_IFS_TURBO 960
-#define AR5K_INIT_EIFS 3440
-#define AR5K_INIT_EIFS_TURBO 6880
-#define AR5K_INIT_SIFS 560
-#define AR5K_INIT_SIFS_TURBO 480
-#define AR5K_INIT_SH_RETRY 10
-#define AR5K_INIT_LG_RETRY AR5K_INIT_SH_RETRY
-#define AR5K_INIT_SSH_RETRY 32
-#define AR5K_INIT_SLG_RETRY AR5K_INIT_SSH_RETRY
-#define AR5K_INIT_TX_RETRY 10
-
-#define AR5K_INIT_TRANSMIT_LATENCY ( \
- (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \
- (AR5K_INIT_USEC) \
-)
-#define AR5K_INIT_TRANSMIT_LATENCY_TURBO ( \
- (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \
- (AR5K_INIT_USEC_TURBO) \
-)
-#define AR5K_INIT_PROTO_TIME_CNTRL ( \
- (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS << 12) | \
- (AR5K_INIT_PROG_IFS) \
-)
-#define AR5K_INIT_PROTO_TIME_CNTRL_TURBO ( \
- (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS_TURBO << 12) | \
- (AR5K_INIT_PROG_IFS_TURBO) \
-)
+
+/* Tx retry limit defaults from standard */
+#define AR5K_INIT_RETRY_SHORT 7
+#define AR5K_INIT_RETRY_LONG 4
+
+/* Slot time */
+#define AR5K_INIT_SLOT_TIME_TURBO 6
+#define AR5K_INIT_SLOT_TIME_DEFAULT 9
+#define AR5K_INIT_SLOT_TIME_HALF_RATE 13
+#define AR5K_INIT_SLOT_TIME_QUARTER_RATE 21
+#define AR5K_INIT_SLOT_TIME_B 20
+#define AR5K_SLOT_TIME_MAX 0xffff
+
+/* SIFS */
+#define AR5K_INIT_SIFS_TURBO 6
+/* XXX: 8 from initvals, 10 from standard */
+#define AR5K_INIT_SIFS_DEFAULT_BG 8
+#define AR5K_INIT_SIFS_DEFAULT_A 16
+#define AR5K_INIT_SIFS_HALF_RATE 32
+#define AR5K_INIT_SIFS_QUARTER_RATE 64
+
+/* Used to calculate tx time for non 5/10/40MHz
+ * operation */
+/* It's preamble time + signal time (16 + 4) */
+#define AR5K_INIT_OFDM_PREAMPLE_TIME 20
+/* Preamble time for 40MHz (turbo) operation (min ?) */
+#define AR5K_INIT_OFDM_PREAMBLE_TIME_MIN 14
+#define AR5K_INIT_OFDM_SYMBOL_TIME 4
+#define AR5K_INIT_OFDM_PLCP_BITS 22
+
+/* Rx latency for 5 and 10MHz operation (max ?) */
+#define AR5K_INIT_RX_LAT_MAX 63
+/* Tx latencies from initvals (5212 only but no problem
+ * because we only tweak them on 5212) */
+#define AR5K_INIT_TX_LAT_A 54
+#define AR5K_INIT_TX_LAT_BG 384
+/* Tx latency for 40MHz (turbo) operation (min ?) */
+#define AR5K_INIT_TX_LAT_MIN 32
+/* Default Tx/Rx latencies (same for 5211)*/
+#define AR5K_INIT_TX_LATENCY_5210 54
+#define AR5K_INIT_RX_LATENCY_5210 29
+
+/* Tx frame to Tx data start delay */
+#define AR5K_INIT_TXF2TXD_START_DEFAULT 14
+#define AR5K_INIT_TXF2TXD_START_DELAY_10MHZ 12
+#define AR5K_INIT_TXF2TXD_START_DELAY_5MHZ 13
+
+/* We need to increase PHY switch and agc settling time
+ * in turbo mode */
+#define AR5K_SWITCH_SETTLING 5760
+#define AR5K_SWITCH_SETTLING_TURBO 7168
+
+#define AR5K_AGC_SETTLING 28
+/* 38 on 5210 but shouldn't matter */
+#define AR5K_AGC_SETTLING_TURBO 37
/* GENERIC CHIPSET DEFINITIONS */
@@ -303,12 +310,19 @@ struct ath5k_srev_name {
#define AR5K_SREV_AR5311B 0x30 /* Spirit */
#define AR5K_SREV_AR5211 0x40 /* Oahu */
#define AR5K_SREV_AR5212 0x50 /* Venice */
+#define AR5K_SREV_AR5312_R2 0x52 /* AP31 */
#define AR5K_SREV_AR5212_V4 0x54 /* ??? */
#define AR5K_SREV_AR5213 0x55 /* ??? */
+#define AR5K_SREV_AR5312_R7 0x57 /* AP30 */
+#define AR5K_SREV_AR2313_R8 0x58 /* AP43 */
#define AR5K_SREV_AR5213A 0x59 /* Hainan */
#define AR5K_SREV_AR2413 0x78 /* Griffin lite */
#define AR5K_SREV_AR2414 0x70 /* Griffin */
+#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
+#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
#define AR5K_SREV_AR5424 0x90 /* Condor */
+#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
+#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */
#define AR5K_SREV_AR5414 0xa0 /* Eagle */
#define AR5K_SREV_AR2415 0xb0 /* Talon */
@@ -404,12 +418,10 @@ struct ath5k_srev_name {
enum ath5k_driver_mode {
AR5K_MODE_11A = 0,
- AR5K_MODE_11A_TURBO = 1,
- AR5K_MODE_11B = 2,
- AR5K_MODE_11G = 3,
- AR5K_MODE_11G_TURBO = 4,
+ AR5K_MODE_11B = 1,
+ AR5K_MODE_11G = 2,
AR5K_MODE_XR = 0,
- AR5K_MODE_MAX = 5
+ AR5K_MODE_MAX = 3
};
enum ath5k_ant_mode {
@@ -423,6 +435,12 @@ enum ath5k_ant_mode {
AR5K_ANTMODE_MAX,
};
+enum ath5k_bw_mode {
+ AR5K_BWMODE_DEFAULT = 0, /* 20MHz, default operation */
+ AR5K_BWMODE_5MHZ = 1, /* Quarter rate */
+ AR5K_BWMODE_10MHZ = 2, /* Half rate */
+ AR5K_BWMODE_40MHZ = 3 /* Turbo */
+};
/****************\
TX DEFINITIONS
@@ -495,7 +513,7 @@ enum ath5k_tx_queue_id {
AR5K_TX_QUEUE_ID_NOQCU_DATA = 0,
AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1,
AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/
- AR5K_TX_QUEUE_ID_DATA_MAX = 4, /*IEEE80211_TX_QUEUE_DATA4*/
+ AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/
AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/
AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/
AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/
@@ -655,7 +673,6 @@ struct ath5k_gain {
/* channel_flags */
#define CHANNEL_CW_INT 0x0008 /* Contention Window interference detected */
-#define CHANNEL_TURBO 0x0010 /* Turbo Channel */
#define CHANNEL_CCK 0x0020 /* CCK channel */
#define CHANNEL_OFDM 0x0040 /* OFDM channel */
#define CHANNEL_2GHZ 0x0080 /* 2GHz channel. */
@@ -667,16 +684,10 @@ struct ath5k_gain {
#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
-#define CHANNEL_T (CHANNEL_5GHZ|CHANNEL_OFDM|CHANNEL_TURBO)
-#define CHANNEL_TG (CHANNEL_2GHZ|CHANNEL_OFDM|CHANNEL_TURBO)
-#define CHANNEL_108A CHANNEL_T
-#define CHANNEL_108G CHANNEL_TG
#define CHANNEL_X (CHANNEL_5GHZ|CHANNEL_OFDM|CHANNEL_XR)
-#define CHANNEL_ALL (CHANNEL_OFDM|CHANNEL_CCK|CHANNEL_2GHZ|CHANNEL_5GHZ| \
- CHANNEL_TURBO)
+#define CHANNEL_ALL (CHANNEL_OFDM|CHANNEL_CCK|CHANNEL_2GHZ|CHANNEL_5GHZ)
-#define CHANNEL_ALL_NOTURBO (CHANNEL_ALL & ~CHANNEL_TURBO)
#define CHANNEL_MODES CHANNEL_ALL
/*
@@ -1025,7 +1036,6 @@ struct ath5k_hw {
enum ath5k_int ah_imr;
struct ieee80211_channel *ah_current_channel;
- bool ah_turbo;
bool ah_calibration;
bool ah_single_chip;
@@ -1034,6 +1044,7 @@ struct ath5k_hw {
u32 ah_phy;
u32 ah_mac_srev;
u16 ah_mac_version;
+ u16 ah_mac_revision;
u16 ah_phy_revision;
u16 ah_radio_5ghz_revision;
u16 ah_radio_2ghz_revision;
@@ -1041,15 +1052,18 @@ struct ath5k_hw {
#define ah_modes ah_capabilities.cap_mode
#define ah_ee_version ah_capabilities.cap_eeprom.ee_version
- u32 ah_limit_tx_retries;
+ u8 ah_retry_long;
+ u8 ah_retry_short;
+
u8 ah_coverage_class;
+ bool ah_ack_bitrate_high;
+ u8 ah_bwmode;
/* Antenna Control */
u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
u8 ah_ant_mode;
u8 ah_tx_ant;
u8 ah_def_ant;
- bool ah_software_retry;
struct ath5k_capabilities ah_capabilities;
@@ -1085,12 +1099,14 @@ struct ath5k_hw {
/* Values in 0.25dB units */
s16 txp_min_pwr;
s16 txp_max_pwr;
+ s16 txp_cur_pwr;
/* Values in 0.5dB units */
s16 txp_offset;
s16 txp_ofdm;
s16 txp_cck_ofdm_gainf_delta;
/* Value in dB units */
s16 txp_cck_ofdm_pwr_delta;
+ bool txp_setup;
} ah_txpower;
struct {
@@ -1102,7 +1118,7 @@ struct ath5k_hw {
struct ath5k_nfcal_hist ah_nfcal_hist;
/* average beacon RSSI in our BSS (used by ANI) */
- struct ath5k_avg_val ah_beacon_rssi_avg;
+ struct ewma ah_beacon_rssi_avg;
/* noise floor from last periodic calibration */
s32 ah_noise_floor;
@@ -1131,36 +1147,70 @@ struct ath5k_hw {
/*
* Prototypes
*/
+extern const struct ieee80211_ops ath5k_hw_ops;
-/* Attach/Detach Functions */
-int ath5k_hw_attach(struct ath5k_softc *sc);
-void ath5k_hw_detach(struct ath5k_hw *ah);
+/* Initialization and detach functions */
+int ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops);
+void ath5k_deinit_softc(struct ath5k_softc *sc);
+int ath5k_hw_init(struct ath5k_softc *sc);
+void ath5k_hw_deinit(struct ath5k_hw *ah);
int ath5k_sysfs_register(struct ath5k_softc *sc);
void ath5k_sysfs_unregister(struct ath5k_softc *sc);
+/* base.c */
+struct ath5k_buf;
+struct ath5k_txq;
+
+void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
+bool ath_any_vif_assoc(struct ath5k_softc *sc);
+void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath5k_txq *txq);
+int ath5k_init_hw(struct ath5k_softc *sc);
+int ath5k_stop_hw(struct ath5k_softc *sc);
+void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
+void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
+ struct ieee80211_vif *vif);
+int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
+void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
+int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void ath5k_beacon_config(struct ath5k_softc *sc);
+void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
+void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
+
+/* Chip id helper functions */
+const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
+int ath5k_hw_read_srev(struct ath5k_hw *ah);
+
/* LED functions */
int ath5k_init_leds(struct ath5k_softc *sc);
void ath5k_led_enable(struct ath5k_softc *sc);
void ath5k_led_off(struct ath5k_softc *sc);
void ath5k_unregister_leds(struct ath5k_softc *sc);
+
/* Reset Functions */
int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
int ath5k_hw_on_hold(struct ath5k_hw *ah);
int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
- struct ieee80211_channel *channel, bool change_channel);
+ struct ieee80211_channel *channel, bool fast, bool skip_pcu);
int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
bool is_set);
/* Power management functions */
+
+/* Clock rate related functions */
+unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
+unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
+void ath5k_hw_set_clockrate(struct ath5k_hw *ah);
+
+
/* DMA Related Functions */
void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
-int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
-void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
+int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
-int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
+int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue);
u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
u32 phys_addr);
@@ -1170,38 +1220,43 @@ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask);
void ath5k_hw_update_mib_counters(struct ath5k_hw *ah);
+/* Init/Stop functions */
+void ath5k_hw_dma_init(struct ath5k_hw *ah);
+int ath5k_hw_dma_stop(struct ath5k_hw *ah);
/* EEPROM access functions */
int ath5k_eeprom_init(struct ath5k_hw *ah);
void ath5k_eeprom_detach(struct ath5k_hw *ah);
int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
+
/* Protocol Control Unit Functions */
+/* Helpers */
+int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
+ int len, struct ieee80211_rate *rate);
+unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah);
+unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah);
extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
-/* BSSID Functions */
+/* RX filter control */
int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
void ath5k_hw_set_bssid(struct ath5k_hw *ah);
void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
-/* Receive start/stop functions */
-void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
-void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
-/* RX Filter functions */
void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
+/* Receive (DRU) start/stop functions */
+void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
+void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
/* Beacon control functions */
u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval);
-/* ACK bit rate */
-void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high);
-/* Clock rate related functions */
-unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
-unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
-void ath5k_hw_set_clockrate(struct ath5k_hw *ah);
+/* Init function */
+void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+ u8 mode);
/* Queue Control Unit, DFS Control Unit Functions */
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
@@ -1211,10 +1266,14 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
enum ath5k_tx_queue queue_type,
struct ath5k_txq_info *queue_info);
+void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+ unsigned int queue);
u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
-int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
+int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time);
+/* Init function */
+int ath5k_hw_init_queues(struct ath5k_hw *ah);
/* Hardware Descriptor Functions */
int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
@@ -1224,6 +1283,7 @@ int ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3);
+
/* GPIO Functions */
void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
@@ -1233,11 +1293,13 @@ int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
u32 interrupt_level);
-/* rfkill Functions */
+
+/* RFkill Functions */
void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
-/* Misc functions */
+
+/* Misc functions TODO: Cleanup */
int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
int ath5k_hw_get_capability(struct ath5k_hw *ah,
enum ath5k_capability_type cap_type, u32 capability,
@@ -1245,19 +1307,20 @@ int ath5k_hw_get_capability(struct ath5k_hw *ah,
int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
+
/* Initial register settings functions */
int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
-/* Initialize RF */
-int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
- struct ieee80211_channel *channel,
- unsigned int mode);
-int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq);
+
+/* PHY functions */
+/* Misc PHY functions */
+u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
+int ath5k_hw_phy_disable(struct ath5k_hw *ah);
+/* Gain_F optimization */
enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
/* PHY/RF channel functions */
bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
-int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
/* PHY calibration */
void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
@@ -1266,18 +1329,14 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah);
/* Spur mitigation */
bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
struct ieee80211_channel *channel);
-void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
- struct ieee80211_channel *channel);
-/* Misc PHY functions */
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
-int ath5k_hw_phy_disable(struct ath5k_hw *ah);
/* Antenna control */
void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
void ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode);
/* TX power setup */
-int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
- u8 ee_mode, u8 txpower);
int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
+/* Init function */
+int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+ u8 mode, bool fast);
/*
 * Functions used internally
@@ -1293,6 +1352,32 @@ static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah)
return &(ath5k_hw_common(ah)->regulatory);
}
+#ifdef CONFIG_ATHEROS_AR231X
+#define AR5K_AR2315_PCI_BASE ((void __iomem *)0xb0100000)
+
+static inline void __iomem *ath5k_ahb_reg(struct ath5k_hw *ah, u16 reg)
+{
+ /* On AR2315 and AR2317 the PCI clock domain registers
+ * are outside of the WMAC register space */
+ if (unlikely((reg >= 0x4000) && (reg < 0x5000) &&
+ (ah->ah_mac_srev >= AR5K_SREV_AR2315_R6)))
+ return AR5K_AR2315_PCI_BASE + reg;
+
+ return ah->ah_iobase + reg;
+}
+
+static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
+{
+ return __raw_readl(ath5k_ahb_reg(ah, reg));
+}
+
+static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
+{
+ __raw_writel(val, ath5k_ahb_reg(ah, reg));
+}
+
+#else
+
static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
{
return ioread32(ah->ah_iobase + reg);
@@ -1303,6 +1388,24 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
iowrite32(val, ah->ah_iobase + reg);
}
+#endif
+
+static inline enum ath_bus_type ath5k_get_bus_type(struct ath5k_hw *ah)
+{
+ return ath5k_hw_common(ah)->bus_ops->ath_bus_type;
+}
+
+static inline void ath5k_read_cachesize(struct ath_common *common, int *csz)
+{
+ common->bus_ops->read_cachesize(common, csz);
+}
+
+static inline bool ath5k_hw_nvram_read(struct ath5k_hw *ah, u32 off, u16 *data)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ return common->bus_ops->eeprom_read(common, off, data);
+}
+
static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
{
u32 retval = 0, bit, i;
@@ -1315,27 +1418,4 @@ static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
return retval;
}
-#define AVG_SAMPLES 8
-#define AVG_FACTOR 1000
-
-/**
- * ath5k_moving_average - Exponentially weighted moving average
- * @avg: average structure
- * @val: current value
- *
- * This implementation make use of a struct ath5k_avg_val to prevent rounding
- * errors.
- */
-static inline struct ath5k_avg_val
-ath5k_moving_average(const struct ath5k_avg_val avg, const int val)
-{
- struct ath5k_avg_val new;
- new.avg_weight = avg.avg_weight ?
- (((avg.avg_weight * ((AVG_SAMPLES) - 1)) +
- (val * (AVG_FACTOR))) / (AVG_SAMPLES)) :
- (val * (AVG_FACTOR));
- new.avg = new.avg_weight / (AVG_FACTOR);
- return new;
-}
-
#endif
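
The ath5k_moving_average() helper and its struct ath5k_avg_val are dropped above in favour of the generic struct ewma from <linux/average.h> (ewma_init/ewma_add/ewma_read). Both compute the same scaled exponentially weighted moving average. A minimal, standalone C sketch of that update rule follows; ewma_sketch and the sample RSSI values are made up for illustration, and the weight/scale constants simply reuse the removed AVG_SAMPLES/AVG_FACTOR values:

/* ewma_sketch.c - standalone sketch of the scaled EWMA used for beacon RSSI */
#include <stdio.h>

#define SAMPLES	8	/* weight: roughly how many samples the average spans */
#define FACTOR	1000	/* fixed-point scale, keeps fractional precision */

struct ewma_sketch {
	long internal;	/* running average multiplied by FACTOR */
};

static void ewma_sketch_add(struct ewma_sketch *e, int val)
{
	/* new = (old * (SAMPLES - 1) + val * FACTOR) / SAMPLES */
	e->internal = e->internal ?
		(e->internal * (SAMPLES - 1) + (long)val * FACTOR) / SAMPLES :
		(long)val * FACTOR;
}

static int ewma_sketch_read(const struct ewma_sketch *e)
{
	return e->internal / FACTOR;
}

int main(void)
{
	struct ewma_sketch rssi = { 0 };
	int samples[] = { 35, 40, 38, 37, 42 };	/* dB above noise floor */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		ewma_sketch_add(&rssi, samples[i]);

	printf("average beacon rssi: %d\n", ewma_sketch_read(&rssi));
	return 0;
}

The fixed-point FACTOR is what the removed struct ath5k_avg_val provided; the generic ewma code keeps the same idea but lets each user choose its own factor and weight via ewma_init().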
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index fbe8aca975d8..bc8240560488 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -93,16 +93,16 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_attach - Check if hw is supported and init the needed structs
+ * ath5k_hw_init - Check if hw is supported and init the needed structs
*
- * @sc: The &struct ath5k_softc we got from the driver's attach function
+ * @sc: The &struct ath5k_softc we got from the driver's init_softc function
*
* Check if the device is supported, perform a POST and initialize the needed
* structs. Returns -ENOMEM if we don't have memory for the needed structs,
* -ENODEV if the device is not supported or prints an error msg if something
* else went wrong.
*/
-int ath5k_hw_attach(struct ath5k_softc *sc)
+int ath5k_hw_init(struct ath5k_softc *sc)
{
struct ath5k_hw *ah = sc->ah;
struct ath_common *common = ath5k_hw_common(ah);
@@ -115,11 +115,11 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
* HW information
*/
ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
- ah->ah_turbo = false;
+ ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
ah->ah_imr = 0;
- ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
- ah->ah_software_retry = false;
+ ah->ah_retry_short = AR5K_INIT_RETRY_SHORT;
+ ah->ah_retry_long = AR5K_INIT_RETRY_LONG;
ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
ah->ah_noise_floor = -95; /* until first NF calibration is run */
sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
@@ -128,7 +128,8 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/*
* Find the mac version
*/
- srev = ath5k_hw_reg_read(ah, AR5K_SREV);
+ ath5k_hw_read_srev(ah);
+ srev = ah->ah_mac_srev;
if (srev < AR5K_SREV_AR5311)
ah->ah_version = AR5K_AR5210;
else if (srev < AR5K_SREV_AR5212)
@@ -136,6 +137,10 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
else
ah->ah_version = AR5K_AR5212;
+ /* Get the MAC revision */
+ ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
+ ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
+
/* Fill the ath5k_hw struct with the needed functions */
ret = ath5k_hw_init_desc_functions(ah);
if (ret)
@@ -146,9 +151,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
if (ret)
goto err;
- /* Get MAC, PHY and RADIO revisions */
- ah->ah_mac_srev = srev;
- ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
+ /* Get PHY and RADIO revisions */
ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
0xffffffff;
ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
@@ -217,7 +220,8 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
ah->ah_radio = AR5K_RF5112;
ah->ah_single_chip = false;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B;
- } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4)) {
+ } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4) ||
+ ah->ah_mac_version == (AR5K_SREV_AR2315_R6 >> 4)) {
ah->ah_radio = AR5K_RF2316;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316;
@@ -273,7 +277,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/*
* Write PCI-E power save settings
*/
- if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
+ if ((ah->ah_version == AR5K_AR5212) && pdev && (pci_is_pcie(pdev))) {
ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES);
ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES);
@@ -305,8 +309,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
/* Get misc capabilities */
ret = ath5k_hw_set_capabilities(ah);
if (ret) {
- ATH5K_ERR(sc, "unable to get device capabilities: 0x%04x\n",
- sc->pdev->device);
+ ATH5K_ERR(sc, "unable to get device capabilities\n");
goto err;
}
@@ -346,11 +349,11 @@ err:
}
/**
- * ath5k_hw_detach - Free the ath5k_hw struct
+ * ath5k_hw_deinit - Free the ath5k_hw struct
*
* @ah: The &struct ath5k_hw
*/
-void ath5k_hw_detach(struct ath5k_hw *ah)
+void ath5k_hw_deinit(struct ath5k_hw *ah)
{
__set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
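
With ath5k_hw_read_srev() caching the raw SREV value, the MAC version and revision are now both derived from it with AR5K_REG_MS(). A standalone sketch of that mask-and-shift extraction; REG_MS and the nibble-sized masks here are illustrative stand-ins (the driver's AR5K_SREV_VER/AR5K_SREV_REV definitions in reg.h are authoritative):

/* srev_sketch.c - illustrative SREV field extraction */
#include <stdio.h>
#include <stdint.h>

#define SREV_REV	0x0f	/* illustrative: revision in the low nibble */
#define SREV_REV_S	0
#define SREV_VER	0xf0	/* illustrative: version in the high nibble */
#define SREV_VER_S	4

#define REG_MS(val, mask, shift)	(((val) & (mask)) >> (shift))

int main(void)
{
	uint32_t srev = 0x59;	/* AR5K_SREV_AR5213A ("Hainan") */

	printf("mac version:  0x%x\n",
	       (unsigned int)REG_MS(srev, SREV_VER, SREV_VER_S));
	printf("mac revision: 0x%x\n",
	       (unsigned int)REG_MS(srev, SREV_REV, SREV_REV_S));
	return 0;
}

With this layout, comparisons such as ah_mac_version == (AR5K_SREV_AR2415 >> 4) in the probe path line up with the high-nibble version field.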
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 8251946842e6..4d7f21ee111c 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -47,8 +47,6 @@
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/cache.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
@@ -62,10 +60,12 @@
#include "reg.h"
#include "debug.h"
#include "ani.h"
-#include "../debug.h"
-static int modparam_nohwcrypt;
-module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+int ath5k_modparam_nohwcrypt;
+module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
static int modparam_all_channels;
@@ -78,39 +78,24 @@ MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
-
-static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan);
-static int ath5k_beacon_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
-
-/* Known PCI ids */
-static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
- { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
- { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
- { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
- { PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */
- { PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */
- { PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */
- { PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */
- { PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
- { PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 combatible */
- { PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 combatible */
- { PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 combatible */
- { PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 combatible */
- { PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 combatible */
- { PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 combatible */
- { PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
- { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
- { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
- { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
+
+static int ath5k_init(struct ieee80211_hw *hw);
+static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
+ bool skip_pcu);
+int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
+#ifdef CONFIG_ATHEROS_AR231X
+ { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R2 },
+ { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R7 },
+ { "2313", AR5K_VERSION_MAC, AR5K_SREV_AR2313_R8 },
+ { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R6 },
+ { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R7 },
+ { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R1 },
+ { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R2 },
+#else
{ "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 },
{ "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 },
{ "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A },
@@ -129,6 +114,7 @@ static const struct ath5k_srev_name srev_names[] = {
{ "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 },
{ "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 },
{ "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 },
+#endif
{ "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN },
{ "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
{ "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
@@ -142,10 +128,12 @@ static const struct ath5k_srev_name srev_names[] = {
{ "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B },
{ "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 },
{ "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 },
- { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
- { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
{ "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 },
{ "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
+#ifdef CONFIG_ATHEROS_AR231X
+ { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
+ { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
+#endif
{ "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
};
@@ -191,38 +179,6 @@ static const struct ieee80211_rate ath5k_rates[] = {
/* XR missing */
};
-static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc,
- struct ath5k_buf *bf)
-{
- BUG_ON(!bf);
- if (!bf->skb)
- return;
- pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len,
- PCI_DMA_TODEVICE);
- dev_kfree_skb_any(bf->skb);
- bf->skb = NULL;
- bf->skbaddr = 0;
- bf->desc->ds_data = 0;
-}
-
-static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc,
- struct ath5k_buf *bf)
-{
- struct ath5k_hw *ah = sc->ah;
- struct ath_common *common = ath5k_hw_common(ah);
-
- BUG_ON(!bf);
- if (!bf->skb)
- return;
- pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any(bf->skb);
- bf->skb = NULL;
- bf->skbaddr = 0;
- bf->desc->ds_data = 0;
-}
-
-
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
u64 tsf = ath5k_hw_get_tsf64(ah);
@@ -233,7 +189,7 @@ static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
return (tsf & ~0x7fff) | rstamp;
}
-static const char *
+const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
const char *name = "xxxxx";
@@ -289,91 +245,78 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
\********************/
/*
- * Convert IEEE channel number to MHz frequency.
- */
-static inline short
-ath5k_ieee2mhz(short chan)
-{
- if (chan <= 14 || chan >= 27)
- return ieee80211chan2mhz(chan);
- else
- return 2212 + chan * 20;
-}
-
-/*
* Returns true for the channel numbers used without all_channels modparam.
*/
-static bool ath5k_is_standard_channel(short chan)
+static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
- return ((chan <= 14) ||
- /* UNII 1,2 */
- ((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
+ if (band == IEEE80211_BAND_2GHZ && chan <= 14)
+ return true;
+
+ return /* UNII 1,2 */
+ (((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
/* midband */
((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
/* UNII-3 */
- ((chan & 3) == 1 && chan >= 149 && chan <= 165));
+ ((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
+ /* 802.11j 5.030-5.080 GHz (20MHz) */
+ (chan == 8 || chan == 12 || chan == 16) ||
+ /* 802.11j 4.9GHz (20MHz) */
+ (chan == 184 || chan == 188 || chan == 192 || chan == 196));
}
static unsigned int
-ath5k_copy_channels(struct ath5k_hw *ah,
- struct ieee80211_channel *channels,
- unsigned int mode,
- unsigned int max)
+ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
+ unsigned int mode, unsigned int max)
{
- unsigned int i, count, size, chfreq, freq, ch;
-
- if (!test_bit(mode, ah->ah_modes))
- return 0;
+ unsigned int count, size, chfreq, freq, ch;
+ enum ieee80211_band band;
switch (mode) {
case AR5K_MODE_11A:
- case AR5K_MODE_11A_TURBO:
/* 1..220, but 2GHz frequencies are filtered by check_channel */
- size = 220 ;
+ size = 220;
chfreq = CHANNEL_5GHZ;
+ band = IEEE80211_BAND_5GHZ;
break;
case AR5K_MODE_11B:
case AR5K_MODE_11G:
- case AR5K_MODE_11G_TURBO:
size = 26;
chfreq = CHANNEL_2GHZ;
+ band = IEEE80211_BAND_2GHZ;
break;
default:
ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
return 0;
}
- for (i = 0, count = 0; i < size && max > 0; i++) {
- ch = i + 1 ;
- freq = ath5k_ieee2mhz(ch);
+ count = 0;
+ for (ch = 1; ch <= size && count < max; ch++) {
+ freq = ieee80211_channel_to_frequency(ch, band);
+
+ if (freq == 0) /* mapping failed - not a standard channel */
+ continue;
/* Check if channel is supported by the chipset */
if (!ath5k_channel_ok(ah, freq, chfreq))
continue;
- if (!modparam_all_channels && !ath5k_is_standard_channel(ch))
+ if (!modparam_all_channels &&
+ !ath5k_is_standard_channel(ch, band))
continue;
/* Write channel info and increment counter */
channels[count].center_freq = freq;
- channels[count].band = (chfreq == CHANNEL_2GHZ) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ channels[count].band = band;
switch (mode) {
case AR5K_MODE_11A:
case AR5K_MODE_11G:
channels[count].hw_value = chfreq | CHANNEL_OFDM;
break;
- case AR5K_MODE_11A_TURBO:
- case AR5K_MODE_11G_TURBO:
- channels[count].hw_value = chfreq |
- CHANNEL_OFDM | CHANNEL_TURBO;
- break;
case AR5K_MODE_11B:
channels[count].hw_value = CHANNEL_B;
}
count++;
- max--;
}
return count;
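
ath5k_setup_channels() above now relies on mac80211's ieee80211_channel_to_frequency() rather than the old local ath5k_ieee2mhz() helper, and simply skips channel numbers the helper cannot map (freq == 0). A rough standalone sketch of that channel-number-to-MHz mapping; chan_to_freq and its band handling are simplified for illustration and the mac80211 helper remains authoritative, including the 4.9 GHz public-safety channels referenced in the new ath5k_is_standard_channel():

/* chan2freq_sketch.c - approximate IEEE channel number to MHz mapping */
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

static int chan_to_freq(int chan, enum band band)
{
	if (band == BAND_2GHZ) {
		if (chan == 14)
			return 2484;
		if (chan >= 1 && chan < 14)
			return 2407 + chan * 5;
		return 0;			/* mapping failed */
	}
	/* 5 GHz band, including the 4.9 GHz (802.11j) channels */
	if (chan >= 182 && chan <= 196)
		return 4000 + chan * 5;
	if (chan >= 1 && chan <= 220)
		return 5000 + chan * 5;
	return 0;				/* mapping failed */
}

int main(void)
{
	printf("ch 6   (2GHz) -> %d MHz\n", chan_to_freq(6, BAND_2GHZ));	/* 2437 */
	printf("ch 36  (5GHz) -> %d MHz\n", chan_to_freq(36, BAND_5GHZ));	/* 5180 */
	printf("ch 184 (5GHz) -> %d MHz\n", chan_to_freq(184, BAND_5GHZ));	/* 4920 */
	return 0;
}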
@@ -418,7 +361,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
sband->n_bitrates = 12;
sband->channels = sc->channels;
- sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+ sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11G, max_c);
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -444,7 +387,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
}
sband->channels = sc->channels;
- sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+ sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11B, max_c);
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -464,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
sband->n_bitrates = 8;
sband->channels = &sc->channels[count_c];
- sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+ sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11A, max_c);
hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
@@ -483,7 +426,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
*
* Called with sc->lock.
*/
-static int
+int
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
{
ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
@@ -496,34 +439,12 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
* hardware at the new frequency, and then re-enable
* the relevant bits of the h/w.
*/
- return ath5k_reset(sc, chan);
-}
-
-static void
-ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
-{
- sc->curmode = mode;
-
- if (mode == AR5K_MODE_11A) {
- sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
- } else {
- sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
- }
+ return ath5k_reset(sc, chan, true);
}
-struct ath_vif_iter_data {
- const u8 *hw_macaddr;
- u8 mask[ETH_ALEN];
- u8 active_mac[ETH_ALEN]; /* first active MAC */
- bool need_set_hw_addr;
- bool found_active;
- bool any_assoc;
- enum nl80211_iftype opmode;
-};
-
-static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
- struct ath_vif_iter_data *iter_data = data;
+ struct ath5k_vif_iter_data *iter_data = data;
int i;
struct ath5k_vif *avf = (void *)vif->drv_priv;
@@ -549,28 +470,25 @@ static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
/* Calculate combined mode - when APs are active, operate in AP mode.
* Otherwise use the mode of the new interface. This can currently
* only deal with combinations of APs and STAs. Only one ad-hoc
- * interfaces is allowed above.
+ * interface is allowed.
*/
if (avf->opmode == NL80211_IFTYPE_AP)
iter_data->opmode = NL80211_IFTYPE_AP;
- else
+ else {
+ if (avf->opmode == NL80211_IFTYPE_STATION)
+ iter_data->n_stas++;
if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
iter_data->opmode = avf->opmode;
+ }
}
-static void ath_do_set_opmode(struct ath5k_softc *sc)
-{
- struct ath5k_hw *ah = sc->ah;
- ath5k_hw_set_opmode(ah, sc->opmode);
- ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
- sc->opmode, ath_opmode_to_string(sc->opmode));
-}
-
-void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
- struct ieee80211_vif *vif)
+void
+ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
+ struct ieee80211_vif *vif)
{
struct ath_common *common = ath5k_hw_common(sc->ah);
- struct ath_vif_iter_data iter_data;
+ struct ath5k_vif_iter_data iter_data;
+ u32 rfilt;
/*
* Use the hardware MAC address as reference, the hardware uses it
@@ -581,12 +499,13 @@ void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
iter_data.found_active = false;
iter_data.need_set_hw_addr = true;
iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
+ iter_data.n_stas = 0;
if (vif)
- ath_vif_iter(&iter_data, vif->addr, vif);
+ ath5k_vif_iter(&iter_data, vif->addr, vif);
/* Get list of all active MAC addresses */
- ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
+ ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
&iter_data);
memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);
@@ -595,27 +514,28 @@ void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
/* Nothing active, default to station mode */
sc->opmode = NL80211_IFTYPE_STATION;
- ath_do_set_opmode(sc);
+ ath5k_hw_set_opmode(sc->ah, sc->opmode);
+ ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
+ sc->opmode, ath_opmode_to_string(sc->opmode));
if (iter_data.need_set_hw_addr && iter_data.found_active)
ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac);
if (ath5k_hw_hasbssidmask(sc->ah))
ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
-}
-static void
-ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif)
-{
- struct ath5k_hw *ah = sc->ah;
- u32 rfilt;
+ /* Set up RX Filter */
+ if (iter_data.n_stas > 1) {
+ /* If you have multiple STA interfaces connected to
+ * different APs, ARPs are not received (most of the time?)
+ * Enabling PROMISC appears to fix that problem.
+ */
+ sc->filter_flags |= AR5K_RX_FILTER_PROM;
+ }
- /* configure rx filter */
rfilt = sc->filter_flags;
- ath5k_hw_set_rx_filter(ah, rfilt);
+ ath5k_hw_set_rx_filter(sc->ah, rfilt);
ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
-
- ath5k_update_bssid_mask_and_opmode(sc, vif);
}
static inline int
@@ -628,7 +548,7 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
"hw_rix out of bounds: %x\n", hw_rix))
return 0;
- rix = sc->rate_idx[sc->curband->band][hw_rix];
+ rix = sc->rate_idx[sc->curchan->band][hw_rix];
if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
rix = 0;
@@ -659,10 +579,11 @@ struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
return NULL;
}
- *skb_addr = pci_map_single(sc->pdev,
+ *skb_addr = dma_map_single(sc->dev,
skb->data, common->rx_bufsize,
- PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(sc->dev, *skb_addr))) {
ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
dev_kfree_skb(skb);
return NULL;
@@ -758,8 +679,8 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
/* XXX endianness */
- bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
rate = ieee80211_get_tx_rate(sc->hw, info);
if (!rate) {
@@ -839,7 +760,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
return 0;
err_unmap:
- pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
return ret;
}
@@ -848,7 +769,7 @@ err_unmap:
\*******************/
static int
-ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev)
+ath5k_desc_alloc(struct ath5k_softc *sc)
{
struct ath5k_desc *ds;
struct ath5k_buf *bf;
@@ -859,7 +780,9 @@ ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev)
/* allocate descriptors */
sc->desc_len = sizeof(struct ath5k_desc) *
(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
- sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr);
+
+ sc->desc = dma_alloc_coherent(sc->dev, sc->desc_len,
+ &sc->desc_daddr, GFP_KERNEL);
if (sc->desc == NULL) {
ATH5K_ERR(sc, "can't allocate descriptors\n");
ret = -ENOMEM;
@@ -905,14 +828,45 @@ ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev)
return 0;
err_free:
- pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
+ dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
err:
sc->desc = NULL;
return ret;
}
+void
+ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
+{
+ BUG_ON(!bf);
+ if (!bf->skb)
+ return;
+ dma_unmap_single(sc->dev, bf->skbaddr, bf->skb->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(bf->skb);
+ bf->skb = NULL;
+ bf->skbaddr = 0;
+ bf->desc->ds_data = 0;
+}
+
+void
+ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
+{
+ struct ath5k_hw *ah = sc->ah;
+ struct ath_common *common = ath5k_hw_common(ah);
+
+ BUG_ON(!bf);
+ if (!bf->skb)
+ return;
+ dma_unmap_single(sc->dev, bf->skbaddr, common->rx_bufsize,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(bf->skb);
+ bf->skb = NULL;
+ bf->skbaddr = 0;
+ bf->desc->ds_data = 0;
+}
+
static void
-ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
+ath5k_desc_free(struct ath5k_softc *sc)
{
struct ath5k_buf *bf;
@@ -924,7 +878,7 @@ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
ath5k_txbuf_free_skb(sc, bf);
/* Free memory associated with all descriptors */
- pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
+ dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
sc->desc = NULL;
sc->desc_daddr = 0;
@@ -989,6 +943,7 @@ ath5k_txq_setup(struct ath5k_softc *sc,
spin_lock_init(&txq->lock);
txq->setup = true;
txq->txq_len = 0;
+ txq->txq_max = ATH5K_TXQ_LEN_MAX;
txq->txq_poll_mark = false;
txq->txq_stuck = 0;
}
@@ -1069,62 +1024,44 @@ err:
return ret;
}
+/**
+ * ath5k_drain_tx_buffs - Empty tx buffers
+ *
+ * @sc: The &struct ath5k_softc
+ *
+ * Empty tx buffers from all queues in preparation
+ * for a reset or during shutdown.
+ *
+ * NB: this assumes output has been stopped and
+ * we do not need to block ath5k_tx_tasklet
+ */
static void
-ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
+ath5k_drain_tx_buffs(struct ath5k_softc *sc)
{
+ struct ath5k_txq *txq;
struct ath5k_buf *bf, *bf0;
+ int i;
- /*
- * NB: this assumes output has been stopped and
- * we do not need to block ath5k_tx_tasklet
- */
- spin_lock_bh(&txq->lock);
- list_for_each_entry_safe(bf, bf0, &txq->q, list) {
- ath5k_debug_printtxbuf(sc, bf);
-
- ath5k_txbuf_free_skb(sc, bf);
+ for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
+ if (sc->txqs[i].setup) {
+ txq = &sc->txqs[i];
+ spin_lock_bh(&txq->lock);
+ list_for_each_entry_safe(bf, bf0, &txq->q, list) {
+ ath5k_debug_printtxbuf(sc, bf);
- spin_lock_bh(&sc->txbuflock);
- list_move_tail(&bf->list, &sc->txbuf);
- sc->txbuf_len++;
- txq->txq_len--;
- spin_unlock_bh(&sc->txbuflock);
- }
- txq->link = NULL;
- txq->txq_poll_mark = false;
- spin_unlock_bh(&txq->lock);
-}
+ ath5k_txbuf_free_skb(sc, bf);
-/*
- * Drain the transmit queues and reclaim resources.
- */
-static void
-ath5k_txq_cleanup(struct ath5k_softc *sc)
-{
- struct ath5k_hw *ah = sc->ah;
- unsigned int i;
-
- /* XXX return value */
- if (likely(!test_bit(ATH_STAT_INVALID, sc->status))) {
- /* don't touch the hardware if marked invalid */
- ath5k_hw_stop_tx_dma(ah, sc->bhalq);
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n",
- ath5k_hw_get_txdp(ah, sc->bhalq));
- for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
- if (sc->txqs[i].setup) {
- ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum);
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, "
- "link %p\n",
- sc->txqs[i].qnum,
- ath5k_hw_get_txdp(ah,
- sc->txqs[i].qnum),
- sc->txqs[i].link);
+ spin_lock_bh(&sc->txbuflock);
+ list_move_tail(&bf->list, &sc->txbuf);
+ sc->txbuf_len++;
+ txq->txq_len--;
+ spin_unlock_bh(&sc->txbuflock);
}
+ txq->link = NULL;
+ txq->txq_poll_mark = false;
+ spin_unlock_bh(&txq->lock);
+ }
}
-
- for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
- if (sc->txqs[i].setup)
- ath5k_txq_drainq(sc, &sc->txqs[i]);
}
static void
@@ -1175,7 +1112,7 @@ ath5k_rx_start(struct ath5k_softc *sc)
spin_unlock_bh(&sc->rxbuflock);
ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */
- ath5k_mode_setup(sc, NULL); /* set filters, etc. */
+ ath5k_update_bssid_mask_and_opmode(sc, NULL); /* set filters, etc. */
ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
return 0;
@@ -1184,16 +1121,19 @@ err:
}
/*
- * Disable the receive h/w in preparation for a reset.
+ * Disable the receive logic on PCU (DRU)
+ * in preparation for a shutdown.
+ *
+ * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
+ * does.
*/
static void
ath5k_rx_stop(struct ath5k_softc *sc)
{
struct ath5k_hw *ah = sc->ah;
- ath5k_hw_stop_rx_pcu(ah); /* disable PCU */
ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */
- ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */
+ ath5k_hw_stop_rx_pcu(ah); /* disable PCU */
ath5k_debug_printrxbuffs(sc, ah);
}
@@ -1307,8 +1247,7 @@ ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
return;
- ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg,
- rssi);
+ ewma_add(&ah->ah_beacon_rssi_avg, rssi);
/* in IBSS mode we should keep RSSI statistics per neighbour */
/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
@@ -1417,10 +1356,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
* right now, so it's not too bad...
*/
rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
- rxs->flag |= RX_FLAG_TSFT;
+ rxs->flag |= RX_FLAG_MACTIME_MPDU;
rxs->freq = sc->curchan->center_freq;
- rxs->band = sc->curband->band;
+ rxs->band = sc->curchan->band;
rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
@@ -1435,10 +1374,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
if (rxs->rate_idx >= 0 && rs->rs_rate ==
- sc->curband->bitrates[rxs->rate_idx].hw_value_short)
+ sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
rxs->flag |= RX_FLAG_SHORTPRE;
- ath5k_debug_dump_skb(sc, skb, "RX ", 0);
+ trace_ath5k_rx(sc, skb);
ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
@@ -1551,9 +1490,9 @@ ath5k_tasklet_rx(unsigned long data)
if (!next_skb)
goto next;
- pci_unmap_single(sc->pdev, bf->skbaddr,
+ dma_unmap_single(sc->dev, bf->skbaddr,
common->rx_bufsize,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_put(skb, rs.rs_datalen);
@@ -1574,15 +1513,16 @@ unlock:
* TX Handling *
\*************/
-static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct ath5k_txq *txq)
+void
+ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath5k_txq *txq)
{
struct ath5k_softc *sc = hw->priv;
struct ath5k_buf *bf;
unsigned long flags;
int padsize;
- ath5k_debug_dump_skb(sc, skb, "TX ", 1);
+ trace_ath5k_tx(sc, skb, txq);
/*
* The hardware expects the header padded to 4 byte boundaries.
@@ -1595,7 +1535,7 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
goto drop_packet;
}
- if (txq->txq_len >= ATH5K_TXQ_LEN_MAX)
+ if (txq->txq_len >= txq->txq_max)
ieee80211_stop_queue(hw, txq->qnum);
spin_lock_irqsave(&sc->txbuflock, flags);
@@ -1622,16 +1562,15 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
spin_unlock_irqrestore(&sc->txbuflock, flags);
goto drop_packet;
}
- return NETDEV_TX_OK;
+ return;
drop_packet:
dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
}
static void
ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
- struct ath5k_tx_status *ts)
+ struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
struct ieee80211_tx_info *info;
int i;
@@ -1683,6 +1622,7 @@ ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
else
sc->stats.antenna_tx[0]++; /* invalid */
+ trace_ath5k_tx_complete(sc, skb, txq, ts);
ieee80211_tx_status(sc->hw, skb);
}
@@ -1716,9 +1656,10 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
skb = bf->skb;
bf->skb = NULL;
- pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
- PCI_DMA_TODEVICE);
- ath5k_tx_frame_completed(sc, skb, &ts);
+
+ dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
+ DMA_TO_DEVICE);
+ ath5k_tx_frame_completed(sc, skb, txq, &ts);
}
/*
@@ -1771,12 +1712,13 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
u32 flags;
const int padsize = 0;
- bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
"skbaddr %llx\n", skb, skb->data, skb->len,
(unsigned long long)bf->skbaddr);
- if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) {
+
+ if (dma_mapping_error(sc->dev, bf->skbaddr)) {
ATH5K_ERR(sc, "beacon DMA mapping failed\n");
return -EIO;
}
@@ -1828,7 +1770,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
return 0;
err_unmap:
- pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
return ret;
}
@@ -1839,7 +1781,7 @@ err_unmap:
*
* Called with the beacon lock.
*/
-static int
+int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
int ret;
@@ -1859,8 +1801,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
goto out;
}
- ath5k_debug_dump_skb(sc, skb, "BC ", 1);
-
ath5k_txbuf_free_skb(sc, avf->bbuf);
avf->bbuf->skb = skb;
ret = ath5k_beacon_setup(sc, avf->bbuf);
@@ -1917,7 +1857,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
sc->bmisscount = 0;
}
- if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) {
+ if ((sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) ||
+ sc->opmode == NL80211_IFTYPE_MESH_POINT) {
u64 tsf = ath5k_hw_get_tsf64(ah);
u32 tsftu = TSF_TO_TU(tsf);
int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval;
@@ -1944,15 +1885,18 @@ ath5k_beacon_send(struct ath5k_softc *sc)
* This should never fail since we check above that no frames
* are still pending on the queue.
*/
- if (unlikely(ath5k_hw_stop_tx_dma(ah, sc->bhalq))) {
+ if (unlikely(ath5k_hw_stop_beacon_queue(ah, sc->bhalq))) {
ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq);
/* NB: hw still stops DMA, so proceed */
}
- /* refresh the beacon for AP mode */
- if (sc->opmode == NL80211_IFTYPE_AP)
+ /* refresh the beacon for AP or MESH mode */
+ if (sc->opmode == NL80211_IFTYPE_AP ||
+ sc->opmode == NL80211_IFTYPE_MESH_POINT)
ath5k_beacon_update(sc->hw, vif);
+ trace_ath5k_tx(sc, bf->skb, &sc->txqs[sc->bhalq]);
+
ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
ath5k_hw_start_tx_dma(ah, sc->bhalq);
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
@@ -1983,7 +1927,7 @@ ath5k_beacon_send(struct ath5k_softc *sc)
* when we otherwise know we have to update the timers, but we keep it in this
* function to have it all together in one place.
*/
-static void
+void
ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
{
struct ath5k_hw *ah = sc->ah;
@@ -2085,7 +2029,7 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
* In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
* interrupts to detect TSF updates only.
*/
-static void
+void
ath5k_beacon_config(struct ath5k_softc *sc)
{
struct ath5k_hw *ah = sc->ah;
@@ -2113,7 +2057,7 @@ ath5k_beacon_config(struct ath5k_softc *sc)
} else
ath5k_beacon_update_timers(sc, -1);
} else {
- ath5k_hw_stop_tx_dma(sc->ah, sc->bhalq);
+ ath5k_hw_stop_beacon_queue(sc->ah, sc->bhalq);
}
ath5k_hw_set_imr(ah, sc->imask);
@@ -2175,7 +2119,7 @@ ath5k_intr_calibration_poll(struct ath5k_hw *ah)
* AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}
-static irqreturn_t
+irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
struct ath5k_softc *sc = dev_id;
@@ -2184,7 +2128,8 @@ ath5k_intr(int irq, void *dev_id)
unsigned int counter = 1000;
if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) ||
- !ath5k_hw_is_intr_pending(ah)))
+ ((ath5k_get_bus_type(ah) != ATH_AHB) &&
+ !ath5k_hw_is_intr_pending(ah))))
return IRQ_NONE;
do {
@@ -2250,6 +2195,10 @@ ath5k_intr(int irq, void *dev_id)
tasklet_schedule(&sc->rf_kill.toggleq);
}
+
+ if (ath5k_get_bus_type(ah) == ATH_AHB)
+ break;
+
} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
if (unlikely(!counter))
@@ -2325,6 +2274,8 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
int i;
bool needreset = false;
+ mutex_lock(&sc->lock);
+
for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
if (sc->txqs[i].setup) {
txq = &sc->txqs[i];
@@ -2349,9 +2300,11 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
if (needreset) {
ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
"TX queues stuck, resetting\n");
- ath5k_reset(sc, sc->curchan);
+ ath5k_reset(sc, NULL, true);
}
+ mutex_unlock(&sc->lock);
+
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}
@@ -2361,6 +2314,164 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
* Initialization routines *
\*************************/
+int
+ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_common *common;
+ int ret;
+ int csz;
+
+ /* Initialize driver private data */
+ SET_IEEE80211_DEV(hw, sc->dev);
+ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ /* both antennas can be configured as RX or TX */
+ hw->wiphy->available_antennas_tx = 0x3;
+ hw->wiphy->available_antennas_rx = 0x3;
+
+ hw->extra_tx_headroom = 2;
+ hw->channel_change_time = 5000;
+
+ /*
+ * Mark the device as detached to avoid processing
+ * interrupts until setup is complete.
+ */
+ __set_bit(ATH_STAT_INVALID, sc->status);
+
+ sc->opmode = NL80211_IFTYPE_STATION;
+ sc->bintval = 1000;
+ mutex_init(&sc->lock);
+ spin_lock_init(&sc->rxbuflock);
+ spin_lock_init(&sc->txbuflock);
+ spin_lock_init(&sc->block);
+
+
+ /* Setup interrupt handler */
+ ret = request_irq(sc->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
+ if (ret) {
+ ATH5K_ERR(sc, "request_irq failed\n");
+ goto err;
+ }
+
+ /* If we passed the test, malloc an ath5k_hw struct */
+ sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
+ if (!sc->ah) {
+ ret = -ENOMEM;
+ ATH5K_ERR(sc, "out of memory\n");
+ goto err_irq;
+ }
+
+ sc->ah->ah_sc = sc;
+ sc->ah->ah_iobase = sc->iobase;
+ common = ath5k_hw_common(sc->ah);
+ common->ops = &ath5k_common_ops;
+ common->bus_ops = bus_ops;
+ common->ah = sc->ah;
+ common->hw = hw;
+ common->priv = sc;
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ ath5k_read_cachesize(common, &csz);
+ common->cachelsz = csz << 2; /* convert to bytes */
+
+ spin_lock_init(&common->cc_lock);
+
+ /* Initialize device */
+ ret = ath5k_hw_init(sc);
+ if (ret)
+ goto err_free_ah;
+
+ /* set up multi-rate retry capabilities */
+ if (sc->ah->ah_version == AR5K_AR5212) {
+ hw->max_rates = 4;
+ hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
+ AR5K_INIT_RETRY_LONG);
+ }
+
+ hw->vif_data_size = sizeof(struct ath5k_vif);
+
+ /* Finish private driver data initialization */
+ ret = ath5k_init(hw);
+ if (ret)
+ goto err_ah;
+
+ ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
+ ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
+ sc->ah->ah_mac_srev,
+ sc->ah->ah_phy_revision);
+
+ if (!sc->ah->ah_single_chip) {
+ /* Single chip radio (!RF5111) */
+ if (sc->ah->ah_radio_5ghz_revision &&
+ !sc->ah->ah_radio_2ghz_revision) {
+ /* No 5GHz support -> report 2GHz radio */
+ if (!test_bit(AR5K_MODE_11A,
+ sc->ah->ah_capabilities.cap_mode)) {
+ ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
+ ath5k_chip_name(AR5K_VERSION_RAD,
+ sc->ah->ah_radio_5ghz_revision),
+ sc->ah->ah_radio_5ghz_revision);
+ /* No 2GHz support (5110 and some
+ * 5GHz only cards) -> report 5GHz radio */
+ } else if (!test_bit(AR5K_MODE_11B,
+ sc->ah->ah_capabilities.cap_mode)) {
+ ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
+ ath5k_chip_name(AR5K_VERSION_RAD,
+ sc->ah->ah_radio_5ghz_revision),
+ sc->ah->ah_radio_5ghz_revision);
+ /* Multiband radio */
+ } else {
+ ATH5K_INFO(sc, "RF%s multiband radio found"
+ " (0x%x)\n",
+ ath5k_chip_name(AR5K_VERSION_RAD,
+ sc->ah->ah_radio_5ghz_revision),
+ sc->ah->ah_radio_5ghz_revision);
+ }
+ }
+ /* Multi chip radio (RF5111 - RF2111) ->
+ * report both 2GHz/5GHz radios */
+ else if (sc->ah->ah_radio_5ghz_revision &&
+ sc->ah->ah_radio_2ghz_revision){
+ ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
+ ath5k_chip_name(AR5K_VERSION_RAD,
+ sc->ah->ah_radio_5ghz_revision),
+ sc->ah->ah_radio_5ghz_revision);
+ ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
+ ath5k_chip_name(AR5K_VERSION_RAD,
+ sc->ah->ah_radio_2ghz_revision),
+ sc->ah->ah_radio_2ghz_revision);
+ }
+ }
+
+ ath5k_debug_init_device(sc);
+
+ /* ready to process interrupts */
+ __clear_bit(ATH_STAT_INVALID, sc->status);
+
+ return 0;
+err_ah:
+ ath5k_hw_deinit(sc->ah);
+err_free_ah:
+ kfree(sc->ah);
+err_irq:
+ free_irq(sc->irq, sc);
+err:
+ return ret;
+}
+
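(For orientation: ath5k_init_softc() above expects the bus glue, PCI or AHB, to have filled in sc->hw, sc->dev, sc->iobase and sc->irq and to hand over its own struct ath_bus_ops before the call. A heavily trimmed, hypothetical probe sketch follows; the example_* names and the dropped error handling are illustrative assumptions, not code from this patch, and it assumes ath5k_hw_ops stays visible to the bus glue.)

	/* Hypothetical bus glue: allocate the mac80211 hw, fill in the fields
	 * ath5k_init_softc() reads, then let the common code take over. */
	static const struct ath_bus_ops example_bus_ops = {
		.ath_bus_type = ATH_PCI,
		/* the real ops also provide read_cachesize() and eeprom_read()
		 * callbacks used by the common init and EEPROM code */
	};

	static int example_probe(struct device *dev, void __iomem *mem, int irq)
	{
		struct ieee80211_hw *hw;
		struct ath5k_softc *sc;

		hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
		if (!hw)
			return -ENOMEM;

		sc = hw->priv;
		sc->hw = hw;
		sc->dev = dev;		/* used for DMA mapping */
		sc->iobase = mem;	/* register window, copied to ah_iobase */
		sc->irq = irq;		/* handed to request_irq() by ath5k_init_softc() */

		return ath5k_init_softc(sc, &example_bus_ops);
	}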
static int
ath5k_stop_locked(struct ath5k_softc *sc)
{
@@ -2389,19 +2500,18 @@ ath5k_stop_locked(struct ath5k_softc *sc)
if (!test_bit(ATH_STAT_INVALID, sc->status)) {
ath5k_led_off(sc);
ath5k_hw_set_imr(ah, 0);
- synchronize_irq(sc->pdev->irq);
- }
- ath5k_txq_cleanup(sc);
- if (!test_bit(ATH_STAT_INVALID, sc->status)) {
+ synchronize_irq(sc->irq);
ath5k_rx_stop(sc);
+ ath5k_hw_dma_stop(ah);
+ ath5k_drain_tx_buffs(sc);
ath5k_hw_phy_disable(ah);
}
return 0;
}
-static int
-ath5k_init(struct ath5k_softc *sc)
+int
+ath5k_init_hw(struct ath5k_softc *sc)
{
struct ath5k_hw *ah = sc->ah;
struct ath_common *common = ath5k_hw_common(ah);
@@ -2425,12 +2535,11 @@ ath5k_init(struct ath5k_softc *sc)
* and then setup of the interrupt mask.
*/
sc->curchan = sc->hw->conf.channel;
- sc->curband = &sc->sbands[sc->curchan->band];
sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
- ret = ath5k_reset(sc, NULL);
+ ret = ath5k_reset(sc, NULL, false);
if (ret)
goto done;
@@ -2443,7 +2552,9 @@ ath5k_init(struct ath5k_softc *sc)
for (i = 0; i < common->keymax; i++)
ath_hw_keyreset(common, (u16) i);
- ath5k_hw_set_ack_bitrate_high(ah, true);
+ /* Use higher rates for acks instead of base
+ * rate */
+ ah->ah_ack_bitrate_high = true;
for (i = 0; i < ARRAY_SIZE(sc->bslot); i++)
sc->bslot[i] = NULL;
@@ -2474,7 +2585,7 @@ static void stop_tasklets(struct ath5k_softc *sc)
* if another thread does a system call and the thread doing the
* stop is preempted).
*/
-static int
+int
ath5k_stop_hw(struct ath5k_softc *sc)
{
int ret;
@@ -2527,25 +2638,33 @@ ath5k_stop_hw(struct ath5k_softc *sc)
* This should be called with sc->lock.
*/
static int
-ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
+ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
+ bool skip_pcu)
{
struct ath5k_hw *ah = sc->ah;
- int ret;
+ struct ath_common *common = ath5k_hw_common(ah);
+ int ret, ani_mode;
ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
ath5k_hw_set_imr(ah, 0);
- synchronize_irq(sc->pdev->irq);
+ synchronize_irq(sc->irq);
stop_tasklets(sc);
- if (chan) {
- ath5k_txq_cleanup(sc);
- ath5k_rx_stop(sc);
-
+ /* Save ani mode and disable ANI during
+ * reset. If we don't we might get false
+ * PHY error interrupts. */
+ ani_mode = ah->ah_sc->ani_state.ani_mode;
+ ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
+
+ /* We are going to empty hw queues
+ * so we should also free any remaining
+ * tx buffers */
+ ath5k_drain_tx_buffs(sc);
+ if (chan)
sc->curchan = chan;
- sc->curband = &sc->sbands[chan->band];
- }
- ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL);
+ ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
+ skip_pcu);
if (ret) {
ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
goto err;
@@ -2557,11 +2676,20 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
goto err;
}
- ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
+ ath5k_ani_init(ah, ani_mode);
ah->ah_cal_next_full = jiffies;
ah->ah_cal_next_ani = jiffies;
ah->ah_cal_next_nf = jiffies;
+ ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
+
+ /* clear survey data and cycle counters */
+ memset(&sc->survey, 0, sizeof(sc->survey));
+ spin_lock_bh(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ memset(&common->cc_survey, 0, sizeof(common->cc_survey));
+ memset(&common->cc_ani, 0, sizeof(common->cc_ani));
+ spin_unlock_bh(&common->cc_lock);
/*
* Change channels and update the h/w rate map if we're switching;
@@ -2590,13 +2718,14 @@ static void ath5k_reset_work(struct work_struct *work)
reset_work);
mutex_lock(&sc->lock);
- ath5k_reset(sc, sc->curchan);
+ ath5k_reset(sc, NULL, true);
mutex_unlock(&sc->lock);
}
static int
-ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
+ath5k_init(struct ieee80211_hw *hw)
{
+
struct ath5k_softc *sc = hw->priv;
struct ath5k_hw *ah = sc->ah;
struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
@@ -2604,7 +2733,6 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
u8 mac[ETH_ALEN] = {};
int ret;
- ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
/*
* Check if the MAC has multi-rate retry support.
@@ -2632,16 +2760,10 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
goto err;
}
- /* NB: setup here so ath5k_rate_update is happy */
- if (test_bit(AR5K_MODE_11A, ah->ah_modes))
- ath5k_setcurmode(sc, AR5K_MODE_11A);
- else
- ath5k_setcurmode(sc, AR5K_MODE_11B);
-
/*
* Allocate tx+rx descriptors and populate the lists.
*/
- ret = ath5k_desc_alloc(sc, pdev);
+ ret = ath5k_desc_alloc(sc);
if (ret) {
ATH5K_ERR(sc, "can't allocate descriptors\n");
goto err;
@@ -2666,33 +2788,46 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
goto err_bhal;
}
- /* This order matches mac80211's queue priority, so we can
- * directly use the mac80211 queue number without any mapping */
- txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
- if (IS_ERR(txq)) {
- ATH5K_ERR(sc, "can't setup xmit queue\n");
- ret = PTR_ERR(txq);
- goto err_queues;
- }
- txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
- if (IS_ERR(txq)) {
- ATH5K_ERR(sc, "can't setup xmit queue\n");
- ret = PTR_ERR(txq);
- goto err_queues;
- }
- txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
- if (IS_ERR(txq)) {
- ATH5K_ERR(sc, "can't setup xmit queue\n");
- ret = PTR_ERR(txq);
- goto err_queues;
- }
- txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
- if (IS_ERR(txq)) {
- ATH5K_ERR(sc, "can't setup xmit queue\n");
- ret = PTR_ERR(txq);
- goto err_queues;
+ /* 5211 and 5212 usually support 10 queues but we'd better rely on the
+ * capability information */
+ if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
+ /* This order matches mac80211's queue priority, so we can
+ * directly use the mac80211 queue number without any mapping */
+ txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
+ if (IS_ERR(txq)) {
+ ATH5K_ERR(sc, "can't setup xmit queue\n");
+ ret = PTR_ERR(txq);
+ goto err_queues;
+ }
+ txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
+ if (IS_ERR(txq)) {
+ ATH5K_ERR(sc, "can't setup xmit queue\n");
+ ret = PTR_ERR(txq);
+ goto err_queues;
+ }
+ txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
+ if (IS_ERR(txq)) {
+ ATH5K_ERR(sc, "can't setup xmit queue\n");
+ ret = PTR_ERR(txq);
+ goto err_queues;
+ }
+ txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
+ if (IS_ERR(txq)) {
+ ATH5K_ERR(sc, "can't setup xmit queue\n");
+ ret = PTR_ERR(txq);
+ goto err_queues;
+ }
+ hw->queues = 4;
+ } else {
+ /* older hardware (5210) can only support one data queue */
+ txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
+ if (IS_ERR(txq)) {
+ ATH5K_ERR(sc, "can't setup xmit queue\n");
+ ret = PTR_ERR(txq);
+ goto err_queues;
+ }
+ hw->queues = 1;
}
- hw->queues = 4;
tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
@@ -2705,8 +2840,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
ret = ath5k_eeprom_read_mac(ah, mac);
if (ret) {
- ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
- sc->pdev->device);
+ ATH5K_ERR(sc, "unable to read address from EEPROM\n");
goto err_queues;
}
@@ -2741,15 +2875,15 @@ err_queues:
err_bhal:
ath5k_hw_release_tx_queue(ah, sc->bhalq);
err_desc:
- ath5k_desc_free(sc, pdev);
+ ath5k_desc_free(sc);
err:
return ret;
}
-static void
-ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
+void
+ath5k_deinit_softc(struct ath5k_softc *sc)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ieee80211_hw *hw = sc->hw;
/*
* NB: the order of these is important:
@@ -2764,8 +2898,9 @@ ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
* XXX: ??? detach ath5k_hw ???
* Other than that, it's straightforward...
*/
+ ath5k_debug_finish_device(sc);
ieee80211_unregister_hw(hw);
- ath5k_desc_free(sc, pdev);
+ ath5k_desc_free(sc);
ath5k_txq_release(sc);
ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
ath5k_unregister_leds(sc);
@@ -2776,479 +2911,25 @@ ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
* returns because we'll get called back to reclaim node
* state and potentially want to use them.
*/
+ ath5k_hw_deinit(sc->ah);
+ free_irq(sc->irq, sc);
}
-/********************\
-* Mac80211 functions *
-\********************/
-
-static int
-ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct ath5k_softc *sc = hw->priv;
- u16 qnum = skb_get_queue_mapping(skb);
-
- if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
- dev_kfree_skb_any(skb);
- return 0;
- }
-
- return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
-}
-
-static int ath5k_start(struct ieee80211_hw *hw)
-{
- return ath5k_init(hw->priv);
-}
-
-static void ath5k_stop(struct ieee80211_hw *hw)
+bool
+ath_any_vif_assoc(struct ath5k_softc *sc)
{
- ath5k_stop_hw(hw->priv);
-}
-
-static int ath5k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct ath5k_softc *sc = hw->priv;
- int ret;
- struct ath5k_vif *avf = (void *)vif->drv_priv;
-
- mutex_lock(&sc->lock);
-
- if ((vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC)
- && (sc->num_ap_vifs + sc->num_adhoc_vifs) >= ATH_BCBUF) {
- ret = -ELNRNG;
- goto end;
- }
-
- /* Don't allow other interfaces if one ad-hoc is configured.
- * TODO: Fix the problems with ad-hoc and multiple other interfaces.
- * We would need to operate the HW in ad-hoc mode to allow TSF updates
- * for the IBSS, but this breaks with additional AP or STA interfaces
- * at the moment. */
- if (sc->num_adhoc_vifs ||
- (sc->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
- ATH5K_ERR(sc, "Only one single ad-hoc interface is allowed.\n");
- ret = -ELNRNG;
- goto end;
- }
-
- switch (vif->type) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_MESH_POINT:
- avf->opmode = vif->type;
- break;
- default:
- ret = -EOPNOTSUPP;
- goto end;
- }
-
- sc->nvifs++;
- ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
-
- /* Assign the vap/adhoc to a beacon xmit slot. */
- if ((avf->opmode == NL80211_IFTYPE_AP) ||
- (avf->opmode == NL80211_IFTYPE_ADHOC)) {
- int slot;
-
- WARN_ON(list_empty(&sc->bcbuf));
- avf->bbuf = list_first_entry(&sc->bcbuf, struct ath5k_buf,
- list);
- list_del(&avf->bbuf->list);
-
- avf->bslot = 0;
- for (slot = 0; slot < ATH_BCBUF; slot++) {
- if (!sc->bslot[slot]) {
- avf->bslot = slot;
- break;
- }
- }
- BUG_ON(sc->bslot[avf->bslot] != NULL);
- sc->bslot[avf->bslot] = vif;
- if (avf->opmode == NL80211_IFTYPE_AP)
- sc->num_ap_vifs++;
- else
- sc->num_adhoc_vifs++;
- }
-
- /* Any MAC address is fine, all others are included through the
- * filter.
- */
- memcpy(&sc->lladdr, vif->addr, ETH_ALEN);
- ath5k_hw_set_lladdr(sc->ah, vif->addr);
-
- memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
-
- ath5k_mode_setup(sc, vif);
-
- ret = 0;
-end:
- mutex_unlock(&sc->lock);
- return ret;
-}
-
-static void
-ath5k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_vif *avf = (void *)vif->drv_priv;
- unsigned int i;
-
- mutex_lock(&sc->lock);
- sc->nvifs--;
-
- if (avf->bbuf) {
- ath5k_txbuf_free_skb(sc, avf->bbuf);
- list_add_tail(&avf->bbuf->list, &sc->bcbuf);
- for (i = 0; i < ATH_BCBUF; i++) {
- if (sc->bslot[i] == vif) {
- sc->bslot[i] = NULL;
- break;
- }
- }
- avf->bbuf = NULL;
- }
- if (avf->opmode == NL80211_IFTYPE_AP)
- sc->num_ap_vifs--;
- else if (avf->opmode == NL80211_IFTYPE_ADHOC)
- sc->num_adhoc_vifs--;
-
- ath5k_update_bssid_mask_and_opmode(sc, NULL);
- mutex_unlock(&sc->lock);
-}
-
-/*
- * TODO: Phy disable/diversity etc
- */
-static int
-ath5k_config(struct ieee80211_hw *hw, u32 changed)
-{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
- struct ieee80211_conf *conf = &hw->conf;
- int ret = 0;
-
- mutex_lock(&sc->lock);
-
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ret = ath5k_chan_set(sc, conf->channel);
- if (ret < 0)
- goto unlock;
- }
-
- if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
- (sc->power_level != conf->power_level)) {
- sc->power_level = conf->power_level;
-
- /* Half dB steps */
- ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
- }
-
- /* TODO:
- * 1) Move this on config_interface and handle each case
- * separately eg. when we have only one STA vif, use
- * AR5K_ANTMODE_SINGLE_AP
- *
- * 2) Allow the user to change antenna mode eg. when only
- * one antenna is present
- *
- * 3) Allow the user to set default/tx antenna when possible
- *
- * 4) Default mode should handle 90% of the cases, together
- * with fixed a/b and single AP modes we should be able to
- * handle 99%. Sectored modes are extreme cases and i still
- * haven't found a usage for them. If we decide to support them,
- * then we must allow the user to set how many tx antennas we
- * have available
- */
- ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
-
-unlock:
- mutex_unlock(&sc->lock);
- return ret;
-}
-
-static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
- struct netdev_hw_addr_list *mc_list)
-{
- u32 mfilt[2], val;
- u8 pos;
- struct netdev_hw_addr *ha;
-
- mfilt[0] = 0;
- mfilt[1] = 1;
-
- netdev_hw_addr_list_for_each(ha, mc_list) {
- /* calculate XOR of eight 6-bit values */
- val = get_unaligned_le32(ha->addr + 0);
- pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
- val = get_unaligned_le32(ha->addr + 3);
- pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
- pos &= 0x3f;
- mfilt[pos / 32] |= (1 << (pos % 32));
- /* XXX: we might be able to just do this instead,
- * but not sure, needs testing, if we do use this we'd
- * neet to inform below to not reset the mcast */
- /* ath5k_hw_set_mcast_filterindex(ah,
- * ha->addr[5]); */
- }
-
- return ((u64)(mfilt[1]) << 32) | mfilt[0];
-}
-
-static bool ath_any_vif_assoc(struct ath5k_softc *sc)
-{
- struct ath_vif_iter_data iter_data;
+ struct ath5k_vif_iter_data iter_data;
iter_data.hw_macaddr = NULL;
iter_data.any_assoc = false;
iter_data.need_set_hw_addr = false;
iter_data.found_active = true;
- ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
+ ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
&iter_data);
return iter_data.any_assoc;
}
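(ath5k_vif_iter() is only declared in the base.h hunk further down; its body is not shown in this patch. A rough sketch of the part ath_any_vif_assoc() depends on, with the BSSID-mask and active-MAC bookkeeping suggested by the ath5k_vif_iter_data fields elided, so treat it as illustrative rather than the patch's implementation.)

	/* Illustrative sketch only: mark iter_data->any_assoc when any active
	 * interface reports an association; the real iterator also maintains
	 * the BSSID mask and remembers the first active MAC address. */
	void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
	{
		struct ath5k_vif_iter_data *iter_data = data;
		struct ath5k_vif *avf = (void *)vif->drv_priv;

		if (avf->assoc)
			iter_data->any_assoc = true;

		/* ... mask/active_mac/n_stas bookkeeping elided ... */
	}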
-#define SUPPORTED_FIF_FLAGS \
- FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \
- FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
- FIF_BCN_PRBRESP_PROMISC
-/*
- * o always accept unicast, broadcast, and multicast traffic
- * o multicast traffic for all BSSIDs will be enabled if mac80211
- * says it should be
- * o maintain current state of phy ofdm or phy cck error reception.
- * If the hardware detects any of these type of errors then
- * ath5k_hw_get_rx_filter() will pass to us the respective
- * hardware filters to be able to receive these type of frames.
- * o probe request frames are accepted only when operating in
- * hostap, adhoc, or monitor modes
- * o enable promiscuous mode according to the interface state
- * o accept beacons:
- * - when operating in adhoc mode so the 802.11 layer creates
- * node table entries for peers,
- * - when operating in station mode for collecting rssi data when
- * the station is otherwise quiet, or
- * - when scanning
- */
-static void ath5k_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *new_flags,
- u64 multicast)
-{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
- u32 mfilt[2], rfilt;
-
- mutex_lock(&sc->lock);
-
- mfilt[0] = multicast;
- mfilt[1] = multicast >> 32;
-
- /* Only deal with supported flags */
- changed_flags &= SUPPORTED_FIF_FLAGS;
- *new_flags &= SUPPORTED_FIF_FLAGS;
-
- /* If HW detects any phy or radar errors, leave those filters on.
- * Also, always enable Unicast, Broadcasts and Multicast
- * XXX: move unicast, bssid broadcasts and multicast to mac80211 */
- rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) |
- (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
- AR5K_RX_FILTER_MCAST);
-
- if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
- if (*new_flags & FIF_PROMISC_IN_BSS) {
- __set_bit(ATH_STAT_PROMISC, sc->status);
- } else {
- __clear_bit(ATH_STAT_PROMISC, sc->status);
- }
- }
-
- if (test_bit(ATH_STAT_PROMISC, sc->status))
- rfilt |= AR5K_RX_FILTER_PROM;
-
- /* Note, AR5K_RX_FILTER_MCAST is already enabled */
- if (*new_flags & FIF_ALLMULTI) {
- mfilt[0] = ~0;
- mfilt[1] = ~0;
- }
-
- /* This is the best we can do */
- if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL))
- rfilt |= AR5K_RX_FILTER_PHYERR;
-
- /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
- * and probes for any BSSID */
- if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (sc->nvifs > 1))
- rfilt |= AR5K_RX_FILTER_BEACON;
-
- /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
- * set we should only pass on control frames for this
- * station. This needs testing. I believe right now this
- * enables *all* control frames, which is OK.. but
- * but we should see if we can improve on granularity */
- if (*new_flags & FIF_CONTROL)
- rfilt |= AR5K_RX_FILTER_CONTROL;
-
- /* Additional settings per mode -- this is per ath5k */
-
- /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
-
- switch (sc->opmode) {
- case NL80211_IFTYPE_MESH_POINT:
- rfilt |= AR5K_RX_FILTER_CONTROL |
- AR5K_RX_FILTER_BEACON |
- AR5K_RX_FILTER_PROBEREQ |
- AR5K_RX_FILTER_PROM;
- break;
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_ADHOC:
- rfilt |= AR5K_RX_FILTER_PROBEREQ |
- AR5K_RX_FILTER_BEACON;
- break;
- case NL80211_IFTYPE_STATION:
- if (sc->assoc)
- rfilt |= AR5K_RX_FILTER_BEACON;
- default:
- break;
- }
-
- /* Set filters */
- ath5k_hw_set_rx_filter(ah, rfilt);
-
- /* Set multicast bits */
- ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
- /* Set the cached hw filter flags, this will later actually
- * be set in HW */
- sc->filter_flags = rfilt;
-
- mutex_unlock(&sc->lock);
-}
-
-static int
-ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
- struct ath_common *common = ath5k_hw_common(ah);
- int ret = 0;
-
- if (modparam_nohwcrypt)
- return -EOPNOTSUPP;
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- case WLAN_CIPHER_SUITE_TKIP:
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- if (common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)
- break;
- return -EOPNOTSUPP;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
-
- mutex_lock(&sc->lock);
-
- switch (cmd) {
- case SET_KEY:
- ret = ath_key_config(common, vif, sta, key);
- if (ret >= 0) {
- key->hw_key_idx = ret;
- /* push IV and Michael MIC generation to stack */
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
- key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
- ret = 0;
- }
- break;
- case DISABLE_KEY:
- ath_key_delete(common, key);
- break;
- default:
- ret = -EINVAL;
- }
-
- mmiowb();
- mutex_unlock(&sc->lock);
- return ret;
-}
-
-static int
-ath5k_get_stats(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats)
-{
- struct ath5k_softc *sc = hw->priv;
-
- /* Force update */
- ath5k_hw_update_mib_counters(sc->ah);
-
- stats->dot11ACKFailureCount = sc->stats.ack_fail;
- stats->dot11RTSFailureCount = sc->stats.rts_fail;
- stats->dot11RTSSuccessCount = sc->stats.rts_ok;
- stats->dot11FCSErrorCount = sc->stats.fcs_error;
-
- return 0;
-}
-
-static int ath5k_get_survey(struct ieee80211_hw *hw, int idx,
- struct survey_info *survey)
-{
- struct ath5k_softc *sc = hw->priv;
- struct ieee80211_conf *conf = &hw->conf;
-
- if (idx != 0)
- return -ENOENT;
-
- survey->channel = conf->channel;
- survey->filled = SURVEY_INFO_NOISE_DBM;
- survey->noise = sc->ah->ah_noise_floor;
-
- return 0;
-}
-
-static u64
-ath5k_get_tsf(struct ieee80211_hw *hw)
-{
- struct ath5k_softc *sc = hw->priv;
-
- return ath5k_hw_get_tsf64(sc->ah);
-}
-
-static void
-ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
-{
- struct ath5k_softc *sc = hw->priv;
-
- ath5k_hw_set_tsf64(sc->ah, tsf);
-}
-
-static void
-ath5k_reset_tsf(struct ieee80211_hw *hw)
-{
- struct ath5k_softc *sc = hw->priv;
-
- /*
- * in IBSS mode we need to update the beacon timers too.
- * this will also reset the TSF if we call it with 0
- */
- if (sc->opmode == NL80211_IFTYPE_ADHOC)
- ath5k_beacon_update_timers(sc, 0);
- else
- ath5k_hw_reset_tsf(sc->ah);
-}
-
-static void
+void
set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
struct ath5k_softc *sc = hw->priv;
@@ -3262,494 +2943,3 @@ set_beacon_filter(struct ieee80211_hw *hw, bool enable)
ath5k_hw_set_rx_filter(ah, rfilt);
sc->filter_flags = rfilt;
}
-
-static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes)
-{
- struct ath5k_vif *avf = (void *)vif->drv_priv;
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
- struct ath_common *common = ath5k_hw_common(ah);
- unsigned long flags;
-
- mutex_lock(&sc->lock);
-
- if (changes & BSS_CHANGED_BSSID) {
- /* Cache for later use during resets */
- memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
- common->curaid = 0;
- ath5k_hw_set_bssid(ah);
- mmiowb();
- }
-
- if (changes & BSS_CHANGED_BEACON_INT)
- sc->bintval = bss_conf->beacon_int;
-
- if (changes & BSS_CHANGED_ASSOC) {
- avf->assoc = bss_conf->assoc;
- if (bss_conf->assoc)
- sc->assoc = bss_conf->assoc;
- else
- sc->assoc = ath_any_vif_assoc(sc);
-
- if (sc->opmode == NL80211_IFTYPE_STATION)
- set_beacon_filter(hw, sc->assoc);
- ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
- AR5K_LED_ASSOC : AR5K_LED_INIT);
- if (bss_conf->assoc) {
- ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
- "Bss Info ASSOC %d, bssid: %pM\n",
- bss_conf->aid, common->curbssid);
- common->curaid = bss_conf->aid;
- ath5k_hw_set_bssid(ah);
- /* Once ANI is available you would start it here */
- }
- }
-
- if (changes & BSS_CHANGED_BEACON) {
- spin_lock_irqsave(&sc->block, flags);
- ath5k_beacon_update(hw, vif);
- spin_unlock_irqrestore(&sc->block, flags);
- }
-
- if (changes & BSS_CHANGED_BEACON_ENABLED)
- sc->enable_beacon = bss_conf->enable_beacon;
-
- if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED |
- BSS_CHANGED_BEACON_INT))
- ath5k_beacon_config(sc);
-
- mutex_unlock(&sc->lock);
-}
-
-static void ath5k_sw_scan_start(struct ieee80211_hw *hw)
-{
- struct ath5k_softc *sc = hw->priv;
- if (!sc->assoc)
- ath5k_hw_set_ledstate(sc->ah, AR5K_LED_SCAN);
-}
-
-static void ath5k_sw_scan_complete(struct ieee80211_hw *hw)
-{
- struct ath5k_softc *sc = hw->priv;
- ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
- AR5K_LED_ASSOC : AR5K_LED_INIT);
-}
-
-/**
- * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
- *
- * @hw: struct ieee80211_hw pointer
- * @coverage_class: IEEE 802.11 coverage class number
- *
- * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
- * coverage class. The values are persistent, they are restored after device
- * reset.
- */
-static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
-{
- struct ath5k_softc *sc = hw->priv;
-
- mutex_lock(&sc->lock);
- ath5k_hw_set_coverage_class(sc->ah, coverage_class);
- mutex_unlock(&sc->lock);
-}
-
-static int ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
- struct ath5k_txq_info qi;
- int ret = 0;
-
- if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
- return 0;
-
- mutex_lock(&sc->lock);
-
- ath5k_hw_get_tx_queueprops(ah, queue, &qi);
-
- qi.tqi_aifs = params->aifs;
- qi.tqi_cw_min = params->cw_min;
- qi.tqi_cw_max = params->cw_max;
- qi.tqi_burst_time = params->txop;
-
- ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
- "Configure tx [queue %d], "
- "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
- queue, params->aifs, params->cw_min,
- params->cw_max, params->txop);
-
- if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
- ATH5K_ERR(sc,
- "Unable to update hardware queue %u!\n", queue);
- ret = -EIO;
- } else
- ath5k_hw_reset_tx_queue(ah, queue);
-
- mutex_unlock(&sc->lock);
-
- return ret;
-}
-
-static const struct ieee80211_ops ath5k_hw_ops = {
- .tx = ath5k_tx,
- .start = ath5k_start,
- .stop = ath5k_stop,
- .add_interface = ath5k_add_interface,
- .remove_interface = ath5k_remove_interface,
- .config = ath5k_config,
- .prepare_multicast = ath5k_prepare_multicast,
- .configure_filter = ath5k_configure_filter,
- .set_key = ath5k_set_key,
- .get_stats = ath5k_get_stats,
- .get_survey = ath5k_get_survey,
- .conf_tx = ath5k_conf_tx,
- .get_tsf = ath5k_get_tsf,
- .set_tsf = ath5k_set_tsf,
- .reset_tsf = ath5k_reset_tsf,
- .bss_info_changed = ath5k_bss_info_changed,
- .sw_scan_start = ath5k_sw_scan_start,
- .sw_scan_complete = ath5k_sw_scan_complete,
- .set_coverage_class = ath5k_set_coverage_class,
-};
-
-/********************\
-* PCI Initialization *
-\********************/
-
-static int __devinit
-ath5k_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- void __iomem *mem;
- struct ath5k_softc *sc;
- struct ath_common *common;
- struct ieee80211_hw *hw;
- int ret;
- u8 csz;
-
- /*
- * L0s needs to be disabled on all ath5k cards.
- *
- * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
- * by default in the future in 2.6.36) this will also mean both L1 and
- * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
- * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
- * though but cannot currently undue the effect of a blacklist, for
- * details you can read pcie_aspm_sanity_check() and see how it adjusts
- * the device link capability.
- *
- * It may be possible in the future to implement some PCI API to allow
- * drivers to override blacklists for pre 1.1 PCIe but for now it is
- * best to accept that both L0s and L1 will be disabled completely for
- * distributions shipping with CONFIG_PCIEASPM rather than having this
- * issue present. Motivation for adding this new API will be to help
- * with power consumption for some of these devices.
- */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
-
- ret = pci_enable_device(pdev);
- if (ret) {
- dev_err(&pdev->dev, "can't enable device\n");
- goto err;
- }
-
- /* XXX 32-bit addressing only */
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(&pdev->dev, "32-bit DMA not available\n");
- goto err_dis;
- }
-
- /*
- * Cache line size is used to size and align various
- * structures used to communicate with the hardware.
- */
- pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
- if (csz == 0) {
- /*
- * Linux 2.4.18 (at least) writes the cache line size
- * register as a 16-bit wide register which is wrong.
- * We must have this setup properly for rx buffer
- * DMA to work so force a reasonable value here if it
- * comes up zero.
- */
- csz = L1_CACHE_BYTES >> 2;
- pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
- }
- /*
- * The default setting of latency timer yields poor results,
- * set it to the value used by other systems. It may be worth
- * tweaking this setting more.
- */
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
-
- /* Enable bus mastering */
- pci_set_master(pdev);
-
- /*
- * Disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state.
- */
- pci_write_config_byte(pdev, 0x41, 0);
-
- ret = pci_request_region(pdev, 0, "ath5k");
- if (ret) {
- dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
- goto err_dis;
- }
-
- mem = pci_iomap(pdev, 0, 0);
- if (!mem) {
- dev_err(&pdev->dev, "cannot remap PCI memory region\n") ;
- ret = -EIO;
- goto err_reg;
- }
-
- /*
- * Allocate hw (mac80211 main struct)
- * and hw->priv (driver private data)
- */
- hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
- if (hw == NULL) {
- dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
- ret = -ENOMEM;
- goto err_map;
- }
-
- dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
-
- /* Initialize driver private data */
- SET_IEEE80211_DEV(hw, &pdev->dev);
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM;
-
- hw->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT);
-
- hw->extra_tx_headroom = 2;
- hw->channel_change_time = 5000;
- sc = hw->priv;
- sc->hw = hw;
- sc->pdev = pdev;
-
- /*
- * Mark the device as detached to avoid processing
- * interrupts until setup is complete.
- */
- __set_bit(ATH_STAT_INVALID, sc->status);
-
- sc->iobase = mem; /* So we can unmap it on detach */
- sc->opmode = NL80211_IFTYPE_STATION;
- sc->bintval = 1000;
- mutex_init(&sc->lock);
- spin_lock_init(&sc->rxbuflock);
- spin_lock_init(&sc->txbuflock);
- spin_lock_init(&sc->block);
-
- /* Set private data */
- pci_set_drvdata(pdev, sc);
-
- /* Setup interrupt handler */
- ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
- if (ret) {
- ATH5K_ERR(sc, "request_irq failed\n");
- goto err_free;
- }
-
- /* If we passed the test, malloc an ath5k_hw struct */
- sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
- if (!sc->ah) {
- ret = -ENOMEM;
- ATH5K_ERR(sc, "out of memory\n");
- goto err_irq;
- }
-
- sc->ah->ah_sc = sc;
- sc->ah->ah_iobase = sc->iobase;
- common = ath5k_hw_common(sc->ah);
- common->ops = &ath5k_common_ops;
- common->ah = sc->ah;
- common->hw = hw;
- common->cachelsz = csz << 2; /* convert to bytes */
- spin_lock_init(&common->cc_lock);
-
- /* Initialize device */
- ret = ath5k_hw_attach(sc);
- if (ret) {
- goto err_free_ah;
- }
-
- /* set up multi-rate retry capabilities */
- if (sc->ah->ah_version == AR5K_AR5212) {
- hw->max_rates = 4;
- hw->max_rate_tries = 11;
- }
-
- hw->vif_data_size = sizeof(struct ath5k_vif);
-
- /* Finish private driver data initialization */
- ret = ath5k_attach(pdev, hw);
- if (ret)
- goto err_ah;
-
- ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
- ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
- sc->ah->ah_mac_srev,
- sc->ah->ah_phy_revision);
-
- if (!sc->ah->ah_single_chip) {
- /* Single chip radio (!RF5111) */
- if (sc->ah->ah_radio_5ghz_revision &&
- !sc->ah->ah_radio_2ghz_revision) {
- /* No 5GHz support -> report 2GHz radio */
- if (!test_bit(AR5K_MODE_11A,
- sc->ah->ah_capabilities.cap_mode)) {
- ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
- ath5k_chip_name(AR5K_VERSION_RAD,
- sc->ah->ah_radio_5ghz_revision),
- sc->ah->ah_radio_5ghz_revision);
- /* No 2GHz support (5110 and some
- * 5Ghz only cards) -> report 5Ghz radio */
- } else if (!test_bit(AR5K_MODE_11B,
- sc->ah->ah_capabilities.cap_mode)) {
- ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
- ath5k_chip_name(AR5K_VERSION_RAD,
- sc->ah->ah_radio_5ghz_revision),
- sc->ah->ah_radio_5ghz_revision);
- /* Multiband radio */
- } else {
- ATH5K_INFO(sc, "RF%s multiband radio found"
- " (0x%x)\n",
- ath5k_chip_name(AR5K_VERSION_RAD,
- sc->ah->ah_radio_5ghz_revision),
- sc->ah->ah_radio_5ghz_revision);
- }
- }
- /* Multi chip radio (RF5111 - RF2111) ->
- * report both 2GHz/5GHz radios */
- else if (sc->ah->ah_radio_5ghz_revision &&
- sc->ah->ah_radio_2ghz_revision){
- ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
- ath5k_chip_name(AR5K_VERSION_RAD,
- sc->ah->ah_radio_5ghz_revision),
- sc->ah->ah_radio_5ghz_revision);
- ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
- ath5k_chip_name(AR5K_VERSION_RAD,
- sc->ah->ah_radio_2ghz_revision),
- sc->ah->ah_radio_2ghz_revision);
- }
- }
-
- ath5k_debug_init_device(sc);
-
- /* ready to process interrupts */
- __clear_bit(ATH_STAT_INVALID, sc->status);
-
- return 0;
-err_ah:
- ath5k_hw_detach(sc->ah);
-err_free_ah:
- kfree(sc->ah);
-err_irq:
- free_irq(pdev->irq, sc);
-err_free:
- ieee80211_free_hw(hw);
-err_map:
- pci_iounmap(pdev, mem);
-err_reg:
- pci_release_region(pdev, 0);
-err_dis:
- pci_disable_device(pdev);
-err:
- return ret;
-}
-
-static void __devexit
-ath5k_pci_remove(struct pci_dev *pdev)
-{
- struct ath5k_softc *sc = pci_get_drvdata(pdev);
-
- ath5k_debug_finish_device(sc);
- ath5k_detach(pdev, sc->hw);
- ath5k_hw_detach(sc->ah);
- kfree(sc->ah);
- free_irq(pdev->irq, sc);
- pci_iounmap(pdev, sc->iobase);
- pci_release_region(pdev, 0);
- pci_disable_device(pdev);
- ieee80211_free_hw(sc->hw);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ath5k_pci_suspend(struct device *dev)
-{
- struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
-
- ath5k_led_off(sc);
- return 0;
-}
-
-static int ath5k_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct ath5k_softc *sc = pci_get_drvdata(pdev);
-
- /*
- * Suspend/Resume resets the PCI configuration space, so we have to
- * re-disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state
- */
- pci_write_config_byte(pdev, 0x41, 0);
-
- ath5k_led_enable(sc);
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
-#define ATH5K_PM_OPS (&ath5k_pm_ops)
-#else
-#define ATH5K_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
-
-static struct pci_driver ath5k_pci_driver = {
- .name = KBUILD_MODNAME,
- .id_table = ath5k_pci_id_table,
- .probe = ath5k_pci_probe,
- .remove = __devexit_p(ath5k_pci_remove),
- .driver.pm = ATH5K_PM_OPS,
-};
-
-/*
- * Module init/exit functions
- */
-static int __init
-init_ath5k_pci(void)
-{
- int ret;
-
- ret = pci_register_driver(&ath5k_pci_driver);
- if (ret) {
- printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
- return ret;
- }
-
- return 0;
-}
-
-static void __exit
-exit_ath5k_pci(void)
-{
- pci_unregister_driver(&ath5k_pci_driver);
-}
-
-module_init(init_ath5k_pci);
-module_exit(exit_ath5k_pci);
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 9a79773cdc2a..978f1f4ac2f3 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -86,6 +86,7 @@ struct ath5k_txq {
spinlock_t lock; /* lock on q and link */
bool setup;
int txq_len; /* number of queued buffers */
+ int txq_max; /* max allowed num of queued buffers */
bool txq_poll_mark;
unsigned int txq_stuck; /* informational counter */
};
@@ -169,7 +170,10 @@ struct ath5k_vif {
/* Software Carrier, keeps track of the driver state
* associated with an instance of a device */
struct ath5k_softc {
- struct pci_dev *pdev; /* for dma mapping */
+ struct pci_dev *pdev;
+ struct device *dev; /* for dma mapping */
+ int irq;
+ u16 devid;
void __iomem *iobase; /* address of the device */
struct mutex lock; /* dev-level lock */
struct ieee80211_hw *hw; /* IEEE 802.11 common */
@@ -180,8 +184,6 @@ struct ath5k_softc {
enum nl80211_iftype opmode;
struct ath5k_hw *ah; /* Atheros HW */
- struct ieee80211_supported_band *curband;
-
#ifdef CONFIG_ATH5K_DEBUG
struct ath5k_dbg_info debug; /* debug info */
#endif /* CONFIG_ATH5K_DEBUG */
@@ -199,7 +201,6 @@ struct ath5k_softc {
#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
- unsigned int curmode; /* current phy mode */
struct ieee80211_channel *curchan; /* current h/w channel */
u16 nvifs;
@@ -255,8 +256,23 @@ struct ath5k_softc {
struct tasklet_struct ani_tasklet; /* ANI calibration */
struct delayed_work tx_complete_work;
+
+ struct survey_info survey; /* collected survey info */
};
+struct ath5k_vif_iter_data {
+ const u8 *hw_macaddr;
+ u8 mask[ETH_ALEN];
+ u8 active_mac[ETH_ALEN]; /* first active MAC */
+ bool need_set_hw_addr;
+ bool found_active;
+ bool any_assoc;
+ enum nl80211_iftype opmode;
+ int n_stas;
+};
+void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
+
+
#define ath5k_hw_hasbssidmask(_ah) \
(ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0)
#define ath5k_hw_hasveol(_ah) \
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index beae519aa735..f77e8a703c5c 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -32,24 +32,24 @@
*/
int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
{
+ struct ath5k_capabilities *caps = &ah->ah_capabilities;
u16 ee_header;
/* Capabilities stored in the EEPROM */
- ee_header = ah->ah_capabilities.cap_eeprom.ee_header;
+ ee_header = caps->cap_eeprom.ee_header;
if (ah->ah_version == AR5K_AR5210) {
/*
* Set radio capabilities
* (The AR5110 only supports the middle 5GHz band)
*/
- ah->ah_capabilities.cap_range.range_5ghz_min = 5120;
- ah->ah_capabilities.cap_range.range_5ghz_max = 5430;
- ah->ah_capabilities.cap_range.range_2ghz_min = 0;
- ah->ah_capabilities.cap_range.range_2ghz_max = 0;
+ caps->cap_range.range_5ghz_min = 5120;
+ caps->cap_range.range_5ghz_max = 5430;
+ caps->cap_range.range_2ghz_min = 0;
+ caps->cap_range.range_2ghz_max = 0;
/* Set supported modes */
- __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode);
- __set_bit(AR5K_MODE_11A_TURBO, ah->ah_capabilities.cap_mode);
+ __set_bit(AR5K_MODE_11A, caps->cap_mode);
} else {
/*
* XXX The transceiver supports frequencies from 4920 to 6100MHz
@@ -57,9 +57,8 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
* XXX current ieee80211 implementation because the IEEE
* XXX channel mapping does not support negative channel
* XXX numbers (2312MHz is channel -19). Of course, this
- * XXX doesn't matter because these channels are out of range
- * XXX but some regulation domains like MKK (Japan) will
- * XXX support frequencies somewhere around 4.8GHz.
+ * XXX doesn't matter because these channels are out of the
+ * XXX legal range.
*/
/*
@@ -67,18 +66,14 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
*/
if (AR5K_EEPROM_HDR_11A(ee_header)) {
- /* 4920 */
- ah->ah_capabilities.cap_range.range_5ghz_min = 5005;
- ah->ah_capabilities.cap_range.range_5ghz_max = 6100;
+ if (ath_is_49ghz_allowed(caps->cap_eeprom.ee_regdomain))
+ caps->cap_range.range_5ghz_min = 4920;
+ else
+ caps->cap_range.range_5ghz_min = 5005;
+ caps->cap_range.range_5ghz_max = 6100;
/* Set supported modes */
- __set_bit(AR5K_MODE_11A,
- ah->ah_capabilities.cap_mode);
- __set_bit(AR5K_MODE_11A_TURBO,
- ah->ah_capabilities.cap_mode);
- if (ah->ah_version == AR5K_AR5212)
- __set_bit(AR5K_MODE_11G_TURBO,
- ah->ah_capabilities.cap_mode);
+ __set_bit(AR5K_MODE_11A, caps->cap_mode);
}
/* Enable 802.11b if a 2GHz capable radio (2111/5112) is
@@ -87,32 +82,29 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
(AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)) {
/* 2312 */
- ah->ah_capabilities.cap_range.range_2ghz_min = 2412;
- ah->ah_capabilities.cap_range.range_2ghz_max = 2732;
+ caps->cap_range.range_2ghz_min = 2412;
+ caps->cap_range.range_2ghz_max = 2732;
if (AR5K_EEPROM_HDR_11B(ee_header))
- __set_bit(AR5K_MODE_11B,
- ah->ah_capabilities.cap_mode);
+ __set_bit(AR5K_MODE_11B, caps->cap_mode);
if (AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)
- __set_bit(AR5K_MODE_11G,
- ah->ah_capabilities.cap_mode);
+ __set_bit(AR5K_MODE_11G, caps->cap_mode);
}
}
/* Set number of supported TX queues */
if (ah->ah_version == AR5K_AR5210)
- ah->ah_capabilities.cap_queues.q_tx_num =
- AR5K_NUM_TX_QUEUES_NOQCU;
+ caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU;
else
- ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
+ caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
/* newer hardware has PHY error counters */
if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
- ah->ah_capabilities.cap_has_phyerr_counters = true;
+ caps->cap_has_phyerr_counters = true;
else
- ah->ah_capabilities.cap_has_phyerr_counters = false;
+ caps->cap_has_phyerr_counters = false;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index acda56ee521b..0230f30e9e9a 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -60,7 +60,6 @@
#include "base.h"
#include "debug.h"
-#include "../debug.h"
static unsigned int ath5k_debug;
module_param_named(debug, ath5k_debug, uint, 0);
@@ -309,9 +308,8 @@ static const struct {
{ ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" },
{ ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" },
{ ATH5K_DEBUG_LED, "led", "LED management" },
- { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" },
- { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
+ { ATH5K_DEBUG_DMA, "dma", "dma start/stop" },
{ ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
{ ATH5K_DEBUG_DESC, "desc", "descriptor chains" },
{ ATH5K_DEBUG_ANY, "all", "show all debug levels" },
@@ -554,63 +552,63 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
len += snprintf(buf+len, sizeof(buf)-len,
"RX\n---------------------\n");
- len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%d\t(%d%%)\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
st->rxerr_crc,
st->rx_all_count > 0 ?
st->rxerr_crc*100/st->rx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%d\t(%d%%)\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%u\t(%u%%)\n",
st->rxerr_phy,
st->rx_all_count > 0 ?
st->rxerr_phy*100/st->rx_all_count : 0);
for (i = 0; i < 32; i++) {
if (st->rxerr_phy_code[i])
len += snprintf(buf+len, sizeof(buf)-len,
- " phy_err[%d]\t%d\n",
+ " phy_err[%u]\t%u\n",
i, st->rxerr_phy_code[i]);
}
- len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%u\t(%u%%)\n",
st->rxerr_fifo,
st->rx_all_count > 0 ?
st->rxerr_fifo*100/st->rx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%d\t(%d%%)\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%u\t(%u%%)\n",
st->rxerr_decrypt,
st->rx_all_count > 0 ?
st->rxerr_decrypt*100/st->rx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%d\t(%d%%)\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%u\t(%u%%)\n",
st->rxerr_mic,
st->rx_all_count > 0 ?
st->rxerr_mic*100/st->rx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "process\t%d\t(%d%%)\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "process\t%u\t(%u%%)\n",
st->rxerr_proc,
st->rx_all_count > 0 ?
st->rxerr_proc*100/st->rx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%d\t(%d%%)\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%u\t(%u%%)\n",
st->rxerr_jumbo,
st->rx_all_count > 0 ?
st->rxerr_jumbo*100/st->rx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%d]\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%u]\n",
st->rx_all_count);
- len += snprintf(buf+len, sizeof(buf)-len, "RX-all-bytes\t%d\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "RX-all-bytes\t%u\n",
st->rx_bytes_count);
len += snprintf(buf+len, sizeof(buf)-len,
"\nTX\n---------------------\n");
- len += snprintf(buf+len, sizeof(buf)-len, "retry\t%d\t(%d%%)\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "retry\t%u\t(%u%%)\n",
st->txerr_retry,
st->tx_all_count > 0 ?
st->txerr_retry*100/st->tx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%u\t(%u%%)\n",
st->txerr_fifo,
st->tx_all_count > 0 ?
st->txerr_fifo*100/st->tx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "filter\t%d\t(%d%%)\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "filter\t%u\t(%u%%)\n",
st->txerr_filt,
st->tx_all_count > 0 ?
st->txerr_filt*100/st->tx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%u]\n",
st->tx_all_count);
- len += snprintf(buf+len, sizeof(buf)-len, "TX-all-bytes\t%d\n",
+ len += snprintf(buf+len, sizeof(buf)-len, "TX-all-bytes\t%u\n",
st->tx_bytes_count);
if (len > sizeof(buf))
@@ -719,7 +717,7 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
st->mib_intr);
len += snprintf(buf+len, sizeof(buf)-len,
"beacon RSSI average:\t%d\n",
- sc->ah->ah_beacon_rssi_avg.avg);
+ (int)ewma_read(&sc->ah->ah_beacon_rssi_avg));
#define CC_PRINT(_struct, _field) \
_struct._field, \
@@ -1036,24 +1034,6 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
}
void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
- struct sk_buff *skb, const char *prefix, int tx)
-{
- char buf[16];
-
- if (likely(!((tx && (sc->debug.level & ATH5K_DEBUG_DUMP_TX)) ||
- (!tx && (sc->debug.level & ATH5K_DEBUG_DUMP_RX)))))
- return;
-
- snprintf(buf, sizeof(buf), "%s %s", wiphy_name(sc->hw->wiphy), prefix);
-
- print_hex_dump_bytes(buf, DUMP_PREFIX_NONE, skb->data,
- min(200U, skb->len));
-
- printk(KERN_DEBUG "\n");
-}
-
-void
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
struct ath5k_desc *ds = bf->desc;
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 236edbd2507d..b0355aef68d3 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -95,6 +95,7 @@ struct ath5k_dbg_info {
* @ATH5K_DEBUG_DUMP_RX: print received skb content
* @ATH5K_DEBUG_DUMP_TX: print transmit skb content
* @ATH5K_DEBUG_DUMPBANDS: dump bands
+ * @ATH5K_DEBUG_DMA: debug dma start/stop
* @ATH5K_DEBUG_TRACE: trace function calls
* @ATH5K_DEBUG_DESC: descriptor setup
* @ATH5K_DEBUG_ANY: show at any debug level
@@ -115,9 +116,8 @@ enum ath5k_debug_level {
ATH5K_DEBUG_CALIBRATE = 0x00000020,
ATH5K_DEBUG_TXPOWER = 0x00000040,
ATH5K_DEBUG_LED = 0x00000080,
- ATH5K_DEBUG_DUMP_RX = 0x00000100,
- ATH5K_DEBUG_DUMP_TX = 0x00000200,
ATH5K_DEBUG_DUMPBANDS = 0x00000400,
+ ATH5K_DEBUG_DMA = 0x00000800,
ATH5K_DEBUG_ANI = 0x00002000,
ATH5K_DEBUG_DESC = 0x00004000,
ATH5K_DEBUG_ANY = 0xffffffff
@@ -150,10 +150,6 @@ void
ath5k_debug_dump_bands(struct ath5k_softc *sc);
void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
- struct sk_buff *skb, const char *prefix, int tx);
-
-void
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf);
#else /* no debugging */
@@ -180,10 +176,6 @@ static inline void
ath5k_debug_dump_bands(struct ath5k_softc *sc) {}
static inline void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
- struct sk_buff *skb, const char *prefix, int tx) {}
-
-static inline void
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {}
#endif /* ifdef CONFIG_ATH5K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 43244382f213..16b44ff7dd3e 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -26,9 +26,10 @@
#include "debug.h"
#include "base.h"
-/*
- * TX Descriptors
- */
+
+/************************\
+* TX Control descriptors *
+\************************/
/*
* Initialize the 2-word tx control descriptor on 5210/5211
@@ -335,6 +336,11 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
return 0;
}
+
+/***********************\
+* TX Status descriptors *
+\***********************/
+
/*
* Process the tx status descriptor on 5210/5211
*/
@@ -476,9 +482,10 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
return 0;
}
-/*
- * RX Descriptors
- */
+
+/****************\
+* RX Descriptors *
+\****************/
/*
* Initialize an rx control descriptor
@@ -666,6 +673,11 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
return 0;
}
+
+/********\
+* Attach *
+\********/
+
/*
* Init function pointers inside ath5k_hw struct
*/
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index b2adb2a281c2..2509d0bf037d 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -26,7 +26,7 @@
struct ath5k_hw_rx_ctl {
u32 rx_control_0; /* RX control word 0 */
u32 rx_control_1; /* RX control word 1 */
-} __packed;
+} __packed __aligned(4);
/* RX control word 1 fields/flags */
#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
@@ -39,7 +39,7 @@ struct ath5k_hw_rx_ctl {
struct ath5k_hw_rx_status {
u32 rx_status_0; /* RX status word 0 */
u32 rx_status_1; /* RX status word 1 */
-} __packed;
+} __packed __aligned(4);
/* 5210/5211 */
/* RX status word 0 fields/flags */
@@ -129,7 +129,7 @@ enum ath5k_phy_error_code {
struct ath5k_hw_2w_tx_ctl {
u32 tx_control_0; /* TX control word 0 */
u32 tx_control_1; /* TX control word 1 */
-} __packed;
+} __packed __aligned(4);
/* TX control word 0 fields/flags */
#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
@@ -185,7 +185,7 @@ struct ath5k_hw_4w_tx_ctl {
u32 tx_control_1; /* TX control word 1 */
u32 tx_control_2; /* TX control word 2 */
u32 tx_control_3; /* TX control word 3 */
-} __packed;
+} __packed __aligned(4);
/* TX control word 0 fields/flags */
#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
@@ -244,7 +244,7 @@ struct ath5k_hw_4w_tx_ctl {
struct ath5k_hw_tx_status {
u32 tx_status_0; /* TX status word 0 */
u32 tx_status_1; /* TX status word 1 */
-} __packed;
+} __packed __aligned(4);
/* TX status word 0 fields/flags */
#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 /* TX success */
@@ -282,7 +282,7 @@ struct ath5k_hw_tx_status {
struct ath5k_hw_5210_tx_desc {
struct ath5k_hw_2w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
-} __packed;
+} __packed __aligned(4);
/*
* 5212 hardware TX descriptor
@@ -290,7 +290,7 @@ struct ath5k_hw_5210_tx_desc {
struct ath5k_hw_5212_tx_desc {
struct ath5k_hw_4w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
-} __packed;
+} __packed __aligned(4);
/*
* Common hardware RX descriptor
@@ -298,7 +298,7 @@ struct ath5k_hw_5212_tx_desc {
struct ath5k_hw_all_rx_desc {
struct ath5k_hw_rx_ctl rx_ctl;
struct ath5k_hw_rx_status rx_stat;
-} __packed;
+} __packed __aligned(4);
/*
* Atheros hardware DMA descriptor
@@ -313,7 +313,7 @@ struct ath5k_desc {
struct ath5k_hw_5212_tx_desc ds_tx5212;
struct ath5k_hw_all_rx_desc ds_rx;
} ud;
-} __packed;
+} __packed __aligned(4);
#define AR5K_RXDESC_INTREQ 0x0020
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 923c9ca5c4f0..21091c26a9a5 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -37,6 +37,7 @@
#include "debug.h"
#include "base.h"
+
/*********\
* Receive *
\*********/
@@ -57,7 +58,7 @@ void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
*
* @ah: The &struct ath5k_hw
*/
-int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
+static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
unsigned int i;
@@ -69,7 +70,11 @@ int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
for (i = 1000; i > 0 &&
(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
i--)
- udelay(10);
+ udelay(100);
+
+ if (!i)
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
+ "failed to stop RX DMA !\n");
return i ? 0 : -EBUSY;
}
@@ -90,11 +95,18 @@ u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
* @ah: The &struct ath5k_hw
* @phys_addr: RX descriptor address
*
- * XXX: Should we check if rx is enabled before setting rxdp ?
+ * Returns -EIO if rx is active
*/
-void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
+int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
+ if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
+ "tried to set RXDP while rx was active !\n");
+ return -EIO;
+ }
+
ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
+ return 0;
}
@@ -125,7 +137,7 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
/* Return if queue is declared inactive */
if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
- return -EIO;
+ return -EINVAL;
if (ah->ah_version == AR5K_AR5210) {
tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
@@ -173,10 +185,10 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
*
* Stop DMA transmit on a specific hw queue and drain queue so we don't
* have any pending frames. Returns -EBUSY if we still have pending frames,
- * -EINVAL if queue number is out of range.
+ * -EINVAL if queue number is out of range or inactive.
*
*/
-int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
unsigned int i = 40;
u32 tx_queue, pending;
@@ -185,7 +197,7 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
/* Return if queue is declared inactive */
if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
- return -EIO;
+ return -EINVAL;
if (ah->ah_version == AR5K_AR5210) {
tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
@@ -211,12 +223,31 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
ath5k_hw_reg_read(ah, AR5K_CR);
} else {
+
+ /*
+ * Enable DCU early termination to quickly
+ * flush any pending frames from QCU
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_DCU_EARLY);
+
/*
* Schedule TX disable and wait until queue is empty
*/
AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
- /*Check for pending frames*/
+ /* Wait for queue to stop */
+ for (i = 1000; i > 0 &&
+ (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
+ i--)
+ udelay(100);
+
+ if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
+ "queue %i didn't stop !\n", queue);
+
+ /* Check for pending frames */
+ i = 1000;
do {
pending = ath5k_hw_reg_read(ah,
AR5K_QUEUE_STATUS(queue)) &
@@ -247,12 +278,12 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
/* Wait a while and disable mechanism */
- udelay(200);
+ udelay(400);
AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
AR5K_QUIET_CTL1_QT_EN);
/* Re-check for pending frames */
- i = 40;
+ i = 100;
do {
pending = ath5k_hw_reg_read(ah,
AR5K_QUEUE_STATUS(queue)) &
@@ -262,12 +293,27 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
+
+ if (pending)
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
+ "quiet mechanism didn't work q:%i !\n",
+ queue);
}
+ /*
+ * Disable DCU early termination
+ */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_DCU_EARLY);
+
/* Clear register */
ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
- if (pending)
+ if (pending) {
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
+ "tx dma didn't stop (q:%i, frm:%i) !\n",
+ queue, pending);
return -EBUSY;
+ }
}
/* TODO: Check for success on 5210 else return error */
@@ -275,6 +321,26 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
}
/**
+ * ath5k_hw_stop_beacon_queue - Stop beacon queue
+ *
+ * @ah: The &struct ath5k_hw
+ * @queue: The queue number
+ *
+ * Returns -EIO if queue didn't stop
+ */
+int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
+{
+ int ret;
+ ret = ath5k_hw_stop_tx_dma(ah, queue);
+ if (ret) {
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
+ "beacon queue didn't stop !\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
* ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
*
* @ah: The &struct ath5k_hw
@@ -427,6 +493,7 @@ done:
return ret;
}
+
/*******************\
* Interrupt masking *
\*******************/
@@ -688,3 +755,92 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
return old_mask;
}
+
+/********************\
+ Init/Stop functions
+\********************/
+
+/**
+ * ath5k_hw_dma_init - Initialize DMA unit
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * Set DMA size and pre-enable interrupts
+ * (driver handles tx/rx buffer setup and
+ * dma start/stop)
+ *
+ * XXX: Save/restore RXDP/TXDP registers ?
+ */
+void ath5k_hw_dma_init(struct ath5k_hw *ah)
+{
+ /*
+ * Set Rx/Tx DMA Configuration
+ *
+ * Set standard DMA size (128). Note that
+ * a DMA size of 512 causes rx overruns and tx errors
+ * on pci-e cards (tested on 5424 but since rx overruns
+ * also occur on 5416/5418 with madwifi we set 128
+ * for all PCI-E cards to be safe).
+ *
+ * XXX: need to check 5210 for this
+ * TODO: Check out tx trigger level, it's always 64 on dumps but I
+ * guess we can tweak it and see how it goes ;-)
+ */
+ if (ah->ah_version != AR5K_AR5210) {
+ AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
+ AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
+ AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
+ AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
+ }
+
+ /* Pre-enable interrupts on 5211/5212*/
+ if (ah->ah_version != AR5K_AR5210)
+ ath5k_hw_set_imr(ah, ah->ah_imr);
+
+}
+
+/**
+ * ath5k_hw_dma_stop - stop DMA unit
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * Stop tx/rx DMA and interrupts. Returns
+ * -EBUSY if tx or rx dma failed to stop.
+ *
+ * XXX: Sometimes DMA unit hangs and we have
+ * stuck frames on tx queues, only a reset
+ * can fix that.
+ */
+int ath5k_hw_dma_stop(struct ath5k_hw *ah)
+{
+ int i, qmax, err;
+ err = 0;
+
+ /* Disable interrupts */
+ ath5k_hw_set_imr(ah, 0);
+
+ /* Stop rx dma */
+ err = ath5k_hw_stop_rx_dma(ah);
+ if (err)
+ return err;
+
+ /* Clear any pending interrupts
+ * and disable tx dma */
+ if (ah->ah_version != AR5K_AR5210) {
+ ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
+ qmax = AR5K_NUM_TX_QUEUES;
+ } else {
+ /* PISR/SISR Not available on 5210 */
+ ath5k_hw_reg_read(ah, AR5K_ISR);
+ qmax = AR5K_NUM_TX_QUEUES_NOQCU;
+ }
+
+ for (i = 0; i < qmax; i++) {
+ err = ath5k_hw_stop_tx_dma(ah, i);
+ /* -EINVAL -> queue inactive */
+ if (err && err != -EINVAL)
+ return err;
+ }
+
+ return 0;
+}
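The error-code split above matters to callers: ath5k_hw_stop_tx_dma() now reports -EINVAL for a queue that was never activated and -EBUSY when frames remain stuck, so a loop over all queues (as in ath5k_hw_dma_stop() above) can skip inactive queues without treating them as failures. A minimal standalone sketch of that caller pattern, with a hypothetical stub standing in for the real hardware routine:

#include <errno.h>
#include <stdio.h>

#define NUM_QUEUES 10

/* Hypothetical stand-in for ath5k_hw_stop_tx_dma(): -EINVAL for an
 * inactive queue, -EBUSY if frames are still pending, 0 on success. */
static int stop_tx_dma_stub(int queue)
{
	if (queue > 3)
		return -EINVAL;	/* queue never activated */
	return 0;
}

static int stop_all_queues(void)
{
	int i, err;

	for (i = 0; i < NUM_QUEUES; i++) {
		err = stop_tx_dma_stub(i);
		/* -EINVAL just means "inactive", keep going */
		if (err && err != -EINVAL)
			return err;
	}
	return 0;
}

int main(void)
{
	printf("stop_all_queues() -> %d\n", stop_all_queues());
	return 0;
}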
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 39722dd73e43..b6561f785c6e 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -28,45 +28,16 @@
#include "debug.h"
#include "base.h"
-/*
- * Read from eeprom
- */
-static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data)
-{
- u32 status, timeout;
-
- /*
- * Initialize EEPROM access
- */
- if (ah->ah_version == AR5K_AR5210) {
- AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
- (void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
- } else {
- ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
- AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
- AR5K_EEPROM_CMD_READ);
- }
- for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
- status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
- if (status & AR5K_EEPROM_STAT_RDDONE) {
- if (status & AR5K_EEPROM_STAT_RDERR)
- return -EIO;
- *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
- 0xffff);
- return 0;
- }
- udelay(15);
- }
-
- return -ETIMEDOUT;
-}
+/******************\
+* Helper functions *
+\******************/
/*
* Translate binary channel representation in EEPROM to frequency
*/
static u16 ath5k_eeprom_bin2freq(struct ath5k_eeprom_info *ee, u16 bin,
- unsigned int mode)
+ unsigned int mode)
{
u16 val;
@@ -89,6 +60,11 @@ static u16 ath5k_eeprom_bin2freq(struct ath5k_eeprom_info *ee, u16 bin,
return val;
}
+
+/*********\
+* Parsers *
+\*********/
+
/*
* Initialize eeprom & capabilities structs
*/
@@ -96,7 +72,6 @@ static int
ath5k_eeprom_init_header(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
- int ret;
u16 val;
u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
@@ -198,7 +173,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
*
* XXX: Serdes values seem to be fixed so
* no need to read them here, we write them
- * during ath5k_hw_attach */
+ * during ath5k_hw_init */
AR5K_EEPROM_READ(AR5K_EEPROM_PCIE_OFFSET, val);
ee->ee_serdes = (val == AR5K_EEPROM_PCIE_SERDES_SECTION) ?
true : false;
@@ -216,7 +191,7 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 o = *offset;
u16 val;
- int ret, i = 0;
+ int i = 0;
AR5K_EEPROM_READ(o++, val);
ee->ee_switch_settling[mode] = (val >> 8) & 0x7f;
@@ -276,7 +251,6 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 o = *offset;
u16 val;
- int ret;
ee->ee_n_piers[mode] = 0;
AR5K_EEPROM_READ(o++, val);
@@ -539,7 +513,6 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
int o = *offset;
int i = 0;
u8 freq1, freq2;
- int ret;
u16 val;
ee->ee_n_piers[mode] = 0;
@@ -575,7 +548,7 @@ ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a;
- int i, ret;
+ int i;
u16 val;
u8 mask;
@@ -647,6 +620,7 @@ ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
return 0;
}
+
/*
* Read power calibration for RF5111 chips
*
@@ -993,7 +967,6 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
u32 offset;
u8 i, c;
u16 val;
- int ret;
u8 pd_gains = 0;
/* Count how many curves we have and
@@ -1251,7 +1224,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
struct ath5k_chan_pcal_info *chinfo;
u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
u32 offset;
- int idx, i, ret;
+ int idx, i;
u16 val;
u8 pd_gains = 0;
@@ -1442,7 +1415,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
u8 *rate_target_pwr_num;
u32 offset;
u16 val;
- int ret, i;
+ int i;
offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1);
rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode];
@@ -1514,6 +1487,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
return 0;
}
+
/*
* Read per channel calibration info from EEPROM
*
@@ -1607,15 +1581,6 @@ ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
return 0;
}
-void
-ath5k_eeprom_detach(struct ath5k_hw *ah)
-{
- u8 mode;
-
- for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++)
- ath5k_eeprom_free_pcal_info(ah, mode);
-}
-
/* Read conformance test limits used for regulatory control */
static int
ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
@@ -1624,7 +1589,7 @@ ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
struct ath5k_edge_power *rep;
unsigned int fmask, pmask;
unsigned int ctl_mode;
- int ret, i, j;
+ int i, j;
u32 offset;
u16 val;
@@ -1757,6 +1722,40 @@ ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah)
}
/*
+ * Read the MAC address from eeprom
+ */
+int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
+{
+ u8 mac_d[ETH_ALEN] = {};
+ u32 total, offset;
+ u16 data;
+ int octet;
+
+ AR5K_EEPROM_READ(0x20, data);
+
+ for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
+ AR5K_EEPROM_READ(offset, data);
+
+ total += data;
+ mac_d[octet + 1] = data & 0xff;
+ mac_d[octet] = data >> 8;
+ octet += 2;
+ }
+
+ if (!total || total == 3 * 0xffff)
+ return -EINVAL;
+
+ memcpy(mac, mac_d, ETH_ALEN);
+
+ return 0;
+}
+
+
+/***********************\
+* Init/Detach functions *
+\***********************/
+
+/*
* Initialize eeprom data structure
*/
int
@@ -1787,35 +1786,27 @@ ath5k_eeprom_init(struct ath5k_hw *ah)
return 0;
}
-/*
- * Read the MAC address from eeprom
- */
-int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
+void
+ath5k_eeprom_detach(struct ath5k_hw *ah)
{
- u8 mac_d[ETH_ALEN] = {};
- u32 total, offset;
- u16 data;
- int octet, ret;
-
- ret = ath5k_hw_eeprom_read(ah, 0x20, &data);
- if (ret)
- return ret;
+ u8 mode;
- for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
- ret = ath5k_hw_eeprom_read(ah, offset, &data);
- if (ret)
- return ret;
+ for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++)
+ ath5k_eeprom_free_pcal_info(ah, mode);
+}
- total += data;
- mac_d[octet + 1] = data & 0xff;
- mac_d[octet] = data >> 8;
- octet += 2;
+int
+ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel)
+{
+ switch (channel->hw_value & CHANNEL_MODES) {
+ case CHANNEL_A:
+ case CHANNEL_XR:
+ return AR5K_EEPROM_MODE_11A;
+ case CHANNEL_G:
+ return AR5K_EEPROM_MODE_11G;
+ case CHANNEL_B:
+ return AR5K_EEPROM_MODE_11B;
+ default:
+ return -1;
}
-
- if (!total || total == 3 * 0xffff)
- return -EINVAL;
-
- memcpy(mac, mac_d, ETH_ALEN);
-
- return 0;
}
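For reference, the relocated MAC read above walks EEPROM words 0x1f down to 0x1d, splits each 16-bit word into two octets, and sums the words so an all-zero or erased (all-0xffff) address is rejected. A small self-contained sketch of that assembly, with the three words supplied as sample data rather than read from hardware:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

/* Assemble a MAC address from the three 16-bit EEPROM words read at
 * offsets 0x1f, 0x1e, 0x1d (in that order), mirroring the driver loop. */
static int mac_from_words(const uint16_t words[3], uint8_t *mac)
{
	uint8_t mac_d[ETH_ALEN];
	uint32_t total = 0;
	int octet = 0, i;

	for (i = 0; i < 3; i++) {
		total += words[i];
		mac_d[octet + 1] = words[i] & 0xff;	/* low byte */
		mac_d[octet] = words[i] >> 8;		/* high byte */
		octet += 2;
	}

	/* all-zero or erased EEPROM content is not a usable address */
	if (!total || total == 3 * 0xffff)
		return -1;

	memcpy(mac, mac_d, ETH_ALEN);
	return 0;
}

int main(void)
{
	const uint16_t words[3] = { 0x0024, 0xd2aa, 0xbbcc }; /* sample data */
	uint8_t mac[ETH_ALEN];

	if (!mac_from_words(words, mac))
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}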
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index c4a6d5f26af4..6511c27d938e 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -241,9 +241,8 @@ enum ath5k_eeprom_freq_bands{
#define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250
#define AR5K_EEPROM_READ(_o, _v) do { \
- ret = ath5k_hw_eeprom_read(ah, (_o), &(_v)); \
- if (ret) \
- return ret; \
+ if (!ath5k_hw_nvram_read(ah, (_o), &(_v))) \
+ return -EIO; \
} while (0)
#define AR5K_EEPROM_READ_HDR(_o, _v) \
@@ -269,29 +268,6 @@ enum ath5k_ctl_mode {
AR5K_CTL_MODE_M = 15,
};
-/* Default CTL ids for the 3 main reg domains.
- * Atheros only uses these by default but vendors
- * can have up to 32 different CTLs for different
- * scenarios. Note that theese values are ORed with
- * the mode id (above) so we can have up to 24 CTL
- * datasets out of these 3 main regdomains. That leaves
- * 8 ids that can be used by vendors and since 0x20 is
- * missing from HAL sources i guess this is the set of
- * custom CTLs vendors can use. */
-#define AR5K_CTL_FCC 0x10
-#define AR5K_CTL_CUSTOM 0x20
-#define AR5K_CTL_ETSI 0x30
-#define AR5K_CTL_MKK 0x40
-
-/* Indicates a CTL with only mode set and
- * no reg domain mapping, such CTLs are used
- * for world roaming domains or simply when
- * a reg domain is not set */
-#define AR5K_CTL_NO_REGDOMAIN 0xf0
-
-/* Indicates an empty (invalid) CTL */
-#define AR5K_CTL_NO_CTL 0xff
-
/* Per channel calibration data, used for power table setup */
struct ath5k_chan_pcal_info_rf5111 {
/* Power levels in half dbm units
@@ -517,3 +493,5 @@ struct ath5k_eeprom_info {
u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
};
+int
+ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel);
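The AR5K_EEPROM_READ change above is what lets the parsers in eeprom.c drop their local ret variables: the read helper now reports success as a boolean and the macro bails out of the enclosing function with -EIO on failure. A standalone sketch of that pattern, assuming a hypothetical stub in place of ath5k_hw_nvram_read():

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical nvram reader: true on success, false on failure,
 * following the boolean convention the new macro relies on. */
static bool nvram_read_stub(uint32_t offset, uint16_t *val)
{
	*val = (uint16_t)(0x1000 + offset);	/* fake data */
	return offset < 0x400;			/* pretend reads past 0x400 fail */
}

/* Bail out of the enclosing function with -EIO when a read fails,
 * so callers no longer need a local error variable. */
#define EEPROM_READ(_o, _v) do {		\
	if (!nvram_read_stub((_o), &(_v)))	\
		return -EIO;			\
} while (0)

static int read_two_words(uint16_t *a, uint16_t *b)
{
	EEPROM_READ(0x20, *a);
	EEPROM_READ(0x21, *b);
	return 0;
}

int main(void)
{
	uint16_t a = 0, b = 0;
	printf("read_two_words() -> %d (a=0x%04x b=0x%04x)\n",
	       read_two_words(&a, &b), a, b);
	return 0;
}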
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index 8fa439308828..e49340d18df4 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -44,7 +44,7 @@ struct ath5k_ini {
struct ath5k_ini_mode {
u16 mode_register;
- u32 mode_value[5];
+ u32 mode_value[3];
};
/* Initial register settings for AR5210 */
@@ -391,76 +391,74 @@ static const struct ath5k_ini ar5211_ini[] = {
*/
static const struct ath5k_ini_mode ar5211_ini_mode[] = {
{ AR5K_TXCFG,
- /* a aTurbo b g (OFDM) */
- { 0x00000015, 0x00000015, 0x0000001d, 0x00000015 } },
+ /* A/XR B G */
+ { 0x00000015, 0x0000001d, 0x00000015 } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(0),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(1),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(2),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(3),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(4),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(5),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(6),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(7),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(8),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(9),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_DCU_GBL_IFS_SLOT,
- { 0x00000168, 0x000001e0, 0x000001b8, 0x00000168 } },
+ { 0x00000168, 0x000001b8, 0x00000168 } },
{ AR5K_DCU_GBL_IFS_SIFS,
- { 0x00000230, 0x000001e0, 0x000000b0, 0x00000230 } },
+ { 0x00000230, 0x000000b0, 0x00000230 } },
{ AR5K_DCU_GBL_IFS_EIFS,
- { 0x00000d98, 0x00001180, 0x00001f48, 0x00000d98 } },
+ { 0x00000d98, 0x00001f48, 0x00000d98 } },
{ AR5K_DCU_GBL_IFS_MISC,
- { 0x0000a0e0, 0x00014068, 0x00005880, 0x0000a0e0 } },
+ { 0x0000a0e0, 0x00005880, 0x0000a0e0 } },
{ AR5K_TIME_OUT,
- { 0x04000400, 0x08000800, 0x20003000, 0x04000400 } },
+ { 0x04000400, 0x20003000, 0x04000400 } },
{ AR5K_USEC_5211,
- { 0x0e8d8fa7, 0x0e8d8fcf, 0x01608f95, 0x0e8d8fa7 } },
- { AR5K_PHY_TURBO,
- { 0x00000000, 0x00000003, 0x00000000, 0x00000000 } },
+ { 0x0e8d8fa7, 0x01608f95, 0x0e8d8fa7 } },
{ AR5K_PHY(8),
- { 0x02020200, 0x02020200, 0x02010200, 0x02020200 } },
- { AR5K_PHY(9),
- { 0x00000e0e, 0x00000e0e, 0x00000707, 0x00000e0e } },
- { AR5K_PHY(10),
- { 0x0a020001, 0x0a020001, 0x05010000, 0x0a020001 } },
- { AR5K_PHY(13),
- { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } },
- { AR5K_PHY(14),
- { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b } },
- { AR5K_PHY(17),
- { 0x1372169c, 0x137216a5, 0x137216a8, 0x1372169c } },
- { AR5K_PHY(18),
- { 0x0018ba67, 0x0018ba67, 0x0018ba69, 0x0018ba69 } },
- { AR5K_PHY(20),
- { 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0 } },
+ { 0x02020200, 0x02010200, 0x02020200 } },
+ { AR5K_PHY_RF_CTL2,
+ { 0x00000e0e, 0x00000707, 0x00000e0e } },
+ { AR5K_PHY_RF_CTL3,
+ { 0x0a020001, 0x05010000, 0x0a020001 } },
+ { AR5K_PHY_RF_CTL4,
+ { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
+ { AR5K_PHY_PA_CTL,
+ { 0x00000007, 0x0000000b, 0x0000000b } },
+ { AR5K_PHY_SETTLING,
+ { 0x1372169c, 0x137216a8, 0x1372169c } },
+ { AR5K_PHY_GAIN,
+ { 0x0018ba67, 0x0018ba69, 0x0018ba69 } },
+ { AR5K_PHY_DESIRED_SIZE,
+ { 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0 } },
{ AR5K_PHY_SIG,
- { 0x7e800d2e, 0x7e800d2e, 0x7ec00d2e, 0x7e800d2e } },
+ { 0x7e800d2e, 0x7ec00d2e, 0x7e800d2e } },
{ AR5K_PHY_AGCCOARSE,
- { 0x31375d5e, 0x31375d5e, 0x313a5d5e, 0x31375d5e } },
+ { 0x31375d5e, 0x313a5d5e, 0x31375d5e } },
{ AR5K_PHY_AGCCTL,
- { 0x0000bd10, 0x0000bd10, 0x0000bd38, 0x0000bd10 } },
+ { 0x0000bd10, 0x0000bd38, 0x0000bd10 } },
{ AR5K_PHY_NF,
- { 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
+ { 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
{ AR5K_PHY_RX_DELAY,
- { 0x00002710, 0x00002710, 0x0000157c, 0x00002710 } },
+ { 0x00002710, 0x0000157c, 0x00002710 } },
{ AR5K_PHY(70),
- { 0x00000190, 0x00000190, 0x00000084, 0x00000190 } },
+ { 0x00000190, 0x00000084, 0x00000190 } },
{ AR5K_PHY_FRAME_CTL_5211,
- { 0x6fe01020, 0x6fe01020, 0x6fe00920, 0x6fe01020 } },
+ { 0x6fe01020, 0x6fe00920, 0x6fe01020 } },
{ AR5K_PHY_PCDAC_TXPOWER_BASE,
- { 0x05ff14ff, 0x05ff14ff, 0x05ff14ff, 0x05ff19ff } },
+ { 0x05ff14ff, 0x05ff14ff, 0x05ff19ff } },
{ AR5K_RF_BUFFER_CONTROL_4,
- { 0x00000010, 0x00000014, 0x00000010, 0x00000010 } },
+ { 0x00000010, 0x00000010, 0x00000010 } },
};
/* Initial register settings for AR5212 */
@@ -677,89 +675,87 @@ static const struct ath5k_ini ar5212_ini_common_start[] = {
/* Initial mode-specific settings for AR5212 (Written before ar5212_ini) */
static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
{ AR5K_QUEUE_DFS_LOCAL_IFS(0),
- /* a/XR aTurbo b g (DYN) gTurbo */
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
+ /* A/XR B G */
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(1),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(2),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(3),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(4),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(5),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(6),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(7),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(8),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(9),
- { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } },
+ { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_DCU_GBL_IFS_SIFS,
- { 0x00000230, 0x000001e0, 0x000000b0, 0x00000160, 0x000001e0 } },
+ { 0x00000230, 0x000000b0, 0x00000160 } },
{ AR5K_DCU_GBL_IFS_SLOT,
- { 0x00000168, 0x000001e0, 0x000001b8, 0x0000018c, 0x000001e0 } },
+ { 0x00000168, 0x000001b8, 0x0000018c } },
{ AR5K_DCU_GBL_IFS_EIFS,
- { 0x00000e60, 0x00001180, 0x00001f1c, 0x00003e38, 0x00001180 } },
+ { 0x00000e60, 0x00001f1c, 0x00003e38 } },
{ AR5K_DCU_GBL_IFS_MISC,
- { 0x0000a0e0, 0x00014068, 0x00005880, 0x0000b0e0, 0x00014068 } },
+ { 0x0000a0e0, 0x00005880, 0x0000b0e0 } },
{ AR5K_TIME_OUT,
- { 0x03e803e8, 0x06e006e0, 0x04200420, 0x08400840, 0x06e006e0 } },
- { AR5K_PHY_TURBO,
- { 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000003 } },
+ { 0x03e803e8, 0x04200420, 0x08400840 } },
{ AR5K_PHY(8),
- { 0x02020200, 0x02020200, 0x02010200, 0x02020200, 0x02020200 } },
+ { 0x02020200, 0x02010200, 0x02020200 } },
{ AR5K_PHY_RF_CTL2,
- { 0x00000e0e, 0x00000e0e, 0x00000707, 0x00000e0e, 0x00000e0e } },
+ { 0x00000e0e, 0x00000707, 0x00000e0e } },
{ AR5K_PHY_SETTLING,
- { 0x1372161c, 0x13721c25, 0x13721722, 0x137216a2, 0x13721c25 } },
+ { 0x1372161c, 0x13721722, 0x137216a2 } },
{ AR5K_PHY_AGCCTL,
- { 0x00009d10, 0x00009d10, 0x00009d18, 0x00009d18, 0x00009d10 } },
+ { 0x00009d10, 0x00009d18, 0x00009d18 } },
{ AR5K_PHY_NF,
- { 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
+ { 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
{ AR5K_PHY_WEAK_OFDM_HIGH_THR,
- { 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 } },
+ { 0x409a4190, 0x409a4190, 0x409a4190 } },
{ AR5K_PHY(70),
- { 0x000001b8, 0x000001b8, 0x00000084, 0x00000108, 0x000001b8 } },
+ { 0x000001b8, 0x00000084, 0x00000108 } },
{ AR5K_PHY_OFDM_SELFCORR,
- { 0x10058a05, 0x10058a05, 0x10058a05, 0x10058a05, 0x10058a05 } },
+ { 0x10058a05, 0x10058a05, 0x10058a05 } },
{ 0xa230,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000108, 0x00000000 } },
+ { 0x00000000, 0x00000000, 0x00000108 } },
};
/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
{ AR5K_TXCFG,
- /* a/XR aTurbo b g (DYN) gTurbo */
- { 0x00008015, 0x00008015, 0x00008015, 0x00008015, 0x00008015 } },
+ /* A/XR B G */
+ { 0x00008015, 0x00008015, 0x00008015 } },
{ AR5K_USEC_5211,
- { 0x128d8fa7, 0x09880fcf, 0x04e00f95, 0x12e00fab, 0x09880fcf } },
+ { 0x128d8fa7, 0x04e00f95, 0x12e00fab } },
{ AR5K_PHY_RF_CTL3,
- { 0x0a020001, 0x0a020001, 0x05010100, 0x0a020001, 0x0a020001 } },
+ { 0x0a020001, 0x05010100, 0x0a020001 } },
{ AR5K_PHY_RF_CTL4,
- { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } },
+ { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
{ AR5K_PHY_PA_CTL,
- { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } },
+ { 0x00000007, 0x0000000b, 0x0000000b } },
{ AR5K_PHY_GAIN,
- { 0x0018da5a, 0x0018da5a, 0x0018ca69, 0x0018ca69, 0x0018ca69 } },
+ { 0x0018da5a, 0x0018ca69, 0x0018ca69 } },
{ AR5K_PHY_DESIRED_SIZE,
- { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
+ { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
{ AR5K_PHY_SIG,
- { 0x7e800d2e, 0x7e800d2e, 0x7ee84d2e, 0x7ee84d2e, 0x7e800d2e } },
+ { 0x7e800d2e, 0x7ee84d2e, 0x7ee84d2e } },
{ AR5K_PHY_AGCCOARSE,
- { 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e, 0x3137615e } },
+ { 0x3137665e, 0x3137665e, 0x3137665e } },
{ AR5K_PHY_WEAK_OFDM_LOW_THR,
- { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb080, 0x050cb080 } },
+ { 0x050cb081, 0x050cb081, 0x050cb080 } },
{ AR5K_PHY_RX_DELAY,
- { 0x00002710, 0x00002710, 0x0000157c, 0x00002af8, 0x00002710 } },
+ { 0x00002710, 0x0000157c, 0x00002af8 } },
{ AR5K_PHY_FRAME_CTL_5211,
- { 0xf7b81020, 0xf7b81020, 0xf7b80d20, 0xf7b81020, 0xf7b81020 } },
+ { 0xf7b81020, 0xf7b80d20, 0xf7b81020 } },
{ AR5K_PHY_GAIN_2GHZ,
- { 0x642c416a, 0x642c416a, 0x6440416a, 0x6440416a, 0x6440416a } },
+ { 0x642c416a, 0x6440416a, 0x6440416a } },
{ AR5K_PHY_CCK_RX_CTL_4,
- { 0x1883800a, 0x1883800a, 0x1873800a, 0x1883800a, 0x1883800a } },
+ { 0x1883800a, 0x1873800a, 0x1883800a } },
};
static const struct ath5k_ini rf5111_ini_common_end[] = {
@@ -782,38 +778,38 @@ static const struct ath5k_ini rf5111_ini_common_end[] = {
/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
{ AR5K_TXCFG,
- /* a/XR aTurbo b g (DYN) gTurbo */
- { 0x00008015, 0x00008015, 0x00008015, 0x00008015, 0x00008015 } },
+ /* A/XR B G */
+ { 0x00008015, 0x00008015, 0x00008015 } },
{ AR5K_USEC_5211,
- { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } },
+ { 0x128d93a7, 0x04e01395, 0x12e013ab } },
{ AR5K_PHY_RF_CTL3,
- { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } },
+ { 0x0a020001, 0x05020100, 0x0a020001 } },
{ AR5K_PHY_RF_CTL4,
- { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } },
+ { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
{ AR5K_PHY_PA_CTL,
- { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } },
+ { 0x00000007, 0x0000000b, 0x0000000b } },
{ AR5K_PHY_GAIN,
- { 0x0018da6d, 0x0018da6d, 0x0018ca75, 0x0018ca75, 0x0018ca75 } },
+ { 0x0018da6d, 0x0018ca75, 0x0018ca75 } },
{ AR5K_PHY_DESIRED_SIZE,
- { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
+ { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
{ AR5K_PHY_SIG,
- { 0x7e800d2e, 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e, 0x7e800d2e } },
+ { 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e } },
{ AR5K_PHY_AGCCOARSE,
- { 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e } },
+ { 0x3137665e, 0x3137665e, 0x3137665e } },
{ AR5K_PHY_WEAK_OFDM_LOW_THR,
- { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } },
+ { 0x050cb081, 0x050cb081, 0x050cb081 } },
{ AR5K_PHY_RX_DELAY,
- { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } },
+ { 0x000007d0, 0x0000044c, 0x00000898 } },
{ AR5K_PHY_FRAME_CTL_5211,
- { 0xf7b81020, 0xf7b81020, 0xf7b80d10, 0xf7b81010, 0xf7b81010 } },
+ { 0xf7b81020, 0xf7b80d10, 0xf7b81010 } },
{ AR5K_PHY_CCKTXCTL,
- { 0x00000000, 0x00000000, 0x00000008, 0x00000008, 0x00000008 } },
+ { 0x00000000, 0x00000008, 0x00000008 } },
{ AR5K_PHY_CCK_CROSSCORR,
- { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } },
+ { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
{ AR5K_PHY_GAIN_2GHZ,
- { 0x642c0140, 0x642c0140, 0x6442c160, 0x6442c160, 0x6442c160 } },
+ { 0x642c0140, 0x6442c160, 0x6442c160 } },
{ AR5K_PHY_CCK_RX_CTL_4,
- { 0x1883800a, 0x1883800a, 0x1873800a, 0x1883800a, 0x1883800a } },
+ { 0x1883800a, 0x1873800a, 0x1883800a } },
};
static const struct ath5k_ini rf5112_ini_common_end[] = {
@@ -833,66 +829,66 @@ static const struct ath5k_ini rf5112_ini_common_end[] = {
/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
{ AR5K_TXCFG,
- /* a/XR aTurbo b g (DYN) gTurbo */
- { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } },
+ /* A/XR B G */
+ { 0x00000015, 0x00000015, 0x00000015 } },
{ AR5K_USEC_5211,
- { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } },
+ { 0x128d93a7, 0x04e01395, 0x12e013ab } },
{ AR5K_PHY_RF_CTL3,
- { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } },
+ { 0x0a020001, 0x05020100, 0x0a020001 } },
{ AR5K_PHY_RF_CTL4,
- { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } },
+ { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
{ AR5K_PHY_PA_CTL,
- { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } },
+ { 0x00000007, 0x0000000b, 0x0000000b } },
{ AR5K_PHY_GAIN,
- { 0x0018fa61, 0x0018fa61, 0x001a1a63, 0x001a1a63, 0x001a1a63 } },
+ { 0x0018fa61, 0x001a1a63, 0x001a1a63 } },
{ AR5K_PHY_DESIRED_SIZE,
- { 0x0c98b4e0, 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da, 0x0c98b0da } },
+ { 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da } },
{ AR5K_PHY_SIG,
- { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
+ { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
{ AR5K_PHY_AGCCOARSE,
- { 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e } },
+ { 0x3139605e, 0x3139605e, 0x3139605e } },
{ AR5K_PHY_WEAK_OFDM_LOW_THR,
- { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } },
+ { 0x050cb081, 0x050cb081, 0x050cb081 } },
{ AR5K_PHY_RX_DELAY,
- { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } },
+ { 0x000007d0, 0x0000044c, 0x00000898 } },
{ AR5K_PHY_FRAME_CTL_5211,
- { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } },
+ { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
{ AR5K_PHY_CCKTXCTL,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
+ { 0x00000000, 0x00000000, 0x00000000 } },
{ AR5K_PHY_CCK_CROSSCORR,
- { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } },
+ { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
{ AR5K_PHY_GAIN_2GHZ,
- { 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 } },
+ { 0x002ec1e0, 0x002ac120, 0x002ac120 } },
{ AR5K_PHY_CCK_RX_CTL_4,
- { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } },
+ { 0x1883800a, 0x1863800a, 0x1883800a } },
{ 0xa300,
- { 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 } },
+ { 0x18010000, 0x18010000, 0x18010000 } },
{ 0xa304,
- { 0x30032602, 0x30032602, 0x30032602, 0x30032602, 0x30032602 } },
+ { 0x30032602, 0x30032602, 0x30032602 } },
{ 0xa308,
- { 0x48073e06, 0x48073e06, 0x48073e06, 0x48073e06, 0x48073e06 } },
+ { 0x48073e06, 0x48073e06, 0x48073e06 } },
{ 0xa30c,
- { 0x560b4c0a, 0x560b4c0a, 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } },
+ { 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } },
{ 0xa310,
- { 0x641a600f, 0x641a600f, 0x641a600f, 0x641a600f, 0x641a600f } },
+ { 0x641a600f, 0x641a600f, 0x641a600f } },
{ 0xa314,
- { 0x784f6e1b, 0x784f6e1b, 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } },
+ { 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } },
{ 0xa318,
- { 0x868f7c5a, 0x868f7c5a, 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } },
+ { 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } },
{ 0xa31c,
- { 0x90cf865b, 0x90cf865b, 0x8ecf865b, 0x8ecf865b, 0x8ecf865b } },
+ { 0x90cf865b, 0x8ecf865b, 0x8ecf865b } },
{ 0xa320,
- { 0x9d4f970f, 0x9d4f970f, 0x9b4f970f, 0x9b4f970f, 0x9b4f970f } },
+ { 0x9d4f970f, 0x9b4f970f, 0x9b4f970f } },
{ 0xa324,
- { 0xa7cfa38f, 0xa7cfa38f, 0xa3cf9f8f, 0xa3cf9f8f, 0xa3cf9f8f } },
+ { 0xa7cfa38f, 0xa3cf9f8f, 0xa3cf9f8f } },
{ 0xa328,
- { 0xb55faf1f, 0xb55faf1f, 0xb35faf1f, 0xb35faf1f, 0xb35faf1f } },
+ { 0xb55faf1f, 0xb35faf1f, 0xb35faf1f } },
{ 0xa32c,
- { 0xbddfb99f, 0xbddfb99f, 0xbbdfb99f, 0xbbdfb99f, 0xbbdfb99f } },
+ { 0xbddfb99f, 0xbbdfb99f, 0xbbdfb99f } },
{ 0xa330,
- { 0xcb7fc53f, 0xcb7fc53f, 0xcb7fc73f, 0xcb7fc73f, 0xcb7fc73f } },
+ { 0xcb7fc53f, 0xcb7fc73f, 0xcb7fc73f } },
{ 0xa334,
- { 0xd5ffd1bf, 0xd5ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf } },
+ { 0xd5ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf } },
};
static const struct ath5k_ini rf5413_ini_common_end[] = {
@@ -972,38 +968,38 @@ static const struct ath5k_ini rf5413_ini_common_end[] = {
/* XXX: a mode ? */
static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
{ AR5K_TXCFG,
- /* a/XR aTurbo b g (DYN) gTurbo */
- { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } },
+ /* A/XR B G */
+ { 0x00000015, 0x00000015, 0x00000015 } },
{ AR5K_USEC_5211,
- { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } },
+ { 0x128d93a7, 0x04e01395, 0x12e013ab } },
{ AR5K_PHY_RF_CTL3,
- { 0x0a020001, 0x0a020001, 0x05020000, 0x0a020001, 0x0a020001 } },
+ { 0x0a020001, 0x05020000, 0x0a020001 } },
{ AR5K_PHY_RF_CTL4,
- { 0x00000e00, 0x00000e00, 0x00000e00, 0x00000e00, 0x00000e00 } },
+ { 0x00000e00, 0x00000e00, 0x00000e00 } },
{ AR5K_PHY_PA_CTL,
- { 0x00000002, 0x00000002, 0x0000000a, 0x0000000a, 0x0000000a } },
+ { 0x00000002, 0x0000000a, 0x0000000a } },
{ AR5K_PHY_GAIN,
- { 0x0018da6d, 0x0018da6d, 0x001a6a64, 0x001a6a64, 0x001a6a64 } },
+ { 0x0018da6d, 0x001a6a64, 0x001a6a64 } },
{ AR5K_PHY_DESIRED_SIZE,
- { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b0da, 0x0c98b0da, 0x0de8b0da } },
+ { 0x0de8b4e0, 0x0de8b0da, 0x0c98b0da } },
{ AR5K_PHY_SIG,
- { 0x7e800d2e, 0x7e800d2e, 0x7ee80d2e, 0x7ec80d2e, 0x7e800d2e } },
+ { 0x7e800d2e, 0x7ee80d2e, 0x7ec80d2e } },
{ AR5K_PHY_AGCCOARSE,
- { 0x3137665e, 0x3137665e, 0x3137665e, 0x3139605e, 0x3137665e } },
+ { 0x3137665e, 0x3137665e, 0x3139605e } },
{ AR5K_PHY_WEAK_OFDM_LOW_THR,
- { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } },
+ { 0x050cb081, 0x050cb081, 0x050cb081 } },
{ AR5K_PHY_RX_DELAY,
- { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } },
+ { 0x000007d0, 0x0000044c, 0x00000898 } },
{ AR5K_PHY_FRAME_CTL_5211,
- { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } },
+ { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
{ AR5K_PHY_CCKTXCTL,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
+ { 0x00000000, 0x00000000, 0x00000000 } },
{ AR5K_PHY_CCK_CROSSCORR,
- { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } },
+ { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
{ AR5K_PHY_GAIN_2GHZ,
- { 0x002c0140, 0x002c0140, 0x0042c140, 0x0042c140, 0x0042c140 } },
+ { 0x002c0140, 0x0042c140, 0x0042c140 } },
{ AR5K_PHY_CCK_RX_CTL_4,
- { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } },
+ { 0x1883800a, 0x1863800a, 0x1883800a } },
};
static const struct ath5k_ini rf2413_ini_common_end[] = {
@@ -1094,52 +1090,50 @@ static const struct ath5k_ini rf2413_ini_common_end[] = {
/* XXX: a mode ? */
static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
{ AR5K_TXCFG,
- /* a/XR aTurbo b g (DYN) gTurbo */
- { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } },
+ /* A/XR B G */
+ { 0x00000015, 0x00000015, 0x00000015 } },
{ AR5K_USEC_5211,
- { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } },
- { AR5K_PHY_TURBO,
- { 0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000001 } },
+ { 0x128d93a7, 0x04e01395, 0x12e013ab } },
{ AR5K_PHY_RF_CTL3,
- { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } },
+ { 0x0a020001, 0x05020100, 0x0a020001 } },
{ AR5K_PHY_RF_CTL4,
- { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } },
+ { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
{ AR5K_PHY_PA_CTL,
- { 0x00000003, 0x00000003, 0x0000000b, 0x0000000b, 0x0000000b } },
+ { 0x00000003, 0x0000000b, 0x0000000b } },
{ AR5K_PHY_SETTLING,
- { 0x1372161c, 0x13721c25, 0x13721722, 0x13721422, 0x13721c25 } },
+ { 0x1372161c, 0x13721722, 0x13721422 } },
{ AR5K_PHY_GAIN,
- { 0x0018fa61, 0x0018fa61, 0x00199a65, 0x00199a65, 0x00199a65 } },
+ { 0x0018fa61, 0x00199a65, 0x00199a65 } },
{ AR5K_PHY_DESIRED_SIZE,
- { 0x0c98b4e0, 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da, 0x0c98b0da } },
+ { 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da } },
{ AR5K_PHY_SIG,
- { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
+ { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
{ AR5K_PHY_AGCCOARSE,
- { 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e } },
+ { 0x3139605e, 0x3139605e, 0x3139605e } },
{ AR5K_PHY_WEAK_OFDM_LOW_THR,
- { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } },
+ { 0x050cb081, 0x050cb081, 0x050cb081 } },
{ AR5K_PHY_RX_DELAY,
- { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } },
+ { 0x000007d0, 0x0000044c, 0x00000898 } },
{ AR5K_PHY_FRAME_CTL_5211,
- { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } },
+ { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
{ AR5K_PHY_CCKTXCTL,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
+ { 0x00000000, 0x00000000, 0x00000000 } },
{ AR5K_PHY_CCK_CROSSCORR,
- { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } },
+ { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
{ AR5K_PHY_GAIN_2GHZ,
- { 0x00000140, 0x00000140, 0x0052c140, 0x0052c140, 0x0052c140 } },
+ { 0x00000140, 0x0052c140, 0x0052c140 } },
{ AR5K_PHY_CCK_RX_CTL_4,
- { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } },
+ { 0x1883800a, 0x1863800a, 0x1883800a } },
{ 0xa324,
- { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
+ { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
{ 0xa328,
- { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
+ { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
{ 0xa32c,
- { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
+ { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
{ 0xa330,
- { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
+ { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
{ 0xa334,
- { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
+ { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
};
static const struct ath5k_ini rf2425_ini_common_end[] = {
@@ -1368,15 +1362,15 @@ static const struct ath5k_ini rf5112_ini_bbgain[] = {
* Write initial register dump
*/
static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
- const struct ath5k_ini *ini_regs, bool change_channel)
+ const struct ath5k_ini *ini_regs, bool skip_pcu)
{
unsigned int i;
/* Write initial registers */
for (i = 0; i < size; i++) {
- /* On channel change there is
- * no need to mess with PCU */
- if (change_channel &&
+ /* Skip PCU registers if
+ * requested */
+ if (skip_pcu &&
ini_regs[i].ini_register >= AR5K_PCU_MIN &&
ini_regs[i].ini_register <= AR5K_PCU_MAX)
continue;
@@ -1409,7 +1403,7 @@ static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
}
-int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
+int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
{
/*
* Write initial register settings
@@ -1427,7 +1421,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
* Write initial settings common for all modes
*/
ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5212_ini_common_start),
- ar5212_ini_common_start, change_channel);
+ ar5212_ini_common_start, skip_pcu);
/* Second set of mode-specific settings */
switch (ah->ah_radio) {
@@ -1439,12 +1433,12 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5111_ini_common_end),
- rf5111_ini_common_end, change_channel);
+ rf5111_ini_common_end, skip_pcu);
/* Baseband gain table */
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5111_ini_bbgain),
- rf5111_ini_bbgain, change_channel);
+ rf5111_ini_bbgain, skip_pcu);
break;
case AR5K_RF5112:
@@ -1455,11 +1449,11 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5112_ini_common_end),
- rf5112_ini_common_end, change_channel);
+ rf5112_ini_common_end, skip_pcu);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5112_ini_bbgain),
- rf5112_ini_bbgain, change_channel);
+ rf5112_ini_bbgain, skip_pcu);
break;
case AR5K_RF5413:
@@ -1470,11 +1464,11 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5413_ini_common_end),
- rf5413_ini_common_end, change_channel);
+ rf5413_ini_common_end, skip_pcu);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5112_ini_bbgain),
- rf5112_ini_bbgain, change_channel);
+ rf5112_ini_bbgain, skip_pcu);
break;
case AR5K_RF2316:
@@ -1486,7 +1480,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf2413_ini_common_end),
- rf2413_ini_common_end, change_channel);
+ rf2413_ini_common_end, skip_pcu);
/* Override settings from rf2413_ini_common_end */
if (ah->ah_radio == AR5K_RF2316) {
@@ -1498,9 +1492,32 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5112_ini_bbgain),
- rf5112_ini_bbgain, change_channel);
+ rf5112_ini_bbgain, skip_pcu);
break;
case AR5K_RF2317:
+
+ ath5k_hw_ini_mode_registers(ah,
+ ARRAY_SIZE(rf2413_ini_mode_end),
+ rf2413_ini_mode_end, mode);
+
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf2425_ini_common_end),
+ rf2425_ini_common_end, skip_pcu);
+
+ /* Override settings from rf2413_ini_mode_end */
+ ath5k_hw_reg_write(ah, 0x00180a65, AR5K_PHY_GAIN);
+
+ /* Override settings from rf2413_ini_common_end */
+ ath5k_hw_reg_write(ah, 0x00004000, AR5K_PHY_AGC);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TPC_RG5,
+ AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP, 0xa);
+ ath5k_hw_reg_write(ah, 0x800000a8, 0x8140);
+ ath5k_hw_reg_write(ah, 0x000000ff, 0x9958);
+
+ ath5k_hw_ini_registers(ah,
+ ARRAY_SIZE(rf5112_ini_bbgain),
+ rf5112_ini_bbgain, skip_pcu);
+ break;
case AR5K_RF2425:
ath5k_hw_ini_mode_registers(ah,
@@ -1509,11 +1526,11 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf2425_ini_common_end),
- rf2425_ini_common_end, change_channel);
+ rf2425_ini_common_end, skip_pcu);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5112_ini_bbgain),
- rf5112_ini_bbgain, change_channel);
+ rf5112_ini_bbgain, skip_pcu);
break;
default:
return -EINVAL;
@@ -1538,17 +1555,17 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
* Write initial settings common for all modes
*/
ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5211_ini),
- ar5211_ini, change_channel);
+ ar5211_ini, skip_pcu);
/* AR5211 only comes with 5111 */
/* Baseband gain table */
ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5111_ini_bbgain),
- rf5111_ini_bbgain, change_channel);
+ rf5111_ini_bbgain, skip_pcu);
/* For AR5210 (for mode settings check out ath5k_hw_reset_tx_queue) */
} else if (ah->ah_version == AR5K_AR5210) {
ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5210_ini),
- ar5210_ini, change_channel);
+ ar5210_ini, skip_pcu);
}
return 0;
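The initval tables above shrink from five columns to three because the turbo columns are dropped; each mode_value[] row is now indexed only by A/XR, B and G. A minimal sketch of how such a per-mode table is indexed, using hypothetical names in place of the driver's mode constants and register writes:

#include <stdint.h>
#include <stdio.h>

enum ini_mode { MODE_A = 0, MODE_B = 1, MODE_G = 2, MODE_MAX };

struct ini_mode_reg {
	uint16_t reg;
	uint32_t val[MODE_MAX];	/* one value per remaining mode: A/XR, B, G */
};

static const struct ini_mode_reg table[] = {
	{ 0x0030, { 0x00000015, 0x0000001d, 0x00000015 } }, /* sample row */
};

/* Write the column that matches the current mode (register write stubbed
 * out as a printf). */
static void write_mode_regs(enum ini_mode mode)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("reg 0x%04x <- 0x%08x\n",
		       table[i].reg, (unsigned)table[i].val[mode]);
}

int main(void)
{
	write_mode_regs(MODE_G);
	return 0;
}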
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 67aa52e9bf94..576edf2965dc 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -133,7 +133,7 @@ ath5k_register_led(struct ath5k_softc *sc, struct ath5k_led *led,
led->led_dev.default_trigger = trigger;
led->led_dev.brightness_set = ath5k_led_brightness_set;
- err = led_classdev_register(&sc->pdev->dev, &led->led_dev);
+ err = led_classdev_register(sc->dev, &led->led_dev);
if (err) {
ATH5K_WARN(sc, "could not register LED %s\n", name);
led->sc = NULL;
@@ -161,11 +161,20 @@ int ath5k_init_leds(struct ath5k_softc *sc)
{
int ret = 0;
struct ieee80211_hw *hw = sc->hw;
+#ifndef CONFIG_ATHEROS_AR231X
struct pci_dev *pdev = sc->pdev;
+#endif
char name[ATH5K_LED_MAX_NAME_LEN + 1];
const struct pci_device_id *match;
+ if (!sc->pdev)
+ return 0;
+
+#ifdef CONFIG_ATHEROS_AR231X
+ match = NULL;
+#else
match = pci_match_id(&ath5k_led_devices[0], pdev);
+#endif
if (match) {
__set_bit(ATH_STAT_LEDSOFT, sc->status);
sc->led_pin = ATH_PIN(match->driver_data);
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
new file mode 100644
index 000000000000..9be29b728b1c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -0,0 +1,824 @@
+/*-
+ * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
+ * Copyright (c) 2004-2005 Atheros Communications, Inc.
+ * Copyright (c) 2006 Devicescape Software, Inc.
+ * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
+ * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
+ * Copyright (c) 2010 Bruno Randolf <br1@einfach.org>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#include <asm/unaligned.h>
+
+#include "base.h"
+#include "reg.h"
+
+extern int ath5k_modparam_nohwcrypt;
+
+/********************\
+* Mac80211 functions *
+\********************/
+
+static void
+ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ath5k_softc *sc = hw->priv;
+ u16 qnum = skb_get_queue_mapping(skb);
+
+ if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
+}
+
+
+static int
+ath5k_start(struct ieee80211_hw *hw)
+{
+ return ath5k_init_hw(hw->priv);
+}
+
+
+static void
+ath5k_stop(struct ieee80211_hw *hw)
+{
+ ath5k_stop_hw(hw->priv);
+}
+
+
+static int
+ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct ath5k_softc *sc = hw->priv;
+ int ret;
+ struct ath5k_vif *avf = (void *)vif->drv_priv;
+
+ mutex_lock(&sc->lock);
+
+ if ((vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)
+ && (sc->num_ap_vifs + sc->num_adhoc_vifs) >= ATH_BCBUF) {
+ ret = -ELNRNG;
+ goto end;
+ }
+
+ /* Don't allow other interfaces if one ad-hoc is configured.
+ * TODO: Fix the problems with ad-hoc and multiple other interfaces.
+ * We would need to operate the HW in ad-hoc mode to allow TSF updates
+ * for the IBSS, but this breaks with additional AP or STA interfaces
+ * at the moment. */
+ if (sc->num_adhoc_vifs ||
+ (sc->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
+ ATH5K_ERR(sc, "Only one single ad-hoc interface is allowed.\n");
+ ret = -ELNRNG;
+ goto end;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ avf->opmode = vif->type;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto end;
+ }
+
+ sc->nvifs++;
+ ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
+
+ /* Assign the vap/adhoc to a beacon xmit slot. */
+ if ((avf->opmode == NL80211_IFTYPE_AP) ||
+ (avf->opmode == NL80211_IFTYPE_ADHOC) ||
+ (avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
+ int slot;
+
+ WARN_ON(list_empty(&sc->bcbuf));
+ avf->bbuf = list_first_entry(&sc->bcbuf, struct ath5k_buf,
+ list);
+ list_del(&avf->bbuf->list);
+
+ avf->bslot = 0;
+ for (slot = 0; slot < ATH_BCBUF; slot++) {
+ if (!sc->bslot[slot]) {
+ avf->bslot = slot;
+ break;
+ }
+ }
+ BUG_ON(sc->bslot[avf->bslot] != NULL);
+ sc->bslot[avf->bslot] = vif;
+ if (avf->opmode == NL80211_IFTYPE_AP)
+ sc->num_ap_vifs++;
+ else if (avf->opmode == NL80211_IFTYPE_ADHOC)
+ sc->num_adhoc_vifs++;
+ }
+
+ /* Any MAC address is fine, all others are included through the
+ * filter.
+ */
+ memcpy(&sc->lladdr, vif->addr, ETH_ALEN);
+ ath5k_hw_set_lladdr(sc->ah, vif->addr);
+
+ memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
+
+ ath5k_update_bssid_mask_and_opmode(sc, vif);
+ ret = 0;
+end:
+ mutex_unlock(&sc->lock);
+ return ret;
+}
+
+
+static void
+ath5k_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath5k_softc *sc = hw->priv;
+ struct ath5k_vif *avf = (void *)vif->drv_priv;
+ unsigned int i;
+
+ mutex_lock(&sc->lock);
+ sc->nvifs--;
+
+ if (avf->bbuf) {
+ ath5k_txbuf_free_skb(sc, avf->bbuf);
+ list_add_tail(&avf->bbuf->list, &sc->bcbuf);
+ for (i = 0; i < ATH_BCBUF; i++) {
+ if (sc->bslot[i] == vif) {
+ sc->bslot[i] = NULL;
+ break;
+ }
+ }
+ avf->bbuf = NULL;
+ }
+ if (avf->opmode == NL80211_IFTYPE_AP)
+ sc->num_ap_vifs--;
+ else if (avf->opmode == NL80211_IFTYPE_ADHOC)
+ sc->num_adhoc_vifs--;
+
+ ath5k_update_bssid_mask_and_opmode(sc, NULL);
+ mutex_unlock(&sc->lock);
+}
+
+
+/*
+ * TODO: Phy disable/diversity etc
+ */
+static int
+ath5k_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = sc->ah;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&sc->lock);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ret = ath5k_chan_set(sc, conf->channel);
+ if (ret < 0)
+ goto unlock;
+ }
+
+ if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
+ (sc->power_level != conf->power_level)) {
+ sc->power_level = conf->power_level;
+
+ /* Half dB steps */
+ ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+ ah->ah_retry_long = conf->long_frame_max_tx_count;
+ ah->ah_retry_short = conf->short_frame_max_tx_count;
+
+ for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++)
+ ath5k_hw_set_tx_retry_limits(ah, i);
+ }
+
+ /* TODO:
+ * 1) Move this on config_interface and handle each case
+ * separately eg. when we have only one STA vif, use
+ * AR5K_ANTMODE_SINGLE_AP
+ *
+ * 2) Allow the user to change antenna mode eg. when only
+ * one antenna is present
+ *
+ * 3) Allow the user to set default/tx antenna when possible
+ *
+ * 4) Default mode should handle 90% of the cases, together
+ * with fixed a/b and single AP modes we should be able to
+ * handle 99%. Sectored modes are extreme cases and I still
+ * haven't found a usage for them. If we decide to support them,
+ * then we must allow the user to set how many tx antennas we
+ * have available
+ */
+ ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
+
+unlock:
+ mutex_unlock(&sc->lock);
+ return ret;
+}
+
+
+static void
+ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf, u32 changes)
+{
+ struct ath5k_vif *avf = (void *)vif->drv_priv;
+ struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = sc->ah;
+ struct ath_common *common = ath5k_hw_common(ah);
+ unsigned long flags;
+
+ mutex_lock(&sc->lock);
+
+ if (changes & BSS_CHANGED_BSSID) {
+ /* Cache for later use during resets */
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ common->curaid = 0;
+ ath5k_hw_set_bssid(ah);
+ mmiowb();
+ }
+
+ if (changes & BSS_CHANGED_BEACON_INT)
+ sc->bintval = bss_conf->beacon_int;
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ avf->assoc = bss_conf->assoc;
+ if (bss_conf->assoc)
+ sc->assoc = bss_conf->assoc;
+ else
+ sc->assoc = ath_any_vif_assoc(sc);
+
+ if (sc->opmode == NL80211_IFTYPE_STATION)
+ set_beacon_filter(hw, sc->assoc);
+ ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
+ AR5K_LED_ASSOC : AR5K_LED_INIT);
+ if (bss_conf->assoc) {
+ ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
+ "Bss Info ASSOC %d, bssid: %pM\n",
+ bss_conf->aid, common->curbssid);
+ common->curaid = bss_conf->aid;
+ ath5k_hw_set_bssid(ah);
+ /* Once ANI is available you would start it here */
+ }
+ }
+
+ if (changes & BSS_CHANGED_BEACON) {
+ spin_lock_irqsave(&sc->block, flags);
+ ath5k_beacon_update(hw, vif);
+ spin_unlock_irqrestore(&sc->block, flags);
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED)
+ sc->enable_beacon = bss_conf->enable_beacon;
+
+ if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED |
+ BSS_CHANGED_BEACON_INT))
+ ath5k_beacon_config(sc);
+
+ mutex_unlock(&sc->lock);
+}
+
+
+static u64
+ath5k_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ u32 mfilt[2], val;
+ u8 pos;
+ struct netdev_hw_addr *ha;
+
+ mfilt[0] = 0;
+ mfilt[1] = 1;
+
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ /* calculate XOR of eight 6-bit values */
+ val = get_unaligned_le32(ha->addr + 0);
+ pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
+ val = get_unaligned_le32(ha->addr + 3);
+ pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
+ pos &= 0x3f;
+ mfilt[pos / 32] |= (1 << (pos % 32));
+ /* XXX: we might be able to just do this instead,
+ * but not sure, needs testing, if we do use this we'd
+ * need to inform below to not reset the mcast */
+ /* ath5k_hw_set_mcast_filterindex(ah,
+ * ha->addr[5]); */
+ }
+
+ return ((u64)(mfilt[1]) << 32) | mfilt[0];
+}
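The loop above hashes each multicast address into a 64-bit filter: it XORs eight 6-bit slices of the 48-bit address into a bit position between 0 and 63 and sets that bit. A self-contained sketch of the same hash, assuming the address bytes are already at hand rather than taken from a netdev list:

#include <stdint.h>
#include <stdio.h>

static uint32_t le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* XOR eight 6-bit slices of the address into a 6-bit bit position. */
static unsigned int mcast_hash(const uint8_t *addr)
{
	uint32_t val;
	unsigned int pos;

	val = le32(addr + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	val = le32(addr + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	return pos & 0x3f;
}

int main(void)
{
	/* mDNS group address, padded so the second 32-bit read stays in bounds */
	const uint8_t addr[8] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb, 0, 0 };
	uint32_t mfilt[2] = { 0, 0 };
	unsigned int pos = mcast_hash(addr);

	mfilt[pos / 32] |= 1u << (pos % 32);
	printf("bit %u -> mfilt = %08x %08x\n", pos, mfilt[1], mfilt[0]);
	return 0;
}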
+
+
+/*
+ * o always accept unicast, broadcast, and multicast traffic
+ * o multicast traffic for all BSSIDs will be enabled if mac80211
+ * says it should be
+ * o maintain current state of phy ofdm or phy cck error reception.
+ * If the hardware detects any of these type of errors then
+ * ath5k_hw_get_rx_filter() will pass to us the respective
+ * hardware filters to be able to receive these type of frames.
+ * o probe request frames are accepted only when operating in
+ * hostap, adhoc, or monitor modes
+ * o enable promiscuous mode according to the interface state
+ * o accept beacons:
+ * - when operating in adhoc mode so the 802.11 layer creates
+ * node table entries for peers,
+ * - when operating in station mode for collecting rssi data when
+ * the station is otherwise quiet, or
+ * - when scanning
+ */
+static void
+ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *new_flags, u64 multicast)
+{
+#define SUPPORTED_FIF_FLAGS \
+ (FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \
+ FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC)
+
+ struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = sc->ah;
+ u32 mfilt[2], rfilt;
+ struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */
+
+ mutex_lock(&sc->lock);
+
+ mfilt[0] = multicast;
+ mfilt[1] = multicast >> 32;
+
+ /* Only deal with supported flags */
+ changed_flags &= SUPPORTED_FIF_FLAGS;
+ *new_flags &= SUPPORTED_FIF_FLAGS;
+
+ /* If HW detects any phy or radar errors, leave those filters on.
+ * Also, always enable Unicast, Broadcasts and Multicast
+ * XXX: move unicast, bssid broadcasts and multicast to mac80211 */
+ rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) |
+ (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
+ AR5K_RX_FILTER_MCAST);
+
+ if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
+ if (*new_flags & FIF_PROMISC_IN_BSS)
+ __set_bit(ATH_STAT_PROMISC, sc->status);
+ else
+ __clear_bit(ATH_STAT_PROMISC, sc->status);
+ }
+
+ if (test_bit(ATH_STAT_PROMISC, sc->status))
+ rfilt |= AR5K_RX_FILTER_PROM;
+
+ /* Note, AR5K_RX_FILTER_MCAST is already enabled */
+ if (*new_flags & FIF_ALLMULTI) {
+ mfilt[0] = ~0;
+ mfilt[1] = ~0;
+ }
+
+ /* This is the best we can do */
+ if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL))
+ rfilt |= AR5K_RX_FILTER_PHYERR;
+
+ /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
+ * and probes for any BSSID */
+ if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (sc->nvifs > 1))
+ rfilt |= AR5K_RX_FILTER_BEACON;
+
+ /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
+ * set we should only pass on control frames for this
+ * station. This needs testing. I believe right now this
+ * enables *all* control frames, which is OK, but
+ * we should see if we can improve on granularity */
+ if (*new_flags & FIF_CONTROL)
+ rfilt |= AR5K_RX_FILTER_CONTROL;
+
+ /* Additional settings per mode -- this is per ath5k */
+
+ /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
+
+ switch (sc->opmode) {
+ case NL80211_IFTYPE_MESH_POINT:
+ rfilt |= AR5K_RX_FILTER_CONTROL |
+ AR5K_RX_FILTER_BEACON |
+ AR5K_RX_FILTER_PROBEREQ |
+ AR5K_RX_FILTER_PROM;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ rfilt |= AR5K_RX_FILTER_PROBEREQ |
+ AR5K_RX_FILTER_BEACON;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (sc->assoc)
+ rfilt |= AR5K_RX_FILTER_BEACON;
+ default:
+ break;
+ }
+
+ iter_data.hw_macaddr = NULL;
+ iter_data.n_stas = 0;
+ iter_data.need_set_hw_addr = false;
+ ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
+ &iter_data);
+
+ /* Set up RX Filter */
+ if (iter_data.n_stas > 1) {
+ /* If you have multiple STA interfaces connected to
+ * different APs, ARPs are not received (most of the time?)
+ * Enabling PROMISC appears to fix that problem.
+ */
+ rfilt |= AR5K_RX_FILTER_PROM;
+ }
+
+ /* Set filters */
+ ath5k_hw_set_rx_filter(ah, rfilt);
+
+ /* Set multicast bits */
+ ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
+ /* Set the cached hw filter flags, this will later actually
+ * be set in HW */
+ sc->filter_flags = rfilt;
+
+ mutex_unlock(&sc->lock);
+}
+
+
+static int
+ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = sc->ah;
+ struct ath_common *common = ath5k_hw_common(ah);
+ int ret = 0;
+
+ if (ath5k_modparam_nohwcrypt)
+ return -EOPNOTSUPP;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)
+ break;
+ return -EOPNOTSUPP;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ mutex_lock(&sc->lock);
+
+ switch (cmd) {
+ case SET_KEY:
+ ret = ath_key_config(common, vif, sta, key);
+ if (ret >= 0) {
+ key->hw_key_idx = ret;
+ /* push IV and Michael MIC generation to stack */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+ ret = 0;
+ }
+ break;
+ case DISABLE_KEY:
+ ath_key_delete(common, key);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mmiowb();
+ mutex_unlock(&sc->lock);
+ return ret;
+}
+
+
+static void
+ath5k_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct ath5k_softc *sc = hw->priv;
+ if (!sc->assoc)
+ ath5k_hw_set_ledstate(sc->ah, AR5K_LED_SCAN);
+}
+
+
+static void
+ath5k_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct ath5k_softc *sc = hw->priv;
+ ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
+ AR5K_LED_ASSOC : AR5K_LED_INIT);
+}
+
+
+static int
+ath5k_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ /* Force update */
+ ath5k_hw_update_mib_counters(sc->ah);
+
+ stats->dot11ACKFailureCount = sc->stats.ack_fail;
+ stats->dot11RTSFailureCount = sc->stats.rts_fail;
+ stats->dot11RTSSuccessCount = sc->stats.rts_ok;
+ stats->dot11FCSErrorCount = sc->stats.fcs_error;
+
+ return 0;
+}
+
+
+static int
+ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = sc->ah;
+ struct ath5k_txq_info qi;
+ int ret = 0;
+
+ if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
+ return 0;
+
+ mutex_lock(&sc->lock);
+
+ ath5k_hw_get_tx_queueprops(ah, queue, &qi);
+
+ qi.tqi_aifs = params->aifs;
+ qi.tqi_cw_min = params->cw_min;
+ qi.tqi_cw_max = params->cw_max;
+ qi.tqi_burst_time = params->txop;
+
+ ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
+ "Configure tx [queue %d], "
+ "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+ queue, params->aifs, params->cw_min,
+ params->cw_max, params->txop);
+
+ if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
+ ATH5K_ERR(sc,
+ "Unable to update hardware queue %u!\n", queue);
+ ret = -EIO;
+ } else
+ ath5k_hw_reset_tx_queue(ah, queue);
+
+ mutex_unlock(&sc->lock);
+
+ return ret;
+}
+
+
+static u64
+ath5k_get_tsf(struct ieee80211_hw *hw)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ return ath5k_hw_get_tsf64(sc->ah);
+}
+
+
+static void
+ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ ath5k_hw_set_tsf64(sc->ah, tsf);
+}
+
+
+static void
+ath5k_reset_tsf(struct ieee80211_hw *hw)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ /*
+ * in IBSS mode we need to update the beacon timers too.
+ * this will also reset the TSF if we call it with 0
+ */
+ if (sc->opmode == NL80211_IFTYPE_ADHOC)
+ ath5k_beacon_update_timers(sc, 0);
+ else
+ ath5k_hw_reset_tsf(sc->ah);
+}
+
+
+static int
+ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
+{
+ struct ath5k_softc *sc = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ath_common *common = ath5k_hw_common(sc->ah);
+ struct ath_cycle_counters *cc = &common->cc_survey;
+ unsigned int div = common->clockrate * 1000;
+
+ if (idx != 0)
+ return -ENOENT;
+
+ spin_lock_bh(&common->cc_lock);
+ ath_hw_cycle_counters_update(common);
+ if (cc->cycles > 0) {
+ sc->survey.channel_time += cc->cycles / div;
+ sc->survey.channel_time_busy += cc->rx_busy / div;
+ sc->survey.channel_time_rx += cc->rx_frame / div;
+ sc->survey.channel_time_tx += cc->tx_frame / div;
+ }
+ memset(cc, 0, sizeof(*cc));
+ spin_unlock_bh(&common->cc_lock);
+
+ memcpy(survey, &sc->survey, sizeof(*survey));
+
+ survey->channel = conf->channel;
+ survey->noise = sc->ah->ah_noise_floor;
+ survey->filled = SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_CHANNEL_TIME |
+ SURVEY_INFO_CHANNEL_TIME_BUSY |
+ SURVEY_INFO_CHANNEL_TIME_RX |
+ SURVEY_INFO_CHANNEL_TIME_TX;
+
+ return 0;
+}
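To make the unit conversion above concrete (an illustrative calculation, not part of the patch): common->clockrate is the MAC clock in MHz, e.g. 44 for 802.11g per the clock table this patch removes from pcu.c further below, so div = 44 * 1000 = 44000 cycles per millisecond, and 88,000,000 elapsed cycles contribute 88,000,000 / 44,000 = 2000 ms to survey->channel_time.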
+
+
+/**
+ * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
+ *
+ * @hw: struct ieee80211_hw pointer
+ * @coverage_class: IEEE 802.11 coverage class number
+ *
+ * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
+ * coverage class. The values are persistent, they are restored after device
+ * reset.
+ */
+static void
+ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ mutex_lock(&sc->lock);
+ ath5k_hw_set_coverage_class(sc->ah, coverage_class);
+ mutex_unlock(&sc->lock);
+}
+
+
+static int
+ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ if (tx_ant == 1 && rx_ant == 1)
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
+ else if (tx_ant == 2 && rx_ant == 2)
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
+ else if ((tx_ant & 3) == 3 && (rx_ant & 3) == 3)
+ ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
+ else
+ return -EINVAL;
+ return 0;
+}
+
+
+static int
+ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ switch (sc->ah->ah_ant_mode) {
+ case AR5K_ANTMODE_FIXED_A:
+ *tx_ant = 1; *rx_ant = 1; break;
+ case AR5K_ANTMODE_FIXED_B:
+ *tx_ant = 2; *rx_ant = 2; break;
+ case AR5K_ANTMODE_DEFAULT:
+ *tx_ant = 3; *rx_ant = 3; break;
+ }
+ return 0;
+}
+
+
+static void ath5k_get_ringparam(struct ieee80211_hw *hw,
+ u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ *tx = sc->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max;
+
+ *tx_max = ATH5K_TXQ_LEN_MAX;
+ *rx = *rx_max = ATH_RXBUF;
+}
+
+
+static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
+{
+ struct ath5k_softc *sc = hw->priv;
+ u16 qnum;
+
+ /* only support setting tx ring size for now */
+ if (rx != ATH_RXBUF)
+ return -EINVAL;
+
+ /* restrict tx ring size min/max */
+ if (!tx || tx > ATH5K_TXQ_LEN_MAX)
+ return -EINVAL;
+
+ for (qnum = 0; qnum < ARRAY_SIZE(sc->txqs); qnum++) {
+ if (!sc->txqs[qnum].setup)
+ continue;
+ if (sc->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN ||
+ sc->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX)
+ continue;
+
+ sc->txqs[qnum].txq_max = tx;
+ if (sc->txqs[qnum].txq_len >= sc->txqs[qnum].txq_max)
+ ieee80211_stop_queue(hw, sc->txqs[qnum].qnum);
+ }
+
+ return 0;
+}
+
+
+const struct ieee80211_ops ath5k_hw_ops = {
+ .tx = ath5k_tx,
+ .start = ath5k_start,
+ .stop = ath5k_stop,
+ .add_interface = ath5k_add_interface,
+ /* .change_interface = not implemented */
+ .remove_interface = ath5k_remove_interface,
+ .config = ath5k_config,
+ .bss_info_changed = ath5k_bss_info_changed,
+ .prepare_multicast = ath5k_prepare_multicast,
+ .configure_filter = ath5k_configure_filter,
+ /* .set_tim = not implemented */
+ .set_key = ath5k_set_key,
+ /* .update_tkip_key = not implemented */
+ /* .hw_scan = not implemented */
+ .sw_scan_start = ath5k_sw_scan_start,
+ .sw_scan_complete = ath5k_sw_scan_complete,
+ .get_stats = ath5k_get_stats,
+ /* .get_tkip_seq = not implemented */
+ /* .set_frag_threshold = not implemented */
+ /* .set_rts_threshold = not implemented */
+ /* .sta_add = not implemented */
+ /* .sta_remove = not implemented */
+ /* .sta_notify = not implemented */
+ .conf_tx = ath5k_conf_tx,
+ .get_tsf = ath5k_get_tsf,
+ .set_tsf = ath5k_set_tsf,
+ .reset_tsf = ath5k_reset_tsf,
+ /* .tx_last_beacon = not implemented */
+ /* .ampdu_action = not needed */
+ .get_survey = ath5k_get_survey,
+ .set_coverage_class = ath5k_set_coverage_class,
+ /* .rfkill_poll = not implemented */
+ /* .flush = not implemented */
+ /* .channel_switch = not implemented */
+ /* .napi_poll = not implemented */
+ .set_antenna = ath5k_set_antenna,
+ .get_antenna = ath5k_get_antenna,
+ .set_ringparam = ath5k_set_ringparam,
+ .get_ringparam = ath5k_get_ringparam,
+};
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
new file mode 100644
index 000000000000..66598a0d1df0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/nl80211.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include "../ath.h"
+#include "ath5k.h"
+#include "debug.h"
+#include "base.h"
+#include "reg.h"
+
+/* Known PCI ids */
+static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
+ { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
+ { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
+	{ PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus! */
+ { PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */
+ { PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */
+ { PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */
+ { PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */
+ { PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
+	{ PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 compatible */
+	{ PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 compatible */
+	{ PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 compatible */
+	{ PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 compatible */
+	{ PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 compatible */
+	{ PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 compatible */
+ { PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
+ { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
+ { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
+ { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
+
+/* return bus cachesize in 4B word units */
+static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
+{
+ struct ath5k_softc *sc = (struct ath5k_softc *) common->priv;
+ u8 u8tmp;
+
+ pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
+ *csz = (int)u8tmp;
+
+ /*
+	 * This check was put in to avoid "unpleasant" consequences if
+ * the bootrom has not fully initialized all PCI devices.
+ * Sometimes the cache line size register is not set
+ */
+
+ if (*csz == 0)
+ *csz = L1_CACHE_BYTES >> 2; /* Use the default size */
+}
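Illustrative arithmetic for the fallback above: with a 64-byte L1 cache line, L1_CACHE_BYTES >> 2 = 16, i.e. sixteen 4-byte words, which matches the "cachesize in 4B word units" convention stated above the function.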
+
+/*
+ * Read from eeprom
+ */
+static bool
+ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
+{
+ struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
+ u32 status, timeout;
+
+ /*
+ * Initialize EEPROM access
+ */
+ if (ah->ah_version == AR5K_AR5210) {
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
+ (void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
+ } else {
+ ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
+ AR5K_EEPROM_CMD_READ);
+ }
+
+ for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
+ status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
+ if (status & AR5K_EEPROM_STAT_RDDONE) {
+ if (status & AR5K_EEPROM_STAT_RDERR)
+ return false;
+ *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
+ 0xffff);
+ return true;
+ }
+ udelay(15);
+ }
+
+ return false;
+}
+
+int ath5k_hw_read_srev(struct ath5k_hw *ah)
+{
+ ah->ah_mac_srev = ath5k_hw_reg_read(ah, AR5K_SREV);
+ return 0;
+}
+
+/* Common ath_bus_opts structure */
+static const struct ath_bus_ops ath_pci_bus_ops = {
+ .ath_bus_type = ATH_PCI,
+ .read_cachesize = ath5k_pci_read_cachesize,
+ .eeprom_read = ath5k_pci_eeprom_read,
+};
+
+/********************\
+* PCI Initialization *
+\********************/
+
+static int __devinit
+ath5k_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ void __iomem *mem;
+ struct ath5k_softc *sc;
+ struct ieee80211_hw *hw;
+ int ret;
+ u8 csz;
+
+ /*
+ * L0s needs to be disabled on all ath5k cards.
+ *
+	 * For distributions shipping with CONFIG_PCIEASPM (which will be
+	 * enabled by default in 2.6.36) this also means both L1 and L0s
+	 * will be disabled when a pre-1.1 PCIe device is detected. We do
+	 * know that L1 works correctly even on all ath5k pre-1.1 PCIe
+	 * devices, but we cannot currently undo the effect of a blacklist;
+	 * for details read pcie_aspm_sanity_check() and see how it adjusts
+	 * the device link capability.
+ *
+ * It may be possible in the future to implement some PCI API to allow
+ * drivers to override blacklists for pre 1.1 PCIe but for now it is
+ * best to accept that both L0s and L1 will be disabled completely for
+ * distributions shipping with CONFIG_PCIEASPM rather than having this
+ * issue present. Motivation for adding this new API will be to help
+ * with power consumption for some of these devices.
+ */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "can't enable device\n");
+ goto err;
+ }
+
+ /* XXX 32-bit addressing only */
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "32-bit DMA not available\n");
+ goto err_dis;
+ }
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
+ if (csz == 0) {
+ /*
+ * Linux 2.4.18 (at least) writes the cache line size
+ * register as a 16-bit wide register which is wrong.
+ * We must have this setup properly for rx buffer
+ * DMA to work so force a reasonable value here if it
+ * comes up zero.
+ */
+ csz = L1_CACHE_BYTES >> 2;
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
+ }
+ /*
+ * The default setting of latency timer yields poor results,
+ * set it to the value used by other systems. It may be worth
+ * tweaking this setting more.
+ */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
+
+ /* Enable bus mastering */
+ pci_set_master(pdev);
+
+ /*
+ * Disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config_byte(pdev, 0x41, 0);
+
+ ret = pci_request_region(pdev, 0, "ath5k");
+ if (ret) {
+ dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
+ goto err_dis;
+ }
+
+ mem = pci_iomap(pdev, 0, 0);
+ if (!mem) {
+		dev_err(&pdev->dev, "cannot remap PCI memory region\n");
+ ret = -EIO;
+ goto err_reg;
+ }
+
+ /*
+ * Allocate hw (mac80211 main struct)
+ * and hw->priv (driver private data)
+ */
+ hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
+ if (hw == NULL) {
+ dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
+ ret = -ENOMEM;
+ goto err_map;
+ }
+
+ dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
+
+ sc = hw->priv;
+ sc->hw = hw;
+ sc->pdev = pdev;
+ sc->dev = &pdev->dev;
+ sc->irq = pdev->irq;
+ sc->devid = id->device;
+ sc->iobase = mem; /* So we can unmap it on detach */
+
+ /* Initialize */
+ ret = ath5k_init_softc(sc, &ath_pci_bus_ops);
+ if (ret)
+ goto err_free;
+
+ /* Set private data */
+ pci_set_drvdata(pdev, hw);
+
+ return 0;
+err_free:
+ ieee80211_free_hw(hw);
+err_map:
+ pci_iounmap(pdev, mem);
+err_reg:
+ pci_release_region(pdev, 0);
+err_dis:
+ pci_disable_device(pdev);
+err:
+ return ret;
+}
+
+static void __devexit
+ath5k_pci_remove(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath5k_softc *sc = hw->priv;
+
+ ath5k_deinit_softc(sc);
+ pci_iounmap(pdev, sc->iobase);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+ ieee80211_free_hw(hw);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ath5k_pci_suspend(struct device *dev)
+{
+ struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
+
+ ath5k_led_off(sc);
+ return 0;
+}
+
+static int ath5k_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ath5k_softc *sc = pci_get_drvdata(pdev);
+
+ /*
+ * Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_write_config_byte(pdev, 0x41, 0);
+
+ ath5k_led_enable(sc);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
+#define ATH5K_PM_OPS (&ath5k_pm_ops)
+#else
+#define ATH5K_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct pci_driver ath5k_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ath5k_pci_id_table,
+ .probe = ath5k_pci_probe,
+ .remove = __devexit_p(ath5k_pci_remove),
+ .driver.pm = ATH5K_PM_OPS,
+};
+
+/*
+ * Module init/exit functions
+ */
+static int __init
+init_ath5k_pci(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&ath5k_pci_driver);
+ if (ret) {
+ printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit
+exit_ath5k_pci(void)
+{
+ pci_unregister_driver(&ath5k_pci_driver);
+}
+
+module_init(init_ath5k_pci);
+module_exit(exit_ath5k_pci);
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 074b4c644399..a702817daf72 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -31,87 +31,163 @@
#include "debug.h"
#include "base.h"
+/*
+ * AR5212+ can use higher rates for ack transmission
+ * based on current tx rate instead of the base rate,
+ * in order to make better use of the channel.
+ * This is a mapping between G rates (that cover both
+ * CCK and OFDM) and the ack rates that we use when
+ * setting the rate -> duration table. This mapping is
+ * hw-based so don't change anything.
+ *
+ * To enable this functionality we must set
+ * ah->ah_ack_bitrate_high to true, otherwise the base
+ * rate is used (1Mb for CCK, 6Mb for OFDM).
+ */
+static const unsigned int ack_rates_high[] =
+/* Tx -> ACK */
+/* 1Mb -> 1Mb */ { 0,
+/* 2MB -> 2Mb */ 1,
+/* 5.5Mb -> 2Mb */ 1,
+/* 11Mb -> 2Mb */ 1,
+/* 6Mb -> 6Mb */ 4,
+/* 9Mb -> 6Mb */ 4,
+/* 12Mb -> 12Mb */ 6,
+/* 18Mb -> 12Mb */ 6,
+/* 24Mb -> 24Mb */ 8,
+/* 36Mb -> 24Mb */ 8,
+/* 48Mb -> 24Mb */ 8,
+/* 54Mb -> 24Mb */ 8 };
+
/*******************\
-* Generic functions *
+* Helper functions *
\*******************/
/**
- * ath5k_hw_set_opmode - Set PCU operating mode
+ * ath5k_hw_get_frame_duration - Get tx time of a frame
*
* @ah: The &struct ath5k_hw
- * @op_mode: &enum nl80211_iftype operating mode
+ * @len: Frame's length in bytes
+ * @rate: The @struct ieee80211_rate
*
- * Initialize PCU for the various operating modes (AP/STA etc)
+ * Calculate the tx duration of a frame given its rate and length.
+ * It extends ieee80211_generic_frame_duration for non-standard
+ * bwmodes.
*/
-int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
+int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
+ int len, struct ieee80211_rate *rate)
{
- struct ath_common *common = ath5k_hw_common(ah);
- u32 pcu_reg, beacon_reg, low_id, high_id;
+ struct ath5k_softc *sc = ah->ah_sc;
+ int sifs, preamble, plcp_bits, sym_time;
+ int bitrate, bits, symbols, symbol_bits;
+ int dur;
+
+ /* Fallback */
+ if (!ah->ah_bwmode) {
+ dur = ieee80211_generic_frame_duration(sc->hw,
+ NULL, len, rate);
+ return le16_to_cpu(dur);
+ }
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
+ bitrate = rate->bitrate;
+ preamble = AR5K_INIT_OFDM_PREAMPLE_TIME;
+ plcp_bits = AR5K_INIT_OFDM_PLCP_BITS;
+ sym_time = AR5K_INIT_OFDM_SYMBOL_TIME;
- /* Preserve rest settings */
- pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
- pcu_reg &= ~(AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_AP
- | AR5K_STA_ID1_KEYSRCH_MODE
- | (ah->ah_version == AR5K_AR5210 ?
- (AR5K_STA_ID1_PWR_SV | AR5K_STA_ID1_NO_PSPOLL) : 0));
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_40MHZ:
+ sifs = AR5K_INIT_SIFS_TURBO;
+ preamble = AR5K_INIT_OFDM_PREAMBLE_TIME_MIN;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ sifs = AR5K_INIT_SIFS_HALF_RATE;
+ preamble *= 2;
+ sym_time *= 2;
+ break;
+ case AR5K_BWMODE_5MHZ:
+ sifs = AR5K_INIT_SIFS_QUARTER_RATE;
+ preamble *= 4;
+ sym_time *= 4;
+ break;
+ default:
+ sifs = AR5K_INIT_SIFS_DEFAULT_BG;
+ break;
+ }
- beacon_reg = 0;
+ bits = plcp_bits + (len << 3);
+ /* Bit rate is in 100Kbits */
+ symbol_bits = bitrate * sym_time;
+ symbols = DIV_ROUND_UP(bits * 10, symbol_bits);
- switch (op_mode) {
- case NL80211_IFTYPE_ADHOC:
- pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
- beacon_reg |= AR5K_BCR_ADHOC;
- if (ah->ah_version == AR5K_AR5210)
- pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
- else
- AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
- break;
+ dur = sifs + preamble + (sym_time * symbols);
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MESH_POINT:
- pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE;
- beacon_reg |= AR5K_BCR_AP;
- if (ah->ah_version == AR5K_AR5210)
- pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
- else
- AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
- break;
+ return dur;
+}
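To make the duration math above easier to follow, here is a small stand-alone sketch (not part of the patch) that reproduces the same steps for a half-rate 10 MHz channel, a 14-byte frame (an ACK plus FCS, as used by ath5k_hw_write_rate_duration later in this patch) and a 6 Mb/s OFDM rate. The numeric constants (4 us OFDM symbol, 20 us preamble, 22 PLCP bits, 32 us half-rate SIFS) are assumptions for illustration only, not values read from the ath5k headers.

#include <stdio.h>

int main(void)
{
	int len = 14, bitrate = 60;	/* 6 Mb/s in 100 kbit/s units */
	int sifs = 32;			/* assumed half-rate SIFS (us) */
	int plcp_bits = 22;		/* assumed OFDM PLCP bits */
	int preamble = 20 * 2;		/* assumed 20 us, doubled for 10 MHz */
	int sym_time = 4 * 2;		/* assumed 4 us symbol, doubled for 10 MHz */

	int bits = plcp_bits + (len << 3);			/* 134 */
	int symbol_bits = bitrate * sym_time;			/* 480 */
	/* DIV_ROUND_UP(bits * 10, symbol_bits) */
	int symbols = (bits * 10 + symbol_bits - 1) / symbol_bits;	/* 3 */
	int dur = sifs + preamble + sym_time * symbols;		/* 32 + 40 + 24 = 96 us */

	printf("tx duration: %d us\n", dur);
	return 0;
}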
- case NL80211_IFTYPE_STATION:
- pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
- | (ah->ah_version == AR5K_AR5210 ?
- AR5K_STA_ID1_PWR_SV : 0);
- case NL80211_IFTYPE_MONITOR:
- pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
- | (ah->ah_version == AR5K_AR5210 ?
- AR5K_STA_ID1_NO_PSPOLL : 0);
- break;
+/**
+ * ath5k_hw_get_default_slottime - Get the default slot time for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ unsigned int slot_time;
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_40MHZ:
+ slot_time = AR5K_INIT_SLOT_TIME_TURBO;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ slot_time = AR5K_INIT_SLOT_TIME_HALF_RATE;
+ break;
+ case AR5K_BWMODE_5MHZ:
+ slot_time = AR5K_INIT_SLOT_TIME_QUARTER_RATE;
+ break;
+ case AR5K_BWMODE_DEFAULT:
+ slot_time = AR5K_INIT_SLOT_TIME_DEFAULT;
default:
- return -EINVAL;
+ if (channel->hw_value & CHANNEL_CCK)
+ slot_time = AR5K_INIT_SLOT_TIME_B;
+ break;
}
- /*
- * Set PCU registers
- */
- low_id = get_unaligned_le32(common->macaddr);
- high_id = get_unaligned_le16(common->macaddr + 4);
- ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
- ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
+ return slot_time;
+}
- /*
- * Set Beacon Control Register on 5210
- */
- if (ah->ah_version == AR5K_AR5210)
- ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);
+/**
+ * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ unsigned int sifs;
- return 0;
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_40MHZ:
+ sifs = AR5K_INIT_SIFS_TURBO;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ sifs = AR5K_INIT_SIFS_HALF_RATE;
+ break;
+ case AR5K_BWMODE_5MHZ:
+ sifs = AR5K_INIT_SIFS_QUARTER_RATE;
+ break;
+ case AR5K_BWMODE_DEFAULT:
+ sifs = AR5K_INIT_SIFS_DEFAULT_BG;
+ default:
+ if (channel->hw_value & CHANNEL_5GHZ)
+ sifs = AR5K_INIT_SIFS_DEFAULT_A;
+ break;
+ }
+
+ return sifs;
}
/**
- * ath5k_hw_update - Update MIB counters (mac layer statistics)
+ * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics)
*
* @ah: The &struct ath5k_hw
*
@@ -133,36 +209,86 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
}
+
+/******************\
+* ACK/CTS Timeouts *
+\******************/
+
/**
- * ath5k_hw_set_ack_bitrate - set bitrate for ACKs
+ * ath5k_hw_write_rate_duration - fill rate code to duration table
*
- * @ah: The &struct ath5k_hw
- * @high: Flag to determine if we want to use high transmission rate
- * for ACKs or not
+ * @ah: the &struct ath5k_hw
+ * @mode: one of enum ath5k_driver_mode
+ *
+ * Write the rate code to duration table upon hw reset. This is a helper for
+ * ath5k_hw_pcu_init(). All this does is set an ACK timeout on the hardware,
+ * based on the current mode, for each rate. The rates that are capable of
+ * short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have a different
+ * rate code, so we write their value twice (once for long preamble and once
+ * for short).
+ *
+ * Note: Band doesn't matter here; if we set the values for OFDM they work
+ * on both a and g modes. So all we have to do is set values for all g rates,
+ * which include all OFDM and CCK rates.
*
- * If high flag is set, we tell hw to use a set of control rates based on
- * the current transmission rate (check out control_rates array inside reset.c).
- * If not hw just uses the lowest rate available for the current modulation
- * scheme being used (1Mbit for CCK and 6Mbits for OFDM).
*/
-void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
+static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
{
- if (ah->ah_version != AR5K_AR5212)
- return;
- else {
- u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
- if (high)
- AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
+ struct ath5k_softc *sc = ah->ah_sc;
+ struct ieee80211_rate *rate;
+ unsigned int i;
+ /* 802.11g covers both OFDM and CCK */
+ u8 band = IEEE80211_BAND_2GHZ;
+
+ /* Write rate duration table */
+ for (i = 0; i < sc->sbands[band].n_bitrates; i++) {
+ u32 reg;
+ u16 tx_time;
+
+ if (ah->ah_ack_bitrate_high)
+ rate = &sc->sbands[band].bitrates[ack_rates_high[i]];
+ /* CCK -> 1Mb */
+ else if (i < 4)
+ rate = &sc->sbands[band].bitrates[0];
+ /* OFDM -> 6Mb */
else
- AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
+ rate = &sc->sbands[band].bitrates[4];
+
+ /* Set ACK timeout */
+ reg = AR5K_RATE_DUR(rate->hw_value);
+
+		/* An ACK frame consists of 10 bytes. If you add the FCS,
+		 * which ieee80211_generic_frame_duration() adds,
+		 * it's 14 bytes. Note that we use the control rate and not
+		 * the actual rate for this entry. See mac80211 tx.c
+		 * ieee80211_duration() for a brief description of
+		 * what rate we should choose to TX ACKs. */
+ tx_time = ath5k_hw_get_frame_duration(ah, 10, rate);
+
+ ath5k_hw_reg_write(ah, tx_time, reg);
+
+ if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
+ continue;
+
+		/*
+		 * We're not distinguishing short preamble here;
+		 * all we'll get is a slightly longer value, which is
+		 * not necessarily bad. We could export and use
+		 * ieee80211_frame_duration(), but it first needs to be
+		 * fixed so it can be used properly by mac80211 drivers:
+		 *
+		 * - remove the erp stuff and let the routine figure out
+		 *   ofdm/erp rates on its own
+		 * - stop passing ieee80211_local as an argument, since
+		 *   drivers don't have access to it
+		 * - move drivers using ieee80211_generic_frame_duration()
+		 *   over to it
+		 */
+ ath5k_hw_reg_write(ah, tx_time,
+ reg + (AR5K_SET_SHORT_PREAMBLE << 2));
}
}
-
-/******************\
-* ACK/CTS Timeouts *
-\******************/
-
/**
* ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
*
@@ -199,88 +325,10 @@ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
return 0;
}
-/**
- * ath5k_hw_htoclock - Translate usec to hw clock units
- *
- * @ah: The &struct ath5k_hw
- * @usec: value in microseconds
- */
-unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
-{
- struct ath_common *common = ath5k_hw_common(ah);
- return usec * common->clockrate;
-}
-
-/**
- * ath5k_hw_clocktoh - Translate hw clock units to usec
- * @clock: value in hw clock units
- */
-unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
-{
- struct ath_common *common = ath5k_hw_common(ah);
- return clock / common->clockrate;
-}
-
-/**
- * ath5k_hw_set_clockrate - Set common->clockrate for the current channel
- *
- * @ah: The &struct ath5k_hw
- */
-void ath5k_hw_set_clockrate(struct ath5k_hw *ah)
-{
- struct ieee80211_channel *channel = ah->ah_current_channel;
- struct ath_common *common = ath5k_hw_common(ah);
- int clock;
-
- if (channel->hw_value & CHANNEL_5GHZ)
- clock = 40; /* 802.11a */
- else if (channel->hw_value & CHANNEL_CCK)
- clock = 22; /* 802.11b */
- else
- clock = 44; /* 802.11g */
-
- /* Clock rate in turbo modes is twice the normal rate */
- if (channel->hw_value & CHANNEL_TURBO)
- clock *= 2;
-
- common->clockrate = clock;
-}
-
-/**
- * ath5k_hw_get_default_slottime - Get the default slot time for current mode
- *
- * @ah: The &struct ath5k_hw
- */
-static unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
-{
- struct ieee80211_channel *channel = ah->ah_current_channel;
-
- if (channel->hw_value & CHANNEL_TURBO)
- return 6; /* both turbo modes */
-
- if (channel->hw_value & CHANNEL_CCK)
- return 20; /* 802.11b */
-
- return 9; /* 802.11 a/g */
-}
-
-/**
- * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
- *
- * @ah: The &struct ath5k_hw
- */
-static unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
-{
- struct ieee80211_channel *channel = ah->ah_current_channel;
-
- if (channel->hw_value & CHANNEL_TURBO)
- return 8; /* both turbo modes */
- if (channel->hw_value & CHANNEL_5GHZ)
- return 16; /* 802.11a */
-
- return 10; /* 802.11 b/g */
-}
+/*******************\
+* RX filter Control *
+\*******************/
/**
* ath5k_hw_set_lladdr - Set station id
@@ -362,39 +410,6 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
ath_hw_setbssidmask(common);
}
-/************\
-* RX Control *
-\************/
-
-/**
- * ath5k_hw_start_rx_pcu - Start RX engine
- *
- * @ah: The &struct ath5k_hw
- *
- * Starts RX engine on PCU so that hw can process RXed frames
- * (ACK etc).
- *
- * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
- */
-void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
-{
- AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
-}
-
-/**
- * at5k_hw_stop_rx_pcu - Stop RX engine
- *
- * @ah: The &struct ath5k_hw
- *
- * Stops RX engine on PCU
- *
- * TODO: Detach ANI here
- */
-void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
-{
- AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
-}
-
/*
* Set multicast filter
*/
@@ -746,7 +761,7 @@ ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
* @ah: The &struct ath5k_hw
* @coverage_class: IEEE 802.11 coverage class number
*
- * Sets slot time, ACK timeout and CTS timeout for given coverage class.
+ * Sets IFS intervals and ACK/CTS timeouts for given coverage class.
*/
void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
{
@@ -755,9 +770,175 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;
int cts_timeout = ack_timeout;
- ath5k_hw_set_slot_time(ah, slot_time);
+ ath5k_hw_set_ifs_intervals(ah, slot_time);
ath5k_hw_set_ack_timeout(ah, ack_timeout);
ath5k_hw_set_cts_timeout(ah, cts_timeout);
ah->ah_coverage_class = coverage_class;
}
+
+/***************************\
+* Init/Start/Stop functions *
+\***************************/
+
+/**
+ * ath5k_hw_start_rx_pcu - Start RX engine
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * Starts RX engine on PCU so that hw can process RXed frames
+ * (ACK etc).
+ *
+ * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
+ */
+void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
+{
+ AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
+}
+
+/**
+ * at5k_hw_stop_rx_pcu - Stop RX engine
+ *
+ * @ah: The &struct ath5k_hw
+ *
+ * Stops RX engine on PCU
+ */
+void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
+{
+ AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
+}
+
+/**
+ * ath5k_hw_set_opmode - Set PCU operating mode
+ *
+ * @ah: The &struct ath5k_hw
+ * @op_mode: &enum nl80211_iftype operating mode
+ *
+ * Configure PCU for the various operating modes (AP/STA etc)
+ */
+int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ u32 pcu_reg, beacon_reg, low_id, high_id;
+
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
+
+ /* Preserve rest settings */
+ pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
+ pcu_reg &= ~(AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_AP
+ | AR5K_STA_ID1_KEYSRCH_MODE
+ | (ah->ah_version == AR5K_AR5210 ?
+ (AR5K_STA_ID1_PWR_SV | AR5K_STA_ID1_NO_PSPOLL) : 0));
+
+ beacon_reg = 0;
+
+ switch (op_mode) {
+ case NL80211_IFTYPE_ADHOC:
+ pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
+ beacon_reg |= AR5K_BCR_ADHOC;
+ if (ah->ah_version == AR5K_AR5210)
+ pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
+ else
+ AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
+ break;
+
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE;
+ beacon_reg |= AR5K_BCR_AP;
+ if (ah->ah_version == AR5K_AR5210)
+ pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
+ else
+ AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
+ break;
+
+ case NL80211_IFTYPE_STATION:
+ pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
+ | (ah->ah_version == AR5K_AR5210 ?
+ AR5K_STA_ID1_PWR_SV : 0);
+ case NL80211_IFTYPE_MONITOR:
+ pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
+ | (ah->ah_version == AR5K_AR5210 ?
+ AR5K_STA_ID1_NO_PSPOLL : 0);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Set PCU registers
+ */
+ low_id = get_unaligned_le32(common->macaddr);
+ high_id = get_unaligned_le16(common->macaddr + 4);
+ ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
+ ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
+
+ /*
+ * Set Beacon Control Register on 5210
+ */
+ if (ah->ah_version == AR5K_AR5210)
+ ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);
+
+ return 0;
+}
+
+void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+ u8 mode)
+{
+ /* Set bssid and bssid mask */
+ ath5k_hw_set_bssid(ah);
+
+ /* Set PCU config */
+ ath5k_hw_set_opmode(ah, op_mode);
+
+ /* Write rate duration table only on AR5212 and if
+ * virtual interface has already been brought up
+ * XXX: rethink this after new mode changes to
+ * mac80211 are integrated */
+ if (ah->ah_version == AR5K_AR5212 &&
+ ah->ah_sc->nvifs)
+ ath5k_hw_write_rate_duration(ah);
+
+ /* Set RSSI/BRSSI thresholds
+ *
+	 * Note: If we decide to set this value
+	 * dynamically, keep in mind that when the AR5K_RSSI_THR
+	 * register is read it might return 0x40 if we haven't
+	 * written anything to it, and the BMISS RSSI threshold is
+	 * zeroed. So doing a save/restore procedure here isn't the
+	 * right choice. Instead store it on ath5k_hw */
+ ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES |
+ AR5K_TUNE_BMISS_THRES <<
+ AR5K_RSSI_THR_BMISS_S),
+ AR5K_RSSI_THR);
+
+ /* MIC QoS support */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR2413) {
+ ath5k_hw_reg_write(ah, 0x000100aa, AR5K_MIC_QOS_CTL);
+ ath5k_hw_reg_write(ah, 0x00003210, AR5K_MIC_QOS_SEL);
+ }
+
+ /* QoS NOACK Policy */
+ if (ah->ah_version == AR5K_AR5212) {
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM(2, AR5K_QOS_NOACK_2BIT_VALUES) |
+ AR5K_REG_SM(5, AR5K_QOS_NOACK_BIT_OFFSET) |
+ AR5K_REG_SM(0, AR5K_QOS_NOACK_BYTE_OFFSET),
+ AR5K_QOS_NOACK);
+ }
+
+ /* Restore slot time and ACK timeouts */
+ if (ah->ah_coverage_class > 0)
+ ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
+
+ /* Set ACK bitrate mode (see ack_rates_high) */
+ if (ah->ah_version == AR5K_AR5212) {
+ u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
+ if (ah->ah_ack_bitrate_high)
+ AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
+ else
+ AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
+ }
+ return;
+}
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 219367884e64..62ce2f4e8605 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -29,6 +29,95 @@
#include "rfbuffer.h"
#include "rfgain.h"
+
+/******************\
+* Helper functions *
+\******************/
+
+/*
+ * Get the PHY Chip revision
+ */
+u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
+{
+ unsigned int i;
+ u32 srev;
+ u16 ret;
+
+ /*
+ * Set the radio chip access register
+ */
+ switch (chan) {
+ case CHANNEL_2GHZ:
+ ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0));
+ break;
+ case CHANNEL_5GHZ:
+ ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
+ break;
+ default:
+ return 0;
+ }
+
+ mdelay(2);
+
+ /* ...wait until PHY is ready and read the selected radio revision */
+ ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
+
+ for (i = 0; i < 8; i++)
+ ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
+
+ if (ah->ah_version == AR5K_AR5210) {
+ srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf;
+ ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
+ } else {
+ srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
+ ret = (u16)ath5k_hw_bitswap(((srev & 0xf0) >> 4) |
+ ((srev & 0x0f) << 4), 8);
+ }
+
+ /* Reset to the 5GHz mode */
+ ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
+
+ return ret;
+}
+
+/*
+ * Check if a channel is supported
+ */
+bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags)
+{
+ /* Check if the channel is in our supported range */
+ if (flags & CHANNEL_2GHZ) {
+ if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) &&
+ (freq <= ah->ah_capabilities.cap_range.range_2ghz_max))
+ return true;
+ } else if (flags & CHANNEL_5GHZ)
+ if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) &&
+ (freq <= ah->ah_capabilities.cap_range.range_5ghz_max))
+ return true;
+
+ return false;
+}
+
+bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ u8 refclk_freq;
+
+ if ((ah->ah_radio == AR5K_RF5112) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
+ refclk_freq = 40;
+ else
+ refclk_freq = 32;
+
+ if ((channel->center_freq % refclk_freq != 0) &&
+ ((channel->center_freq % refclk_freq < 10) ||
+ (channel->center_freq % refclk_freq > 22)))
+ return true;
+ else
+ return false;
+}
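As an illustration of the modulo test above (numbers chosen for the example, not taken from the patch): with a 32 MHz reference clock, a 2437 MHz channel gives 2437 % 32 = 5, which is non-zero and below 10, so the channel is flagged as suffering from spur noise; a 2412 MHz channel gives a remainder of 12, which falls inside the 10..22 window, so no spur mitigation is needed there.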
+
/*
* Used to modify RF Banks before writing them to AR5K_RF_BUFFER
*/
@@ -110,6 +199,118 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
return data;
}
+/**
+ * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
+ *
+ * @ah: the &struct ath5k_hw
+ * @channel: the currently set channel upon reset
+ *
+ * Write the delta slope coefficient (used for pilot tracking?) for OFDM
+ * operation on the AR5212 upon reset. This is a helper for ath5k_hw_phy_init.
+ *
+ * Since the delta slope is a floating point value we split it into its
+ * exponent and mantissa and provide these values to the hw.
+ *
+ * For more info, this patent appears to be related:
+ * http://www.freepatentsonline.com/7184495.html
+ */
+static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ /* Get exponent and mantissa and set it */
+ u32 coef_scaled, coef_exp, coef_man,
+ ds_coef_exp, ds_coef_man, clock;
+
+ BUG_ON(!(ah->ah_version == AR5K_AR5212) ||
+ !(channel->hw_value & CHANNEL_OFDM));
+
+ /* Get coefficient
+ * ALGO: coef = (5 * clock / carrier_freq) / 2
+ * we scale coef by shifting clock value by 24 for
+ * better precision since we use integers */
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_40MHZ:
+ clock = 40 * 2;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ clock = 40 / 2;
+ break;
+ case AR5K_BWMODE_5MHZ:
+ clock = 40 / 4;
+ break;
+ default:
+ clock = 40;
+ break;
+ }
+ coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
+
+ /* Get exponent
+ * ALGO: coef_exp = 14 - highest set bit position */
+ coef_exp = ilog2(coef_scaled);
+
+	/* Doesn't make sense if it's zero */
+ if (!coef_scaled || !coef_exp)
+ return -EINVAL;
+
+ /* Note: we've shifted coef_scaled by 24 */
+ coef_exp = 14 - (coef_exp - 24);
+
+
+ /* Get mantissa (significant digits)
+	 * ALGO: coef_mant = floor(coef_scaled * 2^coef_exp + 0.5) */
+ coef_man = coef_scaled +
+ (1 << (24 - coef_exp - 1));
+
+ /* Calculate delta slope coefficient exponent
+ * and mantissa (remove scaling) and set them on hw */
+ ds_coef_man = coef_man >> (24 - coef_exp);
+ ds_coef_exp = coef_exp - 16;
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
+ AR5K_PHY_TIMING_3_DSC_MAN, ds_coef_man);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
+ AR5K_PHY_TIMING_3_DSC_EXP, ds_coef_exp);
+
+ return 0;
+}
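A hypothetical worked run of the delta-slope math above, assuming a 2412 MHz channel in the default bandwidth mode (clock = 40). The sketch below is stand-alone and simply mirrors the function's integer arithmetic so the intermediate values can be checked by hand; it is not driver code.

#include <stdio.h>
#include <stdint.h>

/* Minimal ilog2() stand-in for this sketch */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint32_t clock = 40, freq = 2412;	/* default bwmode, channel 1 */
	uint32_t coef_scaled, coef_exp, coef_man, ds_coef_man, ds_coef_exp;

	coef_scaled = ((5 * (clock << 24)) / 2) / freq;		/* 695572 */
	coef_exp = ilog2_u32(coef_scaled);			/* 19 */
	coef_exp = 14 - (coef_exp - 24);			/* 19 (u32 wrap mirrors the driver) */
	coef_man = coef_scaled + (1 << (24 - coef_exp - 1));	/* 695588 */
	ds_coef_man = coef_man >> (24 - coef_exp);		/* 21737 */
	ds_coef_exp = coef_exp - 16;				/* 3 */

	printf("ds_coef_man=%u ds_coef_exp=%u\n",
	       (unsigned int)ds_coef_man, (unsigned int)ds_coef_exp);
	return 0;
}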
+
+int ath5k_hw_phy_disable(struct ath5k_hw *ah)
+{
+ /*Just a try M.F.*/
+ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
+
+ return 0;
+}
+
+/*
+ * Wait for synth to settle
+ */
+static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ /*
+ * On 5211+ read activation -> rx delay
+ * and use it (100ns steps).
+ */
+ if (ah->ah_version != AR5K_AR5210) {
+ u32 delay;
+ delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
+ AR5K_PHY_RX_DELAY_M;
+ delay = (channel->hw_value & CHANNEL_CCK) ?
+ ((delay << 2) / 22) : (delay / 10);
+ if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
+ delay = delay << 1;
+ if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
+ delay = delay << 2;
+ /* XXX: /2 on turbo ? Let's be safe
+ * for now */
+ udelay(100 + delay);
+ } else {
+ mdelay(1);
+ }
+}
+
+
/**********************\
* RF Gain optimization *
\**********************/
@@ -436,10 +637,10 @@ done:
/* Write initial RF gain table to set the RF sensitivity
* this one works on all RF chips and has nothing to do
* with gain_F calibration */
-int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq)
+static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
{
const struct ath5k_ini_rfgain *ath5k_rfg;
- unsigned int i, size;
+ unsigned int i, size, index;
switch (ah->ah_radio) {
case AR5K_RF5111:
@@ -471,17 +672,11 @@ int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq)
return -EINVAL;
}
- switch (freq) {
- case AR5K_INI_RFGAIN_2GHZ:
- case AR5K_INI_RFGAIN_5GHZ:
- break;
- default:
- return -EINVAL;
- }
+ index = (band == IEEE80211_BAND_2GHZ) ? 1 : 0;
for (i = 0; i < size; i++) {
AR5K_REG_WAIT(i);
- ath5k_hw_reg_write(ah, ath5k_rfg[i].rfg_value[freq],
+ ath5k_hw_reg_write(ah, ath5k_rfg[i].rfg_value[index],
(u32)ath5k_rfg[i].rfg_register);
}
@@ -494,12 +689,11 @@ int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq)
* RF Registers setup *
\********************/
-
/*
* Setup RF registers by writing RF buffer on hw
*/
-int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
- unsigned int mode)
+static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel, unsigned int mode)
{
const struct ath5k_rf_reg *rf_regs;
const struct ath5k_ini_rfbuffer *ini_rfb;
@@ -652,6 +846,11 @@ int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
g_step = &go->go_step[ah->ah_gain.g_step_idx];
+ /* Set turbo mode (N/A on RF5413) */
+ if ((ah->ah_bwmode == AR5K_BWMODE_40MHZ) &&
+ (ah->ah_radio != AR5K_RF5413))
+ ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_TURBO, false);
+
/* Bank Modifications (chip-specific) */
if (ah->ah_radio == AR5K_RF5111) {
@@ -691,7 +890,23 @@ int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
ath5k_hw_rfb_op(ah, rf_regs, ee->ee_xpd[ee_mode],
AR5K_RF_PLO_SEL, true);
- /* TODO: Half/quarter channel support */
+ /* Tweak power detectors for half/quarter rate support */
+ if (ah->ah_bwmode == AR5K_BWMODE_5MHZ ||
+ ah->ah_bwmode == AR5K_BWMODE_10MHZ) {
+ u8 wait_i;
+
+ ath5k_hw_rfb_op(ah, rf_regs, 0x1f,
+ AR5K_RF_WAIT_S, true);
+
+ wait_i = (ah->ah_bwmode == AR5K_BWMODE_5MHZ) ?
+ 0x1f : 0x10;
+
+ ath5k_hw_rfb_op(ah, rf_regs, wait_i,
+ AR5K_RF_WAIT_I, true);
+ ath5k_hw_rfb_op(ah, rf_regs, 3,
+ AR5K_RF_MAX_TIME, true);
+
+ }
}
if (ah->ah_radio == AR5K_RF5112) {
@@ -789,8 +1004,20 @@ int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
ath5k_hw_rfb_op(ah, rf_regs, ee->ee_i_gain[ee_mode],
AR5K_RF_GAIN_I, true);
- /* TODO: Half/quarter channel support */
+ /* Tweak power detector for half/quarter rates */
+ if (ah->ah_bwmode == AR5K_BWMODE_5MHZ ||
+ ah->ah_bwmode == AR5K_BWMODE_10MHZ) {
+ u8 pd_delay;
+
+ pd_delay = (ah->ah_bwmode == AR5K_BWMODE_5MHZ) ?
+ 0xf : 0x8;
+ ath5k_hw_rfb_op(ah, rf_regs, pd_delay,
+ AR5K_RF_PD_PERIOD_A, true);
+ ath5k_hw_rfb_op(ah, rf_regs, 0xf,
+ AR5K_RF_PD_DELAY_A, true);
+
+ }
}
if (ah->ah_radio == AR5K_RF5413 &&
@@ -822,24 +1049,6 @@ int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
\**************************/
/*
- * Check if a channel is supported
- */
-bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags)
-{
- /* Check if the channel is in our supported range */
- if (flags & CHANNEL_2GHZ) {
- if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) &&
- (freq <= ah->ah_capabilities.cap_range.range_2ghz_max))
- return true;
- } else if (flags & CHANNEL_5GHZ)
- if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) &&
- (freq <= ah->ah_capabilities.cap_range.range_5ghz_max))
- return true;
-
- return false;
-}
-
-/*
 * Conversion needed for RF5110
*/
static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
@@ -1045,7 +1254,8 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
/*
* Set a channel on the radio chip
*/
-int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+static int ath5k_hw_channel(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
{
int ret;
/*
@@ -1071,6 +1281,7 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
case AR5K_RF5111:
ret = ath5k_hw_rf5111_channel(ah, channel);
break;
+ case AR5K_RF2317:
case AR5K_RF2425:
ret = ath5k_hw_rf2425_channel(ah, channel);
break;
@@ -1092,8 +1303,6 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
}
ah->ah_current_channel = channel;
- ah->ah_turbo = channel->hw_value == CHANNEL_T ? true : false;
- ath5k_hw_set_clockrate(ah);
return 0;
}
@@ -1102,18 +1311,12 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
PHY calibration
\*****************/
-static int sign_extend(int val, const int nbits)
-{
- int order = BIT(nbits-1);
- return (val ^ order) - order;
-}
-
static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
{
s32 val;
val = ath5k_hw_reg_read(ah, AR5K_PHY_NF);
- return sign_extend(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 9);
+ return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
}
void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
@@ -1181,22 +1384,7 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
return;
}
- switch (ah->ah_current_channel->hw_value & CHANNEL_MODES) {
- case CHANNEL_A:
- case CHANNEL_T:
- case CHANNEL_XR:
- ee_mode = AR5K_EEPROM_MODE_11A;
- break;
- case CHANNEL_G:
- case CHANNEL_TG:
- ee_mode = AR5K_EEPROM_MODE_11G;
- break;
- default:
- case CHANNEL_B:
- ee_mode = AR5K_EEPROM_MODE_11B;
- break;
- }
-
+ ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
/* completed NF calibration, test threshold */
nf = ath5k_hw_read_measured_noise_floor(ah);
@@ -1425,31 +1613,12 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
return ret;
}
+
/***************************\
* Spur mitigation functions *
\***************************/
-bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
- struct ieee80211_channel *channel)
-{
- u8 refclk_freq;
-
- if ((ah->ah_radio == AR5K_RF5112) ||
- (ah->ah_radio == AR5K_RF5413) ||
- (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
- refclk_freq = 40;
- else
- refclk_freq = 32;
-
- if ((channel->center_freq % refclk_freq != 0) &&
- ((channel->center_freq % refclk_freq < 10) ||
- (channel->center_freq % refclk_freq > 22)))
- return true;
- else
- return false;
-}
-
-void
+static void
ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
@@ -1478,7 +1647,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
spur_chan_fbin = AR5K_EEPROM_NO_SPUR;
spur_detection_window = AR5K_SPUR_CHAN_WIDTH;
/* XXX: Half/Quarter channels ?*/
- if (channel->hw_value & CHANNEL_TURBO)
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
spur_detection_window *= 2;
for (i = 0; i < AR5K_EEPROM_N_SPUR_CHANS; i++) {
@@ -1507,32 +1676,43 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
* Calculate deltas:
* spur_freq_sigma_delta -> spur_offset / sample_freq << 21
* spur_delta_phase -> spur_offset / chip_freq << 11
- * Note: Both values have 100KHz resolution
+ * Note: Both values have 100Hz resolution
*/
- /* XXX: Half/Quarter rate channels ? */
- switch (channel->hw_value) {
- case CHANNEL_A:
- /* Both sample_freq and chip_freq are 40MHz */
- spur_delta_phase = (spur_offset << 17) / 25;
- spur_freq_sigma_delta = (spur_delta_phase >> 10);
- symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
- break;
- case CHANNEL_G:
- /* sample_freq -> 40MHz chip_freq -> 44MHz
- * (for b compatibility) */
- spur_freq_sigma_delta = (spur_offset << 8) / 55;
- spur_delta_phase = (spur_offset << 17) / 25;
- symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
- break;
- case CHANNEL_T:
- case CHANNEL_TG:
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_40MHZ:
/* Both sample_freq and chip_freq are 80MHz */
spur_delta_phase = (spur_offset << 16) / 25;
spur_freq_sigma_delta = (spur_delta_phase >> 10);
- symbol_width = AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz;
+ symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz * 2;
break;
+ case AR5K_BWMODE_10MHZ:
+ /* Both sample_freq and chip_freq are 20MHz (?) */
+ spur_delta_phase = (spur_offset << 18) / 25;
+ spur_freq_sigma_delta = (spur_delta_phase >> 10);
+		symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 2;
+		break;
+ case AR5K_BWMODE_5MHZ:
+ /* Both sample_freq and chip_freq are 10MHz (?) */
+ spur_delta_phase = (spur_offset << 19) / 25;
+ spur_freq_sigma_delta = (spur_delta_phase >> 10);
+		symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
+		break;
default:
- return;
+ if (channel->hw_value == CHANNEL_A) {
+ /* Both sample_freq and chip_freq are 40MHz */
+ spur_delta_phase = (spur_offset << 17) / 25;
+ spur_freq_sigma_delta =
+ (spur_delta_phase >> 10);
+ symbol_width =
+ AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
+ } else {
+ /* sample_freq -> 40MHz chip_freq -> 44MHz
+ * (for b compatibility) */
+ spur_delta_phase = (spur_offset << 17) / 25;
+ spur_freq_sigma_delta =
+ (spur_offset << 8) / 55;
+ symbol_width =
+ AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
+ }
+ break;
}
/* Calculate pilot and magnitude masks */
@@ -1672,63 +1852,6 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
}
}
-/********************\
- Misc PHY functions
-\********************/
-
-int ath5k_hw_phy_disable(struct ath5k_hw *ah)
-{
- /*Just a try M.F.*/
- ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
-
- return 0;
-}
-
-/*
- * Get the PHY Chip revision
- */
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
-{
- unsigned int i;
- u32 srev;
- u16 ret;
-
- /*
- * Set the radio chip access register
- */
- switch (chan) {
- case CHANNEL_2GHZ:
- ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0));
- break;
- case CHANNEL_5GHZ:
- ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
- break;
- default:
- return 0;
- }
-
- mdelay(2);
-
- /* ...wait until PHY is ready and read the selected radio revision */
- ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
-
- for (i = 0; i < 8; i++)
- ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
-
- if (ah->ah_version == AR5K_AR5210) {
- srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf;
- ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
- } else {
- srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
- ret = (u16)ath5k_hw_bitswap(((srev & 0xf0) >> 4) |
- ((srev & 0x0f) << 4), 8);
- }
-
- /* Reset to the 5GHz mode */
- ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
-
- return ret;
-}
/*****************\
* Antenna control *
@@ -1822,7 +1945,8 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
struct ieee80211_channel *channel = ah->ah_current_channel;
bool use_def_for_tx, update_def_on_tx, use_def_for_rts, fast_div;
bool use_def_for_sg;
- u8 def_ant, tx_ant, ee_mode;
+ int ee_mode;
+ u8 def_ant, tx_ant;
u32 sta_id1 = 0;
/* if channel is not initialized yet we can't set the antennas
@@ -1834,20 +1958,8 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
def_ant = ah->ah_def_ant;
- switch (channel->hw_value & CHANNEL_MODES) {
- case CHANNEL_A:
- case CHANNEL_T:
- case CHANNEL_XR:
- ee_mode = AR5K_EEPROM_MODE_11A;
- break;
- case CHANNEL_G:
- case CHANNEL_TG:
- ee_mode = AR5K_EEPROM_MODE_11G;
- break;
- case CHANNEL_B:
- ee_mode = AR5K_EEPROM_MODE_11B;
- break;
- default:
+ ee_mode = ath5k_eeprom_mode_from_channel(channel);
+ if (ee_mode < 0) {
ATH5K_ERR(ah->ah_sc,
"invalid channel: %d\n", channel->center_freq);
return;
@@ -2275,20 +2387,20 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah,
switch (channel->hw_value & CHANNEL_MODES) {
case CHANNEL_A:
- ctl_mode |= AR5K_CTL_11A;
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
+ ctl_mode |= AR5K_CTL_TURBO;
+ else
+ ctl_mode |= AR5K_CTL_11A;
break;
case CHANNEL_G:
- ctl_mode |= AR5K_CTL_11G;
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
+ ctl_mode |= AR5K_CTL_TURBOG;
+ else
+ ctl_mode |= AR5K_CTL_11G;
break;
case CHANNEL_B:
ctl_mode |= AR5K_CTL_11B;
break;
- case CHANNEL_T:
- ctl_mode |= AR5K_CTL_TURBO;
- break;
- case CHANNEL_TG:
- ctl_mode |= AR5K_CTL_TURBOG;
- break;
case CHANNEL_XR:
/* Fall through */
default:
@@ -2482,7 +2594,7 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
/* Write PCDAC values on hw */
static void
-ath5k_setup_pcdac_table(struct ath5k_hw *ah)
+ath5k_write_pcdac_table(struct ath5k_hw *ah)
{
u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
int i;
@@ -2631,10 +2743,12 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
/* Write PDADC values on hw */
static void
-ath5k_setup_pwr_to_pdadc_table(struct ath5k_hw *ah,
- u8 pdcurves, u8 *pdg_to_idx)
+ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
{
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u8 *pdadc_out = ah->ah_txpower.txp_pd_table;
+ u8 *pdg_to_idx = ee->ee_pdc_to_idx[ee_mode];
+ u8 pdcurves = ee->ee_pd_gains[ee_mode];
u32 reg;
u8 i;
@@ -2844,8 +2958,7 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
(s16) pcinfo_R->freq,
pcinfo_L->max_pwr, pcinfo_R->max_pwr);
- /* We are ready to go, fill PCDAC/PDADC
- * table and write settings on hardware */
+ /* Fill PCDAC/PDADC table */
switch (type) {
case AR5K_PWRTABLE_LINEAR_PCDAC:
/* For RF5112 we can have one or two curves
@@ -2858,9 +2971,6 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
* match max power value with max
* table index */
ah->ah_txpower.txp_offset = 64 - (table_max[0] / 2);
-
- /* Write settings on hw */
- ath5k_setup_pcdac_table(ah);
break;
case AR5K_PWRTABLE_PWR_TO_PCDAC:
/* We are done for RF5111 since it has only
@@ -2870,9 +2980,6 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
/* No rate powertable adjustment for RF5111 */
ah->ah_txpower.txp_min_idx = 0;
ah->ah_txpower.txp_offset = 0;
-
- /* Write settings on hw */
- ath5k_setup_pcdac_table(ah);
break;
case AR5K_PWRTABLE_PWR_TO_PDADC:
/* Set PDADC boundaries and fill
@@ -2880,9 +2987,6 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
ath5k_combine_pwr_to_pdadc_curves(ah, table_min, table_max,
ee->ee_pd_gains[ee_mode]);
- /* Write settings on hw */
- ath5k_setup_pwr_to_pdadc_table(ah, pdg, pdg_curve_to_idx);
-
/* Set txp.offset, note that table_min
* can be negative */
ah->ah_txpower.txp_offset = table_min[0];
@@ -2891,9 +2995,20 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
return -EINVAL;
}
+ ah->ah_txpower.txp_setup = true;
+
return 0;
}
+/* Write power table for current channel to hw */
+static void
+ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
+{
+ if (type == AR5K_PWRTABLE_PWR_TO_PDADC)
+ ath5k_write_pwr_to_pdadc_table(ah, ee_mode);
+ else
+ ath5k_write_pcdac_table(ah);
+}
/*
* Per-rate tx power setting
@@ -2982,7 +3097,7 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
/* Min/max in 0.25dB units */
ah->ah_txpower.txp_min_pwr = 2 * rates[7];
- ah->ah_txpower.txp_max_pwr = 2 * rates[0];
+ ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
ah->ah_txpower.txp_ofdm = rates[7];
}
@@ -2990,11 +3105,13 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
/*
* Set transmission power
*/
-int
+static int
ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
- u8 ee_mode, u8 txpower)
+ u8 txpower)
{
struct ath5k_rate_pcal_info rate_info;
+ struct ieee80211_channel *curr_channel = ah->ah_current_channel;
+ int ee_mode;
u8 type;
int ret;
@@ -3003,14 +3120,18 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
return -EINVAL;
}
- /* Reset TX power values */
- memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
- ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
- ah->ah_txpower.txp_min_pwr = 0;
- ah->ah_txpower.txp_max_pwr = AR5K_TUNE_MAX_TXPOWER;
+ ee_mode = ath5k_eeprom_mode_from_channel(channel);
+ if (ee_mode < 0) {
+ ATH5K_ERR(ah->ah_sc,
+ "invalid channel: %d\n", channel->center_freq);
+ return -EINVAL;
+ }
/* Initialize TX power table */
switch (ah->ah_radio) {
+ case AR5K_RF5110:
+ /* TODO */
+ return 0;
case AR5K_RF5111:
type = AR5K_PWRTABLE_PWR_TO_PCDAC;
break;
@@ -3028,10 +3149,26 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
return -EINVAL;
}
- /* FIXME: Only on channel/mode change */
- ret = ath5k_setup_channel_powertable(ah, channel, ee_mode, type);
- if (ret)
- return ret;
+ /*
+ * If we don't change channel/mode skip tx powertable calculation
+ * and use the cached one.
+ */
+ if (!ah->ah_txpower.txp_setup ||
+ (channel->hw_value != curr_channel->hw_value) ||
+ (channel->center_freq != curr_channel->center_freq)) {
+ /* Reset TX power values */
+ memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
+ ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
+
+ /* Calculate the powertable */
+ ret = ath5k_setup_channel_powertable(ah, channel,
+ ee_mode, type);
+ if (ret)
+ return ret;
+ }
+
+ /* Write table on hw */
+ ath5k_write_channel_powertable(ah, ee_mode, type);
/* Limit max power if we have a CTL available */
ath5k_get_max_ctl_power(ah, channel);
@@ -3086,31 +3223,223 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
{
- /*Just a try M.F.*/
- struct ieee80211_channel *channel = ah->ah_current_channel;
- u8 ee_mode;
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER,
+ "changing txpower to %d\n", txpower);
- switch (channel->hw_value & CHANNEL_MODES) {
- case CHANNEL_A:
- case CHANNEL_T:
- case CHANNEL_XR:
- ee_mode = AR5K_EEPROM_MODE_11A;
- break;
- case CHANNEL_G:
- case CHANNEL_TG:
- ee_mode = AR5K_EEPROM_MODE_11G;
- break;
- case CHANNEL_B:
- ee_mode = AR5K_EEPROM_MODE_11B;
- break;
- default:
- ATH5K_ERR(ah->ah_sc,
- "invalid channel: %d\n", channel->center_freq);
+ return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
+}
+
+/*************\
+ Init function
+\*************/
+
+int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+ u8 mode, bool fast)
+{
+ struct ieee80211_channel *curr_channel;
+ int ret, i;
+ u32 phy_tst1;
+ ret = 0;
+
+ /*
+ * Sanity check for fast flag
+ * Don't try fast channel change when changing modulation
+ * mode/band. We check for chip compatibility on
+ * ath5k_hw_reset.
+ */
+ curr_channel = ah->ah_current_channel;
+ if (fast && (channel->hw_value != curr_channel->hw_value))
return -EINVAL;
+
+ /*
+ * On fast channel change we only set the synth parameters
+ * while PHY is running, enable calibration and skip the rest.
+ */
+ if (fast) {
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
+ AR5K_PHY_RFBUS_REQ_REQUEST);
+ for (i = 0; i < 100; i++) {
+ if (ath5k_hw_reg_read(ah, AR5K_PHY_RFBUS_GRANT))
+ break;
+ udelay(5);
+ }
+ /* Failed */
+ if (i >= 100)
+ return -EIO;
+
+ /* Set channel and wait for synth */
+ ret = ath5k_hw_channel(ah, channel);
+ if (ret)
+ return ret;
+
+ ath5k_hw_wait_for_synth(ah, channel);
}
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER,
- "changing txpower to %d\n", txpower);
+ /*
+ * Set TX power
+ *
+ * Note: We need to do that before we set
+ * RF buffer settings on 5211/5212+ so that we
+ * properly set curve indices.
+ */
+ ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_cur_pwr ?
+ ah->ah_txpower.txp_cur_pwr / 2 : AR5K_TUNE_MAX_TXPOWER);
+ if (ret)
+ return ret;
+
+ /* Write OFDM timings on 5212*/
+ if (ah->ah_version == AR5K_AR5212 &&
+ channel->hw_value & CHANNEL_OFDM) {
+
+ ret = ath5k_hw_write_ofdm_timings(ah, channel);
+ if (ret)
+ return ret;
- return ath5k_hw_txpower(ah, channel, ee_mode, txpower);
+ /* Spur info is available only from EEPROM versions
+ * greater than 5.3, but the EEPROM routines will use
+ * static values for older versions */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
+ ath5k_hw_set_spur_mitigation_filter(ah,
+ channel);
+ }
+
+ /* If we used fast channel switching
+ * we are done, release RF bus and
+ * fire up NF calibration.
+ *
+ * Note: Only NF calibration due to
+ * channel change, not AGC calibration
+ * since AGC is still running !
+ */
+ if (fast) {
+ /*
+ * Release RF Bus grant
+ */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
+ AR5K_PHY_RFBUS_REQ_REQUEST);
+
+ /*
+ * Start NF calibration
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_NF);
+
+ return ret;
+ }
+
+ /*
+ * For 5210 we do all initialization using
+ * initvals, so we don't have to modify
+ * any settings (5210 also only supports
+ * a/aturbo modes)
+ */
+ if (ah->ah_version != AR5K_AR5210) {
+
+ /*
+ * Write initial RF gain settings
+ * This should work for both 5111/5112
+ */
+ ret = ath5k_hw_rfgain_init(ah, channel->band);
+ if (ret)
+ return ret;
+
+ mdelay(1);
+
+ /*
+ * Write RF buffer
+ */
+ ret = ath5k_hw_rfregs_init(ah, channel, mode);
+ if (ret)
+ return ret;
+
+ /*Enable/disable 802.11b mode on 5111
+ (enable 2111 frequency converter + CCK)*/
+ if (ah->ah_radio == AR5K_RF5111) {
+ if (mode == AR5K_MODE_11B)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG,
+ AR5K_TXCFG_B_MODE);
+ else
+ AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
+ AR5K_TXCFG_B_MODE);
+ }
+
+ } else if (ah->ah_version == AR5K_AR5210) {
+ mdelay(1);
+ /* Disable phy and wait */
+ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
+ mdelay(1);
+ }
+
+ /* Set channel on PHY */
+ ret = ath5k_hw_channel(ah, channel);
+ if (ret)
+ return ret;
+
+ /*
+ * Enable the PHY and wait until completion
+ * This includes BaseBand and Synthesizer
+ * activation.
+ */
+ ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
+
+ ath5k_hw_wait_for_synth(ah, channel);
+
+ /*
+ * Perform ADC test to see if baseband is ready
+ * Set tx hold and check adc test register
+ */
+ phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
+ ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
+ for (i = 0; i <= 20; i++) {
+ if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
+ break;
+ udelay(200);
+ }
+ ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
+
+ /*
+ * Start automatic gain control calibration
+ *
+ * During AGC calibration RX path is re-routed to
+ * a power detector so we don't receive anything.
+ *
+ * This method is used to calibrate some static offsets
+ * used together with on-the-fly I/Q calibration (the
+ * one performed via ath5k_hw_phy_calibrate), which doesn't
+ * interrupt rx path.
+ *
+ * While rx path is re-routed to the power detector we also
+ * start a noise floor calibration to measure the
+ * card's noise floor (the noise we measure when we are not
+ * transmitting or receiving anything).
+ *
+ * If we are in a noisy environment, AGC calibration may time
+ * out and/or noise floor calibration might time out.
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF);
+
+ /* At the same time start I/Q calibration for QAM constellation
+ * -no need for CCK- */
+ ah->ah_calibration = false;
+ if (!(mode == AR5K_MODE_11B)) {
+ ah->ah_calibration = true;
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
+ AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
+ AR5K_PHY_IQ_RUN);
+ }
+
+ /* Wait for gain calibration to finish (we check for I/Q calibration
+ * during ath5k_phy_calibrate) */
+ if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_CAL, 0, false)) {
+ ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
+ channel->center_freq);
+ }
+
+ /* Restore antenna mode */
+ ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
+
+ return ret;
}
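
For orientation, here is a minimal sketch of how a caller could drive the new init entry point, assuming only the ath5k_hw_phy_init() signature added above; the wrapper name and the fall-back-to-full-init policy are illustrative and not taken from ath5k_hw_reset():

/* Hypothetical helper: try a fast channel switch first and fall back
 * to a full PHY init if the fast path refuses (e.g. on a band change). */
static int hypothetical_switch_channel(struct ath5k_hw *ah,
		struct ieee80211_channel *chan, u8 mode)
{
	int ret = ath5k_hw_phy_init(ah, chan, mode, true);

	if (ret)	/* fast path rejected, do the full init */
		ret = ath5k_hw_phy_init(ah, chan, mode, false);

	return ret;
}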
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 84c717ded1c5..3343fb9e4940 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -25,14 +25,52 @@ Queue Control Unit, DFS Control Unit Functions
#include "debug.h"
#include "base.h"
+
+/******************\
+* Helper functions *
+\******************/
+
/*
- * Get properties for a transmit queue
+ * Get number of pending frames
+ * for a specific queue [5211+]
*/
-int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
- struct ath5k_txq_info *queue_info)
+u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
- memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
- return 0;
+ u32 pending;
+ AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+ /* Return if queue is declared inactive */
+ if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
+ return false;
+
+ /* XXX: How about AR5K_CFG_TXCNT ? */
+ if (ah->ah_version == AR5K_AR5210)
+ return false;
+
+ pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
+ pending &= AR5K_QCU_STS_FRMPENDCNT;
+
+ /* It's possible to have no frames pending even if TXE
+ * is set. To indicate that q has not stopped return
+ * true */
+ if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
+ return true;
+
+ return pending;
+}
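
A minimal sketch of how this helper might be used to wait for a queue to drain, assuming the usual ath5k/kernel headers; the helper name and the 100 x 100us polling budget are illustrative, not taken from the driver:

/* Hypothetical: poll until the queue is empty or give up */
static int hypothetical_wait_tx_empty(struct ath5k_hw *ah,
		unsigned int queue)
{
	int i;

	for (i = 0; i < 100; i++) {
		/* Returns the pending frame count, or true (1) when
		 * TXE is still set with nothing pending */
		if (!ath5k_hw_num_tx_pending(ah, queue))
			return 0;
		udelay(100);
	}
	return -EBUSY;
}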
+
+/*
+ * Set a transmit queue inactive
+ */
+void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+{
+ if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
+ return;
+
+ /* This queue will be skipped in further operations */
+ ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
+ /*For SIMR setup*/
+ AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}
/*
@@ -50,6 +88,16 @@ static u16 ath5k_cw_validate(u16 cw_req)
}
/*
+ * Get properties for a transmit queue
+ */
+int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+ struct ath5k_txq_info *queue_info)
+{
+ memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
+ return 0;
+}
+
+/*
* Set properties for a transmit queue
*/
int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
@@ -104,8 +152,8 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
/*
* Get queue by type
*/
- /*5210 only has 2 queues*/
- if (ah->ah_version == AR5K_AR5210) {
+ /* 5210 only has 2 queues */
+ if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
switch (queue_type) {
case AR5K_TX_QUEUE_DATA:
queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
@@ -172,361 +220,431 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
return queue;
}
-/*
- * Get number of pending frames
- * for a specific queue [5211+]
- */
-u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
-{
- u32 pending;
- AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
-
- /* Return if queue is declared inactive */
- if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
- return false;
- /* XXX: How about AR5K_CFG_TXCNT ? */
- if (ah->ah_version == AR5K_AR5210)
- return false;
-
- pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
- pending &= AR5K_QCU_STS_FRMPENDCNT;
-
- /* It's possible to have no frames pending even if TXE
- * is set. To indicate that q has not stopped return
- * true */
- if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
- return true;
-
- return pending;
-}
+/*******************************\
+* Single QCU/DCU initialization *
+\*******************************/
/*
- * Set a transmit queue inactive
+ * Set tx retry limits on DCU
*/
-void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+ unsigned int queue)
{
- if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
- return;
+ /* Single data queue on AR5210 */
+ if (ah->ah_version == AR5K_AR5210) {
+ struct ath5k_txq_info *tq = &ah->ah_txq[queue];
- /* This queue will be skipped in further operations */
- ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
- /*For SIMR setup*/
- AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
+ if (queue > 0)
+ return;
+
+ ath5k_hw_reg_write(ah,
+ (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_NODCU_RETRY_LMT_SLG_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_short,
+ AR5K_NODCU_RETRY_LMT_SSH_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_NODCU_RETRY_LMT_LG_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_short,
+ AR5K_NODCU_RETRY_LMT_SH_RETRY),
+ AR5K_NODCU_RETRY_LMT);
+ /* DCU on AR5211+ */
+ } else {
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_DCU_RETRY_LMT_RTS)
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_DCU_RETRY_LMT_STA_RTS)
+ | AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
+ AR5K_DCU_RETRY_LMT_STA_DATA),
+ AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
+ }
}
-/*
- * Set DFS properties for a transmit queue on DCU
+/**
+ * ath5k_hw_reset_tx_queue - Initialize a single hw queue
+ *
+ * @ah: The &struct ath5k_hw
+ * @queue: The hw queue number
+ *
+ * Sets DFS properties for the given transmit queue on DCU
+ * and configures all queue-specific parameters.
*/
int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
- u32 retry_lg, retry_sh;
struct ath5k_txq_info *tq = &ah->ah_txq[queue];
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
tq = &ah->ah_txq[queue];
- if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
+ /* Skip if queue inactive or if we are on AR5210
+ * that doesn't have QCU/DCU */
+ if ((ah->ah_version == AR5K_AR5210) ||
+ (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
return 0;
- if (ah->ah_version == AR5K_AR5210) {
- /* Only handle data queues, others will be ignored */
- if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
- return 0;
-
- /* Set Slot time */
- ath5k_hw_reg_write(ah, ah->ah_turbo ?
- AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
- AR5K_SLOT_TIME);
- /* Set ACK_CTS timeout */
- ath5k_hw_reg_write(ah, ah->ah_turbo ?
- AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
- AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
- /* Set Transmit Latency */
- ath5k_hw_reg_write(ah, ah->ah_turbo ?
- AR5K_INIT_TRANSMIT_LATENCY_TURBO :
- AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);
-
- /* Set IFS0 */
- if (ah->ah_turbo) {
- ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
- tq->tqi_aifs * AR5K_INIT_SLOT_TIME_TURBO) <<
- AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
- AR5K_IFS0);
- } else {
- ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
- tq->tqi_aifs * AR5K_INIT_SLOT_TIME) <<
- AR5K_IFS0_DIFS_S) |
- AR5K_INIT_SIFS, AR5K_IFS0);
- }
+ /*
+ * Set contention window (cw_min/cw_max)
+ * and arbitrated interframe space (aifs)...
+ */
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
+ AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
+ AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
+ AR5K_QUEUE_DFS_LOCAL_IFS(queue));
+
+ /*
+ * Set tx retry limits for this queue
+ */
+ ath5k_hw_set_tx_retry_limits(ah, queue);
- /* Set IFS1 */
- ath5k_hw_reg_write(ah, ah->ah_turbo ?
- AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
- AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
- /* Set AR5K_PHY_SETTLING */
- ath5k_hw_reg_write(ah, ah->ah_turbo ?
- (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
- | 0x38 :
- (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
- | 0x1C,
- AR5K_PHY_SETTLING);
- /* Set Frame Control Register */
- ath5k_hw_reg_write(ah, ah->ah_turbo ?
- (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
- AR5K_PHY_TURBO_SHORT | 0x2020) :
- (AR5K_PHY_FRAME_CTL_INI | 0x1020),
- AR5K_PHY_FRAME_CTL_5210);
- }
/*
- * Calculate and set retry limits
+ * Set misc registers
*/
- if (ah->ah_software_retry) {
- /* XXX Need to test this */
- retry_lg = ah->ah_limit_tx_retries;
- retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
- AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
- } else {
- retry_lg = AR5K_INIT_LG_RETRY;
- retry_sh = AR5K_INIT_SH_RETRY;
- }
- /*No QCU/DCU [5210]*/
- if (ah->ah_version == AR5K_AR5210) {
- ath5k_hw_reg_write(ah,
- (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
- | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
- AR5K_NODCU_RETRY_LMT_SLG_RETRY)
- | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
- AR5K_NODCU_RETRY_LMT_SSH_RETRY)
- | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
- | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
- AR5K_NODCU_RETRY_LMT);
- } else {
- /*QCU/DCU [5211+]*/
- ath5k_hw_reg_write(ah,
- AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
- AR5K_DCU_RETRY_LMT_SLG_RETRY) |
- AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
- AR5K_DCU_RETRY_LMT_SSH_RETRY) |
- AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
- AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
- AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
+ /* Enable DCU to wait for next fragment from QCU */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+ AR5K_DCU_MISC_FRAG_WAIT);
- /*===Rest is also for QCU/DCU only [5211+]===*/
+ /* On Maui and Spirit use the global seqnum on DCU */
+ if (ah->ah_mac_version < AR5K_SREV_AR5211)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+ AR5K_DCU_MISC_SEQNUM_CTL);
+
+ /* Constant bit rate period */
+ if (tq->tqi_cbr_period) {
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
+ AR5K_QCU_CBRCFG_INTVAL) |
+ AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
+ AR5K_QCU_CBRCFG_ORN_THRES),
+ AR5K_QUEUE_CBRCFG(queue));
- /*
- * Set contention window (cw_min/cw_max)
- * and arbitrated interframe space (aifs)...
- */
- ath5k_hw_reg_write(ah,
- AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
- AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
- AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
- AR5K_QUEUE_DFS_LOCAL_IFS(queue));
-
- /*
- * Set misc registers
- */
- /* Enable DCU early termination for this queue */
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_DCU_EARLY);
+ AR5K_QCU_MISC_FRSHED_CBR);
- /* Enable DCU to wait for next fragment from QCU */
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
- AR5K_DCU_MISC_FRAG_WAIT);
-
- /* On Maui and Spirit use the global seqnum on DCU */
- if (ah->ah_mac_version < AR5K_SREV_AR5211)
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
- AR5K_DCU_MISC_SEQNUM_CTL);
-
- if (tq->tqi_cbr_period) {
- ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
- AR5K_QCU_CBRCFG_INTVAL) |
- AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
- AR5K_QCU_CBRCFG_ORN_THRES),
- AR5K_QUEUE_CBRCFG(queue));
+ if (tq->tqi_cbr_overflow_limit)
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_FRSHED_CBR);
- if (tq->tqi_cbr_overflow_limit)
- AR5K_REG_ENABLE_BITS(ah,
- AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_CBR_THRES_ENABLE);
- }
+ }
- if (tq->tqi_ready_time &&
- (tq->tqi_type != AR5K_TX_QUEUE_CAB))
- ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
- AR5K_QCU_RDYTIMECFG_INTVAL) |
- AR5K_QCU_RDYTIMECFG_ENABLE,
- AR5K_QUEUE_RDYTIMECFG(queue));
-
- if (tq->tqi_burst_time) {
- ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
- AR5K_DCU_CHAN_TIME_DUR) |
- AR5K_DCU_CHAN_TIME_ENABLE,
- AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
-
- if (tq->tqi_flags
- & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
- AR5K_REG_ENABLE_BITS(ah,
- AR5K_QUEUE_MISC(queue),
+ /* Ready time interval */
+ if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
+ AR5K_QCU_RDYTIMECFG_INTVAL) |
+ AR5K_QCU_RDYTIMECFG_ENABLE,
+ AR5K_QUEUE_RDYTIMECFG(queue));
+
+ if (tq->tqi_burst_time) {
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
+ AR5K_DCU_CHAN_TIME_DUR) |
+ AR5K_DCU_CHAN_TIME_ENABLE,
+ AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_RDY_VEOL_POLICY);
- }
+ }
- if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
- ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
- AR5K_QUEUE_DFS_MISC(queue));
+ /* Enable/disable Post frame backoff */
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
+ ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
+ AR5K_QUEUE_DFS_MISC(queue));
- if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
- ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
- AR5K_QUEUE_DFS_MISC(queue));
+ /* Enable/disable fragmentation burst backoff */
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
+ ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
+ AR5K_QUEUE_DFS_MISC(queue));
- /*
- * Set registers by queue type
- */
- switch (tq->tqi_type) {
- case AR5K_TX_QUEUE_BEACON:
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ /*
+ * Set registers by queue type
+ */
+ switch (tq->tqi_type) {
+ case AR5K_TX_QUEUE_BEACON:
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_FRSHED_DBA_GT |
AR5K_QCU_MISC_CBREXP_BCN_DIS |
AR5K_QCU_MISC_BCN_ENABLE);
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
AR5K_DCU_MISC_ARBLOCK_CTL_S) |
AR5K_DCU_MISC_ARBLOCK_IGNORE |
AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
AR5K_DCU_MISC_BCN_ENABLE);
- break;
+ break;
- case AR5K_TX_QUEUE_CAB:
- /* XXX: use BCN_SENT_GT, if we can figure out how */
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_FRSHED_DBA_GT |
- AR5K_QCU_MISC_CBREXP_DIS |
- AR5K_QCU_MISC_CBREXP_BCN_DIS);
+ case AR5K_TX_QUEUE_CAB:
+ /* XXX: use BCN_SENT_GT, if we can figure out how */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_FRSHED_DBA_GT |
+ AR5K_QCU_MISC_CBREXP_DIS |
+ AR5K_QCU_MISC_CBREXP_BCN_DIS);
- ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
- (AR5K_TUNE_SW_BEACON_RESP -
- AR5K_TUNE_DMA_BEACON_RESP) -
+ ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
+ (AR5K_TUNE_SW_BEACON_RESP -
+ AR5K_TUNE_DMA_BEACON_RESP) -
AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
- AR5K_QCU_RDYTIMECFG_ENABLE,
- AR5K_QUEUE_RDYTIMECFG(queue));
+ AR5K_QCU_RDYTIMECFG_ENABLE,
+ AR5K_QUEUE_RDYTIMECFG(queue));
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
- (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
- AR5K_DCU_MISC_ARBLOCK_CTL_S));
- break;
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+ (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
+ AR5K_DCU_MISC_ARBLOCK_CTL_S));
+ break;
- case AR5K_TX_QUEUE_UAPSD:
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_CBREXP_DIS);
- break;
+ case AR5K_TX_QUEUE_UAPSD:
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_CBREXP_DIS);
+ break;
- case AR5K_TX_QUEUE_DATA:
- default:
+ case AR5K_TX_QUEUE_DATA:
+ default:
break;
- }
-
- /* TODO: Handle frame compression */
-
- /*
- * Enable interrupts for this tx queue
- * in the secondary interrupt mask registers
- */
- if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
-
- /* Update secondary interrupt mask registers */
-
- /* Filter out inactive queues */
- ah->ah_txq_imr_txok &= ah->ah_txq_status;
- ah->ah_txq_imr_txerr &= ah->ah_txq_status;
- ah->ah_txq_imr_txurn &= ah->ah_txq_status;
- ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
- ah->ah_txq_imr_txeol &= ah->ah_txq_status;
- ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
- ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
- ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
- ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
-
- ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
- AR5K_SIMR0_QCU_TXOK) |
- AR5K_REG_SM(ah->ah_txq_imr_txdesc,
- AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
- ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
- AR5K_SIMR1_QCU_TXERR) |
- AR5K_REG_SM(ah->ah_txq_imr_txeol,
- AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
- /* Update simr2 but don't overwrite rest simr2 settings */
- AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
- AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
- AR5K_REG_SM(ah->ah_txq_imr_txurn,
- AR5K_SIMR2_QCU_TXURN));
- ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
- AR5K_SIMR3_QCBRORN) |
- AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
- AR5K_SIMR3_QCBRURN), AR5K_SIMR3);
- ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
- AR5K_SIMR4_QTRIG), AR5K_SIMR4);
- /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
- ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
- AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
- /* No queue has TXNOFRM enabled, disable the interrupt
- * by setting AR5K_TXNOFRM to zero */
- if (ah->ah_txq_imr_nofrm == 0)
- ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
-
- /* Set QCU mask for this DCU to save power */
- AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
}
+ /* TODO: Handle frame compression */
+
+ /*
+ * Enable interrupts for this tx queue
+ * in the secondary interrupt mask registers
+ */
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
+
+ /* Update secondary interrupt mask registers */
+
+ /* Filter out inactive queues */
+ ah->ah_txq_imr_txok &= ah->ah_txq_status;
+ ah->ah_txq_imr_txerr &= ah->ah_txq_status;
+ ah->ah_txq_imr_txurn &= ah->ah_txq_status;
+ ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
+ ah->ah_txq_imr_txeol &= ah->ah_txq_status;
+ ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
+ ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
+ ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
+ ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
+
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
+ AR5K_SIMR0_QCU_TXOK) |
+ AR5K_REG_SM(ah->ah_txq_imr_txdesc,
+ AR5K_SIMR0_QCU_TXDESC),
+ AR5K_SIMR0);
+
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
+ AR5K_SIMR1_QCU_TXERR) |
+ AR5K_REG_SM(ah->ah_txq_imr_txeol,
+ AR5K_SIMR1_QCU_TXEOL),
+ AR5K_SIMR1);
+
+ /* Update SIMR2 but don't overwrite rest simr2 settings */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
+ AR5K_REG_SM(ah->ah_txq_imr_txurn,
+ AR5K_SIMR2_QCU_TXURN));
+
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
+ AR5K_SIMR3_QCBRORN) |
+ AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
+ AR5K_SIMR3_QCBRURN),
+ AR5K_SIMR3);
+
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
+ AR5K_SIMR4_QTRIG), AR5K_SIMR4);
+
+ /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
+ AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
+
+ /* No queue has TXNOFRM enabled, disable the interrupt
+ * by setting AR5K_TXNOFRM to zero */
+ if (ah->ah_txq_imr_nofrm == 0)
+ ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
+
+ /* Set QCU mask for this DCU to save power */
+ AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
+
return 0;
}
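
As a usage sketch, a data queue would typically be allocated, described and then programmed through the functions above; the wrapper name and the AIFS/CW numbers below are illustrative only:

/* Hypothetical setup of a best-effort data queue */
static int hypothetical_setup_data_queue(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		.tqi_type = AR5K_TX_QUEUE_DATA,
		.tqi_aifs = 2,		/* illustrative EDCA-like values */
		.tqi_cw_min = 15,
		.tqi_cw_max = 1023,
	};
	int q = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qi);

	if (q < 0)
		return q;

	/* Program the QCU/DCU registers for the new queue */
	return ath5k_hw_reset_tx_queue(ah, q);
}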
-/*
- * Set slot time on DCU
+
+/**************************\
+* Global QCU/DCU functions *
+\**************************/
+
+/**
+ * ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU
+ *
+ * @ah: The &struct ath5k_hw
+ * @slot_time: Slot time in us
+ *
+ * Sets the global IFS intervals on DCU (also works on AR5210) for
+ * the given slot time and the current bwmode.
*/
-int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
+int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ struct ath5k_softc *sc = ah->ah_sc;
+ struct ieee80211_rate *rate;
+ u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
return -EINVAL;
- if (ah->ah_version == AR5K_AR5210)
- ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
+ sifs = ath5k_hw_get_default_sifs(ah);
+ sifs_clock = ath5k_hw_htoclock(ah, sifs);
+
+ /* EIFS
+ * Txtime of ack at lowest rate + SIFS + DIFS
+ * (DIFS = SIFS + 2 * Slot time)
+ *
+ * Note: HAL has some predefined values for EIFS
+ * Turbo: (37 + 2 * 6)
+ * Default: (74 + 2 * 9)
+ * Half: (149 + 2 * 13)
+ * Quarter: (298 + 2 * 21)
+ *
+ * (74 + 2 * 6) for AR5210 default and turbo !
+ *
+ * According to the formula we have
+ * ack_tx_time = 25 for turbo and
+ * ack_tx_time = 42.5 * clock multiplier
+ * for default/half/quarter.
+ *
+ * This can't be right, 42 is what we would get
+ * from ath5k_hw_get_frame_dur_for_bwmode or
+ * ieee80211_generic_frame_duration for zero frame
+ * length and without SIFS !
+ *
+ * Also we have different lowest rate for 802.11a
+ */
+ if (channel->hw_value & CHANNEL_5GHZ)
+ rate = &sc->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
else
- ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
+ rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[0];
+
+ ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate);
+
+ /* ack_tx_time includes an SIFS already */
+ eifs = ack_tx_time + sifs + 2 * slot_time;
+ eifs_clock = ath5k_hw_htoclock(ah, eifs);
+
+ /* Set IFS settings on AR5210 */
+ if (ah->ah_version == AR5K_AR5210) {
+ u32 pifs, pifs_clock, difs, difs_clock;
+
+ /* Set slot time */
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
+
+ /* Set EIFS */
+ eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);
+
+ /* PIFS = Slot time + SIFS */
+ pifs = slot_time + sifs;
+ pifs_clock = ath5k_hw_htoclock(ah, pifs);
+ pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);
+
+ /* DIFS = SIFS + 2 * Slot time */
+ difs = sifs + 2 * slot_time;
+ difs_clock = ath5k_hw_htoclock(ah, difs);
+
+ /* Set SIFS/DIFS */
+ ath5k_hw_reg_write(ah, (difs_clock <<
+ AR5K_IFS0_DIFS_S) | sifs_clock,
+ AR5K_IFS0);
+
+ /* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */
+ ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
+ (AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
+ AR5K_IFS1);
+
+ return 0;
+ }
+
+ /* Set IFS slot time */
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
+
+ /* Set EIFS interval */
+ ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);
+
+ /* Set SIFS interval in usecs */
+ AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
+ AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
+ sifs);
+
+ /* Set SIFS interval in clock cycles */
+ ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);
return 0;
}
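
A standalone worked example of the EIFS formula used above (EIFS = ack_tx_time + SIFS + 2 * slot_time, where ack_tx_time already contains one SIFS); the ack_tx_time value here is only a placeholder, not a number computed by ath5k_hw_get_frame_duration():

#include <stdio.h>

int main(void)
{
	/* Illustrative inputs: 802.11b-style SIFS/slot and a
	 * placeholder ACK duration at the lowest rate */
	unsigned int sifs = 10, slot_time = 20, ack_tx_time = 314;
	unsigned int eifs = ack_tx_time + sifs + 2 * slot_time;

	printf("eifs = %u usec\n", eifs); /* 364 with these inputs */
	return 0;
}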
+
+int ath5k_hw_init_queues(struct ath5k_hw *ah)
+{
+ int i, ret;
+
+ /* TODO: HW Compression support for data queues */
+ /* TODO: Burst prefetch for data queues */
+
+ /*
+ * Reset queues and start beacon timers at the end of the reset routine
+ * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
+ * Note: If we want we can assign multiple qcus on one dcu.
+ */
+ if (ah->ah_version != AR5K_AR5210)
+ for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
+ ret = ath5k_hw_reset_tx_queue(ah, i);
+ if (ret) {
+ ATH5K_ERR(ah->ah_sc,
+ "failed to reset TX queue #%d\n", i);
+ return ret;
+ }
+ }
+ else
+ /* No QCU/DCU on AR5210, just set tx
+ * retry limits. We set IFS parameters
+ * on ath5k_hw_set_ifs_intervals */
+ ath5k_hw_set_tx_retry_limits(ah, 0);
+
+ /* Set the turbo flag when operating on 40MHz */
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
+ AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);
+
+ /* If we didn't set IFS timings through
+ * ath5k_hw_set_coverage_class make sure
+ * we set them here */
+ if (!ah->ah_coverage_class) {
+ unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
+ ath5k_hw_set_ifs_intervals(ah, slot_time);
+ }
+
+ return 0;
+}
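
A minimal sketch of where the new entry point might be called from, assuming the ath5k headers; the hook name is hypothetical and the real ordering lives in ath5k_hw_reset():

/* Hypothetical post-reset hook. ath5k_hw_init_queues() already falls
 * back to default IFS intervals when no coverage class is set. */
static int hypothetical_restart_queues(struct ath5k_hw *ah)
{
	int ret = ath5k_hw_init_queues(ah);

	if (ret)
		ATH5K_ERR(ah->ah_sc, "queue init failed\n");
	return ret;
}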
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index ca79ecd832fd..e1c9abd8c879 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -686,16 +686,15 @@
/*
* DCU retry limit registers
+ * none of these fields allows a zero value
*/
#define AR5K_DCU_RETRY_LMT_BASE 0x1080 /* Register Address -Queue0 DCU_RETRY_LMT */
-#define AR5K_DCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */
-#define AR5K_DCU_RETRY_LMT_SH_RETRY_S 0
-#define AR5K_DCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry limit mask */
-#define AR5K_DCU_RETRY_LMT_LG_RETRY_S 4
-#define AR5K_DCU_RETRY_LMT_SSH_RETRY 0x00003f00 /* Station short retry limit mask (?) */
-#define AR5K_DCU_RETRY_LMT_SSH_RETRY_S 8
-#define AR5K_DCU_RETRY_LMT_SLG_RETRY 0x000fc000 /* Station long retry limit mask (?) */
-#define AR5K_DCU_RETRY_LMT_SLG_RETRY_S 14
+#define AR5K_DCU_RETRY_LMT_RTS 0x0000000f /* RTS failure limit. Transmission fails if no CTS is received for this number of times */
+#define AR5K_DCU_RETRY_LMT_RTS_S 0
+#define AR5K_DCU_RETRY_LMT_STA_RTS 0x00003f00 /* STA RTS failure limit. If exceeded CW reset */
+#define AR5K_DCU_RETRY_LMT_STA_RTS_S 8
+#define AR5K_DCU_RETRY_LMT_STA_DATA 0x000fc000 /* STA data failure limit. If exceeded CW reset. */
+#define AR5K_DCU_RETRY_LMT_STA_DATA_S 14
#define AR5K_QUEUE_DFS_RETRY_LIMIT(_q) AR5K_QUEUE_REG(AR5K_DCU_RETRY_LMT_BASE, _q)
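
Since these fields are only ever written through the driver's shift-and-mask helpers, a standalone sketch of the packing may help; the SM() macro below mimics AR5K_REG_SM() (shift into place, then mask) and the retry value is illustrative:

#include <stdio.h>

#define DCU_RETRY_LMT_RTS		0x0000000f
#define DCU_RETRY_LMT_RTS_S		0
#define DCU_RETRY_LMT_STA_RTS		0x00003f00
#define DCU_RETRY_LMT_STA_RTS_S		8
#define DCU_RETRY_LMT_STA_DATA		0x000fc000
#define DCU_RETRY_LMT_STA_DATA_S	14

/* Shift a value into its field and mask it off */
#define SM(v, f)	(((v) << f##_S) & (f))

int main(void)
{
	unsigned int retry_long = 4;

	printf("DCU_RETRY_LMT = 0x%08x\n",
	       SM(retry_long, DCU_RETRY_LMT_RTS) |
	       SM(retry_long, DCU_RETRY_LMT_STA_RTS) |
	       SM(retry_long, DCU_RETRY_LMT_STA_DATA));
	return 0;
}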
/*
@@ -787,6 +786,7 @@
#define AR5K_DCU_GBL_IFS_MISC_LFSR_SLICE 0x00000007 /* LFSR Slice Select */
#define AR5K_DCU_GBL_IFS_MISC_TURBO_MODE 0x00000008 /* Turbo mode */
#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC 0x000003f0 /* SIFS Duration mask */
+#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC_S 4
#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR 0x000ffc00 /* USEC Duration mask */
#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR_S 10
#define AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY 0x00300000 /* DCU Arbiter delay mask */
@@ -1063,7 +1063,7 @@
/*
* EEPROM command register
*/
-#define AR5K_EEPROM_CMD 0x6008 /* Register Addres */
+#define AR5K_EEPROM_CMD 0x6008 /* Register Address */
#define AR5K_EEPROM_CMD_READ 0x00000001 /* EEPROM read */
#define AR5K_EEPROM_CMD_WRITE 0x00000002 /* EEPROM write */
#define AR5K_EEPROM_CMD_RESET 0x00000004 /* EEPROM reset */
@@ -1083,7 +1083,7 @@
/*
* EEPROM config register
*/
-#define AR5K_EEPROM_CFG 0x6010 /* Register Addres */
+#define AR5K_EEPROM_CFG 0x6010 /* Register Address */
#define AR5K_EEPROM_CFG_SIZE 0x00000003 /* Size determination override */
#define AR5K_EEPROM_CFG_SIZE_AUTO 0
#define AR5K_EEPROM_CFG_SIZE_4KBIT 1
@@ -1125,7 +1125,7 @@
* Second station id register (Upper 16 bits of MAC address + PCU settings)
*/
#define AR5K_STA_ID1 0x8004 /* Register Address */
-#define AR5K_STA_ID1_ADDR_U16 0x0000ffff /* Upper 16 bits of MAC addres */
+#define AR5K_STA_ID1_ADDR_U16 0x0000ffff /* Upper 16 bits of MAC address */
#define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */
#define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */
#define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting */
@@ -1311,7 +1311,7 @@
#define AR5K_IFS1_EIFS 0x03fff000
#define AR5K_IFS1_EIFS_S 12
#define AR5K_IFS1_CS_EN 0x04000000
-
+#define AR5K_IFS1_CS_EN_S 26
/*
* CFP duration register
@@ -2058,6 +2058,7 @@
#define AR5K_PHY_SCAL 0x9878
#define AR5K_PHY_SCAL_32MHZ 0x0000000e
+#define AR5K_PHY_SCAL_32MHZ_5311 0x00000008
#define AR5K_PHY_SCAL_32MHZ_2417 0x0000000a
#define AR5K_PHY_SCAL_32MHZ_HB63 0x00000032
@@ -2244,6 +2245,8 @@
#define AR5K_PHY_FRAME_CTL (ah->ah_version == AR5K_AR5210 ? \
AR5K_PHY_FRAME_CTL_5210 : AR5K_PHY_FRAME_CTL_5211)
/*---[5111+]---*/
+#define AR5K_PHY_FRAME_CTL_WIN_LEN 0x00000003 /* Force window length (?) */
+#define AR5K_PHY_FRAME_CTL_WIN_LEN_S 0
#define AR5K_PHY_FRAME_CTL_TX_CLIP 0x00000038 /* Mask for tx clip (?) */
#define AR5K_PHY_FRAME_CTL_TX_CLIP_S 3
#define AR5K_PHY_FRAME_CTL_PREP_CHINFO 0x00010000 /* Prepend chan info */
@@ -2558,3 +2561,28 @@
*/
#define AR5K_PHY_PDADC_TXPOWER_BASE 0xa280
#define AR5K_PHY_PDADC_TXPOWER(_n) (AR5K_PHY_PDADC_TXPOWER_BASE + ((_n) << 2))
+
+/*
+ * Platform registers for WiSoC
+ */
+#define AR5K_AR5312_RESET 0xbc003020
+#define AR5K_AR5312_RESET_BB0_COLD 0x00000004
+#define AR5K_AR5312_RESET_BB1_COLD 0x00000200
+#define AR5K_AR5312_RESET_WMAC0 0x00002000
+#define AR5K_AR5312_RESET_BB0_WARM 0x00004000
+#define AR5K_AR5312_RESET_WMAC1 0x00020000
+#define AR5K_AR5312_RESET_BB1_WARM 0x00040000
+
+#define AR5K_AR5312_ENABLE 0xbc003080
+#define AR5K_AR5312_ENABLE_WLAN0 0x00000001
+#define AR5K_AR5312_ENABLE_WLAN1 0x00000008
+
+#define AR5K_AR2315_RESET 0xb1000004
+#define AR5K_AR2315_RESET_WMAC 0x00000001
+#define AR5K_AR2315_RESET_BB_WARM 0x00000002
+
+#define AR5K_AR2315_AHB_ARB_CTL 0xb1000008
+#define AR5K_AR2315_AHB_ARB_CTL_WLAN 0x00000002
+
+#define AR5K_AR2315_BYTESWAP 0xb100000c
+#define AR5K_AR2315_BYTESWAP_WMAC 0x00000002
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 5b179d01f97d..84206898f77d 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -27,11 +27,17 @@
#include <linux/pci.h> /* To determine if a card is pci-e */
#include <linux/log2.h>
+#include <linux/platform_device.h>
#include "ath5k.h"
#include "reg.h"
#include "base.h"
#include "debug.h"
+
+/******************\
+* Helper functions *
+\******************/
+
/*
* Check if a register write has been completed
*/
@@ -53,146 +59,267 @@ int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
return (i <= 0) ? -EAGAIN : 0;
}
+
+/*************************\
+* Clock related functions *
+\*************************/
+
/**
- * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
+ * ath5k_hw_htoclock - Translate usec to hw clock units
*
- * @ah: the &struct ath5k_hw
- * @channel: the currently set channel upon reset
- *
- * Write the delta slope coefficient (used on pilot tracking ?) for OFDM
- * operation on the AR5212 upon reset. This is a helper for ath5k_hw_reset().
+ * @ah: The &struct ath5k_hw
+ * @usec: value in microseconds
+ */
+unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ return usec * common->clockrate;
+}
+
+/**
+ * ath5k_hw_clocktoh - Translate hw clock units to usec
+ * @ah: The &struct ath5k_hw
+ * @clock: value in hw clock units
+ */
+unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+{
+ struct ath_common *common = ath5k_hw_common(ah);
+ return clock / common->clockrate;
+}
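
A standalone sketch of the same conversion, assuming the 44 MHz 802.11g core clock that ath5k_hw_init_core_clock() below selects by default (clockrate here stands in for common->clockrate):

#include <stdio.h>

/* Mirrors the usec <-> hw clock unit conversion: clock units are
 * just microseconds times the core clock rate in MHz */
static unsigned int htoclock(unsigned int usec, unsigned int clockrate)
{
	return usec * clockrate;
}

static unsigned int clocktoh(unsigned int clock, unsigned int clockrate)
{
	return clock / clockrate;
}

int main(void)
{
	unsigned int clockrate = 44;	/* 802.11g, default bwmode */

	printf("9 usec slot = %u clock units\n", htoclock(9, clockrate));
	printf("396 clock units = %u usec\n", clocktoh(396, clockrate));
	return 0;
}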
+
+/**
+ * ath5k_hw_init_core_clock - Initialize core clock
*
- * Since delta slope is floating point we split it on its exponent and
- * mantissa and provide these values on hw.
+ * @ah: The &struct ath5k_hw
*
- * For more infos i think this patent is related
- * http://www.freepatentsonline.com/7184495.html
+ * Initialize core clock parameters (usec, usec32, latencies etc).
*/
-static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
- struct ieee80211_channel *channel)
+static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
{
- /* Get exponent and mantissa and set it */
- u32 coef_scaled, coef_exp, coef_man,
- ds_coef_exp, ds_coef_man, clock;
-
- BUG_ON(!(ah->ah_version == AR5K_AR5212) ||
- !(channel->hw_value & CHANNEL_OFDM));
-
- /* Get coefficient
- * ALGO: coef = (5 * clock / carrier_freq) / 2
- * we scale coef by shifting clock value by 24 for
- * better precision since we use integers */
- /* TODO: Half/quarter rate */
- clock = (channel->hw_value & CHANNEL_TURBO) ? 80 : 40;
- coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
-
- /* Get exponent
- * ALGO: coef_exp = 14 - highest set bit position */
- coef_exp = ilog2(coef_scaled);
-
- /* Doesn't make sense if it's zero*/
- if (!coef_scaled || !coef_exp)
- return -EINVAL;
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ struct ath_common *common = ath5k_hw_common(ah);
+ u32 usec_reg, txlat, rxlat, usec, clock, sclock, txf2txs;
+
+ /*
+ * Set core clock frequency
+ */
+ if (channel->hw_value & CHANNEL_5GHZ)
+ clock = 40; /* 802.11a */
+ else if (channel->hw_value & CHANNEL_CCK)
+ clock = 22; /* 802.11b */
+ else
+ clock = 44; /* 802.11g */
+
+ /* Use clock multiplier for non-default
+ * bwmode */
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_40MHZ:
+ clock *= 2;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ clock /= 2;
+ break;
+ case AR5K_BWMODE_5MHZ:
+ clock /= 4;
+ break;
+ default:
+ break;
+ }
- /* Note: we've shifted coef_scaled by 24 */
- coef_exp = 14 - (coef_exp - 24);
+ common->clockrate = clock;
+ /*
+ * Set USEC parameters
+ */
+ /* Set USEC counter on PCU*/
+ usec = clock - 1;
+ usec = AR5K_REG_SM(usec, AR5K_USEC_1);
- /* Get mantissa (significant digits)
- * ALGO: coef_mant = floor(coef_scaled* 2^coef_exp+0.5) */
- coef_man = coef_scaled +
- (1 << (24 - coef_exp - 1));
+ /* Set usec duration on DCU */
+ if (ah->ah_version != AR5K_AR5210)
+ AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
+ AR5K_DCU_GBL_IFS_MISC_USEC_DUR,
+ clock);
- /* Calculate delta slope coefficient exponent
- * and mantissa (remove scaling) and set them on hw */
- ds_coef_man = coef_man >> (24 - coef_exp);
- ds_coef_exp = coef_exp - 16;
+ /* Set 32MHz USEC counter */
+ if ((ah->ah_radio == AR5K_RF5112) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_radio == AR5K_RF2316) ||
+ (ah->ah_radio == AR5K_RF2317))
+ /* Remain on 40MHz clock ? */
+ sclock = 40 - 1;
+ else
+ sclock = 32 - 1;
+ sclock = AR5K_REG_SM(sclock, AR5K_USEC_32);
- AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
- AR5K_PHY_TIMING_3_DSC_MAN, ds_coef_man);
- AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
- AR5K_PHY_TIMING_3_DSC_EXP, ds_coef_exp);
+ /*
+ * Set tx/rx latencies
+ */
+ usec_reg = ath5k_hw_reg_read(ah, AR5K_USEC_5211);
+ txlat = AR5K_REG_MS(usec_reg, AR5K_USEC_TX_LATENCY_5211);
+ rxlat = AR5K_REG_MS(usec_reg, AR5K_USEC_RX_LATENCY_5211);
- return 0;
-}
+ /*
+ * 5210 initvals don't include usec settings
+ * so we need to use magic values here for
+ * tx/rx latencies
+ */
+ if (ah->ah_version == AR5K_AR5210) {
+ /* same for turbo */
+ txlat = AR5K_INIT_TX_LATENCY_5210;
+ rxlat = AR5K_INIT_RX_LATENCY_5210;
+ }
+ if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
+ /* 5311 has different tx/rx latency masks
+ * from 5211; since we treat 5311 the same
+ * as 5211 when setting initvals, shift
+ * values here to their proper locations
+ *
+ * Note: Initvals indicate tx/rx latencies
+ * are the same for turbo mode */
+ txlat = AR5K_REG_SM(txlat, AR5K_USEC_TX_LATENCY_5210);
+ rxlat = AR5K_REG_SM(rxlat, AR5K_USEC_RX_LATENCY_5210);
+ } else
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_10MHZ:
+ txlat = AR5K_REG_SM(txlat * 2,
+ AR5K_USEC_TX_LATENCY_5211);
+ rxlat = AR5K_REG_SM(AR5K_INIT_RX_LAT_MAX,
+ AR5K_USEC_RX_LATENCY_5211);
+ txf2txs = AR5K_INIT_TXF2TXD_START_DELAY_10MHZ;
+ break;
+ case AR5K_BWMODE_5MHZ:
+ txlat = AR5K_REG_SM(txlat * 4,
+ AR5K_USEC_TX_LATENCY_5211);
+ rxlat = AR5K_REG_SM(AR5K_INIT_RX_LAT_MAX,
+ AR5K_USEC_RX_LATENCY_5211);
+ txf2txs = AR5K_INIT_TXF2TXD_START_DELAY_5MHZ;
+ break;
+ case AR5K_BWMODE_40MHZ:
+ txlat = AR5K_INIT_TX_LAT_MIN;
+ rxlat = AR5K_REG_SM(rxlat / 2,
+ AR5K_USEC_RX_LATENCY_5211);
+ txf2txs = AR5K_INIT_TXF2TXD_START_DEFAULT;
+ break;
+ default:
+ break;
+ }
-/*
- * index into rates for control rates, we can set it up like this because
- * this is only used for AR5212 and we know it supports G mode
- */
-static const unsigned int control_rates[] =
- { 0, 1, 1, 1, 4, 4, 6, 6, 8, 8, 8, 8 };
+ usec_reg = (usec | sclock | txlat | rxlat);
+ ath5k_hw_reg_write(ah, usec_reg, AR5K_USEC);
-/**
- * ath5k_hw_write_rate_duration - fill rate code to duration table
- *
- * @ah: the &struct ath5k_hw
- * @mode: one of enum ath5k_driver_mode
- *
- * Write the rate code to duration table upon hw reset. This is a helper for
- * ath5k_hw_reset(). It seems all this is doing is setting an ACK timeout on
- * the hardware, based on current mode, for each rate. The rates which are
- * capable of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have
- * different rate code so we write their value twice (one for long preample
- * and one for short).
+ /* On 5112 set tx frame to tx data start delay */
+ if (ah->ah_radio == AR5K_RF5112) {
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RF_CTL2,
+ AR5K_PHY_RF_CTL2_TXF2TXD_START,
+ txf2txs);
+ }
+}
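
A quick standalone illustration of the clock selection and bwmode scaling above; the printed values simply follow the 40/22/44 MHz base clocks and the x2, /2, /4 factors applied for 40/10/5 MHz channels:

#include <stdio.h>

int main(void)
{
	unsigned int base_a = 40, base_b = 22, base_g = 44; /* MHz */

	printf("11a/11b/11g base clocks: %u/%u/%u MHz\n",
	       base_a, base_b, base_g);
	printf("11g on 40MHz (turbo):   %u MHz\n", base_g * 2);
	printf("11g on 10MHz (half):    %u MHz\n", base_g / 2);
	printf("11g on 5MHz (quarter):  %u MHz\n", base_g / 4);
	return 0;
}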
+
+/*
+ * If there is an external 32KHz crystal available, use it
+ * as ref. clock instead of 32/40MHz clock and baseband clocks
+ * to save power during sleep or restore normal 32/40MHz
+ * operation.
*
- * Note: Band doesn't matter here, if we set the values for OFDM it works
- * on both a and g modes. So all we have to do is set values for all g rates
- * that include all OFDM and CCK rates. If we operate in turbo or xr/half/
- * quarter rate mode, we need to use another set of bitrates (that's why we
- * need the mode parameter) but we don't handle these proprietary modes yet.
+ * XXX: When operating on 32KHz certain PHY registers (27 - 31,
+ * 123 - 127) require delay on access.
*/
-static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah,
- unsigned int mode)
+static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
{
- struct ath5k_softc *sc = ah->ah_sc;
- struct ieee80211_rate *rate;
- unsigned int i;
+ struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
+ u32 scal, spending;
+
+ /* Only set 32KHz settings if we have an external
+ * 32KHz crystal present */
+ if ((AR5K_EEPROM_HAS32KHZCRYSTAL(ee->ee_misc1) ||
+ AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(ee->ee_misc1)) &&
+ enable) {
- /* Write rate duration table */
- for (i = 0; i < sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates; i++) {
- u32 reg;
- u16 tx_time;
+ /* 1 usec/cycle */
+ AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, 1);
+ /* Set up tsf increment on each cycle */
+ AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 61);
- rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[control_rates[i]];
+ /* Set baseband sleep control registers
+ * and sleep control rate */
+ ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
+
+ if ((ah->ah_radio == AR5K_RF5112) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_radio == AR5K_RF2316) ||
+ (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
+ spending = 0x14;
+ else
+ spending = 0x18;
+ ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
- /* Set ACK timeout */
- reg = AR5K_RATE_DUR(rate->hw_value);
+ if ((ah->ah_radio == AR5K_RF5112) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) {
+ ath5k_hw_reg_write(ah, 0x26, AR5K_PHY_SLMT);
+ ath5k_hw_reg_write(ah, 0x0d, AR5K_PHY_SCAL);
+ ath5k_hw_reg_write(ah, 0x07, AR5K_PHY_SCLOCK);
+ ath5k_hw_reg_write(ah, 0x3f, AR5K_PHY_SDELAY);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
+ AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x02);
+ } else {
+ ath5k_hw_reg_write(ah, 0x0a, AR5K_PHY_SLMT);
+ ath5k_hw_reg_write(ah, 0x0c, AR5K_PHY_SCAL);
+ ath5k_hw_reg_write(ah, 0x03, AR5K_PHY_SCLOCK);
+ ath5k_hw_reg_write(ah, 0x20, AR5K_PHY_SDELAY);
+ AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
+ AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x03);
+ }
- /* An ACK frame consists of 10 bytes. If you add the FCS,
- * which ieee80211_generic_frame_duration() adds,
- * its 14 bytes. Note we use the control rate and not the
- * actual rate for this rate. See mac80211 tx.c
- * ieee80211_duration() for a brief description of
- * what rate we should choose to TX ACKs. */
- tx_time = le16_to_cpu(ieee80211_generic_frame_duration(sc->hw,
- NULL, 10, rate));
+ /* Enable sleep clock operation */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG,
+ AR5K_PCICFG_SLEEP_CLOCK_EN);
- ath5k_hw_reg_write(ah, tx_time, reg);
+ } else {
- if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
- continue;
+ /* Disable sleep clock operation and
+ * restore default parameters */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
+ AR5K_PCICFG_SLEEP_CLOCK_EN);
- /*
- * We're not distinguishing short preamble here,
- * This is true, all we'll get is a longer value here
- * which is not necessarilly bad. We could use
- * export ieee80211_frame_duration() but that needs to be
- * fixed first to be properly used by mac802111 drivers:
- *
- * - remove erp stuff and let the routine figure ofdm
- * erp rates
- * - remove passing argument ieee80211_local as
- * drivers don't have access to it
- * - move drivers using ieee80211_generic_frame_duration()
- * to this
- */
- ath5k_hw_reg_write(ah, tx_time,
- reg + (AR5K_SET_SHORT_PREAMBLE << 2));
+ AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
+ AR5K_PCICFG_SLEEP_CLOCK_RATE, 0);
+
+ /* Set DAC/ADC delays */
+ ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
+ ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
+
+ if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))
+ scal = AR5K_PHY_SCAL_32MHZ_2417;
+ else if (ee->ee_is_hb63)
+ scal = AR5K_PHY_SCAL_32MHZ_HB63;
+ else
+ scal = AR5K_PHY_SCAL_32MHZ;
+ ath5k_hw_reg_write(ah, scal, AR5K_PHY_SCAL);
+
+ ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
+ ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
+
+ if ((ah->ah_radio == AR5K_RF5112) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_radio == AR5K_RF2316) ||
+ (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
+ spending = 0x14;
+ else
+ spending = 0x18;
+ ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
+
+ /* Set up tsf increment on each cycle */
+ AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1);
}
}
+
+/*********************\
+* Reset/Sleep control *
+\*********************/
+
/*
* Reset chipset
*/
@@ -236,6 +363,64 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
}
/*
+ * Reset AHB chipset
+ * AR5K_RESET_CTL_PCU flag resets WMAC
+ * AR5K_RESET_CTL_BASEBAND flag resets WBB
+ */
+static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
+{
+ u32 mask = flags ? flags : ~0U;
+ volatile u32 *reg;
+ u32 regval;
+ u32 val = 0;
+
+ /* ah->ah_mac_srev is not available at this point yet */
+ if (ah->ah_sc->devid >= AR5K_SREV_AR2315_R6) {
+ reg = (u32 *) AR5K_AR2315_RESET;
+ if (mask & AR5K_RESET_CTL_PCU)
+ val |= AR5K_AR2315_RESET_WMAC;
+ if (mask & AR5K_RESET_CTL_BASEBAND)
+ val |= AR5K_AR2315_RESET_BB_WARM;
+ } else {
+ reg = (u32 *) AR5K_AR5312_RESET;
+ if (to_platform_device(ah->ah_sc->dev)->id == 0) {
+ if (mask & AR5K_RESET_CTL_PCU)
+ val |= AR5K_AR5312_RESET_WMAC0;
+ if (mask & AR5K_RESET_CTL_BASEBAND)
+ val |= AR5K_AR5312_RESET_BB0_COLD |
+ AR5K_AR5312_RESET_BB0_WARM;
+ } else {
+ if (mask & AR5K_RESET_CTL_PCU)
+ val |= AR5K_AR5312_RESET_WMAC1;
+ if (mask & AR5K_RESET_CTL_BASEBAND)
+ val |= AR5K_AR5312_RESET_BB1_COLD |
+ AR5K_AR5312_RESET_BB1_WARM;
+ }
+ }
+
+ /* Put BB/MAC into reset */
+ regval = __raw_readl(reg);
+ __raw_writel(regval | val, reg);
+ regval = __raw_readl(reg);
+ udelay(100);
+
+ /* Bring BB/MAC out of reset */
+ __raw_writel(regval & ~val, reg);
+ regval = __raw_readl(reg);
+
+ /*
+ * Reset configuration register (for hw byte-swap). Note that this
+ * is only set for big endian. We do the necessary magic in
+ * AR5K_INIT_CFG.
+ */
+ if ((flags & AR5K_RESET_CTL_PCU) == 0)
+ ath5k_hw_reg_write(ah, AR5K_INIT_CFG, AR5K_CFG);
+
+ return 0;
+}
+
+
+/*
* Sleep control
*/
static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
@@ -334,6 +519,9 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
u32 bus_flags;
int ret;
+ if (ath5k_get_bus_type(ah) == ATH_AHB)
+ return 0;
+
/* Make sure device is awake */
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
@@ -349,7 +537,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
* we ingore that flag for PCI-E cards. On PCI cards
* this flag gets cleared after 64 PCI clocks.
*/
- bus_flags = (pdev->is_pcie) ? 0 : AR5K_RESET_CTL_PCI;
+ bus_flags = (pdev && pci_is_pcie(pdev)) ? 0 : AR5K_RESET_CTL_PCI;
if (ah->ah_version == AR5K_AR5210) {
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
@@ -378,7 +566,6 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
/*
* Bring up MAC + PHY Chips and program PLL
- * TODO: Half/Quarter rate support
*/
int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
{
@@ -390,11 +577,13 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
mode = 0;
clock = 0;
- /* Wakeup the device */
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
- if (ret) {
- ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
- return ret;
+ if ((ath5k_get_bus_type(ah) != ATH_AHB) || !initial) {
+ /* Wakeup the device */
+ ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ if (ret) {
+ ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
+ return ret;
+ }
}
/*
@@ -405,7 +594,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
* we ingore that flag for PCI-E cards. On PCI cards
* this flag gets cleared after 64 PCI clocks.
*/
- bus_flags = (pdev->is_pcie) ? 0 : AR5K_RESET_CTL_PCI;
+ bus_flags = (pdev && pci_is_pcie(pdev)) ? 0 : AR5K_RESET_CTL_PCI;
if (ah->ah_version == AR5K_AR5210) {
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
@@ -413,8 +602,12 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
mdelay(2);
} else {
- ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
- AR5K_RESET_CTL_BASEBAND | bus_flags);
+ if (ath5k_get_bus_type(ah) == ATH_AHB)
+ ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
+ AR5K_RESET_CTL_BASEBAND);
+ else
+ ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
+ AR5K_RESET_CTL_BASEBAND | bus_flags);
}
if (ret) {
@@ -429,9 +622,15 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
return ret;
}
- /* ...clear reset control register and pull device out of
- * warm reset */
- if (ath5k_hw_nic_reset(ah, 0)) {
+ /* ...reset configuration register on WiSoC ...
+ * ...clear reset control register and pull device out of
+ * warm reset on others */
+ if (ath5k_get_bus_type(ah) == ATH_AHB)
+ ret = ath5k_hw_wisoc_reset(ah, 0);
+ else
+ ret = ath5k_hw_nic_reset(ah, 0);
+
+ if (ret) {
ATH5K_ERR(ah->ah_sc, "failed to warm reset the MAC Chip\n");
return -EIO;
}
@@ -466,7 +665,8 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
* CCK headers) operation. We need to test
* this, 5211 might support ofdm-only g after
* all, there are also initial register values
- * in the code for g mode (see initvals.c). */
+ * in the code for g mode (see initvals.c).
+ */
if (ah->ah_version == AR5K_AR5211)
mode |= AR5K_PHY_MODE_MOD_OFDM;
else
@@ -479,6 +679,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
} else if (flags & CHANNEL_5GHZ) {
mode |= AR5K_PHY_MODE_FREQ_5GHZ;
+ /* Different PLL setting for 5413 */
if (ah->ah_radio == AR5K_RF5413)
clock = AR5K_PHY_PLL_40MHZ_5413;
else
@@ -496,12 +697,29 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
return -EINVAL;
}
- if (flags & CHANNEL_TURBO)
- turbo = AR5K_PHY_TURBO_MODE | AR5K_PHY_TURBO_SHORT;
+ /*XXX: Can bwmode be used with dynamic mode ?
+ * (I don't think it supports 44MHz) */
+ /* On 2425 initvals TURBO_SHORT is not present */
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) {
+ turbo = AR5K_PHY_TURBO_MODE |
+ (ah->ah_radio == AR5K_RF2425) ? 0 :
+ AR5K_PHY_TURBO_SHORT;
+ } else if (ah->ah_bwmode != AR5K_BWMODE_DEFAULT) {
+ if (ah->ah_radio == AR5K_RF5413) {
+ mode |= (ah->ah_bwmode == AR5K_BWMODE_10MHZ) ?
+ AR5K_PHY_MODE_HALF_RATE :
+ AR5K_PHY_MODE_QUARTER_RATE;
+ } else if (ah->ah_version == AR5K_AR5212) {
+ clock |= (ah->ah_bwmode == AR5K_BWMODE_10MHZ) ?
+ AR5K_PHY_PLL_HALF_RATE :
+ AR5K_PHY_PLL_QUARTER_RATE;
+ }
+ }
+
} else { /* Reset the device */
/* ...enable Atheros turbo mode if requested */
- if (flags & CHANNEL_TURBO)
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
ath5k_hw_reg_write(ah, AR5K_PHY_TURBO_MODE,
AR5K_PHY_TURBO);
}
@@ -522,107 +740,10 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
return 0;
}
-/*
- * If there is an external 32KHz crystal available, use it
- * as ref. clock instead of 32/40MHz clock and baseband clocks
- * to save power during sleep or restore normal 32/40MHz
- * operation.
- *
- * XXX: When operating on 32KHz certain PHY registers (27 - 31,
- * 123 - 127) require delay on access.
- */
-static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
-{
- struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
- u32 scal, spending, usec32;
- /* Only set 32KHz settings if we have an external
- * 32KHz crystal present */
- if ((AR5K_EEPROM_HAS32KHZCRYSTAL(ee->ee_misc1) ||
- AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(ee->ee_misc1)) &&
- enable) {
-
- /* 1 usec/cycle */
- AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, 1);
- /* Set up tsf increment on each cycle */
- AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 61);
-
- /* Set baseband sleep control registers
- * and sleep control rate */
- ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
-
- if ((ah->ah_radio == AR5K_RF5112) ||
- (ah->ah_radio == AR5K_RF5413) ||
- (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
- spending = 0x14;
- else
- spending = 0x18;
- ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
-
- if ((ah->ah_radio == AR5K_RF5112) ||
- (ah->ah_radio == AR5K_RF5413) ||
- (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) {
- ath5k_hw_reg_write(ah, 0x26, AR5K_PHY_SLMT);
- ath5k_hw_reg_write(ah, 0x0d, AR5K_PHY_SCAL);
- ath5k_hw_reg_write(ah, 0x07, AR5K_PHY_SCLOCK);
- ath5k_hw_reg_write(ah, 0x3f, AR5K_PHY_SDELAY);
- AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
- AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x02);
- } else {
- ath5k_hw_reg_write(ah, 0x0a, AR5K_PHY_SLMT);
- ath5k_hw_reg_write(ah, 0x0c, AR5K_PHY_SCAL);
- ath5k_hw_reg_write(ah, 0x03, AR5K_PHY_SCLOCK);
- ath5k_hw_reg_write(ah, 0x20, AR5K_PHY_SDELAY);
- AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
- AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x03);
- }
-
- /* Enable sleep clock operation */
- AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG,
- AR5K_PCICFG_SLEEP_CLOCK_EN);
-
- } else {
-
- /* Disable sleep clock operation and
- * restore default parameters */
- AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
- AR5K_PCICFG_SLEEP_CLOCK_EN);
-
- AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
- AR5K_PCICFG_SLEEP_CLOCK_RATE, 0);
-
- ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
- ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
-
- if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))
- scal = AR5K_PHY_SCAL_32MHZ_2417;
- else if (ee->ee_is_hb63)
- scal = AR5K_PHY_SCAL_32MHZ_HB63;
- else
- scal = AR5K_PHY_SCAL_32MHZ;
- ath5k_hw_reg_write(ah, scal, AR5K_PHY_SCAL);
-
- ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
- ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
-
- if ((ah->ah_radio == AR5K_RF5112) ||
- (ah->ah_radio == AR5K_RF5413) ||
- (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
- spending = 0x14;
- else
- spending = 0x18;
- ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
-
- if ((ah->ah_radio == AR5K_RF5112) ||
- (ah->ah_radio == AR5K_RF5413))
- usec32 = 39;
- else
- usec32 = 31;
- AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, usec32);
-
- AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1);
- }
-}
+/**************************************\
+* Post-initvals register modifications *
+\**************************************/
/* TODO: Half/Quarter rate */
static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
@@ -663,22 +784,10 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
AR5K_TXCFG_DCU_DBL_BUF_DIS);
- /* Set DAC/ADC delays */
- if (ah->ah_version == AR5K_AR5212) {
- u32 scal;
- struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
- if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))
- scal = AR5K_PHY_SCAL_32MHZ_2417;
- else if (ee->ee_is_hb63)
- scal = AR5K_PHY_SCAL_32MHZ_HB63;
- else
- scal = AR5K_PHY_SCAL_32MHZ;
- ath5k_hw_reg_write(ah, scal, AR5K_PHY_SCAL);
- }
-
/* Set fast ADC */
if ((ah->ah_radio == AR5K_RF5413) ||
- (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) {
+ (ah->ah_radio == AR5K_RF2317) ||
+ (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) {
u32 fast_adc = true;
if (channel->center_freq == 2462 ||
@@ -706,33 +815,68 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
}
if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
- u32 usec_reg;
- /* 5311 has different tx/rx latency masks
- * from 5211, since we deal 5311 the same
- * as 5211 when setting initvals, shift
- * values here to their proper locations */
- usec_reg = ath5k_hw_reg_read(ah, AR5K_USEC_5211);
- ath5k_hw_reg_write(ah, usec_reg & (AR5K_USEC_1 |
- AR5K_USEC_32 |
- AR5K_USEC_TX_LATENCY_5211 |
- AR5K_REG_SM(29,
- AR5K_USEC_RX_LATENCY_5210)),
- AR5K_USEC_5211);
/* Clear QCU/DCU clock gating register */
ath5k_hw_reg_write(ah, 0, AR5K_QCUDCU_CLKGT);
/* Set DAC/ADC delays */
- ath5k_hw_reg_write(ah, 0x08, AR5K_PHY_SCAL);
+ ath5k_hw_reg_write(ah, AR5K_PHY_SCAL_32MHZ_5311,
+ AR5K_PHY_SCAL);
/* Enable PCU FIFO corruption ECO */
AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
AR5K_DIAG_SW_ECO_ENABLE);
}
+
+ if (ah->ah_bwmode) {
+ /* Increase PHY switch and AGC settling time
+ * on turbo mode (ath5k_hw_commit_eeprom_settings
+ * will override settling time if available) */
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) {
+
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
+ AR5K_PHY_SETTLING_AGC,
+ AR5K_AGC_SETTLING_TURBO);
+
+ /* XXX: Initvals indicate we only increase
+ * the switch time on AR5212; 5211 and 5210
+ * only change the AGC time (bug?) */
+ if (ah->ah_version == AR5K_AR5212)
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
+ AR5K_PHY_SETTLING_SWITCH,
+ AR5K_SWITCH_SETTLING_TURBO);
+
+ if (ah->ah_version == AR5K_AR5210) {
+ /* Set Frame Control Register */
+ ath5k_hw_reg_write(ah,
+ (AR5K_PHY_FRAME_CTL_INI |
+ AR5K_PHY_TURBO_MODE |
+ AR5K_PHY_TURBO_SHORT | 0x2020),
+ AR5K_PHY_FRAME_CTL_5210);
+ }
+ /* On 5413 PHY force window length for half/quarter rate */
+ } else if ((ah->ah_mac_srev >= AR5K_SREV_AR5424) &&
+ (ah->ah_mac_srev <= AR5K_SREV_AR5414)) {
+ AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL_5211,
+ AR5K_PHY_FRAME_CTL_WIN_LEN,
+ 3);
+ }
+ } else if (ah->ah_version == AR5K_AR5210) {
+ /* Set Frame Control Register for normal operation */
+ ath5k_hw_reg_write(ah, (AR5K_PHY_FRAME_CTL_INI | 0x1020),
+ AR5K_PHY_FRAME_CTL_5210);
+ }
}
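Most of the tweaks above go through AR5K_REG_WRITE_BITS(), which is a read-modify-write of a single bitfield inside a register. The following minimal sketch shows that pattern under the usual mask-plus-shift field convention; the register is mocked with a plain variable, so none of this is the driver's actual accessor code.

/* Sketch of a read-modify-write bitfield update, assuming FIELD_MASK and
 * FIELD_SHIFT describe the field. */
#include <stdio.h>

#define FIELD_MASK  0x0000ff00u
#define FIELD_SHIFT 8

static unsigned int mock_reg = 0x12345678;

static void write_bits(unsigned int mask, unsigned int shift,
		       unsigned int val)
{
	unsigned int v = mock_reg;		/* read the register */
	v &= ~mask;				/* clear the field */
	v |= (val << shift) & mask;		/* insert the new value */
	mock_reg = v;				/* write it back */
}

int main(void)
{
	write_bits(FIELD_MASK, FIELD_SHIFT, 0x28);	/* e.g. a settling time */
	printf("reg = 0x%08x\n", mock_reg);		/* prints 0x12342878 */
	return 0;
}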
static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
- struct ieee80211_channel *channel, u8 ee_mode)
+ struct ieee80211_channel *channel)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
s16 cck_ofdm_pwr_delta;
+ u8 ee_mode;
+
+ /* TODO: Add support for AR5210 EEPROM */
+ if (ah->ah_version == AR5K_AR5210)
+ return;
+
+ ee_mode = ath5k_eeprom_mode_from_channel(channel);
/* Adjust power delta for channel 14 */
if (channel->center_freq == 2484)
@@ -772,7 +916,7 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
AR5K_PHY_NFTHRES);
- if ((channel->hw_value & CHANNEL_TURBO) &&
+ if ((ah->ah_bwmode == AR5K_BWMODE_40MHZ) &&
(ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_0)) {
/* Switch settling time (Turbo) */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
@@ -870,143 +1014,172 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
ath5k_hw_reg_write(ah, 0, AR5K_PHY_HEAVY_CLIP_ENABLE);
}
-/*
- * Main reset function
- */
+
+/*********************\
+* Main reset function *
+\*********************/
+
int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
- struct ieee80211_channel *channel, bool change_channel)
+ struct ieee80211_channel *channel, bool fast, bool skip_pcu)
{
- struct ath_common *common = ath5k_hw_common(ah);
- u32 s_seq[10], s_led[3], staid1_flags, tsf_up, tsf_lo;
- u32 phy_tst1;
- u8 mode, freq, ee_mode;
+ u32 s_seq[10], s_led[3], tsf_up, tsf_lo;
+ u8 mode;
int i, ret;
- ee_mode = 0;
- staid1_flags = 0;
tsf_up = 0;
tsf_lo = 0;
- freq = 0;
mode = 0;
/*
- * Save some registers before a reset
+ * Sanity check for fast flag
+ * Fast channel change only available
+ * on AR2413/AR5413.
*/
- /*DCU/Antenna selection not available on 5210*/
- if (ah->ah_version != AR5K_AR5210) {
+ if (fast && (ah->ah_radio != AR5K_RF2413) &&
+ (ah->ah_radio != AR5K_RF5413))
+ fast = 0;
- switch (channel->hw_value & CHANNEL_MODES) {
- case CHANNEL_A:
- mode = AR5K_MODE_11A;
- freq = AR5K_INI_RFGAIN_5GHZ;
- ee_mode = AR5K_EEPROM_MODE_11A;
- break;
- case CHANNEL_G:
- mode = AR5K_MODE_11G;
- freq = AR5K_INI_RFGAIN_2GHZ;
- ee_mode = AR5K_EEPROM_MODE_11G;
- break;
- case CHANNEL_B:
- mode = AR5K_MODE_11B;
- freq = AR5K_INI_RFGAIN_2GHZ;
- ee_mode = AR5K_EEPROM_MODE_11B;
- break;
- case CHANNEL_T:
- mode = AR5K_MODE_11A_TURBO;
- freq = AR5K_INI_RFGAIN_5GHZ;
- ee_mode = AR5K_EEPROM_MODE_11A;
- break;
- case CHANNEL_TG:
- if (ah->ah_version == AR5K_AR5211) {
- ATH5K_ERR(ah->ah_sc,
- "TurboG mode not available on 5211");
- return -EINVAL;
- }
- mode = AR5K_MODE_11G_TURBO;
- freq = AR5K_INI_RFGAIN_2GHZ;
- ee_mode = AR5K_EEPROM_MODE_11G;
- break;
- case CHANNEL_XR:
- if (ah->ah_version == AR5K_AR5211) {
- ATH5K_ERR(ah->ah_sc,
- "XR mode not available on 5211");
- return -EINVAL;
- }
- mode = AR5K_MODE_XR;
- freq = AR5K_INI_RFGAIN_5GHZ;
- ee_mode = AR5K_EEPROM_MODE_11A;
- break;
- default:
+ /* Disable sleep clock operation
+ * to avoid register access delay on certain
+ * PHY registers */
+ if (ah->ah_version == AR5K_AR5212)
+ ath5k_hw_set_sleep_clock(ah, false);
+
+ /*
+ * Stop PCU
+ */
+ ath5k_hw_stop_rx_pcu(ah);
+
+ /*
+ * Stop DMA
+ *
+ * Note: If DMA didn't stop, continue
+ * anyway; only a reset will fix it.
+ */
+ ret = ath5k_hw_dma_stop(ah);
+
+ /* RF Bus grant won't work if we have pending
+ * frames */
+ if (ret && fast) {
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
+ "DMA didn't stop, falling back to normal reset\n");
+ fast = 0;
+ /* Non fatal, just continue with
+ * normal reset */
+ ret = 0;
+ }
+
+ switch (channel->hw_value & CHANNEL_MODES) {
+ case CHANNEL_A:
+ mode = AR5K_MODE_11A;
+ break;
+ case CHANNEL_G:
+
+ if (ah->ah_version <= AR5K_AR5211) {
ATH5K_ERR(ah->ah_sc,
- "invalid channel: %d\n", channel->center_freq);
+ "G mode not available on 5210/5211");
return -EINVAL;
}
- if (change_channel) {
- /*
- * Save frame sequence count
- * For revs. after Oahu, only save
- * seq num for DCU 0 (Global seq num)
- */
- if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
-
- for (i = 0; i < 10; i++)
- s_seq[i] = ath5k_hw_reg_read(ah,
- AR5K_QUEUE_DCU_SEQNUM(i));
+ mode = AR5K_MODE_11G;
+ break;
+ case CHANNEL_B:
- } else {
- s_seq[0] = ath5k_hw_reg_read(ah,
- AR5K_QUEUE_DCU_SEQNUM(0));
- }
+ if (ah->ah_version < AR5K_AR5211) {
+ ATH5K_ERR(ah->ah_sc,
+ "B mode not available on 5210");
+ return -EINVAL;
+ }
- /* TSF accelerates on AR5211 during reset
- * As a workaround save it here and restore
- * it later so that it's back in time after
- * reset. This way it'll get re-synced on the
- * next beacon without breaking ad-hoc.
- *
- * On AR5212 TSF is almost preserved across a
- * reset so it stays back in time anyway and
- * we don't have to save/restore it.
- *
- * XXX: Since this breaks power saving we have
- * to disable power saving until we receive the
- * next beacon, so we can resync beacon timers */
- if (ah->ah_version == AR5K_AR5211) {
- tsf_up = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
- tsf_lo = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
- }
+ mode = AR5K_MODE_11B;
+ break;
+ case CHANNEL_XR:
+ if (ah->ah_version == AR5K_AR5211) {
+ ATH5K_ERR(ah->ah_sc,
+ "XR mode not available on 5211");
+ return -EINVAL;
}
+ mode = AR5K_MODE_XR;
+ break;
+ default:
+ ATH5K_ERR(ah->ah_sc,
+ "invalid channel: %d\n", channel->center_freq);
+ return -EINVAL;
+ }
- if (ah->ah_version == AR5K_AR5212) {
- /* Restore normal 32/40MHz clock operation
- * to avoid register access delay on certain
- * PHY registers */
- ath5k_hw_set_sleep_clock(ah, false);
+ /*
+ * If driver requested fast channel change and DMA has stopped
+ * go on. If it fails continue with a normal reset.
+ */
+ if (fast) {
+ ret = ath5k_hw_phy_init(ah, channel, mode, true);
+ if (ret) {
+ ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
+ "fast chan change failed, falling back to normal reset\n");
+ /* Non fatal, can happen e.g.
+ * on mode change */
+ ret = 0;
+ } else
+ return 0;
+ }
- /* Since we are going to write rf buffer
- * check if we have any pending gain_F
- * optimization settings */
- if (change_channel && ah->ah_rf_banks != NULL)
- ath5k_hw_gainf_calibrate(ah);
+ /*
+ * Save some registers before a reset
+ */
+ if (ah->ah_version != AR5K_AR5210) {
+ /*
+ * Save frame sequence count
+ * For revs. after Oahu, only save
+ * seq num for DCU 0 (Global seq num)
+ */
+ if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
+
+ for (i = 0; i < 10; i++)
+ s_seq[i] = ath5k_hw_reg_read(ah,
+ AR5K_QUEUE_DCU_SEQNUM(i));
+
+ } else {
+ s_seq[0] = ath5k_hw_reg_read(ah,
+ AR5K_QUEUE_DCU_SEQNUM(0));
+ }
+
+ /* TSF accelerates on AR5211 during reset
+ * As a workaround save it here and restore
+ * it later so that it's back in time after
+ * reset. This way it'll get re-synced on the
+ * next beacon without breaking ad-hoc.
+ *
+ * On AR5212 TSF is almost preserved across a
+ * reset so it stays back in time anyway and
+ * we don't have to save/restore it.
+ *
+ * XXX: Since this breaks power saving we have
+ * to disable power saving until we receive the
+ * next beacon, so we can resync beacon timers */
+ if (ah->ah_version == AR5K_AR5211) {
+ tsf_up = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
+ tsf_lo = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
}
}
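The tsf_up/tsf_lo pair saved above are the two 32-bit halves of the 64-bit TSF (assuming AR5K_TSF_U32 and AR5K_TSF_L32 hold the upper and lower words, as the names suggest). Recombining them is just a shift and an OR, as in this small stand-alone sketch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tsf_up = 0x00000012, tsf_lo = 0x89abcdef;	/* example reads */
	uint64_t tsf = ((uint64_t)tsf_up << 32) | tsf_lo;	/* full TSF */

	printf("tsf = 0x%016llx\n", (unsigned long long)tsf);
	return 0;
}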
+
/*GPIOs*/
s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) &
AR5K_PCICFG_LEDSTATE;
s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR);
s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO);
- /* AR5K_STA_ID1 flags, only preserve antenna
- * settings and ack/cts rate mode */
- staid1_flags = ath5k_hw_reg_read(ah, AR5K_STA_ID1) &
- (AR5K_STA_ID1_DEFAULT_ANTENNA |
- AR5K_STA_ID1_DESC_ANTENNA |
- AR5K_STA_ID1_RTS_DEF_ANTENNA |
- AR5K_STA_ID1_ACKCTS_6MB |
- AR5K_STA_ID1_BASE_RATE_11B |
- AR5K_STA_ID1_SELFGEN_DEF_ANT);
+
+ /*
+ * Since we are going to write rf buffer
+ * check if we have any pending gain_F
+ * optimization settings
+ */
+ if (ah->ah_version == AR5K_AR5212 &&
+ (ah->ah_radio <= AR5K_RF5112)) {
+ if (!fast && ah->ah_rf_banks != NULL)
+ ath5k_hw_gainf_calibrate(ah);
+ }
/* Wakeup the device */
ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false);
@@ -1021,121 +1194,42 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
AR5K_PHY(0));
/* Write initial settings */
- ret = ath5k_hw_write_initvals(ah, mode, change_channel);
+ ret = ath5k_hw_write_initvals(ah, mode, skip_pcu);
if (ret)
return ret;
+ /* Initialize core clock settings */
+ ath5k_hw_init_core_clock(ah);
+
/*
- * 5211/5212 Specific
+ * Tweak initval settings for revised
+ * chipsets and add some more config
+ * bits
*/
- if (ah->ah_version != AR5K_AR5210) {
-
- /*
- * Write initial RF gain settings
- * This should work for both 5111/5112
- */
- ret = ath5k_hw_rfgain_init(ah, freq);
- if (ret)
- return ret;
-
- mdelay(1);
-
- /*
- * Tweak initval settings for revised
- * chipsets and add some more config
- * bits
- */
- ath5k_hw_tweak_initval_settings(ah, channel);
-
- /*
- * Set TX power
- */
- ret = ath5k_hw_txpower(ah, channel, ee_mode,
- ah->ah_txpower.txp_max_pwr / 2);
- if (ret)
- return ret;
+ ath5k_hw_tweak_initval_settings(ah, channel);
- /* Write rate duration table only on AR5212 and if
- * virtual interface has already been brought up
- * XXX: rethink this after new mode changes to
- * mac80211 are integrated */
- if (ah->ah_version == AR5K_AR5212 &&
- ah->ah_sc->nvifs)
- ath5k_hw_write_rate_duration(ah, mode);
+ /* Commit values from EEPROM */
+ ath5k_hw_commit_eeprom_settings(ah, channel);
- /*
- * Write RF buffer
- */
- ret = ath5k_hw_rfregs_init(ah, channel, mode);
- if (ret)
- return ret;
-
-
- /* Write OFDM timings on 5212*/
- if (ah->ah_version == AR5K_AR5212 &&
- channel->hw_value & CHANNEL_OFDM) {
-
- ret = ath5k_hw_write_ofdm_timings(ah, channel);
- if (ret)
- return ret;
-
- /* Spur info is available only from EEPROM versions
- * greater than 5.3, but the EEPROM routines will use
- * static values for older versions */
- if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
- ath5k_hw_set_spur_mitigation_filter(ah,
- channel);
- }
-
- /*Enable/disable 802.11b mode on 5111
- (enable 2111 frequency converter + CCK)*/
- if (ah->ah_radio == AR5K_RF5111) {
- if (mode == AR5K_MODE_11B)
- AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG,
- AR5K_TXCFG_B_MODE);
- else
- AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
- AR5K_TXCFG_B_MODE);
- }
-
- /* Commit values from EEPROM */
- ath5k_hw_commit_eeprom_settings(ah, channel, ee_mode);
-
- } else {
- /*
- * For 5210 we do all initialization using
- * initvals, so we don't have to modify
- * any settings (5210 also only supports
- * a/aturbo modes)
- */
- mdelay(1);
- /* Disable phy and wait */
- ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
- mdelay(1);
- }
/*
* Restore saved values
*/
- /*DCU/Antenna selection not available on 5210*/
+ /* Seqnum, TSF */
if (ah->ah_version != AR5K_AR5210) {
+ if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
+ for (i = 0; i < 10; i++)
+ ath5k_hw_reg_write(ah, s_seq[i],
+ AR5K_QUEUE_DCU_SEQNUM(i));
+ } else {
+ ath5k_hw_reg_write(ah, s_seq[0],
+ AR5K_QUEUE_DCU_SEQNUM(0));
+ }
- if (change_channel) {
- if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
- for (i = 0; i < 10; i++)
- ath5k_hw_reg_write(ah, s_seq[i],
- AR5K_QUEUE_DCU_SEQNUM(i));
- } else {
- ath5k_hw_reg_write(ah, s_seq[0],
- AR5K_QUEUE_DCU_SEQNUM(0));
- }
-
-
- if (ah->ah_version == AR5K_AR5211) {
- ath5k_hw_reg_write(ah, tsf_up, AR5K_TSF_U32);
- ath5k_hw_reg_write(ah, tsf_lo, AR5K_TSF_L32);
- }
+ if (ah->ah_version == AR5K_AR5211) {
+ ath5k_hw_reg_write(ah, tsf_up, AR5K_TSF_U32);
+ ath5k_hw_reg_write(ah, tsf_lo, AR5K_TSF_L32);
}
}
@@ -1146,203 +1240,34 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR);
ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);
- /* Restore sta_id flags and preserve our mac address*/
- ath5k_hw_reg_write(ah,
- get_unaligned_le32(common->macaddr),
- AR5K_STA_ID0);
- ath5k_hw_reg_write(ah,
- staid1_flags | get_unaligned_le16(common->macaddr + 4),
- AR5K_STA_ID1);
-
-
/*
- * Configure PCU
+ * Initialize PCU
*/
-
- /* Restore bssid and bssid mask */
- ath5k_hw_set_bssid(ah);
-
- /* Set PCU config */
- ath5k_hw_set_opmode(ah, op_mode);
-
- /* Clear any pending interrupts
- * PISR/SISR Not available on 5210 */
- if (ah->ah_version != AR5K_AR5210)
- ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
-
- /* Set RSSI/BRSSI thresholds
- *
- * Note: If we decide to set this value
- * dynamically, keep in mind that when AR5K_RSSI_THR
- * register is read, it might return 0x40 if we haven't
- * written anything to it. Also, BMISS RSSI threshold is zeroed.
- * So doing a save/restore procedure here isn't the right
- * choice. Instead, store it in ath5k_hw */
- ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES |
- AR5K_TUNE_BMISS_THRES <<
- AR5K_RSSI_THR_BMISS_S),
- AR5K_RSSI_THR);
-
- /* MIC QoS support */
- if (ah->ah_mac_srev >= AR5K_SREV_AR2413) {
- ath5k_hw_reg_write(ah, 0x000100aa, AR5K_MIC_QOS_CTL);
- ath5k_hw_reg_write(ah, 0x00003210, AR5K_MIC_QOS_SEL);
- }
-
- /* QoS NOACK Policy */
- if (ah->ah_version == AR5K_AR5212) {
- ath5k_hw_reg_write(ah,
- AR5K_REG_SM(2, AR5K_QOS_NOACK_2BIT_VALUES) |
- AR5K_REG_SM(5, AR5K_QOS_NOACK_BIT_OFFSET) |
- AR5K_REG_SM(0, AR5K_QOS_NOACK_BYTE_OFFSET),
- AR5K_QOS_NOACK);
- }
-
+ ath5k_hw_pcu_init(ah, op_mode, mode);
/*
- * Configure PHY
+ * Initialize PHY
*/
-
- /* Set channel on PHY */
- ret = ath5k_hw_channel(ah, channel);
- if (ret)
+ ret = ath5k_hw_phy_init(ah, channel, mode, false);
+ if (ret) {
+ ATH5K_ERR(ah->ah_sc,
+ "failed to initialize PHY (%i) !\n", ret);
return ret;
-
- /*
- * Enable the PHY and wait until completion
- * This includes BaseBand and Synthesizer
- * activation.
- */
- ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
-
- /*
- * On 5211+ read activation -> rx delay
- * and use it.
- *
- * TODO: Half/quarter rate support
- */
- if (ah->ah_version != AR5K_AR5210) {
- u32 delay;
- delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
- AR5K_PHY_RX_DELAY_M;
- delay = (channel->hw_value & CHANNEL_CCK) ?
- ((delay << 2) / 22) : (delay / 10);
-
- udelay(100 + (2 * delay));
- } else {
- mdelay(1);
}
/*
- * Perform ADC test to see if baseband is ready
- * Set TX hold and check ADC test register
- */
- phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
- ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
- for (i = 0; i <= 20; i++) {
- if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
- break;
- udelay(200);
- }
- ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
-
- /*
- * Start automatic gain control calibration
- *
- * During AGC calibration RX path is re-routed to
- * a power detector so we don't receive anything.
- *
- * This method is used to calibrate some static offsets
- * used together with on-the fly I/Q calibration (the
- * one performed via ath5k_hw_phy_calibrate), which doesn't
- * interrupt rx path.
- *
- * While rx path is re-routed to the power detector we also
- * start a noise floor calibration to measure the
- * card's noise floor (the noise we measure when we are not
- * transmitting or receiving anything).
- *
- * If we are in a noisy environment, AGC calibration may time
- * out and/or noise floor calibration might timeout.
- */
- AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
- AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF);
-
- /* At the same time start I/Q calibration for QAM constellation
- * -no need for CCK- */
- ah->ah_calibration = false;
- if (!(mode == AR5K_MODE_11B)) {
- ah->ah_calibration = true;
- AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
- AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
- AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
- AR5K_PHY_IQ_RUN);
- }
-
- /* Wait for gain calibration to finish (we check for I/Q calibration
- * during ath5k_phy_calibrate) */
- if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
- AR5K_PHY_AGCCTL_CAL, 0, false)) {
- ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
- channel->center_freq);
- }
-
- /* Restore antenna mode */
- ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
-
- /* Restore slot time and ACK timeouts */
- if (ah->ah_coverage_class > 0)
- ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
-
- /*
* Configure QCUs/DCUs
*/
+ ret = ath5k_hw_init_queues(ah);
+ if (ret)
+ return ret;
- /* TODO: HW Compression support for data queues */
- /* TODO: Burst prefetch for data queues */
-
- /*
- * Reset queues and start beacon timers at the end of the reset routine
- * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
- * Note: If we want we can assign multiple qcus on one dcu.
- */
- for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
- ret = ath5k_hw_reset_tx_queue(ah, i);
- if (ret) {
- ATH5K_ERR(ah->ah_sc,
- "failed to reset TX queue #%d\n", i);
- return ret;
- }
- }
-
-
- /*
- * Configure DMA/Interrupts
- */
/*
- * Set Rx/Tx DMA Configuration
- *
- * Set standard DMA size (128). Note that
- * a DMA size of 512 causes rx overruns and tx errors
- * on pci-e cards (tested on 5424 but since rx overruns
- * also occur on 5416/5418 with madwifi we set 128
- * for all PCI-E cards to be safe).
- *
- * XXX: need to check 5210 for this
- * TODO: Check out tx triger level, it's always 64 on dumps but I
- * guess we can tweak it and see how it goes ;-)
+ * Initialize DMA/Interrupts
*/
- if (ah->ah_version != AR5K_AR5210) {
- AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
- AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
- AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
- AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
- }
+ ath5k_hw_dma_init(ah);
- /* Pre-enable interrupts on 5211/5212*/
- if (ah->ah_version != AR5K_AR5210)
- ath5k_hw_set_imr(ah, ah->ah_imr);
/* Enable 32KHz clock function for AR5212+ chips
* Set clocks to 32KHz operation and use an
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index 3ac4cff4239d..16b67e84906d 100644
--- a/drivers/net/wireless/ath/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -51,7 +51,7 @@
struct ath5k_ini_rfbuffer {
u8 rfb_bank; /* RF Bank number */
u16 rfb_ctrl_register; /* RF Buffer control register */
- u32 rfb_mode_data[5]; /* RF Buffer data for each mode */
+ u32 rfb_mode_data[3]; /* RF Buffer data for each mode */
};
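With the turbo columns dropped, rfb_mode_data[] now carries only three per-mode words and the turbo variants are derived through the new RF_TURBO field instead. A hedged sketch of how a writer could pick the right column, assuming index 0 = A/XR, 1 = B, 2 = G as laid out in the tables below (the struct here mirrors, but is not, the driver's definition):

#include <stdio.h>

struct ini_rfbuffer {
	unsigned char  bank;		/* RF bank number */
	unsigned short ctrl_reg;	/* RF buffer control register */
	unsigned int   mode_data[3];	/* A/XR, B, G */
};

enum mode { MODE_A, MODE_B, MODE_G };

static unsigned int mode_word(const struct ini_rfbuffer *e, enum mode m)
{
	static const int col[] = { 0, 1, 2 };	/* A/XR, B, G columns */
	return e->mode_data[col[m]];
}

int main(void)
{
	struct ini_rfbuffer e = { 6, 0x989c,
		{ 0x003800c0, 0x023800c0, 0x003800c0 } };

	printf("G-mode word: 0x%08x\n", mode_word(&e, MODE_G));
	return 0;
}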
/*
@@ -79,8 +79,10 @@ struct ath5k_rf_reg {
* life easier by using an index for each register
* instead of a full rfb_field */
enum ath5k_rf_regs_idx {
+ /* BANK 2 */
+ AR5K_RF_TURBO = 0,
/* BANK 6 */
- AR5K_RF_OB_2GHZ = 0,
+ AR5K_RF_OB_2GHZ,
AR5K_RF_OB_5GHZ,
AR5K_RF_DB_2GHZ,
AR5K_RF_DB_5GHZ,
@@ -134,6 +136,9 @@ enum ath5k_rf_regs_idx {
* RF5111 (Sombrero) *
\*******************/
+/* BANK 2 len pos col */
+#define AR5K_RF5111_RF_TURBO { 1, 3, 0 }
+
/* BANK 6 len pos col */
#define AR5K_RF5111_OB_2GHZ { 3, 119, 0 }
#define AR5K_RF5111_DB_2GHZ { 3, 122, 0 }
@@ -158,6 +163,7 @@ enum ath5k_rf_regs_idx {
#define AR5K_RF5111_MAX_TIME { 2, 49, 0 }
static const struct ath5k_rf_reg rf_regs_5111[] = {
+ {2, AR5K_RF_TURBO, AR5K_RF5111_RF_TURBO},
{6, AR5K_RF_OB_2GHZ, AR5K_RF5111_OB_2GHZ},
{6, AR5K_RF_DB_2GHZ, AR5K_RF5111_DB_2GHZ},
{6, AR5K_RF_OB_5GHZ, AR5K_RF5111_OB_5GHZ},
@@ -177,97 +183,52 @@ static const struct ath5k_rf_reg rf_regs_5111[] = {
/* Default mode specific settings */
static const struct ath5k_ini_rfbuffer rfb_5111[] = {
- { 0, 0x989c,
- /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 0, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 0, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 0, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 0, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 0, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 0, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 0, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 0, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 0, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 0, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 0, 0x989c,
- { 0x00380000, 0x00380000, 0x00380000, 0x00380000, 0x00380000 } },
- { 0, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 0, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 0, 0x989c,
- { 0x00000000, 0x00000000, 0x000000c0, 0x00000080, 0x00000080 } },
- { 0, 0x989c,
- { 0x000400f9, 0x000400f9, 0x000400ff, 0x000400fd, 0x000400fd } },
- { 0, 0x98d4,
- { 0x00000000, 0x00000000, 0x00000004, 0x00000004, 0x00000004 } },
- { 1, 0x98d4,
- { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
- { 2, 0x98d4,
- { 0x00000010, 0x00000014, 0x00000010, 0x00000010, 0x00000014 } },
- { 3, 0x98d8,
- { 0x00601068, 0x00601068, 0x00601068, 0x00601068, 0x00601068 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } },
- { 6, 0x989c,
- { 0x04000000, 0x04000000, 0x04000000, 0x04000000, 0x04000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x0a000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x003800c0, 0x00380080, 0x023800c0, 0x003800c0, 0x003800c0 } },
- { 6, 0x989c,
- { 0x00020006, 0x00020006, 0x00000006, 0x00020006, 0x00020006 } },
- { 6, 0x989c,
- { 0x00000089, 0x00000089, 0x00000089, 0x00000089, 0x00000089 } },
- { 6, 0x989c,
- { 0x000000a0, 0x000000a0, 0x000000a0, 0x000000a0, 0x000000a0 } },
- { 6, 0x989c,
- { 0x00040007, 0x00040007, 0x00040007, 0x00040007, 0x00040007 } },
- { 6, 0x98d4,
- { 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a } },
- { 7, 0x989c,
- { 0x00000040, 0x00000048, 0x00000040, 0x00000040, 0x00000040 } },
- { 7, 0x989c,
- { 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 } },
- { 7, 0x989c,
- { 0x00000008, 0x00000008, 0x00000008, 0x00000008, 0x00000008 } },
- { 7, 0x989c,
- { 0x0000004f, 0x0000004f, 0x0000004f, 0x0000004f, 0x0000004f } },
- { 7, 0x989c,
- { 0x000000f1, 0x000000f1, 0x00000061, 0x000000f1, 0x000000f1 } },
- { 7, 0x989c,
- { 0x0000904f, 0x0000904f, 0x0000904c, 0x0000904f, 0x0000904f } },
- { 7, 0x989c,
- { 0x0000125a, 0x0000125a, 0x0000129a, 0x0000125a, 0x0000125a } },
- { 7, 0x98cc,
- { 0x0000000e, 0x0000000e, 0x0000000f, 0x0000000e, 0x0000000e } },
+ /* BANK / C.R. A/XR B G */
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00380000, 0x00380000, 0x00380000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 0, 0x989c, { 0x00000000, 0x000000c0, 0x00000080 } },
+ { 0, 0x989c, { 0x000400f9, 0x000400ff, 0x000400fd } },
+ { 0, 0x98d4, { 0x00000000, 0x00000004, 0x00000004 } },
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d4, { 0x00000010, 0x00000010, 0x00000010 } },
+ { 3, 0x98d8, { 0x00601068, 0x00601068, 0x00601068 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
+ { 6, 0x989c, { 0x04000000, 0x04000000, 0x04000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x0a000000, 0x00000000 } },
+ { 6, 0x989c, { 0x003800c0, 0x023800c0, 0x003800c0 } },
+ { 6, 0x989c, { 0x00020006, 0x00000006, 0x00020006 } },
+ { 6, 0x989c, { 0x00000089, 0x00000089, 0x00000089 } },
+ { 6, 0x989c, { 0x000000a0, 0x000000a0, 0x000000a0 } },
+ { 6, 0x989c, { 0x00040007, 0x00040007, 0x00040007 } },
+ { 6, 0x98d4, { 0x0000001a, 0x0000001a, 0x0000001a } },
+ { 7, 0x989c, { 0x00000040, 0x00000040, 0x00000040 } },
+ { 7, 0x989c, { 0x00000010, 0x00000010, 0x00000010 } },
+ { 7, 0x989c, { 0x00000008, 0x00000008, 0x00000008 } },
+ { 7, 0x989c, { 0x0000004f, 0x0000004f, 0x0000004f } },
+ { 7, 0x989c, { 0x000000f1, 0x00000061, 0x000000f1 } },
+ { 7, 0x989c, { 0x0000904f, 0x0000904c, 0x0000904f } },
+ { 7, 0x989c, { 0x0000125a, 0x0000129a, 0x0000125a } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000f, 0x0000000e } },
};
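The { len, pos, col } descriptors such as AR5K_RF5111_RF_TURBO name a field inside the serialized RF bank data written above. The sketch below only illustrates the basic idea of writing len bits starting at bit pos into a flat bit buffer; the driver's real packing (ath5k_hw_rfb_op and friends) also handles the column and per-word bit ordering, so treat this strictly as an approximation.

#include <stdio.h>

struct rf_field { unsigned int len, pos, col; };

static void set_field(unsigned int *buf, struct rf_field f, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < f.len; i++) {
		unsigned int bit = f.pos + i;

		if (val & (1u << i))
			buf[bit / 32] |= 1u << (bit % 32);
		else
			buf[bit / 32] &= ~(1u << (bit % 32));
	}
}

int main(void)
{
	unsigned int bank2[8] = { 0 };
	struct rf_field turbo = { 1, 3, 0 };	/* like AR5K_RF5111_RF_TURBO */

	set_field(bank2, turbo, 1);
	printf("word0 = 0x%08x\n", bank2[0]);	/* prints 0x00000008 */
	return 0;
}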
@@ -276,6 +237,9 @@ static const struct ath5k_ini_rfbuffer rfb_5111[] = {
* RF5112/RF2112 (Derby) *
\***********************/
+/* BANK 2 (Common) len pos col */
+#define AR5K_RF5112X_RF_TURBO { 1, 1, 2 }
+
/* BANK 7 (Common) len pos col */
#define AR5K_RF5112X_GAIN_I { 6, 14, 0 }
#define AR5K_RF5112X_MIXVGA_OVR { 1, 36, 0 }
@@ -307,6 +271,7 @@ static const struct ath5k_ini_rfbuffer rfb_5111[] = {
#define AR5K_RF5112_PWD(_n) { 1, (302 - _n), 3 }
static const struct ath5k_rf_reg rf_regs_5112[] = {
+ {2, AR5K_RF_TURBO, AR5K_RF5112X_RF_TURBO},
{6, AR5K_RF_OB_2GHZ, AR5K_RF5112_OB_2GHZ},
{6, AR5K_RF_DB_2GHZ, AR5K_RF5112_DB_2GHZ},
{6, AR5K_RF_OB_5GHZ, AR5K_RF5112_OB_5GHZ},
@@ -335,115 +300,61 @@ static const struct ath5k_rf_reg rf_regs_5112[] = {
/* Default mode specific settings */
static const struct ath5k_ini_rfbuffer rfb_5112[] = {
- { 1, 0x98d4,
- /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
- { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
- { 2, 0x98d0,
- { 0x03060408, 0x03070408, 0x03060408, 0x03060408, 0x03070408 } },
- { 3, 0x98dc,
- { 0x00a0c0c0, 0x00a0c0c0, 0x00e0c0c0, 0x00e0c0c0, 0x00e0c0c0 } },
- { 6, 0x989c,
- { 0x00a00000, 0x00a00000, 0x00a00000, 0x00a00000, 0x00a00000 } },
- { 6, 0x989c,
- { 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00660000, 0x00660000, 0x00660000, 0x00660000, 0x00660000 } },
- { 6, 0x989c,
- { 0x00db0000, 0x00db0000, 0x00db0000, 0x00db0000, 0x00db0000 } },
- { 6, 0x989c,
- { 0x00f10000, 0x00f10000, 0x00f10000, 0x00f10000, 0x00f10000 } },
- { 6, 0x989c,
- { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } },
- { 6, 0x989c,
- { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } },
- { 6, 0x989c,
- { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } },
- { 6, 0x989c,
- { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
- { 6, 0x989c,
- { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
- { 6, 0x989c,
- { 0x008b0000, 0x008b0000, 0x008b0000, 0x008b0000, 0x008b0000 } },
- { 6, 0x989c,
- { 0x00600000, 0x00600000, 0x00600000, 0x00600000, 0x00600000 } },
- { 6, 0x989c,
- { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } },
- { 6, 0x989c,
- { 0x00840000, 0x00840000, 0x00840000, 0x00840000, 0x00840000 } },
- { 6, 0x989c,
- { 0x00640000, 0x00640000, 0x00640000, 0x00640000, 0x00640000 } },
- { 6, 0x989c,
- { 0x00200000, 0x00200000, 0x00200000, 0x00200000, 0x00200000 } },
- { 6, 0x989c,
- { 0x00240000, 0x00240000, 0x00240000, 0x00240000, 0x00240000 } },
- { 6, 0x989c,
- { 0x00250000, 0x00250000, 0x00250000, 0x00250000, 0x00250000 } },
- { 6, 0x989c,
- { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } },
- { 6, 0x989c,
- { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } },
- { 6, 0x989c,
- { 0x00510000, 0x00510000, 0x00510000, 0x00510000, 0x00510000 } },
- { 6, 0x989c,
- { 0x1c040000, 0x1c040000, 0x1c040000, 0x1c040000, 0x1c040000 } },
- { 6, 0x989c,
- { 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000 } },
- { 6, 0x989c,
- { 0x00a10000, 0x00a10000, 0x00a10000, 0x00a10000, 0x00a10000 } },
- { 6, 0x989c,
- { 0x00400000, 0x00400000, 0x00400000, 0x00400000, 0x00400000 } },
- { 6, 0x989c,
- { 0x03090000, 0x03090000, 0x03090000, 0x03090000, 0x03090000 } },
- { 6, 0x989c,
- { 0x06000000, 0x06000000, 0x06000000, 0x06000000, 0x06000000 } },
- { 6, 0x989c,
- { 0x000000b0, 0x000000b0, 0x000000a8, 0x000000a8, 0x000000a8 } },
- { 6, 0x989c,
- { 0x0000002e, 0x0000002e, 0x0000002e, 0x0000002e, 0x0000002e } },
- { 6, 0x989c,
- { 0x006c4a41, 0x006c4a41, 0x006c4af1, 0x006c4a61, 0x006c4a61 } },
- { 6, 0x989c,
- { 0x0050892a, 0x0050892a, 0x0050892b, 0x0050892b, 0x0050892b } },
- { 6, 0x989c,
- { 0x00842400, 0x00842400, 0x00842400, 0x00842400, 0x00842400 } },
- { 6, 0x989c,
- { 0x00c69200, 0x00c69200, 0x00c69200, 0x00c69200, 0x00c69200 } },
- { 6, 0x98d0,
- { 0x0002000c, 0x0002000c, 0x0002000c, 0x0002000c, 0x0002000c } },
- { 7, 0x989c,
- { 0x00000094, 0x00000094, 0x00000094, 0x00000094, 0x00000094 } },
- { 7, 0x989c,
- { 0x00000091, 0x00000091, 0x00000091, 0x00000091, 0x00000091 } },
- { 7, 0x989c,
- { 0x0000000a, 0x0000000a, 0x00000012, 0x00000012, 0x00000012 } },
- { 7, 0x989c,
- { 0x00000080, 0x00000080, 0x00000080, 0x00000080, 0x00000080 } },
- { 7, 0x989c,
- { 0x000000c1, 0x000000c1, 0x000000c1, 0x000000c1, 0x000000c1 } },
- { 7, 0x989c,
- { 0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } },
- { 7, 0x989c,
- { 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0 } },
- { 7, 0x989c,
- { 0x00000022, 0x00000022, 0x00000022, 0x00000022, 0x00000022 } },
- { 7, 0x989c,
- { 0x00000092, 0x00000092, 0x00000092, 0x00000092, 0x00000092 } },
- { 7, 0x989c,
- { 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4 } },
- { 7, 0x989c,
- { 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc } },
- { 7, 0x989c,
- { 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c } },
- { 7, 0x98c4,
- { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } },
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x03060408, 0x03060408, 0x03060408 } },
+ { 3, 0x98dc, { 0x00a0c0c0, 0x00e0c0c0, 0x00e0c0c0 } },
+ { 6, 0x989c, { 0x00a00000, 0x00a00000, 0x00a00000 } },
+ { 6, 0x989c, { 0x000a0000, 0x000a0000, 0x000a0000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00660000, 0x00660000, 0x00660000 } },
+ { 6, 0x989c, { 0x00db0000, 0x00db0000, 0x00db0000 } },
+ { 6, 0x989c, { 0x00f10000, 0x00f10000, 0x00f10000 } },
+ { 6, 0x989c, { 0x00120000, 0x00120000, 0x00120000 } },
+ { 6, 0x989c, { 0x00120000, 0x00120000, 0x00120000 } },
+ { 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x000c0000, 0x000c0000, 0x000c0000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x008b0000, 0x008b0000, 0x008b0000 } },
+ { 6, 0x989c, { 0x00600000, 0x00600000, 0x00600000 } },
+ { 6, 0x989c, { 0x000c0000, 0x000c0000, 0x000c0000 } },
+ { 6, 0x989c, { 0x00840000, 0x00840000, 0x00840000 } },
+ { 6, 0x989c, { 0x00640000, 0x00640000, 0x00640000 } },
+ { 6, 0x989c, { 0x00200000, 0x00200000, 0x00200000 } },
+ { 6, 0x989c, { 0x00240000, 0x00240000, 0x00240000 } },
+ { 6, 0x989c, { 0x00250000, 0x00250000, 0x00250000 } },
+ { 6, 0x989c, { 0x00110000, 0x00110000, 0x00110000 } },
+ { 6, 0x989c, { 0x00110000, 0x00110000, 0x00110000 } },
+ { 6, 0x989c, { 0x00510000, 0x00510000, 0x00510000 } },
+ { 6, 0x989c, { 0x1c040000, 0x1c040000, 0x1c040000 } },
+ { 6, 0x989c, { 0x000a0000, 0x000a0000, 0x000a0000 } },
+ { 6, 0x989c, { 0x00a10000, 0x00a10000, 0x00a10000 } },
+ { 6, 0x989c, { 0x00400000, 0x00400000, 0x00400000 } },
+ { 6, 0x989c, { 0x03090000, 0x03090000, 0x03090000 } },
+ { 6, 0x989c, { 0x06000000, 0x06000000, 0x06000000 } },
+ { 6, 0x989c, { 0x000000b0, 0x000000a8, 0x000000a8 } },
+ { 6, 0x989c, { 0x0000002e, 0x0000002e, 0x0000002e } },
+ { 6, 0x989c, { 0x006c4a41, 0x006c4af1, 0x006c4a61 } },
+ { 6, 0x989c, { 0x0050892a, 0x0050892b, 0x0050892b } },
+ { 6, 0x989c, { 0x00842400, 0x00842400, 0x00842400 } },
+ { 6, 0x989c, { 0x00c69200, 0x00c69200, 0x00c69200 } },
+ { 6, 0x98d0, { 0x0002000c, 0x0002000c, 0x0002000c } },
+ { 7, 0x989c, { 0x00000094, 0x00000094, 0x00000094 } },
+ { 7, 0x989c, { 0x00000091, 0x00000091, 0x00000091 } },
+ { 7, 0x989c, { 0x0000000a, 0x00000012, 0x00000012 } },
+ { 7, 0x989c, { 0x00000080, 0x00000080, 0x00000080 } },
+ { 7, 0x989c, { 0x000000c1, 0x000000c1, 0x000000c1 } },
+ { 7, 0x989c, { 0x00000060, 0x00000060, 0x00000060 } },
+ { 7, 0x989c, { 0x000000f0, 0x000000f0, 0x000000f0 } },
+ { 7, 0x989c, { 0x00000022, 0x00000022, 0x00000022 } },
+ { 7, 0x989c, { 0x00000092, 0x00000092, 0x00000092 } },
+ { 7, 0x989c, { 0x000000d4, 0x000000d4, 0x000000d4 } },
+ { 7, 0x989c, { 0x000014cc, 0x000014cc, 0x000014cc } },
+ { 7, 0x989c, { 0x0000048c, 0x0000048c, 0x0000048c } },
+ { 7, 0x98c4, { 0x00000003, 0x00000003, 0x00000003 } },
};
/* RFX112A (Derby 2) */
@@ -477,6 +388,7 @@ static const struct ath5k_ini_rfbuffer rfb_5112[] = {
#define AR5K_RF5112A_XB5_LVL { 2, 3, 3 }
static const struct ath5k_rf_reg rf_regs_5112a[] = {
+ {2, AR5K_RF_TURBO, AR5K_RF5112X_RF_TURBO},
{6, AR5K_RF_OB_2GHZ, AR5K_RF5112A_OB_2GHZ},
{6, AR5K_RF_DB_2GHZ, AR5K_RF5112A_DB_2GHZ},
{6, AR5K_RF_OB_5GHZ, AR5K_RF5112A_OB_5GHZ},
@@ -515,119 +427,63 @@ static const struct ath5k_rf_reg rf_regs_5112a[] = {
/* Default mode specific settings */
static const struct ath5k_ini_rfbuffer rfb_5112a[] = {
- { 1, 0x98d4,
- /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
- { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
- { 2, 0x98d0,
- { 0x03060408, 0x03070408, 0x03060408, 0x03060408, 0x03070408 } },
- { 3, 0x98dc,
- { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
- { 6, 0x989c,
- { 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000 } },
- { 6, 0x989c,
- { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } },
- { 6, 0x989c,
- { 0x00010000, 0x00010000, 0x00010000, 0x00010000, 0x00010000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00180000, 0x00180000, 0x00180000, 0x00180000, 0x00180000 } },
- { 6, 0x989c,
- { 0x00600000, 0x00600000, 0x006e0000, 0x006e0000, 0x006e0000 } },
- { 6, 0x989c,
- { 0x00c70000, 0x00c70000, 0x00c70000, 0x00c70000, 0x00c70000 } },
- { 6, 0x989c,
- { 0x004b0000, 0x004b0000, 0x004b0000, 0x004b0000, 0x004b0000 } },
- { 6, 0x989c,
- { 0x04480000, 0x04480000, 0x04480000, 0x04480000, 0x04480000 } },
- { 6, 0x989c,
- { 0x004c0000, 0x004c0000, 0x004c0000, 0x004c0000, 0x004c0000 } },
- { 6, 0x989c,
- { 0x00e40000, 0x00e40000, 0x00e40000, 0x00e40000, 0x00e40000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00fc0000, 0x00fc0000, 0x00fc0000, 0x00fc0000, 0x00fc0000 } },
- { 6, 0x989c,
- { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
- { 6, 0x989c,
- { 0x043f0000, 0x043f0000, 0x043f0000, 0x043f0000, 0x043f0000 } },
- { 6, 0x989c,
- { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } },
- { 6, 0x989c,
- { 0x02190000, 0x02190000, 0x02190000, 0x02190000, 0x02190000 } },
- { 6, 0x989c,
- { 0x00240000, 0x00240000, 0x00240000, 0x00240000, 0x00240000 } },
- { 6, 0x989c,
- { 0x00b40000, 0x00b40000, 0x00b40000, 0x00b40000, 0x00b40000 } },
- { 6, 0x989c,
- { 0x00990000, 0x00990000, 0x00990000, 0x00990000, 0x00990000 } },
- { 6, 0x989c,
- { 0x00500000, 0x00500000, 0x00500000, 0x00500000, 0x00500000 } },
- { 6, 0x989c,
- { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } },
- { 6, 0x989c,
- { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } },
- { 6, 0x989c,
- { 0xc0320000, 0xc0320000, 0xc0320000, 0xc0320000, 0xc0320000 } },
- { 6, 0x989c,
- { 0x01740000, 0x01740000, 0x01740000, 0x01740000, 0x01740000 } },
- { 6, 0x989c,
- { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } },
- { 6, 0x989c,
- { 0x86280000, 0x86280000, 0x86280000, 0x86280000, 0x86280000 } },
- { 6, 0x989c,
- { 0x31840000, 0x31840000, 0x31840000, 0x31840000, 0x31840000 } },
- { 6, 0x989c,
- { 0x00f20080, 0x00f20080, 0x00f20080, 0x00f20080, 0x00f20080 } },
- { 6, 0x989c,
- { 0x00270019, 0x00270019, 0x00270019, 0x00270019, 0x00270019 } },
- { 6, 0x989c,
- { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x000000b2, 0x000000b2, 0x000000b2, 0x000000b2, 0x000000b2 } },
- { 6, 0x989c,
- { 0x00b02084, 0x00b02084, 0x00b02084, 0x00b02084, 0x00b02084 } },
- { 6, 0x989c,
- { 0x004125a4, 0x004125a4, 0x004125a4, 0x004125a4, 0x004125a4 } },
- { 6, 0x989c,
- { 0x00119220, 0x00119220, 0x00119220, 0x00119220, 0x00119220 } },
- { 6, 0x989c,
- { 0x001a4800, 0x001a4800, 0x001a4800, 0x001a4800, 0x001a4800 } },
- { 6, 0x98d8,
- { 0x000b0230, 0x000b0230, 0x000b0230, 0x000b0230, 0x000b0230 } },
- { 7, 0x989c,
- { 0x00000094, 0x00000094, 0x00000094, 0x00000094, 0x00000094 } },
- { 7, 0x989c,
- { 0x00000091, 0x00000091, 0x00000091, 0x00000091, 0x00000091 } },
- { 7, 0x989c,
- { 0x00000012, 0x00000012, 0x00000012, 0x00000012, 0x00000012 } },
- { 7, 0x989c,
- { 0x00000080, 0x00000080, 0x00000080, 0x00000080, 0x00000080 } },
- { 7, 0x989c,
- { 0x000000d9, 0x000000d9, 0x000000d9, 0x000000d9, 0x000000d9 } },
- { 7, 0x989c,
- { 0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } },
- { 7, 0x989c,
- { 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0 } },
- { 7, 0x989c,
- { 0x000000a2, 0x000000a2, 0x000000a2, 0x000000a2, 0x000000a2 } },
- { 7, 0x989c,
- { 0x00000052, 0x00000052, 0x00000052, 0x00000052, 0x00000052 } },
- { 7, 0x989c,
- { 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4 } },
- { 7, 0x989c,
- { 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc } },
- { 7, 0x989c,
- { 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c } },
- { 7, 0x98c4,
- { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } },
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x03060408, 0x03060408, 0x03060408 } },
+ { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
+ { 6, 0x989c, { 0x0f000000, 0x0f000000, 0x0f000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00800000, 0x00800000, 0x00800000 } },
+ { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
+ { 6, 0x989c, { 0x00010000, 0x00010000, 0x00010000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00180000, 0x00180000, 0x00180000 } },
+ { 6, 0x989c, { 0x00600000, 0x006e0000, 0x006e0000 } },
+ { 6, 0x989c, { 0x00c70000, 0x00c70000, 0x00c70000 } },
+ { 6, 0x989c, { 0x004b0000, 0x004b0000, 0x004b0000 } },
+ { 6, 0x989c, { 0x04480000, 0x04480000, 0x04480000 } },
+ { 6, 0x989c, { 0x004c0000, 0x004c0000, 0x004c0000 } },
+ { 6, 0x989c, { 0x00e40000, 0x00e40000, 0x00e40000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00fc0000, 0x00fc0000, 0x00fc0000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x043f0000, 0x043f0000, 0x043f0000 } },
+ { 6, 0x989c, { 0x000c0000, 0x000c0000, 0x000c0000 } },
+ { 6, 0x989c, { 0x02190000, 0x02190000, 0x02190000 } },
+ { 6, 0x989c, { 0x00240000, 0x00240000, 0x00240000 } },
+ { 6, 0x989c, { 0x00b40000, 0x00b40000, 0x00b40000 } },
+ { 6, 0x989c, { 0x00990000, 0x00990000, 0x00990000 } },
+ { 6, 0x989c, { 0x00500000, 0x00500000, 0x00500000 } },
+ { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
+ { 6, 0x989c, { 0x00120000, 0x00120000, 0x00120000 } },
+ { 6, 0x989c, { 0xc0320000, 0xc0320000, 0xc0320000 } },
+ { 6, 0x989c, { 0x01740000, 0x01740000, 0x01740000 } },
+ { 6, 0x989c, { 0x00110000, 0x00110000, 0x00110000 } },
+ { 6, 0x989c, { 0x86280000, 0x86280000, 0x86280000 } },
+ { 6, 0x989c, { 0x31840000, 0x31840000, 0x31840000 } },
+ { 6, 0x989c, { 0x00f20080, 0x00f20080, 0x00f20080 } },
+ { 6, 0x989c, { 0x00270019, 0x00270019, 0x00270019 } },
+ { 6, 0x989c, { 0x00000003, 0x00000003, 0x00000003 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x000000b2, 0x000000b2, 0x000000b2 } },
+ { 6, 0x989c, { 0x00b02084, 0x00b02084, 0x00b02084 } },
+ { 6, 0x989c, { 0x004125a4, 0x004125a4, 0x004125a4 } },
+ { 6, 0x989c, { 0x00119220, 0x00119220, 0x00119220 } },
+ { 6, 0x989c, { 0x001a4800, 0x001a4800, 0x001a4800 } },
+ { 6, 0x98d8, { 0x000b0230, 0x000b0230, 0x000b0230 } },
+ { 7, 0x989c, { 0x00000094, 0x00000094, 0x00000094 } },
+ { 7, 0x989c, { 0x00000091, 0x00000091, 0x00000091 } },
+ { 7, 0x989c, { 0x00000012, 0x00000012, 0x00000012 } },
+ { 7, 0x989c, { 0x00000080, 0x00000080, 0x00000080 } },
+ { 7, 0x989c, { 0x000000d9, 0x000000d9, 0x000000d9 } },
+ { 7, 0x989c, { 0x00000060, 0x00000060, 0x00000060 } },
+ { 7, 0x989c, { 0x000000f0, 0x000000f0, 0x000000f0 } },
+ { 7, 0x989c, { 0x000000a2, 0x000000a2, 0x000000a2 } },
+ { 7, 0x989c, { 0x00000052, 0x00000052, 0x00000052 } },
+ { 7, 0x989c, { 0x000000d4, 0x000000d4, 0x000000d4 } },
+ { 7, 0x989c, { 0x000014cc, 0x000014cc, 0x000014cc } },
+ { 7, 0x989c, { 0x0000048c, 0x0000048c, 0x0000048c } },
+ { 7, 0x98c4, { 0x00000003, 0x00000003, 0x00000003 } },
};
@@ -636,11 +492,15 @@ static const struct ath5k_ini_rfbuffer rfb_5112a[] = {
* RF2413 (Griffin) *
\******************/
+/* BANK 2 len pos col */
+#define AR5K_RF2413_RF_TURBO { 1, 1, 2 }
+
/* BANK 6 len pos col */
#define AR5K_RF2413_OB_2GHZ { 3, 168, 0 }
#define AR5K_RF2413_DB_2GHZ { 3, 165, 0 }
static const struct ath5k_rf_reg rf_regs_2413[] = {
+ {2, AR5K_RF_TURBO, AR5K_RF2413_RF_TURBO},
{6, AR5K_RF_OB_2GHZ, AR5K_RF2413_OB_2GHZ},
{6, AR5K_RF_DB_2GHZ, AR5K_RF2413_DB_2GHZ},
};
@@ -649,73 +509,40 @@ static const struct ath5k_rf_reg rf_regs_2413[] = {
* XXX: a/aTurbo ???
*/
static const struct ath5k_ini_rfbuffer rfb_2413[] = {
- { 1, 0x98d4,
- /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
- { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
- { 2, 0x98d0,
- { 0x02001408, 0x02011408, 0x02001408, 0x02001408, 0x02011408 } },
- { 3, 0x98dc,
- { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
- { 6, 0x989c,
- { 0xf0000000, 0xf0000000, 0xf0000000, 0xf0000000, 0xf0000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x03000000, 0x03000000, 0x03000000, 0x03000000, 0x03000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x40400000, 0x40400000, 0x40400000, 0x40400000, 0x40400000 } },
- { 6, 0x989c,
- { 0x65050000, 0x65050000, 0x65050000, 0x65050000, 0x65050000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00420000, 0x00420000, 0x00420000, 0x00420000, 0x00420000 } },
- { 6, 0x989c,
- { 0x00b50000, 0x00b50000, 0x00b50000, 0x00b50000, 0x00b50000 } },
- { 6, 0x989c,
- { 0x00030000, 0x00030000, 0x00030000, 0x00030000, 0x00030000 } },
- { 6, 0x989c,
- { 0x00f70000, 0x00f70000, 0x00f70000, 0x00f70000, 0x00f70000 } },
- { 6, 0x989c,
- { 0x009d0000, 0x009d0000, 0x009d0000, 0x009d0000, 0x009d0000 } },
- { 6, 0x989c,
- { 0x00220000, 0x00220000, 0x00220000, 0x00220000, 0x00220000 } },
- { 6, 0x989c,
- { 0x04220000, 0x04220000, 0x04220000, 0x04220000, 0x04220000 } },
- { 6, 0x989c,
- { 0x00230018, 0x00230018, 0x00230018, 0x00230018, 0x00230018 } },
- { 6, 0x989c,
- { 0x00280000, 0x00280000, 0x00280060, 0x00280060, 0x00280060 } },
- { 6, 0x989c,
- { 0x005000c0, 0x005000c0, 0x005000c3, 0x005000c3, 0x005000c3 } },
- { 6, 0x989c,
- { 0x0004007f, 0x0004007f, 0x0004007f, 0x0004007f, 0x0004007f } },
- { 6, 0x989c,
- { 0x00000458, 0x00000458, 0x00000458, 0x00000458, 0x00000458 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x0000c000, 0x0000c000, 0x0000c000, 0x0000c000, 0x0000c000 } },
- { 6, 0x98d8,
- { 0x00400230, 0x00400230, 0x00400230, 0x00400230, 0x00400230 } },
- { 7, 0x989c,
- { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
- { 7, 0x989c,
- { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
- { 7, 0x98cc,
- { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
+ { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
+ { 6, 0x989c, { 0xf0000000, 0xf0000000, 0xf0000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x03000000, 0x03000000, 0x03000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x40400000, 0x40400000, 0x40400000 } },
+ { 6, 0x989c, { 0x65050000, 0x65050000, 0x65050000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00420000, 0x00420000, 0x00420000 } },
+ { 6, 0x989c, { 0x00b50000, 0x00b50000, 0x00b50000 } },
+ { 6, 0x989c, { 0x00030000, 0x00030000, 0x00030000 } },
+ { 6, 0x989c, { 0x00f70000, 0x00f70000, 0x00f70000 } },
+ { 6, 0x989c, { 0x009d0000, 0x009d0000, 0x009d0000 } },
+ { 6, 0x989c, { 0x00220000, 0x00220000, 0x00220000 } },
+ { 6, 0x989c, { 0x04220000, 0x04220000, 0x04220000 } },
+ { 6, 0x989c, { 0x00230018, 0x00230018, 0x00230018 } },
+ { 6, 0x989c, { 0x00280000, 0x00280060, 0x00280060 } },
+ { 6, 0x989c, { 0x005000c0, 0x005000c3, 0x005000c3 } },
+ { 6, 0x989c, { 0x0004007f, 0x0004007f, 0x0004007f } },
+ { 6, 0x989c, { 0x00000458, 0x00000458, 0x00000458 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x0000c000, 0x0000c000, 0x0000c000 } },
+ { 6, 0x98d8, { 0x00400230, 0x00400230, 0x00400230 } },
+ { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
+ { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
};
@@ -724,88 +551,57 @@ static const struct ath5k_ini_rfbuffer rfb_2413[] = {
* RF2315/RF2316 (Cobra SoC) *
\***************************/
+/* BANK 2 len pos col */
+#define AR5K_RF2316_RF_TURBO { 1, 1, 2 }
+
/* BANK 6 len pos col */
#define AR5K_RF2316_OB_2GHZ { 3, 178, 0 }
#define AR5K_RF2316_DB_2GHZ { 3, 175, 0 }
static const struct ath5k_rf_reg rf_regs_2316[] = {
+ {2, AR5K_RF_TURBO, AR5K_RF2316_RF_TURBO},
{6, AR5K_RF_OB_2GHZ, AR5K_RF2316_OB_2GHZ},
{6, AR5K_RF_DB_2GHZ, AR5K_RF2316_DB_2GHZ},
};
/* Default mode specific settings */
static const struct ath5k_ini_rfbuffer rfb_2316[] = {
- { 1, 0x98d4,
- /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
- { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
- { 2, 0x98d0,
- { 0x02001408, 0x02011408, 0x02001408, 0x02001408, 0x02011408 } },
- { 3, 0x98dc,
- { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000 } },
- { 6, 0x989c,
- { 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000 } },
- { 6, 0x989c,
- { 0x02000000, 0x02000000, 0x02000000, 0x02000000, 0x02000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0xf8000000, 0xf8000000, 0xf8000000, 0xf8000000, 0xf8000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x95150000, 0x95150000, 0x95150000, 0x95150000, 0x95150000 } },
- { 6, 0x989c,
- { 0xc1000000, 0xc1000000, 0xc1000000, 0xc1000000, 0xc1000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000 } },
- { 6, 0x989c,
- { 0x00d50000, 0x00d50000, 0x00d50000, 0x00d50000, 0x00d50000 } },
- { 6, 0x989c,
- { 0x000e0000, 0x000e0000, 0x000e0000, 0x000e0000, 0x000e0000 } },
- { 6, 0x989c,
- { 0x00dc0000, 0x00dc0000, 0x00dc0000, 0x00dc0000, 0x00dc0000 } },
- { 6, 0x989c,
- { 0x00770000, 0x00770000, 0x00770000, 0x00770000, 0x00770000 } },
- { 6, 0x989c,
- { 0x008a0000, 0x008a0000, 0x008a0000, 0x008a0000, 0x008a0000 } },
- { 6, 0x989c,
- { 0x10880000, 0x10880000, 0x10880000, 0x10880000, 0x10880000 } },
- { 6, 0x989c,
- { 0x008c0060, 0x008c0060, 0x008c0060, 0x008c0060, 0x008c0060 } },
- { 6, 0x989c,
- { 0x00a00000, 0x00a00000, 0x00a00080, 0x00a00080, 0x00a00080 } },
- { 6, 0x989c,
- { 0x00400000, 0x00400000, 0x0040000d, 0x0040000d, 0x0040000d } },
- { 6, 0x989c,
- { 0x00110400, 0x00110400, 0x00110400, 0x00110400, 0x00110400 } },
- { 6, 0x989c,
- { 0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } },
- { 6, 0x989c,
- { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } },
- { 6, 0x989c,
- { 0x00000b00, 0x00000b00, 0x00000b00, 0x00000b00, 0x00000b00 } },
- { 6, 0x989c,
- { 0x00000be8, 0x00000be8, 0x00000be8, 0x00000be8, 0x00000be8 } },
- { 6, 0x98c0,
- { 0x00010000, 0x00010000, 0x00010000, 0x00010000, 0x00010000 } },
- { 7, 0x989c,
- { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
- { 7, 0x989c,
- { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
- { 7, 0x98cc,
- { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
+ { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0xc0000000, 0xc0000000, 0xc0000000 } },
+ { 6, 0x989c, { 0x0f000000, 0x0f000000, 0x0f000000 } },
+ { 6, 0x989c, { 0x02000000, 0x02000000, 0x02000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0xf8000000, 0xf8000000, 0xf8000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x95150000, 0x95150000, 0x95150000 } },
+ { 6, 0x989c, { 0xc1000000, 0xc1000000, 0xc1000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00080000, 0x00080000, 0x00080000 } },
+ { 6, 0x989c, { 0x00d50000, 0x00d50000, 0x00d50000 } },
+ { 6, 0x989c, { 0x000e0000, 0x000e0000, 0x000e0000 } },
+ { 6, 0x989c, { 0x00dc0000, 0x00dc0000, 0x00dc0000 } },
+ { 6, 0x989c, { 0x00770000, 0x00770000, 0x00770000 } },
+ { 6, 0x989c, { 0x008a0000, 0x008a0000, 0x008a0000 } },
+ { 6, 0x989c, { 0x10880000, 0x10880000, 0x10880000 } },
+ { 6, 0x989c, { 0x008c0060, 0x008c0060, 0x008c0060 } },
+ { 6, 0x989c, { 0x00a00000, 0x00a00080, 0x00a00080 } },
+ { 6, 0x989c, { 0x00400000, 0x0040000d, 0x0040000d } },
+ { 6, 0x989c, { 0x00110400, 0x00110400, 0x00110400 } },
+ { 6, 0x989c, { 0x00000060, 0x00000060, 0x00000060 } },
+ { 6, 0x989c, { 0x00000001, 0x00000001, 0x00000001 } },
+ { 6, 0x989c, { 0x00000b00, 0x00000b00, 0x00000b00 } },
+ { 6, 0x989c, { 0x00000be8, 0x00000be8, 0x00000be8 } },
+ { 6, 0x98c0, { 0x00010000, 0x00010000, 0x00010000 } },
+ { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
+ { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
};
@@ -835,93 +631,50 @@ static const struct ath5k_rf_reg rf_regs_5413[] = {
/* Default mode specific settings */
static const struct ath5k_ini_rfbuffer rfb_5413[] = {
- { 1, 0x98d4,
- /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
- { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
- { 2, 0x98d0,
- { 0x00000008, 0x00000008, 0x00000008, 0x00000008, 0x00000008 } },
- { 3, 0x98dc,
- { 0x00a000c0, 0x00a000c0, 0x00e000c0, 0x00e000c0, 0x00e000c0 } },
- { 6, 0x989c,
- { 0x33000000, 0x33000000, 0x33000000, 0x33000000, 0x33000000 } },
- { 6, 0x989c,
- { 0x01000000, 0x01000000, 0x01000000, 0x01000000, 0x01000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x1f000000, 0x1f000000, 0x1f000000, 0x1f000000, 0x1f000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00b80000, 0x00b80000, 0x00b80000, 0x00b80000, 0x00b80000 } },
- { 6, 0x989c,
- { 0x00b70000, 0x00b70000, 0x00b70000, 0x00b70000, 0x00b70000 } },
- { 6, 0x989c,
- { 0x00840000, 0x00840000, 0x00840000, 0x00840000, 0x00840000 } },
- { 6, 0x989c,
- { 0x00980000, 0x00980000, 0x00980000, 0x00980000, 0x00980000 } },
- { 6, 0x989c,
- { 0x00c00000, 0x00c00000, 0x00c00000, 0x00c00000, 0x00c00000 } },
- { 6, 0x989c,
- { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
- { 6, 0x989c,
- { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
- { 6, 0x989c,
- { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
- { 6, 0x989c,
- { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
- { 6, 0x989c,
- { 0x00d70000, 0x00d70000, 0x00d70000, 0x00d70000, 0x00d70000 } },
- { 6, 0x989c,
- { 0x00610000, 0x00610000, 0x00610000, 0x00610000, 0x00610000 } },
- { 6, 0x989c,
- { 0x00fe0000, 0x00fe0000, 0x00fe0000, 0x00fe0000, 0x00fe0000 } },
- { 6, 0x989c,
- { 0x00de0000, 0x00de0000, 0x00de0000, 0x00de0000, 0x00de0000 } },
- { 6, 0x989c,
- { 0x007f0000, 0x007f0000, 0x007f0000, 0x007f0000, 0x007f0000 } },
- { 6, 0x989c,
- { 0x043d0000, 0x043d0000, 0x043d0000, 0x043d0000, 0x043d0000 } },
- { 6, 0x989c,
- { 0x00770000, 0x00770000, 0x00770000, 0x00770000, 0x00770000 } },
- { 6, 0x989c,
- { 0x00440000, 0x00440000, 0x00440000, 0x00440000, 0x00440000 } },
- { 6, 0x989c,
- { 0x00980000, 0x00980000, 0x00980000, 0x00980000, 0x00980000 } },
- { 6, 0x989c,
- { 0x00100080, 0x00100080, 0x00100080, 0x00100080, 0x00100080 } },
- { 6, 0x989c,
- { 0x0005c034, 0x0005c034, 0x0005c034, 0x0005c034, 0x0005c034 } },
- { 6, 0x989c,
- { 0x003100f0, 0x003100f0, 0x003100f0, 0x003100f0, 0x003100f0 } },
- { 6, 0x989c,
- { 0x000c011f, 0x000c011f, 0x000c011f, 0x000c011f, 0x000c011f } },
- { 6, 0x989c,
- { 0x00510040, 0x00510040, 0x00510040, 0x00510040, 0x00510040 } },
- { 6, 0x989c,
- { 0x005000da, 0x005000da, 0x005000da, 0x005000da, 0x005000da } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00004044, 0x00004044, 0x00004044, 0x00004044, 0x00004044 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x000060c0, 0x000060c0, 0x000060c0, 0x000060c0, 0x000060c0 } },
- { 6, 0x989c,
- { 0x00002c00, 0x00002c00, 0x00003600, 0x00003600, 0x00002c00 } },
- { 6, 0x98c8,
- { 0x00000403, 0x00000403, 0x00040403, 0x00040403, 0x00040403 } },
- { 7, 0x989c,
- { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
- { 7, 0x989c,
- { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
- { 7, 0x98cc,
- { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x00000008, 0x00000008, 0x00000008 } },
+ { 3, 0x98dc, { 0x00a000c0, 0x00e000c0, 0x00e000c0 } },
+ { 6, 0x989c, { 0x33000000, 0x33000000, 0x33000000 } },
+ { 6, 0x989c, { 0x01000000, 0x01000000, 0x01000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x1f000000, 0x1f000000, 0x1f000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00b80000, 0x00b80000, 0x00b80000 } },
+ { 6, 0x989c, { 0x00b70000, 0x00b70000, 0x00b70000 } },
+ { 6, 0x989c, { 0x00840000, 0x00840000, 0x00840000 } },
+ { 6, 0x989c, { 0x00980000, 0x00980000, 0x00980000 } },
+ { 6, 0x989c, { 0x00c00000, 0x00c00000, 0x00c00000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
+ { 6, 0x989c, { 0x00d70000, 0x00d70000, 0x00d70000 } },
+ { 6, 0x989c, { 0x00610000, 0x00610000, 0x00610000 } },
+ { 6, 0x989c, { 0x00fe0000, 0x00fe0000, 0x00fe0000 } },
+ { 6, 0x989c, { 0x00de0000, 0x00de0000, 0x00de0000 } },
+ { 6, 0x989c, { 0x007f0000, 0x007f0000, 0x007f0000 } },
+ { 6, 0x989c, { 0x043d0000, 0x043d0000, 0x043d0000 } },
+ { 6, 0x989c, { 0x00770000, 0x00770000, 0x00770000 } },
+ { 6, 0x989c, { 0x00440000, 0x00440000, 0x00440000 } },
+ { 6, 0x989c, { 0x00980000, 0x00980000, 0x00980000 } },
+ { 6, 0x989c, { 0x00100080, 0x00100080, 0x00100080 } },
+ { 6, 0x989c, { 0x0005c034, 0x0005c034, 0x0005c034 } },
+ { 6, 0x989c, { 0x003100f0, 0x003100f0, 0x003100f0 } },
+ { 6, 0x989c, { 0x000c011f, 0x000c011f, 0x000c011f } },
+ { 6, 0x989c, { 0x00510040, 0x00510040, 0x00510040 } },
+ { 6, 0x989c, { 0x005000da, 0x005000da, 0x005000da } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00004044, 0x00004044, 0x00004044 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x000060c0, 0x000060c0, 0x000060c0 } },
+ { 6, 0x989c, { 0x00002c00, 0x00003600, 0x00003600 } },
+ { 6, 0x98c8, { 0x00000403, 0x00040403, 0x00040403 } },
+ { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
+ { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
};
@@ -931,92 +684,59 @@ static const struct ath5k_ini_rfbuffer rfb_5413[] = {
* AR2317 (Spider SoC) *
\***************************/
+/* BANK 2 len pos col */
+#define AR5K_RF2425_RF_TURBO { 1, 1, 2 }
+
/* BANK 6 len pos col */
#define AR5K_RF2425_OB_2GHZ { 3, 193, 0 }
#define AR5K_RF2425_DB_2GHZ { 3, 190, 0 }
static const struct ath5k_rf_reg rf_regs_2425[] = {
+ {2, AR5K_RF_TURBO, AR5K_RF2425_RF_TURBO},
{6, AR5K_RF_OB_2GHZ, AR5K_RF2425_OB_2GHZ},
{6, AR5K_RF_DB_2GHZ, AR5K_RF2425_DB_2GHZ},
};
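The { len, pos, col } triplets used above describe where a logical RF field (e.g. the new AR5K_RF2425_RF_TURBO entry) sits inside a serialized register bank. A minimal illustrative sketch, not the driver's actual helper, of how such a descriptor could be applied to a bit-packed bank buffer (all names here are assumptions; the real ath5k code also handles columns and bit ordering, which are omitted):

/* Illustrative only: assumes a simple bit-packed bank buffer. */
struct rf_field_sketch {
	u8  len;	/* number of bits in the field */
	u16 pos;	/* first bit position inside the bank */
	u8  col;	/* column the field belongs to (ignored below) */
};

static void rf_field_set_sketch(u32 *bank_buf,
				const struct rf_field_sketch *f, u32 val)
{
	u16 bit;

	for (bit = 0; bit < f->len; bit++) {
		u16 abs_bit = f->pos + bit;	/* column handling omitted */

		if (val & BIT(bit))
			bank_buf[abs_bit / 32] |= BIT(abs_bit % 32);
		else
			bank_buf[abs_bit / 32] &= ~BIT(abs_bit % 32);
	}
}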
/* Default mode specific settings
- * XXX: a/aTurbo ?
*/
static const struct ath5k_ini_rfbuffer rfb_2425[] = {
- { 1, 0x98d4,
- /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
- { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
- { 2, 0x98d0,
- { 0x02001408, 0x02001408, 0x02001408, 0x02001408, 0x02001408 } },
- { 3, 0x98dc,
- { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
- { 6, 0x989c,
- { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00100000, 0x00100000, 0x00100000, 0x00100000, 0x00100000 } },
- { 6, 0x989c,
- { 0x00020000, 0x00020000, 0x00020000, 0x00020000, 0x00020000 } },
- { 6, 0x989c,
- { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } },
- { 6, 0x989c,
- { 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000 } },
- { 6, 0x989c,
- { 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000 } },
- { 6, 0x989c,
- { 0x00140000, 0x00140000, 0x00140000, 0x00140000, 0x00140000 } },
- { 6, 0x989c,
- { 0x00910040, 0x00910040, 0x00910040, 0x00910040, 0x00910040 } },
- { 6, 0x989c,
- { 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a } },
- { 6, 0x989c,
- { 0x00410000, 0x00410000, 0x00410000, 0x00410000, 0x00410000 } },
- { 6, 0x989c,
- { 0x00810000, 0x00810000, 0x00810060, 0x00810060, 0x00810060 } },
- { 6, 0x989c,
- { 0x00020800, 0x00020800, 0x00020803, 0x00020803, 0x00020803 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00001660, 0x00001660, 0x00001660, 0x00001660, 0x00001660 } },
- { 6, 0x989c,
- { 0x00001688, 0x00001688, 0x00001688, 0x00001688, 0x00001688 } },
- { 6, 0x98c4,
- { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } },
- { 7, 0x989c,
- { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
- { 7, 0x989c,
- { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
- { 7, 0x98cc,
- { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
+ { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
+ { 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00100000, 0x00100000, 0x00100000 } },
+ { 6, 0x989c, { 0x00020000, 0x00020000, 0x00020000 } },
+ { 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
+ { 6, 0x989c, { 0x00f80000, 0x00f80000, 0x00f80000 } },
+ { 6, 0x989c, { 0x00e70000, 0x00e70000, 0x00e70000 } },
+ { 6, 0x989c, { 0x00140000, 0x00140000, 0x00140000 } },
+ { 6, 0x989c, { 0x00910040, 0x00910040, 0x00910040 } },
+ { 6, 0x989c, { 0x0007001a, 0x0007001a, 0x0007001a } },
+ { 6, 0x989c, { 0x00410000, 0x00410000, 0x00410000 } },
+ { 6, 0x989c, { 0x00810000, 0x00810060, 0x00810060 } },
+ { 6, 0x989c, { 0x00020800, 0x00020803, 0x00020803 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00001660, 0x00001660, 0x00001660 } },
+ { 6, 0x989c, { 0x00001688, 0x00001688, 0x00001688 } },
+ { 6, 0x98c4, { 0x00000001, 0x00000001, 0x00000001 } },
+ { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
+ { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
};
/*
@@ -1024,158 +744,85 @@ static const struct ath5k_ini_rfbuffer rfb_2425[] = {
* bank modification and get rid of this
*/
static const struct ath5k_ini_rfbuffer rfb_2317[] = {
- { 1, 0x98d4,
- /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
- { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
- { 2, 0x98d0,
- { 0x02001408, 0x02011408, 0x02001408, 0x02001408, 0x02011408 } },
- { 3, 0x98dc,
- { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
- { 6, 0x989c,
- { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00100000, 0x00100000, 0x00100000, 0x00100000, 0x00100000 } },
- { 6, 0x989c,
- { 0x00020000, 0x00020000, 0x00020000, 0x00020000, 0x00020000 } },
- { 6, 0x989c,
- { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } },
- { 6, 0x989c,
- { 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000 } },
- { 6, 0x989c,
- { 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000 } },
- { 6, 0x989c,
- { 0x00140100, 0x00140100, 0x00140100, 0x00140100, 0x00140100 } },
- { 6, 0x989c,
- { 0x00910040, 0x00910040, 0x00910040, 0x00910040, 0x00910040 } },
- { 6, 0x989c,
- { 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a } },
- { 6, 0x989c,
- { 0x00410000, 0x00410000, 0x00410000, 0x00410000, 0x00410000 } },
- { 6, 0x989c,
- { 0x00810000, 0x00810000, 0x00810060, 0x00810060, 0x00810060 } },
- { 6, 0x989c,
- { 0x00020800, 0x00020800, 0x00020803, 0x00020803, 0x00020803 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00001660, 0x00001660, 0x00001660, 0x00001660, 0x00001660 } },
- { 6, 0x989c,
- { 0x00009688, 0x00009688, 0x00009688, 0x00009688, 0x00009688 } },
- { 6, 0x98c4,
- { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } },
- { 7, 0x989c,
- { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
- { 7, 0x989c,
- { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
- { 7, 0x98cc,
- { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
+ { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
+ { 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00100000, 0x00100000, 0x00100000 } },
+ { 6, 0x989c, { 0x00020000, 0x00020000, 0x00020000 } },
+ { 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
+ { 6, 0x989c, { 0x00f80000, 0x00f80000, 0x00f80000 } },
+ { 6, 0x989c, { 0x00e70000, 0x00e70000, 0x00e70000 } },
+ { 6, 0x989c, { 0x00140100, 0x00140100, 0x00140100 } },
+ { 6, 0x989c, { 0x00910040, 0x00910040, 0x00910040 } },
+ { 6, 0x989c, { 0x0007001a, 0x0007001a, 0x0007001a } },
+ { 6, 0x989c, { 0x00410000, 0x00410000, 0x00410000 } },
+ { 6, 0x989c, { 0x00810000, 0x00810060, 0x00810060 } },
+ { 6, 0x989c, { 0x00020800, 0x00020803, 0x00020803 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00001660, 0x00001660, 0x00001660 } },
+ { 6, 0x989c, { 0x00009688, 0x00009688, 0x00009688 } },
+ { 6, 0x98c4, { 0x00000001, 0x00000001, 0x00000001 } },
+ { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
+ { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
};
/*
* TODO: Handle the few differences with swan during
* bank modification and get rid of this
- * XXX: a/aTurbo ?
*/
static const struct ath5k_ini_rfbuffer rfb_2417[] = {
- { 1, 0x98d4,
- /* mode a/XR mode aTurbo mode b mode g mode gTurbo */
- { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } },
- { 2, 0x98d0,
- { 0x02001408, 0x02001408, 0x02001408, 0x02001408, 0x02001408 } },
- { 3, 0x98dc,
- { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
- { 6, 0x989c,
- { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00100000, 0x00100000, 0x00100000, 0x00100000, 0x00100000 } },
- { 6, 0x989c,
- { 0x00020000, 0x00020000, 0x00020000, 0x00020000, 0x00020000 } },
- { 6, 0x989c,
- { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } },
- { 6, 0x989c,
- { 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000 } },
- { 6, 0x989c,
- { 0x00e70000, 0x00e70000, 0x80e70000, 0x80e70000, 0x00e70000 } },
- { 6, 0x989c,
- { 0x00140000, 0x00140000, 0x00140000, 0x00140000, 0x00140000 } },
- { 6, 0x989c,
- { 0x00910040, 0x00910040, 0x00910040, 0x00910040, 0x00910040 } },
- { 6, 0x989c,
- { 0x0007001a, 0x0007001a, 0x0207001a, 0x0207001a, 0x0007001a } },
- { 6, 0x989c,
- { 0x00410000, 0x00410000, 0x00410000, 0x00410000, 0x00410000 } },
- { 6, 0x989c,
- { 0x00810000, 0x00810000, 0x00810060, 0x00810060, 0x00810060 } },
- { 6, 0x989c,
- { 0x00020800, 0x00020800, 0x00020803, 0x00020803, 0x00020803 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
- { 6, 0x989c,
- { 0x00001660, 0x00001660, 0x00001660, 0x00001660, 0x00001660 } },
- { 6, 0x989c,
- { 0x00001688, 0x00001688, 0x00001688, 0x00001688, 0x00001688 } },
- { 6, 0x98c4,
- { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } },
- { 7, 0x989c,
- { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
- { 7, 0x989c,
- { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
- { 7, 0x98cc,
- { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
+ /* BANK / C.R. A/XR B G */
+ { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
+ { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
+ { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
+ { 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00100000, 0x00100000, 0x00100000 } },
+ { 6, 0x989c, { 0x00020000, 0x00020000, 0x00020000 } },
+ { 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
+ { 6, 0x989c, { 0x00f80000, 0x00f80000, 0x00f80000 } },
+ { 6, 0x989c, { 0x00e70000, 0x80e70000, 0x80e70000 } },
+ { 6, 0x989c, { 0x00140000, 0x00140000, 0x00140000 } },
+ { 6, 0x989c, { 0x00910040, 0x00910040, 0x00910040 } },
+ { 6, 0x989c, { 0x0007001a, 0x0207001a, 0x0207001a } },
+ { 6, 0x989c, { 0x00410000, 0x00410000, 0x00410000 } },
+ { 6, 0x989c, { 0x00810000, 0x00810060, 0x00810060 } },
+ { 6, 0x989c, { 0x00020800, 0x00020803, 0x00020803 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
+ { 6, 0x989c, { 0x00001660, 0x00001660, 0x00001660 } },
+ { 6, 0x989c, { 0x00001688, 0x00001688, 0x00001688 } },
+ { 6, 0x98c4, { 0x00000001, 0x00000001, 0x00000001 } },
+ { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
+ { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
+ { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
};
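With the aTurbo and gTurbo columns dropped, each rfbuffer entry above now carries one value per remaining mode (A/XR, B, G), as the new "/* BANK / C.R. A/XR B G */" header indicates. A minimal sketch of the assumed table layout; the identifiers are placeholders, only the three-column shape and the bank/control-register pairing come from the tables above:

/* Sketch only: assumed shape of the trimmed mode table. */
enum sketch_rf_mode {
	SKETCH_MODE_A_XR,
	SKETCH_MODE_B,
	SKETCH_MODE_G,
	SKETCH_RF_MODES			/* = 3 columns per entry */
};

struct sketch_ini_rfbuffer {
	u8  bank;				/* RF bank the word belongs to */
	u16 ctrl_register;			/* control register, e.g. 0x989c */
	u32 mode_data[SKETCH_RF_MODES];		/* one value per mode column */
};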
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
index 90757de7bf59..929c68cdf8ab 100644
--- a/drivers/net/wireless/ath/ath5k/sysfs.c
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -95,7 +95,7 @@ static struct attribute_group ath5k_attribute_group_ani = {
int
ath5k_sysfs_register(struct ath5k_softc *sc)
{
- struct device *dev = &sc->pdev->dev;
+ struct device *dev = sc->dev;
int err;
err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani);
@@ -110,7 +110,7 @@ ath5k_sysfs_register(struct ath5k_softc *sc)
void
ath5k_sysfs_unregister(struct ath5k_softc *sc)
{
- struct device *dev = &sc->pdev->dev;
+ struct device *dev = sc->dev;
sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani);
}
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
new file mode 100644
index 000000000000..2de68adb6240
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -0,0 +1,107 @@
+#if !defined(__TRACE_ATH5K_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_ATH5K_H
+
+#include <linux/tracepoint.h>
+#include "base.h"
+
+#ifndef CONFIG_ATH5K_TRACER
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+struct sk_buff;
+
+#define PRIV_ENTRY __field(struct ath5k_softc *, priv)
+#define PRIV_ASSIGN __entry->priv = priv
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath5k
+
+TRACE_EVENT(ath5k_rx,
+ TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb),
+ TP_ARGS(priv, skb),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(unsigned long, skbaddr)
+ __dynamic_array(u8, frame, skb->len)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->skbaddr = (unsigned long) skb;
+ memcpy(__get_dynamic_array(frame), skb->data, skb->len);
+ ),
+ TP_printk(
+ "[%p] RX skb=%lx", __entry->priv, __entry->skbaddr
+ )
+);
+
+TRACE_EVENT(ath5k_tx,
+ TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
+ struct ath5k_txq *q),
+
+ TP_ARGS(priv, skb, q),
+
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(unsigned long, skbaddr)
+ __field(u8, qnum)
+ __dynamic_array(u8, frame, skb->len)
+ ),
+
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->skbaddr = (unsigned long) skb;
+ __entry->qnum = (u8) q->qnum;
+ memcpy(__get_dynamic_array(frame), skb->data, skb->len);
+ ),
+
+ TP_printk(
+ "[%p] TX skb=%lx q=%d", __entry->priv, __entry->skbaddr,
+ __entry->qnum
+ )
+);
+
+TRACE_EVENT(ath5k_tx_complete,
+ TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
+ struct ath5k_txq *q, struct ath5k_tx_status *ts),
+
+ TP_ARGS(priv, skb, q, ts),
+
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(unsigned long, skbaddr)
+ __field(u8, qnum)
+ __field(u8, ts_status)
+ __field(s8, ts_rssi)
+ __field(u8, ts_antenna)
+ ),
+
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->skbaddr = (unsigned long) skb;
+ __entry->qnum = (u8) q->qnum;
+ __entry->ts_status = ts->ts_status;
+ __entry->ts_rssi = ts->ts_rssi;
+ __entry->ts_antenna = ts->ts_antenna;
+ ),
+
+ TP_printk(
+ "[%p] TX end skb=%lx q=%d stat=%x rssi=%d ant=%x",
+ __entry->priv, __entry->skbaddr, __entry->qnum,
+ __entry->ts_status, __entry->ts_rssi, __entry->ts_antenna
+ )
+);
+
+#endif /* __TRACE_ATH5K_H */
+
+#ifdef CONFIG_ATH5K_TRACER
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
+
+#endif
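The new trace.h keeps call sites identical whether tracing is built in or not: with CONFIG_ATH5K_TRACER unset, each TRACE_EVENT collapses into an empty static inline stub, so the calls compile away. A hedged usage sketch follows; the surrounding receive-completion function and its locals are assumptions, only the tracepoint name comes from the header above:

#include "trace.h"	/* trace_ath5k_rx()/trace_ath5k_tx*() or empty stubs */

/* Sketch of a call site in an assumed receive-completion path. */
static void sketch_rx_done(struct ath5k_softc *priv, struct sk_buff *skb)
{
	trace_ath5k_rx(priv, skb);	/* no-op unless CONFIG_ATH5K_TRACER is set */

	/* ... hand the frame to mac80211 ... */
}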
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index aca01621c205..4d66ca8042eb 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -4,7 +4,6 @@ ath9k-y += beacon.o \
main.o \
recv.o \
xmit.o \
- virtual.o \
ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_PCI) += pci.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 1a984b02e9e5..9cb0efa9b4c0 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -35,10 +35,9 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
- ath_print(common, ATH_DBG_FATAL,
- "%s: flash read failed, offset %08x "
- "is out of range\n",
- __func__, off);
+ ath_err(common,
+ "%s: flash read failed, offset %08x is out of range\n",
+ __func__, off);
return false;
}
@@ -55,7 +54,6 @@ static struct ath_bus_ops ath_ahb_bus_ops = {
static int ath_ahb_probe(struct platform_device *pdev)
{
void __iomem *mem;
- struct ath_wiphy *aphy;
struct ath_softc *sc;
struct ieee80211_hw *hw;
struct resource *res;
@@ -77,7 +75,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
goto err_out;
}
- mem = ioremap_nocache(res->start, res->end - res->start + 1);
+ mem = ioremap_nocache(res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -ENOMEM;
@@ -93,8 +91,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
irq = res->start;
- hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
- sizeof(struct ath_softc), &ath9k_ops);
+ hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
ret = -ENOMEM;
@@ -104,11 +101,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
SET_IEEE80211_DEV(hw, &pdev->dev);
platform_set_drvdata(pdev, hw);
- aphy = hw->priv;
- sc = (struct ath_softc *) (aphy + 1);
- aphy->sc = sc;
- aphy->hw = hw;
- sc->pri_wiphy = aphy;
+ sc = hw->priv;
sc->hw = hw;
sc->dev = &pdev->dev;
sc->mem = mem;
@@ -152,8 +145,7 @@ static int ath_ahb_remove(struct platform_device *pdev)
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
if (hw) {
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
void __iomem *mem = sc->mem;
ath9k_deinit_device(sc);
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 63ccb39cdcd4..2e31c775351f 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -135,8 +135,8 @@ static void ath9k_ani_restart(struct ath_hw *ah)
cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
}
- ath_print(common, ATH_DBG_ANI,
- "Writing ofdmbase=%u cckbase=%u\n", ofdm_base, cck_base);
+ ath_dbg(common, ATH_DBG_ANI,
+ "Writing ofdmbase=%u cckbase=%u\n", ofdm_base, cck_base);
ENABLE_REGWRITE_BUFFER(ah);
@@ -267,11 +267,11 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
aniState->noiseFloor = BEACON_RSSI(ah);
- ath_print(common, ATH_DBG_ANI,
- "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
- aniState->ofdmNoiseImmunityLevel,
- immunityLevel, aniState->noiseFloor,
- aniState->rssiThrLow, aniState->rssiThrHigh);
+ ath_dbg(common, ATH_DBG_ANI,
+ "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ aniState->ofdmNoiseImmunityLevel,
+ immunityLevel, aniState->noiseFloor,
+ aniState->rssiThrLow, aniState->rssiThrHigh);
aniState->ofdmNoiseImmunityLevel = immunityLevel;
@@ -334,11 +334,11 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
const struct ani_cck_level_entry *entry_cck;
aniState->noiseFloor = BEACON_RSSI(ah);
- ath_print(common, ATH_DBG_ANI,
- "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
- aniState->cckNoiseImmunityLevel, immunityLevel,
- aniState->noiseFloor, aniState->rssiThrLow,
- aniState->rssiThrHigh);
+ ath_dbg(common, ATH_DBG_ANI,
+ "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ aniState->cckNoiseImmunityLevel, immunityLevel,
+ aniState->noiseFloor, aniState->rssiThrLow,
+ aniState->rssiThrHigh);
if ((ah->opmode == NL80211_IFTYPE_STATION ||
ah->opmode == NL80211_IFTYPE_ADHOC) &&
@@ -358,7 +358,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
entry_cck->fir_step_level);
/* Skip MRC CCK for pre AR9003 families */
- if (!AR_SREV_9300_20_OR_LATER(ah))
+ if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah))
return;
if (aniState->mrcCCKOff == entry_cck->mrc_cck_on)
@@ -478,8 +478,8 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
if (ah->opmode != NL80211_IFTYPE_STATION
&& ah->opmode != NL80211_IFTYPE_ADHOC) {
- ath_print(common, ATH_DBG_ANI,
- "Reset ANI state opmode %u\n", ah->opmode);
+ ath_dbg(common, ATH_DBG_ANI,
+ "Reset ANI state opmode %u\n", ah->opmode);
ah->stats.ast_ani_reset++;
if (ah->opmode == NL80211_IFTYPE_AP) {
@@ -584,16 +584,14 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
ATH9K_ANI_OFDM_DEF_LEVEL ||
aniState->cckNoiseImmunityLevel !=
ATH9K_ANI_CCK_DEF_LEVEL) {
- ath_print(common, ATH_DBG_ANI,
- "Restore defaults: opmode %u "
- "chan %d Mhz/0x%x is_scanning=%d "
- "ofdm:%d cck:%d\n",
- ah->opmode,
- chan->channel,
- chan->channelFlags,
- is_scanning,
- aniState->ofdmNoiseImmunityLevel,
- aniState->cckNoiseImmunityLevel);
+ ath_dbg(common, ATH_DBG_ANI,
+ "Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+ ah->opmode,
+ chan->channel,
+ chan->channelFlags,
+ is_scanning,
+ aniState->ofdmNoiseImmunityLevel,
+ aniState->cckNoiseImmunityLevel);
ath9k_hw_set_ofdm_nil(ah, ATH9K_ANI_OFDM_DEF_LEVEL);
ath9k_hw_set_cck_nil(ah, ATH9K_ANI_CCK_DEF_LEVEL);
@@ -602,16 +600,14 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
/*
* restore historical levels for this channel
*/
- ath_print(common, ATH_DBG_ANI,
- "Restore history: opmode %u "
- "chan %d Mhz/0x%x is_scanning=%d "
- "ofdm:%d cck:%d\n",
- ah->opmode,
- chan->channel,
- chan->channelFlags,
- is_scanning,
- aniState->ofdmNoiseImmunityLevel,
- aniState->cckNoiseImmunityLevel);
+ ath_dbg(common, ATH_DBG_ANI,
+ "Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+ ah->opmode,
+ chan->channel,
+ chan->channelFlags,
+ is_scanning,
+ aniState->ofdmNoiseImmunityLevel,
+ aniState->cckNoiseImmunityLevel);
ath9k_hw_set_ofdm_nil(ah,
aniState->ofdmNoiseImmunityLevel);
@@ -666,19 +662,17 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) {
if (phyCnt1 < ofdm_base) {
- ath_print(common, ATH_DBG_ANI,
- "phyCnt1 0x%x, resetting "
- "counter value to 0x%x\n",
- phyCnt1, ofdm_base);
+ ath_dbg(common, ATH_DBG_ANI,
+ "phyCnt1 0x%x, resetting counter value to 0x%x\n",
+ phyCnt1, ofdm_base);
REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
REG_WRITE(ah, AR_PHY_ERR_MASK_1,
AR_PHY_ERR_OFDM_TIMING);
}
if (phyCnt2 < cck_base) {
- ath_print(common, ATH_DBG_ANI,
- "phyCnt2 0x%x, resetting "
- "counter value to 0x%x\n",
- phyCnt2, cck_base);
+ ath_dbg(common, ATH_DBG_ANI,
+ "phyCnt2 0x%x, resetting counter value to 0x%x\n",
+ phyCnt2, cck_base);
REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
REG_WRITE(ah, AR_PHY_ERR_MASK_2,
AR_PHY_ERR_CCK_TIMING);
@@ -719,13 +713,12 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
cckPhyErrRate = aniState->cckPhyErrCount * 1000 /
aniState->listenTime;
- ath_print(common, ATH_DBG_ANI,
- "listenTime=%d OFDM:%d errs=%d/s CCK:%d "
- "errs=%d/s ofdm_turn=%d\n",
- aniState->listenTime,
- aniState->ofdmNoiseImmunityLevel,
- ofdmPhyErrRate, aniState->cckNoiseImmunityLevel,
- cckPhyErrRate, aniState->ofdmsTurn);
+ ath_dbg(common, ATH_DBG_ANI,
+ "listenTime=%d OFDM:%d errs=%d/s CCK:%d errs=%d/s ofdm_turn=%d\n",
+ aniState->listenTime,
+ aniState->ofdmNoiseImmunityLevel,
+ ofdmPhyErrRate, aniState->cckNoiseImmunityLevel,
+ cckPhyErrRate, aniState->ofdmsTurn);
if (aniState->listenTime > 5 * ah->aniperiod) {
if (ofdmPhyErrRate <= ah->config.ofdm_trig_low &&
@@ -755,7 +748,7 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- ath_print(common, ATH_DBG_ANI, "Enable MIB counters\n");
+ ath_dbg(common, ATH_DBG_ANI, "Enable MIB counters\n");
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -777,7 +770,7 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- ath_print(common, ATH_DBG_ANI, "Disable MIB counters\n");
+ ath_dbg(common, ATH_DBG_ANI, "Disable MIB counters\n");
REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -834,10 +827,10 @@ void ath9k_hw_ani_setup(struct ath_hw *ah)
{
int i;
- const int totalSizeDesired[] = { -55, -55, -55, -55, -62 };
- const int coarseHigh[] = { -14, -14, -14, -14, -12 };
- const int coarseLow[] = { -64, -64, -64, -64, -70 };
- const int firpwr[] = { -78, -78, -78, -78, -80 };
+ static const int totalSizeDesired[] = { -55, -55, -55, -55, -62 };
+ static const int coarseHigh[] = { -14, -14, -14, -14, -12 };
+ static const int coarseLow[] = { -64, -64, -64, -64, -70 };
+ static const int firpwr[] = { -78, -78, -78, -78, -80 };
for (i = 0; i < 5; i++) {
ah->totalSizeDesired[i] = totalSizeDesired[i];
@@ -852,7 +845,7 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
int i;
- ath_print(common, ATH_DBG_ANI, "Initialize ANI\n");
+ ath_dbg(common, ATH_DBG_ANI, "Initialize ANI\n");
if (use_new_ani(ah)) {
ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index ea9f4497f58c..ffcf44a4058b 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -130,9 +130,8 @@ static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
/* pre-reverse this field */
tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
- ath_print(common, ATH_DBG_CONFIG,
- "Force rf_pwd_icsyndiv to %1d on %4d\n",
- new_bias, synth_freq);
+ ath_dbg(common, ATH_DBG_CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n",
+ new_bias, synth_freq);
/* swizzle rf_pwd_icsyndiv */
ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
@@ -173,8 +172,7 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
channelSel = ((freq - 704) * 2 - 3040) / 10;
bModeSynth = 1;
} else {
- ath_print(common, ATH_DBG_FATAL,
- "Invalid channel %u MHz\n", freq);
+ ath_err(common, "Invalid channel %u MHz\n", freq);
return -EINVAL;
}
@@ -206,8 +204,7 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
aModeRefSel = ath9k_hw_reverse_bits(1, 2);
} else {
- ath_print(common, ATH_DBG_FATAL,
- "Invalid channel %u MHz\n", freq);
+ ath_err(common, "Invalid channel %u MHz\n", freq);
return -EINVAL;
}
@@ -244,13 +241,15 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
int upper, lower, cur_vit_mask;
int tmp, new;
int i;
- int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
- AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ static int pilot_mask_reg[4] = {
+ AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
};
- int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
- AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ static int chan_mask_reg[4] = {
+ AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
};
- int inc[4] = { 0, 100, 0, 0 };
+ static int inc[4] = { 0, 100, 0, 0 };
int8_t mask_m[123];
int8_t mask_p[123];
@@ -446,8 +445,7 @@ static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
#define ATH_ALLOC_BANK(bank, size) do { \
bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
if (!bank) { \
- ath_print(common, ATH_DBG_FATAL, \
- "Cannot allocate RF banks\n"); \
+ ath_err(common, "Cannot allocate RF banks\n"); \
return -ENOMEM; \
} \
} while (0);
@@ -873,12 +871,11 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
channel->max_antenna_gain * 2,
channel->max_power * 2,
min((u32) MAX_RATE_POWER,
- (u32) regulatory->power_limit));
+ (u32) regulatory->power_limit), false);
/* Write analog registers */
if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "ar5416SetRfRegs failed\n");
+ ath_err(ath9k_hw_common(ah), "ar5416SetRfRegs failed\n");
return -EIO;
}
@@ -964,18 +961,6 @@ static void ar5008_hw_rfbus_done(struct ath_hw *ah)
REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
}
-static void ar5008_hw_enable_rfkill(struct ath_hw *ah)
-{
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
- AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
-
- REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
- AR_GPIO_INPUT_MUX2_RFSILENT);
-
- ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
- REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
-}
-
static void ar5008_restore_chainmask(struct ath_hw *ah)
{
int rx_chainmask = ah->rxchainmask;
@@ -1056,10 +1041,9 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
+ ath_dbg(common, ATH_DBG_ANI,
+ "level out of range (%u > %zu)\n",
+ level, ARRAY_SIZE(ah->totalSizeDesired));
return false;
}
@@ -1084,12 +1068,12 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
break;
}
case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
- const int m1ThreshLow[] = { 127, 50 };
- const int m2ThreshLow[] = { 127, 40 };
- const int m1Thresh[] = { 127, 0x4d };
- const int m2Thresh[] = { 127, 0x40 };
- const int m2CountThr[] = { 31, 16 };
- const int m2CountThrLow[] = { 63, 48 };
+ static const int m1ThreshLow[] = { 127, 50 };
+ static const int m2ThreshLow[] = { 127, 40 };
+ static const int m1Thresh[] = { 127, 0x4d };
+ static const int m2Thresh[] = { 127, 0x40 };
+ static const int m2CountThr[] = { 31, 16 };
+ static const int m2CountThrLow[] = { 63, 48 };
u32 on = param ? 1 : 0;
REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
@@ -1141,7 +1125,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
break;
}
case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
- const int weakSigThrCck[] = { 8, 6 };
+ static const int weakSigThrCck[] = { 8, 6 };
u32 high = param ? 1 : 0;
REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
@@ -1157,14 +1141,13 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
break;
}
case ATH9K_ANI_FIRSTEP_LEVEL:{
- const int firstep[] = { 0, 4, 8 };
+ static const int firstep[] = { 0, 4, 8 };
u32 level = param;
if (level >= ARRAY_SIZE(firstep)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(firstep));
+ ath_dbg(common, ATH_DBG_ANI,
+ "level out of range (%u > %zu)\n",
+ level, ARRAY_SIZE(firstep));
return false;
}
REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
@@ -1178,14 +1161,13 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
break;
}
case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
- const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
+ static const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1)) {
- ath_print(common, ATH_DBG_ANI,
- "level out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(cycpwrThr1));
+ ath_dbg(common, ATH_DBG_ANI,
+ "level out of range (%u > %zu)\n",
+ level, ARRAY_SIZE(cycpwrThr1));
return false;
}
REG_RMW_FIELD(ah, AR_PHY_TIMING5,
@@ -1201,25 +1183,22 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
case ATH9K_ANI_PRESENT:
break;
default:
- ath_print(common, ATH_DBG_ANI,
- "invalid cmd %u\n", cmd);
+ ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
return false;
}
- ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
- ath_print(common, ATH_DBG_ANI,
- "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
- "ofdmWeakSigDetectOff=%d\n",
- aniState->noiseImmunityLevel,
- aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff);
- ath_print(common, ATH_DBG_ANI,
- "cckWeakSigThreshold=%d, "
- "firstepLevel=%d, listenTime=%d\n",
- aniState->cckWeakSigThreshold,
- aniState->firstepLevel,
- aniState->listenTime);
- ath_print(common, ATH_DBG_ANI,
+ ath_dbg(common, ATH_DBG_ANI, "ANI parameters:\n");
+ ath_dbg(common, ATH_DBG_ANI,
+ "noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n",
+ aniState->noiseImmunityLevel,
+ aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff);
+ ath_dbg(common, ATH_DBG_ANI,
+ "cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n",
+ aniState->cckWeakSigThreshold,
+ aniState->firstepLevel,
+ aniState->listenTime);
+ ath_dbg(common, ATH_DBG_ANI,
"ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
aniState->ofdmPhyErrCount,
aniState->cckPhyErrCount);
@@ -1304,12 +1283,12 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
- ath_print(common, ATH_DBG_ANI,
- "** ch %d: ofdm weak signal: %s=>%s\n",
- chan->channel,
- !aniState->ofdmWeakSigDetectOff ?
- "on" : "off",
- on ? "on" : "off");
+ ath_dbg(common, ATH_DBG_ANI,
+ "** ch %d: ofdm weak signal: %s=>%s\n",
+ chan->channel,
+ !aniState->ofdmWeakSigDetectOff ?
+ "on" : "off",
+ on ? "on" : "off");
if (on)
ah->stats.ast_ani_ofdmon++;
else
@@ -1322,11 +1301,9 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(firstep_table)) {
- ath_print(common, ATH_DBG_ANI,
- "ATH9K_ANI_FIRSTEP_LEVEL: level "
- "out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(firstep_table));
+ ath_dbg(common, ATH_DBG_ANI,
+ "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
+ level, ARRAY_SIZE(firstep_table));
return false;
}
@@ -1361,24 +1338,22 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
if (level != aniState->firstepLevel) {
- ath_print(common, ATH_DBG_ANI,
- "** ch %d: level %d=>%d[def:%d] "
- "firstep[level]=%d ini=%d\n",
- chan->channel,
- aniState->firstepLevel,
- level,
- ATH9K_ANI_FIRSTEP_LVL_NEW,
- value,
- aniState->iniDef.firstep);
- ath_print(common, ATH_DBG_ANI,
- "** ch %d: level %d=>%d[def:%d] "
- "firstep_low[level]=%d ini=%d\n",
- chan->channel,
- aniState->firstepLevel,
- level,
- ATH9K_ANI_FIRSTEP_LVL_NEW,
- value2,
- aniState->iniDef.firstepLow);
+ ath_dbg(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL_NEW,
+ value,
+ aniState->iniDef.firstep);
+ ath_dbg(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL_NEW,
+ value2,
+ aniState->iniDef.firstepLow);
if (level > aniState->firstepLevel)
ah->stats.ast_ani_stepup++;
else if (level < aniState->firstepLevel)
@@ -1391,11 +1366,9 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
- ath_print(common, ATH_DBG_ANI,
- "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level "
- "out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(cycpwrThr1_table));
+ ath_dbg(common, ATH_DBG_ANI,
+ "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
+ level, ARRAY_SIZE(cycpwrThr1_table));
return false;
}
/*
@@ -1429,24 +1402,22 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
if (level != aniState->spurImmunityLevel) {
- ath_print(common, ATH_DBG_ANI,
- "** ch %d: level %d=>%d[def:%d] "
- "cycpwrThr1[level]=%d ini=%d\n",
- chan->channel,
- aniState->spurImmunityLevel,
- level,
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
- value,
- aniState->iniDef.cycpwrThr1);
- ath_print(common, ATH_DBG_ANI,
- "** ch %d: level %d=>%d[def:%d] "
- "cycpwrThr1Ext[level]=%d ini=%d\n",
- chan->channel,
- aniState->spurImmunityLevel,
- level,
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
- value2,
- aniState->iniDef.cycpwrThr1Ext);
+ ath_dbg(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ value,
+ aniState->iniDef.cycpwrThr1);
+ ath_dbg(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ value2,
+ aniState->iniDef.cycpwrThr1Ext);
if (level > aniState->spurImmunityLevel)
ah->stats.ast_ani_spurup++;
else if (level < aniState->spurImmunityLevel)
@@ -1465,22 +1436,19 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
case ATH9K_ANI_PRESENT:
break;
default:
- ath_print(common, ATH_DBG_ANI,
- "invalid cmd %u\n", cmd);
+ ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
return false;
}
- ath_print(common, ATH_DBG_ANI,
- "ANI parameters: SI=%d, ofdmWS=%s FS=%d "
- "MRCcck=%s listenTime=%d "
- "ofdmErrs=%d cckErrs=%d\n",
- aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff ? "on" : "off",
- aniState->firstepLevel,
- !aniState->mrcCCKOff ? "on" : "off",
- aniState->listenTime,
- aniState->ofdmPhyErrCount,
- aniState->cckPhyErrCount);
+ ath_dbg(common, ATH_DBG_ANI,
+ "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
+ aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff ? "on" : "off",
+ aniState->firstepLevel,
+ !aniState->mrcCCKOff ? "on" : "off",
+ aniState->listenTime,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
return true;
}
@@ -1490,25 +1458,25 @@ static void ar5008_hw_do_getnf(struct ath_hw *ah,
int16_t nf;
nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
- nfarray[0] = sign_extend(nf, 9);
+ nfarray[0] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
- nfarray[1] = sign_extend(nf, 9);
+ nfarray[1] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
- nfarray[2] = sign_extend(nf, 9);
+ nfarray[2] = sign_extend32(nf, 8);
if (!IS_CHAN_HT40(ah->curchan))
return;
nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
- nfarray[3] = sign_extend(nf, 9);
+ nfarray[3] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
- nfarray[4] = sign_extend(nf, 9);
+ nfarray[4] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
- nfarray[5] = sign_extend(nf, 9);
+ nfarray[5] = sign_extend32(nf, 8);
}
/*
@@ -1526,13 +1494,12 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
iniDef = &aniState->iniDef;
- ath_print(common, ATH_DBG_ANI,
- "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
- ah->hw_version.macVersion,
- ah->hw_version.macRev,
- ah->opmode,
- chan->channel,
- chan->channelFlags);
+ ath_dbg(common, ATH_DBG_ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ah->hw_version.macVersion,
+ ah->hw_version.macRev,
+ ah->opmode,
+ chan->channel,
+ chan->channelFlags);
val = REG_READ(ah, AR_PHY_SFCORR);
iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
@@ -1579,10 +1546,55 @@ static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_5416_5GHZ;
}
+static void ar5008_hw_set_radar_params(struct ath_hw *ah,
+ struct ath_hw_radar_conf *conf)
+{
+ u32 radar_0 = 0, radar_1 = 0;
+
+ if (!conf) {
+ REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
+ return;
+ }
+
+ radar_0 |= AR_PHY_RADAR_0_ENA | AR_PHY_RADAR_0_FFT_ENA;
+ radar_0 |= SM(conf->fir_power, AR_PHY_RADAR_0_FIRPWR);
+ radar_0 |= SM(conf->radar_rssi, AR_PHY_RADAR_0_RRSSI);
+ radar_0 |= SM(conf->pulse_height, AR_PHY_RADAR_0_HEIGHT);
+ radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
+ radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
+
+ radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
+ radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
+ radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
+ radar_1 |= SM(conf->pulse_inband_step, AR_PHY_RADAR_1_RELSTEP_THRESH);
+ radar_1 |= SM(conf->radar_inband, AR_PHY_RADAR_1_RELPWR_THRESH);
+
+ REG_WRITE(ah, AR_PHY_RADAR_0, radar_0);
+ REG_WRITE(ah, AR_PHY_RADAR_1, radar_1);
+ if (conf->ext_channel)
+ REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
+ else
+ REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
+}
+
+static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
+{
+ struct ath_hw_radar_conf *conf = &ah->radar_conf;
+
+ conf->fir_power = -33;
+ conf->radar_rssi = 20;
+ conf->pulse_height = 10;
+ conf->pulse_rssi = 24;
+ conf->pulse_inband = 15;
+ conf->pulse_maxlen = 255;
+ conf->pulse_inband_step = 12;
+ conf->radar_inband = 8;
+}
+
void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
- const u32 ar5416_cca_regs[6] = {
+ static const u32 ar5416_cca_regs[6] = {
AR_PHY_CCA,
AR_PHY_CH1_CCA,
AR_PHY_CH2_CCA,
@@ -1605,10 +1617,10 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
priv_ops->set_delta_slope = ar5008_hw_set_delta_slope;
priv_ops->rfbus_req = ar5008_hw_rfbus_req;
priv_ops->rfbus_done = ar5008_hw_rfbus_done;
- priv_ops->enable_rfkill = ar5008_hw_enable_rfkill;
priv_ops->restore_chainmask = ar5008_restore_chainmask;
priv_ops->set_diversity = ar5008_set_diversity;
priv_ops->do_getnf = ar5008_hw_do_getnf;
+ priv_ops->set_radar_params = ar5008_hw_set_radar_params;
if (modparam_force_new_ani) {
priv_ops->ani_control = ar5008_hw_ani_control_new;
@@ -1624,5 +1636,6 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
ar5008_hw_set_nf_limits(ah);
+ ar5008_hw_set_radar_conf(ah);
memcpy(ah->nf_regs, ar5416_cca_regs, sizeof(ah->nf_regs));
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 15f62cd0cc38..76388c6d6692 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -39,18 +39,18 @@ static void ar9002_hw_setup_calibration(struct ath_hw *ah,
switch (currCal->calData->calType) {
case IQ_MISMATCH_CAL:
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting IQ Mismatch Calibration\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "starting IQ Mismatch Calibration\n");
break;
case ADC_GAIN_CAL:
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting ADC Gain Calibration\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "starting ADC Gain Calibration\n");
break;
case ADC_DC_CAL:
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting ADC DC Calibration\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "starting ADC DC Calibration\n");
break;
}
@@ -107,11 +107,11 @@ static void ar9002_hw_iqcal_collect(struct ath_hw *ah)
REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
ah->totalIqCorrMeas[i] +=
(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
- ah->cal_samples, i, ah->totalPowerMeasI[i],
- ah->totalPowerMeasQ[i],
- ah->totalIqCorrMeas[i]);
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+ ah->cal_samples, i, ah->totalPowerMeasI[i],
+ ah->totalPowerMeasQ[i],
+ ah->totalIqCorrMeas[i]);
}
}
@@ -129,14 +129,13 @@ static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah)
ah->totalAdcQEvenPhase[i] +=
REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
- "oddq=0x%08x; evenq=0x%08x;\n",
- ah->cal_samples, i,
- ah->totalAdcIOddPhase[i],
- ah->totalAdcIEvenPhase[i],
- ah->totalAdcQOddPhase[i],
- ah->totalAdcQEvenPhase[i]);
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
+ ah->cal_samples, i,
+ ah->totalAdcIOddPhase[i],
+ ah->totalAdcIEvenPhase[i],
+ ah->totalAdcQOddPhase[i],
+ ah->totalAdcQEvenPhase[i]);
}
}
@@ -154,14 +153,13 @@ static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah)
ah->totalAdcDcOffsetQEvenPhase[i] +=
(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
- "oddq=0x%08x; evenq=0x%08x;\n",
- ah->cal_samples, i,
- ah->totalAdcDcOffsetIOddPhase[i],
- ah->totalAdcDcOffsetIEvenPhase[i],
- ah->totalAdcDcOffsetQOddPhase[i],
- ah->totalAdcDcOffsetQEvenPhase[i]);
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
+ ah->cal_samples, i,
+ ah->totalAdcDcOffsetIOddPhase[i],
+ ah->totalAdcDcOffsetIEvenPhase[i],
+ ah->totalAdcDcOffsetQOddPhase[i],
+ ah->totalAdcDcOffsetQEvenPhase[i]);
}
}
@@ -178,13 +176,13 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
powerMeasQ = ah->totalPowerMeasQ[i];
iqCorrMeas = ah->totalIqCorrMeas[i];
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting IQ Cal and Correction for Chain %d\n",
- i);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n",
+ i);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
- i, ah->totalIqCorrMeas[i]);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ i, ah->totalIqCorrMeas[i]);
iqCorrNeg = 0;
@@ -193,12 +191,12 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
iqCorrNeg = 1;
}
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
- ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
- iqCorrNeg);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
+ ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
+ iqCorrNeg);
iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
qCoffDenom = powerMeasQ / 64;
@@ -207,14 +205,14 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
(qCoffDenom != 0)) {
iCoff = iqCorrMeas / iCoffDenom;
qCoff = powerMeasI / qCoffDenom - 64;
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d iCoff = 0x%08x\n", i, iCoff);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d qCoff = 0x%08x\n", i, qCoff);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d iCoff = 0x%08x\n", i, iCoff);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d qCoff = 0x%08x\n", i, qCoff);
iCoff = iCoff & 0x3f;
- ath_print(common, ATH_DBG_CALIBRATE,
- "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
if (iqCorrNeg == 0x0)
iCoff = 0x40 - iCoff;
@@ -223,9 +221,9 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
else if (qCoff <= -16)
qCoff = -16;
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
- i, iCoff, qCoff);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+ i, iCoff, qCoff);
REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
@@ -233,9 +231,9 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
qCoff);
- ath_print(common, ATH_DBG_CALIBRATE,
- "IQ Cal and Correction done for Chain %d\n",
- i);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction done for Chain %d\n",
+ i);
}
}
@@ -255,21 +253,21 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
qOddMeasOffset = ah->totalAdcQOddPhase[i];
qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting ADC Gain Cal for Chain %d\n", i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
- iOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = 0x%08x\n", i,
- iEvenMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
- qOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = 0x%08x\n", i,
- qEvenMeasOffset);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Starting ADC Gain Cal for Chain %d\n", i);
+
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
+ iOddMeasOffset);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_i = 0x%08x\n", i,
+ iEvenMeasOffset);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
+ qOddMeasOffset);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_q = 0x%08x\n", i,
+ qEvenMeasOffset);
if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
iGainMismatch =
@@ -279,20 +277,20 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
((qOddMeasOffset * 32) /
qEvenMeasOffset) & 0x3f;
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_i = 0x%08x\n", i,
- iGainMismatch);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_q = 0x%08x\n", i,
- qGainMismatch);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d gain_mismatch_i = 0x%08x\n", i,
+ iGainMismatch);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d gain_mismatch_q = 0x%08x\n", i,
+ qGainMismatch);
val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
val &= 0xfffff000;
val |= (qGainMismatch) | (iGainMismatch << 6);
REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
- ath_print(common, ATH_DBG_CALIBRATE,
- "ADC Gain Cal done for Chain %d\n", i);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "ADC Gain Cal done for Chain %d\n", i);
}
}
@@ -317,41 +315,41 @@ static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting ADC DC Offset Cal for Chain %d\n", i);
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = %d\n", i,
- iOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = %d\n", i,
- iEvenMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = %d\n", i,
- qOddMeasOffset);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = %d\n", i,
- qEvenMeasOffset);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Starting ADC DC Offset Cal for Chain %d\n", i);
+
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_i = %d\n", i,
+ iOddMeasOffset);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_i = %d\n", i,
+ iEvenMeasOffset);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_odd_q = %d\n", i,
+ qOddMeasOffset);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_even_q = %d\n", i,
+ qEvenMeasOffset);
iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
numSamples) & 0x1ff;
qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
numSamples) & 0x1ff;
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
- iDcMismatch);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
- qDcMismatch);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
+ iDcMismatch);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
+ qDcMismatch);
val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
val &= 0xc0000fff;
val |= (qDcMismatch << 12) | (iDcMismatch << 21);
REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
- ath_print(common, ATH_DBG_CALIBRATE,
- "ADC DC Offset Cal done for Chain %d\n", i);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "ADC DC Offset Cal done for Chain %d\n", i);
}
REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
@@ -540,7 +538,7 @@ static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
{ 0x7838, 0 },
};
- ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
/* PA CAL is not needed for high power solution */
if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
@@ -681,10 +679,6 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
/* Do NF cal only at longer intervals */
if (longcal || nfcal_pending) {
- /* Do periodic PAOffset Cal */
- ar9002_hw_pa_cal(ah, false);
- ar9002_hw_olc_temp_compensation(ah);
-
/*
* Get the value from the previous NF cal and update
* history buffer.
@@ -699,8 +693,12 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
ath9k_hw_loadnf(ah, ah->curchan);
}
- if (longcal)
+ if (longcal) {
ath9k_hw_start_nfcal(ah, false);
+ /* Do periodic PAOffset Cal */
+ ar9002_hw_pa_cal(ah, false);
+ ar9002_hw_olc_temp_compensation(ah);
+ }
}
return iscaldone;
@@ -721,9 +719,8 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE, "offset "
- "calibration failed to complete in "
- "1ms; noisy ??\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
@@ -736,8 +733,8 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
- "failed to complete in 1ms; noisy ??\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -808,7 +805,10 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
- if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
+ if (AR_SREV_9271(ah)) {
+ if (!ar9285_hw_cl_cal(ah, chan))
+ return false;
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
if (!ar9285_hw_clc(ah, chan))
return false;
} else {
@@ -829,9 +829,8 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "offset calibration failed to "
- "complete in 1ms; noisy environment?\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -866,19 +865,19 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
INIT_CAL(&ah->adcgain_caldata);
INSERT_CAL(ah, &ah->adcgain_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling ADC Gain Calibration.\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "enabling ADC Gain Calibration.\n");
INIT_CAL(&ah->adcdc_caldata);
INSERT_CAL(ah, &ah->adcdc_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling ADC DC Calibration.\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "enabling ADC DC Calibration.\n");
}
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling IQ Calibration.\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "enabling IQ Calibration.\n");
ah->cal_list_curr = ah->cal_list;
@@ -958,6 +957,9 @@ static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
&adc_dc_cal_multi_sample;
}
ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
+
+ if (AR_SREV_9287(ah))
+ ah->supp_cals &= ~ADC_GAIN_CAL;
}
}
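
For readers following the I/Q mismatch arithmetic in ar9002_hw_iqcalibrate() above, here is a standalone C sketch of the coefficient computation as it appears in the hunk (divide-by-zero guard, 6-bit iCoff field, signed clamp on qCoff). It is illustrative only and not part of the driver; the upper clamp value and the negative-measurement handling are assumed from context rather than shown in full above.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the iCoff/qCoff math from the hunk above. */
static void ar9002_iq_coeff_sketch(uint32_t power_meas_i, uint32_t power_meas_q,
				   uint32_t iq_corr_meas, int iq_corr_neg)
{
	uint32_t i_coff_denom = (power_meas_i / 2 + power_meas_q / 2) / 128;
	uint32_t q_coff_denom = power_meas_q / 64;
	int32_t i_coff, q_coff;

	if (i_coff_denom == 0 || q_coff_denom == 0)
		return;	/* the driver skips the chain instead of dividing by zero */

	i_coff = iq_corr_meas / i_coff_denom;
	q_coff = power_meas_i / q_coff_denom - 64;

	i_coff &= 0x3f;			/* 6-bit register field */
	if (iq_corr_neg == 0)
		i_coff = 0x40 - i_coff;	/* flip sign when the correlation was positive */

	if (q_coff > 15)		/* assumed upper clamp; only the lower clamp is visible above */
		q_coff = 15;
	else if (q_coff <= -16)
		q_coff = -16;

	printf("iCoff = 0x%x, qCoff = %d\n", (unsigned int)i_coff, (int)q_coff);
}
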
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 48261b7252d0..f44c84ab5dce 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -22,28 +22,10 @@
int modparam_force_new_ani;
module_param_named(force_new_ani, modparam_force_new_ani, int, 0444);
-MODULE_PARM_DESC(nohwcrypt, "Force new ANI for AR5008, AR9001, AR9002");
+MODULE_PARM_DESC(force_new_ani, "Force new ANI for AR5008, AR9001, AR9002");
 /* General hardware code for the AR5008/AR9001/AR9002 hardware families */
-static bool ar9002_hw_macversion_supported(u32 macversion)
-{
- switch (macversion) {
- case AR_SREV_VERSION_5416_PCI:
- case AR_SREV_VERSION_5416_PCIE:
- case AR_SREV_VERSION_9160:
- case AR_SREV_VERSION_9100:
- case AR_SREV_VERSION_9280:
- case AR_SREV_VERSION_9285:
- case AR_SREV_VERSION_9287:
- case AR_SREV_VERSION_9271:
- return true;
- default:
- break;
- }
- return false;
-}
-
static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
{
if (AR_SREV_9271(ah)) {
@@ -444,9 +426,8 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
}
/* WAR for ASPM system hang */
- if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) {
+ if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
val |= (AR_WA_BIT6 | AR_WA_BIT7);
- }
if (AR_SREV_9285E_20(ah))
val |= AR_WA_BIT23;
@@ -494,9 +475,9 @@ int ar9002_hw_rf_claim(struct ath_hw *ah)
case AR_RAD2122_SREV_MAJOR:
break;
default:
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Radio Chip Rev 0x%02X not supported\n",
- val & AR_RADIO_SREV_MAJOR);
+ ath_err(ath9k_hw_common(ah),
+ "Radio Chip Rev 0x%02X not supported\n",
+ val & AR_RADIO_SREV_MAJOR);
return -EOPNOTSUPP;
}
@@ -565,7 +546,6 @@ void ar9002_hw_attach_ops(struct ath_hw *ah)
priv_ops->init_mode_regs = ar9002_hw_init_mode_regs;
priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
- priv_ops->macversion_supported = ar9002_hw_macversion_supported;
ops->config_pci_powersave = ar9002_hw_configpcipowersave;
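
One small but easy-to-miss fix in the ar9002_hw.c hunk above: MODULE_PARM_DESC() must name the same parameter that module_param_named() exposes. The old description was attached to "nohwcrypt", a parameter this file does not define, so modinfo would have shown no description for force_new_ani. A minimal sketch of the correct pairing, using the names from the hunk:

#include <linux/module.h>

static int modparam_force_new_ani;
/* the first argument of both macros is the user-visible parameter name */
module_param_named(force_new_ani, modparam_force_new_ani, int, 0444);
MODULE_PARM_DESC(force_new_ani, "Force new ANI for AR5008, AR9001, AR9002");
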
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 50dda394f8be..399ab3bb299b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -90,13 +90,10 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
*masked = isr & ATH9K_INT_COMMON;
- if (ah->config.rx_intr_mitigation) {
- if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
- *masked |= ATH9K_INT_RX;
- }
-
- if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
+ if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM |
+ AR_ISR_RXOK | AR_ISR_RXERR))
*masked |= ATH9K_INT_RX;
+
if (isr &
(AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
AR_ISR_TXEOL)) {
@@ -114,16 +111,8 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
}
if (isr & AR_ISR_RXORN) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "receive FIFO overrun interrupt\n");
- }
-
- if (!AR_SREV_9100(ah)) {
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
- if (isr5 & AR_ISR_S5_TIM_TIMER)
- *masked |= ATH9K_INT_TIM_TIMER;
- }
+ ath_dbg(common, ATH_DBG_INTERRUPT,
+ "receive FIFO overrun interrupt\n");
}
*masked |= mask2;
@@ -136,17 +125,18 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
u32 s5_s;
s5_s = REG_READ(ah, AR_ISR_S5_S);
- if (isr & AR_ISR_GENTMR) {
- ah->intr_gen_timer_trigger =
+ ah->intr_gen_timer_trigger =
MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
- ah->intr_gen_timer_thresh =
- MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
+ ah->intr_gen_timer_thresh =
+ MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
- if (ah->intr_gen_timer_trigger)
- *masked |= ATH9K_INT_GENTIMER;
+ if (ah->intr_gen_timer_trigger)
+ *masked |= ATH9K_INT_GENTIMER;
- }
+ if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
+ !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ *masked |= ATH9K_INT_TIM_TIMER;
}
if (sync_cause) {
@@ -157,25 +147,25 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
if (fatal_int) {
if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
- ath_print(common, ATH_DBG_ANY,
- "received PCI FATAL interrupt\n");
+ ath_dbg(common, ATH_DBG_ANY,
+ "received PCI FATAL interrupt\n");
}
if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
- ath_print(common, ATH_DBG_ANY,
- "received PCI PERR interrupt\n");
+ ath_dbg(common, ATH_DBG_ANY,
+ "received PCI PERR interrupt\n");
}
*masked |= ATH9K_INT_FATAL;
}
if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
+ ath_dbg(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
REG_WRITE(ah, AR_RC, 0);
*masked |= ATH9K_INT_FATAL;
}
if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
- ath_print(common, ATH_DBG_INTERRUPT,
- "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
+ ath_dbg(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
}
REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
@@ -218,77 +208,70 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
struct ath_tx_status *ts)
{
struct ar5416_desc *ads = AR5416DESC(ds);
+ u32 status;
- if ((ads->ds_txstatus9 & AR_TxDone) == 0)
+ status = ACCESS_ONCE(ads->ds_txstatus9);
+ if ((status & AR_TxDone) == 0)
return -EINPROGRESS;
- ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
ts->ts_tstamp = ads->AR_SendTimestamp;
ts->ts_status = 0;
ts->ts_flags = 0;
- if (ads->ds_txstatus1 & AR_FrmXmitOK)
+ if (status & AR_TxOpExceeded)
+ ts->ts_status |= ATH9K_TXERR_XTXOP;
+ ts->tid = MS(status, AR_TxTid);
+ ts->ts_rateindex = MS(status, AR_FinalTxIdx);
+ ts->ts_seqnum = MS(status, AR_SeqNum);
+
+ status = ACCESS_ONCE(ads->ds_txstatus0);
+ ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
+ ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
+ ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
+ if (status & AR_TxBaStatus) {
+ ts->ts_flags |= ATH9K_TX_BA;
+ ts->ba_low = ads->AR_BaBitmapLow;
+ ts->ba_high = ads->AR_BaBitmapHigh;
+ }
+
+ status = ACCESS_ONCE(ads->ds_txstatus1);
+ if (status & AR_FrmXmitOK)
ts->ts_status |= ATH9K_TX_ACKED;
- if (ads->ds_txstatus1 & AR_ExcessiveRetries)
- ts->ts_status |= ATH9K_TXERR_XRETRY;
- if (ads->ds_txstatus1 & AR_Filtered)
- ts->ts_status |= ATH9K_TXERR_FILT;
- if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
- ts->ts_status |= ATH9K_TXERR_FIFO;
- ath9k_hw_updatetxtriglevel(ah, true);
+ else {
+ if (status & AR_ExcessiveRetries)
+ ts->ts_status |= ATH9K_TXERR_XRETRY;
+ if (status & AR_Filtered)
+ ts->ts_status |= ATH9K_TXERR_FILT;
+ if (status & AR_FIFOUnderrun) {
+ ts->ts_status |= ATH9K_TXERR_FIFO;
+ ath9k_hw_updatetxtriglevel(ah, true);
+ }
}
- if (ads->ds_txstatus9 & AR_TxOpExceeded)
- ts->ts_status |= ATH9K_TXERR_XTXOP;
- if (ads->ds_txstatus1 & AR_TxTimerExpired)
+ if (status & AR_TxTimerExpired)
ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
-
- if (ads->ds_txstatus1 & AR_DescCfgErr)
+ if (status & AR_DescCfgErr)
ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
- if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
+ if (status & AR_TxDataUnderrun) {
ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
ath9k_hw_updatetxtriglevel(ah, true);
}
- if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
+ if (status & AR_TxDelimUnderrun) {
ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
ath9k_hw_updatetxtriglevel(ah, true);
}
- if (ads->ds_txstatus0 & AR_TxBaStatus) {
- ts->ts_flags |= ATH9K_TX_BA;
- ts->ba_low = ads->AR_BaBitmapLow;
- ts->ba_high = ads->AR_BaBitmapHigh;
- }
+ ts->ts_shortretry = MS(status, AR_RTSFailCnt);
+ ts->ts_longretry = MS(status, AR_DataFailCnt);
+ ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
- ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
- switch (ts->ts_rateindex) {
- case 0:
- ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
- break;
- case 1:
- ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
- break;
- case 2:
- ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
- break;
- case 3:
- ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
- break;
- }
+ status = ACCESS_ONCE(ads->ds_txstatus5);
+ ts->ts_rssi = MS(status, AR_TxRSSICombined);
+ ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
+ ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
+ ts->ts_rssi_ext2 = MS(status, AR_TxRSSIAnt12);
- ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
- ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
- ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
- ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
- ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
- ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
- ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
ts->evm0 = ads->AR_TxEVM0;
ts->evm1 = ads->AR_TxEVM1;
ts->evm2 = ads->AR_TxEVM2;
- ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
- ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
- ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
- ts->tid = MS(ads->ds_txstatus9, AR_TxTid);
- ts->ts_antenna = 0;
return 0;
}
@@ -300,7 +283,6 @@ static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
{
struct ar5416_desc *ads = AR5416DESC(ds);
- txPower += ah->txpower_indexoffset;
if (txPower > 63)
txPower = 63;
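
The ar9002_mac.c rework of ar9002_hw_proc_txdesc() above snapshots each DMA-updated status word once with ACCESS_ONCE() and then parses all of its bitfields from the local copy, so a word being rewritten by hardware mid-parse cannot yield a mix of old and new fields. A minimal sketch of the pattern, with illustrative masks and shifts standing in for the driver's MS() field macros:

#include <stdint.h>

#define READ_ONCE_U32(p)		(*(volatile const uint32_t *)(p))
#define FIELD(word, mask, shift)	(((word) & (mask)) >> (shift))

struct tx_status_sketch {
	unsigned int seqnum;
	unsigned int rate_idx;
};

/* returns -1 while the descriptor is still owned by hardware */
static int parse_status_word(const uint32_t *txstatus9,
			     struct tx_status_sketch *ts)
{
	uint32_t status = READ_ONCE_U32(txstatus9);	/* single read of DMA memory */

	if (!(status & (1u << 0)))			/* illustrative "done" bit */
		return -1;

	/* every field below comes from the same snapshot */
	ts->seqnum = FIELD(status, 0x00000fff, 0);	/* illustrative layout */
	ts->rate_idx = FIELD(status, 0x00003000, 12);	/* illustrative layout */
	return 0;
}
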
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index c00cdc67b55b..7d68d61e406b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -175,13 +175,15 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
int upper, lower, cur_vit_mask;
int tmp, newVal;
int i;
- int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
- AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
+ static const int pilot_mask_reg[4] = {
+ AR_PHY_TIMING7, AR_PHY_TIMING8,
+ AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
};
- int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
- AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
+ static const int chan_mask_reg[4] = {
+ AR_PHY_TIMING9, AR_PHY_TIMING10,
+ AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
};
- int inc[4] = { 0, 100, 0, 0 };
+ static const int inc[4] = { 0, 100, 0, 0 };
struct chan_centers centers;
int8_t mask_m[123];
@@ -201,13 +203,14 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+
if (is2GHz)
cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
else
cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
- if (AR_NO_SPUR == cur_bb_spur)
- break;
cur_bb_spur = cur_bb_spur - freq;
if (IS_CHAN_HT40(chan)) {
@@ -473,21 +476,21 @@ static void ar9002_hw_do_getnf(struct ath_hw *ah,
int16_t nf;
nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
- nfarray[0] = sign_extend(nf, 9);
+ nfarray[0] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR);
if (IS_CHAN_HT40(ah->curchan))
- nfarray[3] = sign_extend(nf, 9);
+ nfarray[3] = sign_extend32(nf, 8);
if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
return;
nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
- nfarray[1] = sign_extend(nf, 9);
+ nfarray[1] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR9280_PHY_CH1_EXT_MINCCA_PWR);
if (IS_CHAN_HT40(ah->curchan))
- nfarray[4] = sign_extend(nf, 9);
+ nfarray[4] = sign_extend32(nf, 8);
}
static void ar9002_hw_set_nf_limits(struct ath_hw *ah)
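
The ar9002_phy.c noise-floor hunk above replaces the driver-local sign_extend(nf, 9) with the kernel's sign_extend32(nf, 8): sign_extend32() takes the 0-based index of the sign bit, so a 9-bit minCCA power field is extended from bit 8. A self-contained sketch of the equivalent operation:

#include <stdint.h>
#include <assert.h>

/* same semantics as the kernel's sign_extend32(value, index) */
static int32_t sign_extend_9bit_sketch(uint32_t value)
{
	const int index = 8;		/* sign bit of a 9-bit field */
	const int shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* 0x1f3 is -13 in 9-bit two's complement */
	assert(sign_extend_9bit_sketch(0x1f3) == -13);
	return 0;
}
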
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index a14a5e43cf56..9ecca93392e8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -34,9 +34,9 @@ static const u32 ar9300_2p2_radio_postamble[][5] = {
static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
- {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
- {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a2dc, 0x00033800, 0x00033800, 0x00637800, 0x00637800},
+ {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03838000, 0x03838000},
+ {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03fc0000, 0x03fc0000},
{0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -56,21 +56,21 @@ static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
- {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
- {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
+ {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
+ {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
@@ -88,44 +88,44 @@ static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
{0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
{0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
- {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
- {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
- {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
- {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
- {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
- {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
- {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
- {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
- {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
+ {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
+ {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
+ {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
+ {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
+ {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
+ {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
+ {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
+ {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
+ {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
- {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
- {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
- {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
- {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x00033800, 0x00033800, 0x00637800, 0x00637800},
+ {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03838000, 0x03838000},
+ {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03fc0000, 0x03fc0000},
{0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
- {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
- {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
+ {0x0000c2dc, 0x00033800, 0x00033800, 0x00637800, 0x00637800},
+ {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03838000, 0x03838000},
+ {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03fc0000, 0x03fc0000},
{0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
{0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
@@ -638,6 +638,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
{0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0},
{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x01026a2f, 0x01026a2f, 0x01026a2f, 0x01026a2f},
{0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
{0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
{0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
@@ -680,7 +681,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000981c, 0x00020028},
{0x00009834, 0x6400a290},
{0x00009838, 0x0108ecff},
- {0x0000983c, 0x14750600},
+ {0x0000983c, 0x0d000600},
{0x00009880, 0x201fff00},
{0x00009884, 0x00001042},
{0x000098a4, 0x00200400},
@@ -722,7 +723,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a220, 0x00000000},
{0x0000a224, 0x00000000},
{0x0000a228, 0x10002310},
- {0x0000a22c, 0x01036a27},
{0x0000a23c, 0x00000000},
{0x0000a244, 0x0c000000},
{0x0000a2a0, 0x00000001},
@@ -1842,7 +1842,7 @@ static const u32 ar9300_2p2_soc_preamble[][2] = {
static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
/* Addr allmodes */
- {0x00004040, 0x08212e5e},
+ {0x00004040, 0x0821265e},
{0x00004040, 0x0008003b},
{0x00004044, 0x00000000},
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 9e6edffe0bd1..4a4cd88429c0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -18,6 +18,16 @@
#include "hw-ops.h"
#include "ar9003_phy.h"
+#define MPASS 3
+#define MAX_MEASUREMENT 8
+#define MAX_DIFFERENCE 10
+
+struct coeff {
+ int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MPASS];
+ int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MPASS];
+ int iqc_coeff[2];
+};
+
enum ar9003_cal_types {
IQ_MISMATCH_CAL = BIT(0),
TEMP_COMP_CAL = BIT(1),
@@ -40,8 +50,8 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
currCal->calData->calCountMax);
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting IQ Mismatch Calibration\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "starting IQ Mismatch Calibration\n");
/* Kick-off cal */
REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
@@ -52,8 +62,8 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
AR_PHY_65NM_CH0_THERM_START, 1);
- ath_print(common, ATH_DBG_CALIBRATE,
- "starting Temperature Compensation Calibration\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "starting Temperature Compensation Calibration\n");
break;
}
}
@@ -181,11 +191,11 @@ static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
ah->totalIqCorrMeas[i] +=
(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
- ah->cal_samples, i, ah->totalPowerMeasI[i],
- ah->totalPowerMeasQ[i],
- ah->totalIqCorrMeas[i]);
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
+ ah->cal_samples, i, ah->totalPowerMeasI[i],
+ ah->totalPowerMeasQ[i],
+ ah->totalIqCorrMeas[i]);
}
}
@@ -196,7 +206,7 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
u32 qCoffDenom, iCoffDenom;
int32_t qCoff, iCoff;
int iqCorrNeg, i;
- const u_int32_t offset_array[3] = {
+ static const u_int32_t offset_array[3] = {
AR_PHY_RX_IQCAL_CORR_B0,
AR_PHY_RX_IQCAL_CORR_B1,
AR_PHY_RX_IQCAL_CORR_B2,
@@ -207,13 +217,13 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
powerMeasQ = ah->totalPowerMeasQ[i];
iqCorrMeas = ah->totalIqCorrMeas[i];
- ath_print(common, ATH_DBG_CALIBRATE,
- "Starting IQ Cal and Correction for Chain %d\n",
- i);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n",
+ i);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
- i, ah->totalIqCorrMeas[i]);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Original: Chn %d iq_corr_meas = 0x%08x\n",
+ i, ah->totalIqCorrMeas[i]);
iqCorrNeg = 0;
@@ -222,12 +232,12 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
iqCorrNeg = 1;
}
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
- ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
- iqCorrNeg);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
+ ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
+ iqCorrNeg);
iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;
qCoffDenom = powerMeasQ / 64;
@@ -235,10 +245,10 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
if ((iCoffDenom != 0) && (qCoffDenom != 0)) {
iCoff = iqCorrMeas / iCoffDenom;
qCoff = powerMeasI / qCoffDenom - 64;
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d iCoff = 0x%08x\n", i, iCoff);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d qCoff = 0x%08x\n", i, qCoff);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d iCoff = 0x%08x\n", i, iCoff);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d qCoff = 0x%08x\n", i, qCoff);
/* Force bounds on iCoff */
if (iCoff >= 63)
@@ -259,14 +269,13 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
iCoff = iCoff & 0x7f;
qCoff = qCoff & 0x7f;
- ath_print(common, ATH_DBG_CALIBRATE,
- "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
- i, iCoff, qCoff);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Register offset (0x%04x) "
- "before update = 0x%x\n",
- offset_array[i],
- REG_READ(ah, offset_array[i]));
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+ i, iCoff, qCoff);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) before update = 0x%x\n",
+ offset_array[i],
+ REG_READ(ah, offset_array[i]));
REG_RMW_FIELD(ah, offset_array[i],
AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
@@ -274,33 +283,29 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
REG_RMW_FIELD(ah, offset_array[i],
AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
qCoff);
- ath_print(common, ATH_DBG_CALIBRATE,
- "Register offset (0x%04x) QI COFF "
- "(bitfields 0x%08x) after update = 0x%x\n",
- offset_array[i],
- AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
- REG_READ(ah, offset_array[i]));
- ath_print(common, ATH_DBG_CALIBRATE,
- "Register offset (0x%04x) QQ COFF "
- "(bitfields 0x%08x) after update = 0x%x\n",
- offset_array[i],
- AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
- REG_READ(ah, offset_array[i]));
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "IQ Cal and Correction done for Chain %d\n",
- i);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) QI COFF (bitfields 0x%08x) after update = 0x%x\n",
+ offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
+ REG_READ(ah, offset_array[i]));
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Register offset (0x%04x) QQ COFF (bitfields 0x%08x) after update = 0x%x\n",
+ offset_array[i],
+ AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
+ REG_READ(ah, offset_array[i]));
+
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction done for Chain %d\n", i);
}
}
REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0,
AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE);
- ath_print(common, ATH_DBG_CALIBRATE,
- "IQ Cal and Correction (offset 0x%04x) enabled "
- "(bit position 0x%08x). New Value 0x%08x\n",
- (unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
- AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
- REG_READ(ah, AR_PHY_RX_IQCAL_CORR_B0));
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "IQ Cal and Correction (offset 0x%04x) enabled (bit position 0x%08x). New Value 0x%08x\n",
+ (unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
+ AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
+ REG_READ(ah, AR_PHY_RX_IQCAL_CORR_B0));
}
static const struct ath9k_percal_data iq_cal_single_sample = {
@@ -340,7 +345,7 @@ static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
f2 = (f1 * f1 + f3 * f3) / result_shift;
if (!f2) {
- ath_print(common, ATH_DBG_CALIBRATE, "Divide by 0\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE, "Divide by 0\n");
return false;
}
@@ -461,11 +466,14 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) ||
(i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "Divide by 0:\na0_d0=%d\n"
- "a0_d1=%d\na2_d0=%d\na1_d1=%d\n",
- i2_p_q2_a0_d0, i2_p_q2_a0_d1,
- i2_p_q2_a1_d0, i2_p_q2_a1_d1);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Divide by 0:\n"
+ "a0_d0=%d\n"
+ "a0_d1=%d\n"
+ "a1_d0=%d\n"
+ "a1_d1=%d\n",
+ i2_p_q2_a0_d0, i2_p_q2_a0_d1,
+ i2_p_q2_a1_d0, i2_p_q2_a1_d1);
return false;
}
@@ -498,9 +506,9 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2);
if ((mag1 == 0) || (mag2 == 0)) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "Divide by 0: mag1=%d, mag2=%d\n",
- mag1, mag2);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag1=%d, mag2=%d\n",
+ mag1, mag2);
return false;
}
@@ -517,8 +525,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
mag_a0_d0, phs_a0_d0,
mag_a1_d0,
phs_a1_d0, solved_eq)) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "Call to ar9003_hw_solve_iq_cal() failed.\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Call to ar9003_hw_solve_iq_cal() failed.\n");
return false;
}
@@ -527,14 +535,14 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
mag_rx = solved_eq[2];
phs_rx = solved_eq[3];
- ath_print(common, ATH_DBG_CALIBRATE,
- "chain %d: mag mismatch=%d phase mismatch=%d\n",
- chain_idx, mag_tx/res_scale, phs_tx/res_scale);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "chain %d: mag mismatch=%d phase mismatch=%d\n",
+ chain_idx, mag_tx/res_scale, phs_tx/res_scale);
if (res_scale == mag_tx) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "Divide by 0: mag_tx=%d, res_scale=%d\n",
- mag_tx, res_scale);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag_tx=%d, res_scale=%d\n",
+ mag_tx, res_scale);
return false;
}
@@ -545,9 +553,9 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
q_q_coff = (mag_corr_tx * 128 / res_scale);
q_i_coff = (phs_corr_tx * 256 / res_scale);
- ath_print(common, ATH_DBG_CALIBRATE,
- "tx chain %d: mag corr=%d phase corr=%d\n",
- chain_idx, q_q_coff, q_i_coff);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "tx chain %d: mag corr=%d phase corr=%d\n",
+ chain_idx, q_q_coff, q_i_coff);
if (q_i_coff < -63)
q_i_coff = -63;
@@ -560,14 +568,14 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
- ath_print(common, ATH_DBG_CALIBRATE,
- "tx chain %d: iq corr coeff=%x\n",
- chain_idx, iqc_coeff[0]);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "tx chain %d: iq corr coeff=%x\n",
+ chain_idx, iqc_coeff[0]);
if (-mag_rx == res_scale) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "Divide by 0: mag_rx=%d, res_scale=%d\n",
- mag_rx, res_scale);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Divide by 0: mag_rx=%d, res_scale=%d\n",
+ mag_rx, res_scale);
return false;
}
@@ -578,9 +586,9 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
q_q_coff = (mag_corr_rx * 128 / res_scale);
q_i_coff = (phs_corr_rx * 256 / res_scale);
- ath_print(common, ATH_DBG_CALIBRATE,
- "rx chain %d: mag corr=%d phase corr=%d\n",
- chain_idx, q_q_coff, q_i_coff);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "rx chain %d: mag corr=%d phase corr=%d\n",
+ chain_idx, q_q_coff, q_i_coff);
if (q_i_coff < -63)
q_i_coff = -63;
@@ -593,140 +601,367 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
- ath_print(common, ATH_DBG_CALIBRATE,
- "rx chain %d: iq corr coeff=%x\n",
- chain_idx, iqc_coeff[1]);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "rx chain %d: iq corr coeff=%x\n",
+ chain_idx, iqc_coeff[1]);
+
+ return true;
+}
+
+static bool ar9003_hw_compute_closest_pass_and_avg(int *mp_coeff, int *mp_avg)
+{
+ int diff[MPASS];
+
+ diff[0] = abs(mp_coeff[0] - mp_coeff[1]);
+ diff[1] = abs(mp_coeff[1] - mp_coeff[2]);
+ diff[2] = abs(mp_coeff[2] - mp_coeff[0]);
+
+ if (diff[0] > MAX_DIFFERENCE &&
+ diff[1] > MAX_DIFFERENCE &&
+ diff[2] > MAX_DIFFERENCE)
+ return false;
+
+ if (diff[0] <= diff[1] && diff[0] <= diff[2])
+ *mp_avg = (mp_coeff[0] + mp_coeff[1]) / 2;
+ else if (diff[1] <= diff[2])
+ *mp_avg = (mp_coeff[1] + mp_coeff[2]) / 2;
+ else
+ *mp_avg = (mp_coeff[2] + mp_coeff[0]) / 2;
return true;
}
+static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
+ u8 num_chains,
+ struct coeff *coeff)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int i, im, nmeasurement;
+ int magnitude, phase;
+ u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
+
+ memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff));
+ for (i = 0; i < MAX_MEASUREMENT / 2; i++) {
+ tx_corr_coeff[i * 2][0] = tx_corr_coeff[(i * 2) + 1][0] =
+ AR_PHY_TX_IQCAL_CORR_COEFF_B0(i);
+ if (!AR_SREV_9485(ah)) {
+ tx_corr_coeff[i * 2][1] =
+ tx_corr_coeff[(i * 2) + 1][1] =
+ AR_PHY_TX_IQCAL_CORR_COEFF_B1(i);
+
+ tx_corr_coeff[i * 2][2] =
+ tx_corr_coeff[(i * 2) + 1][2] =
+ AR_PHY_TX_IQCAL_CORR_COEFF_B2(i);
+ }
+ }
+
+ /* Load the average of 2 passes */
+ for (i = 0; i < num_chains; i++) {
+ if (AR_SREV_9485(ah))
+ nmeasurement = REG_READ_FIELD(ah,
+ AR_PHY_TX_IQCAL_STATUS_B0_9485,
+ AR_PHY_CALIBRATED_GAINS_0);
+ else
+ nmeasurement = REG_READ_FIELD(ah,
+ AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_CALIBRATED_GAINS_0);
+
+ if (nmeasurement > MAX_MEASUREMENT)
+ nmeasurement = MAX_MEASUREMENT;
+
+ for (im = 0; im < nmeasurement; im++) {
+ /*
+ * Determine which 2 passes are closest and compute avg
+ * magnitude
+ */
+ if (!ar9003_hw_compute_closest_pass_and_avg(coeff->mag_coeff[i][im],
+ &magnitude))
+ goto disable_txiqcal;
+
+ /*
+ * Determine which 2 passes are closest and compute avg
+ * phase
+ */
+ if (!ar9003_hw_compute_closest_pass_and_avg(coeff->phs_coeff[i][im],
+ &phase))
+ goto disable_txiqcal;
+
+ coeff->iqc_coeff[0] = (magnitude & 0x7f) |
+ ((phase & 0x7f) << 7);
+
+ if ((im % 2) == 0)
+ REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
+ AR_PHY_TX_IQCAL_CORR_COEFF_00_COEFF_TABLE,
+ coeff->iqc_coeff[0]);
+ else
+ REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
+ AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
+ coeff->iqc_coeff[0]);
+ }
+ }
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
+ AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
+
+ return;
+
+disable_txiqcal:
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
+ AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x0);
+ REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
+ AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x0);
+
+ ath_dbg(common, ATH_DBG_CALIBRATE, "TX IQ Cal disabled\n");
+}
+
static void ar9003_hw_tx_iq_cal(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
+ static const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
AR_PHY_TX_IQCAL_STATUS_B0,
AR_PHY_TX_IQCAL_STATUS_B1,
AR_PHY_TX_IQCAL_STATUS_B2,
};
- const u32 tx_corr_coeff[AR9300_MAX_CHAINS] = {
- AR_PHY_TX_IQCAL_CORR_COEFF_01_B0,
- AR_PHY_TX_IQCAL_CORR_COEFF_01_B1,
- AR_PHY_TX_IQCAL_CORR_COEFF_01_B2,
- };
- const u32 rx_corr[AR9300_MAX_CHAINS] = {
- AR_PHY_RX_IQCAL_CORR_B0,
- AR_PHY_RX_IQCAL_CORR_B1,
- AR_PHY_RX_IQCAL_CORR_B2,
- };
- const u_int32_t chan_info_tab[] = {
+ static const u32 chan_info_tab[] = {
AR_PHY_CHAN_INFO_TAB_0,
AR_PHY_CHAN_INFO_TAB_1,
AR_PHY_CHAN_INFO_TAB_2,
};
+ struct coeff coeff;
s32 iq_res[6];
- s32 iqc_coeff[2];
- s32 i, j;
- u32 num_chains = 0;
+ s32 i, j, ip, im, nmeasurement;
+ u8 nchains = get_streams(common->tx_chainmask);
+
+ for (ip = 0; ip < MPASS; ip++) {
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
+ DELPT);
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
+ AR_PHY_TX_IQCAL_START_DO_CAL,
+ AR_PHY_TX_IQCAL_START_DO_CAL);
+
+ if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
+ AR_PHY_TX_IQCAL_START_DO_CAL,
+ 0, AH_WAIT_TIMEOUT)) {
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Tx IQ Cal not complete.\n");
+ goto TX_IQ_CAL_FAILED;
+ }
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (ah->txchainmask & (1 << i))
- num_chains++;
- }
+ nmeasurement = REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_CALIBRATED_GAINS_0);
+ if (nmeasurement > MAX_MEASUREMENT)
+ nmeasurement = MAX_MEASUREMENT;
+
+ for (i = 0; i < nchains; i++) {
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Doing Tx IQ Cal for chain %d.\n", i);
+ for (im = 0; im < nmeasurement; im++) {
+ if (REG_READ(ah, txiqcal_status[i]) &
+ AR_PHY_TX_IQCAL_STATUS_FAILED) {
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Tx IQ Cal failed for chain %d.\n", i);
+ goto TX_IQ_CAL_FAILED;
+ }
- REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
- AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
- DELPT);
- REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
- AR_PHY_TX_IQCAL_START_DO_CAL,
- AR_PHY_TX_IQCAL_START_DO_CAL);
+ for (j = 0; j < 3; j++) {
+ u8 idx = 2 * j,
+ offset = 4 * (3 * im + j);
- if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
- AR_PHY_TX_IQCAL_START_DO_CAL,
- 0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "Tx IQ Cal not complete.\n");
- goto TX_IQ_CAL_FAILED;
- }
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ,
+ 0);
- for (i = 0; i < num_chains; i++) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "Doing Tx IQ Cal for chain %d.\n", i);
+ /* 32 bits */
+ iq_res[idx] = REG_READ(ah,
+ chan_info_tab[i] +
+ offset);
- if (REG_READ(ah, txiqcal_status[i]) &
- AR_PHY_TX_IQCAL_STATUS_FAILED) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "Tx IQ Cal failed for chain %d.\n", i);
- goto TX_IQ_CAL_FAILED;
- }
+ REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ,
+ 1);
- for (j = 0; j < 3; j++) {
- u_int8_t idx = 2 * j,
- offset = 4 * j;
+ /* 16 bits */
+ iq_res[idx+1] = 0xffff & REG_READ(ah,
+ chan_info_tab[i] +
+ offset);
- REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
- AR_PHY_CHAN_INFO_TAB_S2_READ, 0);
-
- /* 32 bits */
- iq_res[idx] = REG_READ(ah, chan_info_tab[i] + offset);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "IQ RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
+ idx, iq_res[idx], idx+1, iq_res[idx+1]);
+ }
- REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
- AR_PHY_CHAN_INFO_TAB_S2_READ, 1);
+ if (!ar9003_hw_calc_iq_corr(ah, i, iq_res,
+ coeff.iqc_coeff)) {
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Failed in calculation of IQ correction.\n");
+ goto TX_IQ_CAL_FAILED;
+ }
+ coeff.mag_coeff[i][im][ip] =
+ coeff.iqc_coeff[0] & 0x7f;
+ coeff.phs_coeff[i][im][ip] =
+ (coeff.iqc_coeff[0] >> 7) & 0x7f;
- /* 16 bits */
- iq_res[idx+1] = 0xffff & REG_READ(ah,
- chan_info_tab[i] +
- offset);
+ if (coeff.mag_coeff[i][im][ip] > 63)
+ coeff.mag_coeff[i][im][ip] -= 128;
+ if (coeff.phs_coeff[i][im][ip] > 63)
+ coeff.phs_coeff[i][im][ip] -= 128;
- ath_print(common, ATH_DBG_CALIBRATE,
- "IQ RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
- idx, iq_res[idx], idx+1, iq_res[idx+1]);
- }
-
- if (!ar9003_hw_calc_iq_corr(ah, i, iq_res, iqc_coeff)) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "Failed in calculation of IQ correction.\n");
- goto TX_IQ_CAL_FAILED;
+ }
}
-
- ath_print(common, ATH_DBG_CALIBRATE,
- "IQ_COEFF[0] = 0x%x IQ_COEFF[1] = 0x%x\n",
- iqc_coeff[0], iqc_coeff[1]);
-
- REG_RMW_FIELD(ah, tx_corr_coeff[i],
- AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
- iqc_coeff[0]);
- REG_RMW_FIELD(ah, rx_corr[i],
- AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF,
- iqc_coeff[1] >> 7);
- REG_RMW_FIELD(ah, rx_corr[i],
- AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF,
- iqc_coeff[1]);
}
- REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
- AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
- REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
- AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
+ ar9003_hw_tx_iqcal_load_avg_2_passes(ah, nchains, &coeff);
return;
TX_IQ_CAL_FAILED:
- ath_print(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
+}
+
+static void ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
+{
+ u8 tx_gain_forced;
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1_9485,
+ AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT, DELPT);
+ tx_gain_forced = REG_READ_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TXGAIN_FORCE);
+ if (tx_gain_forced)
+ REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
+ AR_PHY_TXGAIN_FORCE, 0);
+
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START_9485,
+ AR_PHY_TX_IQCAL_START_DO_CAL_9485, 1);
}
+static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
+ AR_PHY_TX_IQCAL_STATUS_B0_9485,
+ AR_PHY_TX_IQCAL_STATUS_B1,
+ AR_PHY_TX_IQCAL_STATUS_B2,
+ };
+ const u_int32_t chan_info_tab[] = {
+ AR_PHY_CHAN_INFO_TAB_0,
+ AR_PHY_CHAN_INFO_TAB_1,
+ AR_PHY_CHAN_INFO_TAB_2,
+ };
+ struct coeff coeff;
+ s32 iq_res[6];
+ u8 num_chains = 0;
+ int i, ip, im, j;
+ int nmeasurement;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (ah->txchainmask & (1 << i))
+ num_chains++;
+ }
+
+ for (ip = 0; ip < MPASS; ip++) {
+ for (i = 0; i < num_chains; i++) {
+ nmeasurement = REG_READ_FIELD(ah,
+ AR_PHY_TX_IQCAL_STATUS_B0_9485,
+ AR_PHY_CALIBRATED_GAINS_0);
+ if (nmeasurement > MAX_MEASUREMENT)
+ nmeasurement = MAX_MEASUREMENT;
+
+ for (im = 0; im < nmeasurement; im++) {
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Doing Tx IQ Cal for chain %d.\n", i);
+
+ if (REG_READ(ah, txiqcal_status[i]) &
+ AR_PHY_TX_IQCAL_STATUS_FAILED) {
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Tx IQ Cal failed for chain %d.\n", i);
+ goto tx_iqcal_fail;
+ }
+
+ for (j = 0; j < 3; j++) {
+ u32 idx = 2 * j, offset = 4 * (3 * im + j);
+
+ REG_RMW_FIELD(ah,
+ AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ,
+ 0);
+
+ /* 32 bits */
+ iq_res[idx] = REG_READ(ah,
+ chan_info_tab[i] +
+ offset);
+
+ REG_RMW_FIELD(ah,
+ AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_TAB_S2_READ,
+ 1);
+
+ /* 16 bits */
+ iq_res[idx + 1] = 0xffff & REG_READ(ah,
+ chan_info_tab[i] + offset);
+
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "IQ RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
+ idx, iq_res[idx], idx + 1,
+ iq_res[idx + 1]);
+ }
+
+ if (!ar9003_hw_calc_iq_corr(ah, i, iq_res,
+ coeff.iqc_coeff)) {
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Failed in calculation of IQ correction.\n");
+ goto tx_iqcal_fail;
+ }
+
+ coeff.mag_coeff[i][im][ip] =
+ coeff.iqc_coeff[0] & 0x7f;
+ coeff.phs_coeff[i][im][ip] =
+ (coeff.iqc_coeff[0] >> 7) & 0x7f;
+
+ if (coeff.mag_coeff[i][im][ip] > 63)
+ coeff.mag_coeff[i][im][ip] -= 128;
+ if (coeff.phs_coeff[i][im][ip] > 63)
+ coeff.phs_coeff[i][im][ip] -= 128;
+ }
+ }
+ }
+ ar9003_hw_tx_iqcal_load_avg_2_passes(ah, num_chains, &coeff);
+
+ return;
+
+tx_iqcal_fail:
+ ath_dbg(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
+ return;
+}
static bool ar9003_hw_init_cal(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
+ int val;
- /*
- * 0x7 = 0b111 , AR9003 needs to be configured for 3-chain mode before
- * running AGC/TxIQ cals
- */
- ar9003_hw_set_chain_masks(ah, 0x7, 0x7);
+ val = REG_READ(ah, AR_ENT_OTP);
+ ath_dbg(common, ATH_DBG_CALIBRATE, "ath9k: AR_ENT_OTP 0x%x\n", val);
+
+ if (AR_SREV_9485(ah))
+ ar9003_hw_set_chain_masks(ah, 0x1, 0x1);
+ else if (val & AR_ENT_OTP_CHAIN2_DISABLE)
+ ar9003_hw_set_chain_masks(ah, 0x3, 0x3);
+ else
+ /*
+ * 0x7 = 0b111 , AR9003 needs to be configured for 3-chain
+ * mode before running AGC/TxIQ cals
+ */
+ ar9003_hw_set_chain_masks(ah, 0x7, 0x7);
/* Do Tx IQ Calibration */
- ar9003_hw_tx_iq_cal(ah);
+ if (AR_SREV_9485(ah))
+ ar9003_hw_tx_iq_cal_run(ah);
+ else
+ ar9003_hw_tx_iq_cal(ah);
+
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
udelay(5);
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
@@ -739,12 +974,14 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
/* Poll for offset calibration complete */
if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT)) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "offset calibration failed to "
- "complete in 1ms; noisy environment?\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
+ if (AR_SREV_9485(ah))
+ ar9003_hw_tx_iq_cal_post_proc(ah);
+
/* Revert chainmasks to their original values before NF cal */
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
@@ -757,15 +994,15 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
if (ah->supp_cals & IQ_MISMATCH_CAL) {
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling IQ Calibration.\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "enabling IQ Calibration.\n");
}
if (ah->supp_cals & TEMP_COMP_CAL) {
INIT_CAL(&ah->tempCompCalData);
INSERT_CAL(ah, &ah->tempCompCalData);
- ath_print(common, ATH_DBG_CALIBRATE,
- "enabling Temperature Compensation Calibration.\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "enabling Temperature Compensation Calibration.\n");
}
/* Initialize current pointer to first element in list */
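
The ar9003_calib.c changes above run Tx IQ calibration MPASS (3) times per gain setting and, for each measurement, keep the average of the two passes whose coefficients agree most closely; if no pair agrees within MAX_DIFFERENCE the Tx IQ correction is disabled. A standalone sketch of that selection logic, mirroring ar9003_hw_compute_closest_pass_and_avg() from the hunk:

#include <stdbool.h>
#include <stdlib.h>	/* abs() */

#define MPASS		3
#define MAX_DIFFERENCE	10

/* pick the two closest of the three per-pass coefficients and average them */
static bool closest_pass_avg_sketch(const int coeff[MPASS], int *avg)
{
	int d01 = abs(coeff[0] - coeff[1]);
	int d12 = abs(coeff[1] - coeff[2]);
	int d20 = abs(coeff[2] - coeff[0]);

	/* no two passes agree: caller falls back to disabling Tx IQ correction */
	if (d01 > MAX_DIFFERENCE && d12 > MAX_DIFFERENCE && d20 > MAX_DIFFERENCE)
		return false;

	if (d01 <= d12 && d01 <= d20)
		*avg = (coeff[0] + coeff[1]) / 2;
	else if (d12 <= d20)
		*avg = (coeff[1] + coeff[2]) / 2;
	else
		*avg = (coeff[2] + coeff[0]) / 2;

	return true;
}
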
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index c4182359bee4..4a9271802991 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -22,12 +22,14 @@
#define COMP_CKSUM_LEN 2
#define AR_CH0_TOP (0x00016288)
-#define AR_CH0_TOP_XPABIASLVL (0x3)
+#define AR_CH0_TOP_XPABIASLVL (0x300)
#define AR_CH0_TOP_XPABIASLVL_S (8)
#define AR_CH0_THERM (0x00016290)
-#define AR_CH0_THERM_SPARE (0x3f)
-#define AR_CH0_THERM_SPARE_S (0)
+#define AR_CH0_THERM_XPABIASLVL_MSB 0x3
+#define AR_CH0_THERM_XPABIASLVL_MSB_S 0
+#define AR_CH0_THERM_XPASHORT2GND 0x4
+#define AR_CH0_THERM_XPASHORT2GND_S 2
#define AR_SWITCH_TABLE_COM_ALL (0xffff)
#define AR_SWITCH_TABLE_COM_ALL_S (0)
@@ -55,6 +57,14 @@
#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */
#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */
+#define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6))
+
+#define EEPROM_DATA_LEN_9485 1088
+
+static int ar9003_hw_power_interpolate(int32_t x,
+ int32_t *px, int32_t *py, u_int16_t np);
+
+
static const struct ar9300_eeprom ar9300_default = {
.eepromVersion = 2,
.templateVersion = 2,
@@ -65,7 +75,7 @@ static const struct ar9300_eeprom ar9300_default = {
.regDmn = { LE16(0), LE16(0x1f) },
.txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
.opCapFlags = {
- .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
+ .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
.eepMisc = 0,
},
.rfSilent = 0,
@@ -144,13 +154,16 @@ static const struct ar9300_eeprom ar9300_default = {
.txEndToRxOn = 0x2,
.txFrameToXpaOn = 0xe,
.thresh62 = 28,
- .papdRateMaskHt20 = LE32(0x80c080),
- .papdRateMaskHt40 = LE32(0x80c080),
+ .papdRateMaskHt20 = LE32(0x0cf0e0e0),
+ .papdRateMaskHt40 = LE32(0x6cf0e0e0),
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
},
},
+ .base_ext1 = {
+ .ant_div_control = 0,
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ },
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
FREQ2FBIN(2437, 1),
@@ -285,25 +298,25 @@ static const struct ar9300_eeprom ar9300_default = {
/* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
/* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
/* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
- /* Data[11].ctlEdges[3].bChannel */
- FREQ2FBIN(2462, 1),
+ /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
}
},
.ctlPowerData_2G = {
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
- { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
- { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
- { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
- { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
},
.modalHeader5G = {
/* 4 idle,t1,t2,b (4 bits per setting) */
@@ -343,13 +356,20 @@ static const struct ar9300_eeprom ar9300_default = {
.txEndToRxOn = 0x2,
.txFrameToXpaOn = 0xe,
.thresh62 = 28,
- .papdRateMaskHt20 = LE32(0xf0e0e0),
- .papdRateMaskHt40 = LE32(0xf0e0e0),
+ .papdRateMaskHt20 = LE32(0x0c80c080),
+ .papdRateMaskHt40 = LE32(0x0080c080),
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
},
},
+ .base_ext2 = {
+ .tempSlopeLow = 0,
+ .tempSlopeHigh = 0,
+ .xatten1DBLow = {0, 0, 0},
+ .xatten1MarginLow = {0, 0, 0},
+ .xatten1DBHigh = {0, 0, 0},
+ .xatten1MarginHigh = {0, 0, 0}
+ },
.calFreqPier5G = {
FREQ2FBIN(5180, 0),
FREQ2FBIN(5220, 0),
@@ -568,64 +588,2396 @@ static const struct ar9300_eeprom ar9300_default = {
.ctlPowerData_5G = {
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ }
+ },
+ }
+};
+
+static const struct ar9300_eeprom ar9300_x113 = {
+ .eepromVersion = 2,
+ .templateVersion = 6,
+ .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
+ .custData = {"x113-023-f0000"},
+ .baseEepHeader = {
+ .regDmn = { LE16(0), LE16(0x1f) },
+ .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+ .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+ .blueToothOptions = 0,
+ .deviceCap = 0,
+ .deviceType = 5, /* takes lower byte in eeprom location */
+ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
+ .params_for_tuning_caps = {0, 0},
+ .featureEnable = 0x0d,
+ /*
+ * bit0 - enable tx temp comp - disabled
+ * bit1 - enable tx volt comp - disabled
+ * bit2 - enable fastClock - enabled
+ * bit3 - enable doubling - enabled
+ * bit4 - enable internal regulator - disabled
+ * bit5 - enable pa predistortion - disabled
+ */
+ .miscConfiguration = 0, /* bit0 - turn down drivestrength */
+ .eepromWriteEnableGpio = 6,
+ .wlanDisableGpio = 0,
+ .wlanLedGpio = 8,
+ .rxBandSelectGpio = 0xff,
+ .txrxgain = 0x21,
+ .swreg = 0,
+ },
+ .modalHeader2G = {
+ /* ar9300_modal_eep_header 2g */
+ /* 4 idle,t1,t2,b(4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ .antCtrlCommon2 = LE32(0x44444),
+
+ /*
+ * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
+ * rx1, rx12, b (2 bits each)
+ */
+ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
+
+ /*
+ * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
+ * for ar9280 (0xa20c/b20c 5:0)
+ */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for ar9280 (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 25,
+ .voltSlope = 0,
+
+ /*
+ * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
+ * channels in usual fbin coding format
+ */
+ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
+
+ /*
+ * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
+ * if the register is per chain
+ */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {1, 1, 1},/* 3 chain */
+ .db_stage2 = {1, 1, 1}, /* 3 chain */
+ .db_stage3 = {0, 0, 0},
+ .db_stage4 = {0, 0, 0},
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2c,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0c80c080),
+ .papdRateMaskHt40 = LE32(0x0080c080),
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext1 = {
+ .ant_div_control = 0,
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ },
+ .calFreqPier2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ /* ar9300_cal_data_per_freq_op_loop 2g */
+ .calPierData2G = {
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ },
+ .calTarget_freqbin_Cck = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ .calTarget_freqbin_2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT20 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT40 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTargetPowerCck = {
+ /* 1L-5L,5S,11L,11S */
+ { {34, 34, 34, 34} },
+ { {34, 34, 34, 34} },
+ },
+ .calTargetPower2G = {
+ /* 6-24,36,48,54 */
+ { {34, 34, 32, 32} },
+ { {34, 34, 32, 32} },
+ { {34, 34, 32, 32} },
+ },
+ .calTargetPower2GHT20 = {
+ { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
+ { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
+ { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
+ },
+ .calTargetPower2GHT40 = {
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
+ },
+ .ctlIndex_2G = {
+ 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
+ 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
+ },
+ .ctl_freqbin_2G = {
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2457, 1),
+ FREQ2FBIN(2462, 1)
+ },
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+ {
+ FREQ2FBIN(2422, 1),
+ FREQ2FBIN(2427, 1),
+ FREQ2FBIN(2447, 1),
+ FREQ2FBIN(2452, 1)
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ },
+
+ {
+ /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ }
+ },
+ .ctlPowerData_2G = {
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
+
+ { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ },
+ .modalHeader5G = {
+ /* 4 idle,t1,t2,b (4 bits per setting) */
+ .antCtrlCommon = LE32(0x220),
+ /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
+ .antCtrlCommon2 = LE32(0x11111),
+ /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
+ .antCtrlChain = {
+ LE16(0x150), LE16(0x150), LE16(0x150),
+ },
+ /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for merlin (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 68,
+ .voltSlope = 0,
+ /* spurChans spur channels in usual fbin coding format */
+ .spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0},
+ /* noiseFloorThreshCh Check if the register is per chain */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {3, 3, 3}, /* 3 chain */
+ .db_stage2 = {3, 3, 3}, /* 3 chain */
+ .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
+ .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2d,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0cf0e0e0),
+ .papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext2 = {
+ .tempSlopeLow = 72,
+ .tempSlopeHigh = 105,
+ .xatten1DBLow = {0, 0, 0},
+ .xatten1MarginLow = {0, 0, 0},
+ .xatten1DBHigh = {0, 0, 0},
+ .xatten1MarginHigh = {0, 0, 0}
+ },
+ .calFreqPier5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5785, 0)
+ },
+ .calPierData5G = {
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+
+ },
+ .calTarget_freqbin_5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5785, 0)
+ },
+ .calTarget_freqbin_5GHT20 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT40 = {
+ FREQ2FBIN(5190, 0),
+ FREQ2FBIN(5230, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5410, 0),
+ FREQ2FBIN(5510, 0),
+ FREQ2FBIN(5670, 0),
+ FREQ2FBIN(5755, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTargetPower5G = {
+ /* 6-24,36,48,54 */
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ { {42, 40, 40, 34} },
+ },
+ .calTargetPower5GHT20 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
+ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
+ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
+ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
+ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
+ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
+ { {38, 38, 38, 38, 32, 28, 38, 38, 32, 28, 38, 38, 32, 26} },
+ { {36, 36, 36, 36, 32, 28, 36, 36, 32, 28, 36, 36, 32, 26} },
+ },
+ .calTargetPower5GHT40 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
+ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
+ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
+ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
+ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
+ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
+ { {36, 36, 36, 36, 30, 26, 36, 36, 30, 26, 36, 36, 30, 24} },
+ { {34, 34, 34, 34, 30, 26, 34, 34, 30, 26, 34, 34, 30, 24} },
+ },
+ .ctlIndex_5G = {
+ 0x10, 0x16, 0x18, 0x40, 0x46,
+ 0x48, 0x30, 0x36, 0x38
+ },
+ .ctl_freqbin_5G = {
+ {
+ /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+ {
+ /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
+ /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
+ },
+
+ {
+ /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[3].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[3].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[4].ctlEdges[4].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[5].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
+ /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[5].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[5].ctlEdges[7].bChannel */ 0xFF
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
+ /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
+ /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
+ /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
+ }
+ },
+ .ctlPowerData_5G = {
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ }
+ },
+ }
+};
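All of the pier and CTL edge channels in these templates are stored as single-byte fbin values rather than frequencies. Assuming the FREQ2FBIN() definition ath9k uses elsewhere (an offset from 2300 MHz for 2.4 GHz, and 5 MHz steps from 4800 MHz for 5 GHz), the values above decode as in this sketch:

	/* Assumed fbin encoding; the second argument selects the 2.4 GHz form. */
	#define FREQ2FBIN(x, y)	((y) ? ((x) - 2300) : (((x) - 4800) / 5))

	/* FREQ2FBIN(2412, 1) == 112   (2.4 GHz: MHz - 2300)     */
	/* FREQ2FBIN(5180, 0) == 76    (5 GHz: (MHz - 4800) / 5) */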
+
+
+static const struct ar9300_eeprom ar9300_h112 = {
+ .eepromVersion = 2,
+ .templateVersion = 3,
+ .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
+ .custData = {"h112-241-f0000"},
+ .baseEepHeader = {
+ .regDmn = { LE16(0), LE16(0x1f) },
+ .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+ .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+ .blueToothOptions = 0,
+ .deviceCap = 0,
+ .deviceType = 5, /* takes lower byte in eeprom location */
+ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
+ .params_for_tuning_caps = {0, 0},
+ .featureEnable = 0x0d,
+ /*
+ * bit0 - enable tx temp comp - disabled
+ * bit1 - enable tx volt comp - disabled
+ * bit2 - enable fastClock - enabled
+ * bit3 - enable doubling - enabled
+ * bit4 - enable internal regulator - disabled
+ * bit5 - enable pa predistortion - disabled
+ */
+ .miscConfiguration = 0, /* bit0 - turn down drivestrength */
+ .eepromWriteEnableGpio = 6,
+ .wlanDisableGpio = 0,
+ .wlanLedGpio = 8,
+ .rxBandSelectGpio = 0xff,
+ .txrxgain = 0x10,
+ .swreg = 0,
+ },
+ .modalHeader2G = {
+ /* ar9300_modal_eep_header 2g */
+ /* 4 idle,t1,t2,b(4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ .antCtrlCommon2 = LE32(0x44444),
+
+ /*
+ * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
+ * rx1, rx12, b (2 bits each)
+ */
+ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
+
+ /*
+ * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
+ * for ar9280 (0xa20c/b20c 5:0)
+ */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for ar9280 (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 25,
+ .voltSlope = 0,
+
+ /*
+ * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
+ * channels in usual fbin coding format
+ */
+ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
+
+ /*
+ * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
+ * if the register is per chain
+ */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {1, 1, 1},/* 3 chain */
+ .db_stage2 = {1, 1, 1}, /* 3 chain */
+ .db_stage3 = {0, 0, 0},
+ .db_stage4 = {0, 0, 0},
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2c,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x80c080),
+ .papdRateMaskHt40 = LE32(0x80c080),
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext1 = {
+ .ant_div_control = 0,
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ },
+ .calFreqPier2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ /* ar9300_cal_data_per_freq_op_loop 2g */
+ .calPierData2G = {
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ },
+ .calTarget_freqbin_Cck = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2484, 1),
+ },
+ .calTarget_freqbin_2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT20 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT40 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTargetPowerCck = {
+ /* 1L-5L,5S,11L,11S */
+ { {34, 34, 34, 34} },
+ { {34, 34, 34, 34} },
+ },
+ .calTargetPower2G = {
+ /* 6-24,36,48,54 */
+ { {34, 34, 32, 32} },
+ { {34, 34, 32, 32} },
+ { {34, 34, 32, 32} },
+ },
+ .calTargetPower2GHT20 = {
+ { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
+ { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
+ { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
+ },
+ .calTargetPower2GHT40 = {
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
+ },
+ .ctlIndex_2G = {
+ 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
+ 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
+ },
+ .ctl_freqbin_2G = {
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2457, 1),
+ FREQ2FBIN(2462, 1)
+ },
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+ {
+ FREQ2FBIN(2422, 1),
+ FREQ2FBIN(2427, 1),
+ FREQ2FBIN(2447, 1),
+ FREQ2FBIN(2452, 1)
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ },
+
+ {
+ /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ }
+ },
+ .ctlPowerData_2G = {
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
+
+ { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ },
+ .modalHeader5G = {
+ /* 4 idle,t1,t2,b (4 bits per setting) */
+ .antCtrlCommon = LE32(0x220),
+ /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
+ .antCtrlCommon2 = LE32(0x44444),
+ /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
+ .antCtrlChain = {
+ LE16(0x150), LE16(0x150), LE16(0x150),
+ },
+ /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ .xatten1DB = {0, 0, 0},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for merlin (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0, 0, 0},
+ .tempSlope = 45,
+ .voltSlope = 0,
+ /* spurChans spur channels in usual fbin coding format */
+ .spurChans = {0, 0, 0, 0, 0},
+ /* noiseFloorThreshCh Check if the register is per chain */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {3, 3, 3}, /* 3 chain */
+ .db_stage2 = {3, 3, 3}, /* 3 chain */
+ .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
+ .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2d,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0cf0e0e0),
+ .papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext2 = {
+ .tempSlopeLow = 40,
+ .tempSlopeHigh = 50,
+ .xatten1DBLow = {0, 0, 0},
+ .xatten1MarginLow = {0, 0, 0},
+ .xatten1DBHigh = {0, 0, 0},
+ .xatten1MarginHigh = {0, 0, 0}
+ },
+ .calFreqPier5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calPierData5G = {
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+
+ },
+ .calTarget_freqbin_5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT20 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT40 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTargetPower5G = {
+ /* 6-24,36,48,54 */
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ },
+ .calTargetPower5GHT20 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} },
+ { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} },
+ { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} },
+ { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} },
+ { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} },
+ { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} },
+ { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} },
+ { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} },
+ },
+ .calTargetPower5GHT40 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} },
+ { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} },
+ { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} },
+ { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} },
+ { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} },
+ { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} },
+ { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} },
+ { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} },
+ },
+ .ctlIndex_5G = {
+ 0x10, 0x16, 0x18, 0x40, 0x46,
+ 0x48, 0x30, 0x36, 0x38
+ },
+ .ctl_freqbin_5G = {
+ {
+ /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+ {
+ /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
+ /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
+ },
+
+ {
+ /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[3].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[3].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[4].ctlEdges[4].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[5].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
+ /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[5].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[5].ctlEdges[7].bChannel */ 0xFF
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
+ /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
+ /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
+ /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
+ }
+ },
+ .ctlPowerData_5G = {
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ }
+ },
+ }
+};
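The calTargetPower* tables in each template are only sampled at the calTarget_freqbin_* piers; for the channel actually tuned, the driver interpolates between the surrounding piers (see the ar9003_hw_power_interpolate() forward declaration near the top of this file). A rough piecewise-linear sketch of that idea, not the driver's exact routine:

	/* Sketch: linear interpolation of py over px at point x (np piers). */
	static int power_interpolate_sketch(int x, const int *px, const int *py, int np)
	{
		int i;

		if (np == 0)
			return 0;
		if (x <= px[0])
			return py[0];		/* clamp below the first pier */
		if (x >= px[np - 1])
			return py[np - 1];	/* clamp above the last pier */

		for (i = 0; i < np - 1; i++)
			if (x <= px[i + 1])
				return py[i] + (py[i + 1] - py[i]) *
				       (x - px[i]) / (px[i + 1] - px[i]);

		return py[np - 1];
	}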
+
+
+static const struct ar9300_eeprom ar9300_x112 = {
+ .eepromVersion = 2,
+ .templateVersion = 5,
+ .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
+ .custData = {"x112-041-f0000"},
+ .baseEepHeader = {
+ .regDmn = { LE16(0), LE16(0x1f) },
+ .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+ .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+ .blueToothOptions = 0,
+ .deviceCap = 0,
+ .deviceType = 5, /* takes lower byte in eeprom location */
+ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
+ .params_for_tuning_caps = {0, 0},
+ .featureEnable = 0x0d,
+ /*
+ * bit0 - enable tx temp comp - disabled
+ * bit1 - enable tx volt comp - disabled
+ * bit2 - enable fastclock - enabled
+ * bit3 - enable doubling - enabled
+ * bit4 - enable internal regulator - disabled
+ * bit5 - enable pa predistortion - disabled
+ */
+ .miscConfiguration = 0, /* bit0 - turn down drivestrength */
+ .eepromWriteEnableGpio = 6,
+ .wlanDisableGpio = 0,
+ .wlanLedGpio = 8,
+ .rxBandSelectGpio = 0xff,
+ .txrxgain = 0x0,
+ .swreg = 0,
+ },
+ .modalHeader2G = {
+ /* ar9300_modal_eep_header 2g */
+ /* 4 idle,t1,t2,b(4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ .antCtrlCommon2 = LE32(0x22222),
+
+ /*
+ * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
+ * rx1, rx12, b (2 bits each)
+ */
+ .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) },
+
+ /*
+ * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
+ * for ar9280 (0xa20c/b20c 5:0)
+ */
+ .xatten1DB = {0x1b, 0x1b, 0x1b},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for ar9280 (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0x15, 0x15, 0x15},
+ .tempSlope = 50,
+ .voltSlope = 0,
+
+ /*
+ * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
+ * channels in usual fbin coding format
+ */
+ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
+
+ /*
+ * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
+ * if the register is per chain
+ */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {1, 1, 1},/* 3 chain */
+ .db_stage2 = {1, 1, 1}, /* 3 chain */
+ .db_stage3 = {0, 0, 0},
+ .db_stage4 = {0, 0, 0},
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2c,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0c80c080),
+ .papdRateMaskHt40 = LE32(0x0080c080),
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext1 = {
+ .ant_div_control = 0,
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ },
+ .calFreqPier2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ /* ar9300_cal_data_per_freq_op_loop 2g */
+ .calPierData2G = {
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ },
+ .calTarget_freqbin_Cck = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ .calTarget_freqbin_2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT20 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT40 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTargetPowerCck = {
+ /* 1L-5L,5S,11L,11s */
+ { {38, 38, 38, 38} },
+ { {38, 38, 38, 38} },
+ },
+ .calTargetPower2G = {
+ /* 6-24,36,48,54 */
+ { {38, 38, 36, 34} },
+ { {38, 38, 36, 34} },
+ { {38, 38, 34, 32} },
+ },
+ .calTargetPower2GHT20 = {
+ { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} },
+ { {36, 36, 36, 36, 36, 34, 36, 34, 32, 30, 30, 30, 28, 26} },
+ { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} },
+ },
+ .calTargetPower2GHT40 = {
+ { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} },
+ { {36, 36, 36, 36, 34, 32, 34, 32, 30, 28, 28, 28, 28, 24} },
+ { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} },
+ },
+ .ctlIndex_2G = {
+ 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
+ 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
+ },
+ .ctl_freqbin_2G = {
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2457, 1),
+ FREQ2FBIN(2462, 1)
+ },
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+ {
+ FREQ2FBIN(2422, 1),
+ FREQ2FBIN(2427, 1),
+ FREQ2FBIN(2447, 1),
+ FREQ2FBIN(2452, 1)
+ },
+
+ {
+ /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
+ /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
+ /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
+ /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(2484, 1),
+ },
+
+ {
+ /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
+ /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
+ /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
+ /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(2422, 1),
+ /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(2427, 1),
+ /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(2447, 1),
+ /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(2462, 1),
+ },
+
+ {
+ /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
+ /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
+ /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
+ },
+
+ {
+ /* Data[9].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
+ /* Data[9].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
+ /* Data[9].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[10].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
+ /* Data[10].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
+ /* Data[10].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[11].ctledges[0].bchannel */ FREQ2FBIN(2422, 1),
+ /* Data[11].ctledges[1].bchannel */ FREQ2FBIN(2427, 1),
+ /* Data[11].ctledges[2].bchannel */ FREQ2FBIN(2447, 1),
+ /* Data[11].ctledges[3].bchannel */ FREQ2FBIN(2462, 1),
+ }
+ },
+ .ctlPowerData_2G = {
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
+
+ { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ },
+ .modalHeader5G = {
+ /* 4 idle,t1,t2,b (4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
+ .antCtrlCommon2 = LE32(0x22222),
+ /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
+ .antCtrlChain = {
+ LE16(0x0), LE16(0x0), LE16(0x0),
+ },
+ /* xatten1DB 3 xatten1_db for ar9280 (0xa20c/b20c 5:0) */
+ .xatten1DB = {0x13, 0x19, 0x17},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for merlin (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0x19, 0x19, 0x19},
+ .tempSlope = 70,
+ .voltSlope = 15,
+ /* spurChans spur channels in usual fbin coding format */
+ .spurChans = {0, 0, 0, 0, 0},
+ /* noiseFloorThreshCh Check if the register is per chain */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {3, 3, 3}, /* 3 chain */
+ .db_stage2 = {3, 3, 3}, /* 3 chain */
+ .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
+ .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2d,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0cf0e0e0),
+ .papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext2 = {
+ .tempSlopeLow = 72,
+ .tempSlopeHigh = 105,
+ .xatten1DBLow = {0x10, 0x14, 0x10},
+ .xatten1MarginLow = {0x19, 0x19, 0x19},
+ .xatten1DBHigh = {0x1d, 0x20, 0x24},
+ .xatten1MarginHigh = {0x10, 0x10, 0x10}
+ },
+ .calFreqPier5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5785, 0)
+ },
+ .calPierData5G = {
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+
+ },
+ .calTarget_freqbin_5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT20 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT40 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5725, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTargetPower5G = {
+ /* 6-24,36,48,54 */
+ { {32, 32, 28, 26} },
+ { {32, 32, 28, 26} },
+ { {32, 32, 28, 26} },
+ { {32, 32, 26, 24} },
+ { {32, 32, 26, 24} },
+ { {32, 32, 24, 22} },
+ { {30, 30, 24, 22} },
+ { {30, 30, 24, 22} },
+ },
+ .calTargetPower5GHT20 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
+ { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
+ { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
+ { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 22, 22, 20, 20} },
+ { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 20, 18, 16, 16} },
+ { {32, 32, 32, 32, 28, 26, 32, 24, 20, 16, 18, 16, 14, 14} },
+ { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} },
+ { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} },
+ },
+ .calTargetPower5GHT40 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
+ { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
+ { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
+ { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 22, 22, 20, 20} },
+ { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 20, 18, 16, 16} },
+ { {32, 32, 32, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
+ { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
+ { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
+ },
+ .ctlIndex_5G = {
+ 0x10, 0x16, 0x18, 0x40, 0x46,
+ 0x48, 0x30, 0x36, 0x38
+ },
+ .ctl_freqbin_5G = {
+ {
+ /* Data[0].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
+ /* Data[0].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
+ /* Data[0].ctledges[2].bchannel */ FREQ2FBIN(5280, 0),
+ /* Data[0].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
+ /* Data[0].ctledges[4].bchannel */ FREQ2FBIN(5600, 0),
+ /* Data[0].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
+ /* Data[0].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
+ /* Data[0].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
+ },
+ {
+ /* Data[1].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
+ /* Data[1].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
+ /* Data[1].ctledges[2].bchannel */ FREQ2FBIN(5280, 0),
+ /* Data[1].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
+ /* Data[1].ctledges[4].bchannel */ FREQ2FBIN(5520, 0),
+ /* Data[1].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
+ /* Data[1].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
+ /* Data[1].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[2].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
+ /* Data[2].ctledges[1].bchannel */ FREQ2FBIN(5230, 0),
+ /* Data[2].ctledges[2].bchannel */ FREQ2FBIN(5270, 0),
+ /* Data[2].ctledges[3].bchannel */ FREQ2FBIN(5310, 0),
+ /* Data[2].ctledges[4].bchannel */ FREQ2FBIN(5510, 0),
+ /* Data[2].ctledges[5].bchannel */ FREQ2FBIN(5550, 0),
+ /* Data[2].ctledges[6].bchannel */ FREQ2FBIN(5670, 0),
+ /* Data[2].ctledges[7].bchannel */ FREQ2FBIN(5755, 0)
+ },
+
+ {
+ /* Data[3].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
+ /* Data[3].ctledges[1].bchannel */ FREQ2FBIN(5200, 0),
+ /* Data[3].ctledges[2].bchannel */ FREQ2FBIN(5260, 0),
+ /* Data[3].ctledges[3].bchannel */ FREQ2FBIN(5320, 0),
+ /* Data[3].ctledges[4].bchannel */ FREQ2FBIN(5500, 0),
+ /* Data[3].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
+ /* Data[3].ctledges[6].bchannel */ 0xFF,
+ /* Data[3].ctledges[7].bchannel */ 0xFF,
+ },
+
+ {
+ /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
+ /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
+ /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(5500, 0),
+ /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(5700, 0),
+ /* Data[4].ctledges[4].bchannel */ 0xFF,
+ /* Data[4].ctledges[5].bchannel */ 0xFF,
+ /* Data[4].ctledges[6].bchannel */ 0xFF,
+ /* Data[4].ctledges[7].bchannel */ 0xFF,
+ },
+
+ {
+ /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
+ /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(5270, 0),
+ /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(5310, 0),
+ /* Data[5].ctledges[3].bchannel */ FREQ2FBIN(5510, 0),
+ /* Data[5].ctledges[4].bchannel */ FREQ2FBIN(5590, 0),
+ /* Data[5].ctledges[5].bchannel */ FREQ2FBIN(5670, 0),
+ /* Data[5].ctledges[6].bchannel */ 0xFF,
+ /* Data[5].ctledges[7].bchannel */ 0xFF
+ },
+
+ {
+ /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
+ /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(5200, 0),
+ /* Data[6].ctledges[2].bchannel */ FREQ2FBIN(5220, 0),
+ /* Data[6].ctledges[3].bchannel */ FREQ2FBIN(5260, 0),
+ /* Data[6].ctledges[4].bchannel */ FREQ2FBIN(5500, 0),
+ /* Data[6].ctledges[5].bchannel */ FREQ2FBIN(5600, 0),
+ /* Data[6].ctledges[6].bchannel */ FREQ2FBIN(5700, 0),
+ /* Data[6].ctledges[7].bchannel */ FREQ2FBIN(5745, 0)
+ },
+
+ {
+ /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
+ /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
+ /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(5320, 0),
+ /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
+ /* Data[7].ctledges[4].bchannel */ FREQ2FBIN(5560, 0),
+ /* Data[7].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
+ /* Data[7].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
+ /* Data[7].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
+ /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(5230, 0),
+ /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(5270, 0),
+ /* Data[8].ctledges[3].bchannel */ FREQ2FBIN(5510, 0),
+ /* Data[8].ctledges[4].bchannel */ FREQ2FBIN(5550, 0),
+ /* Data[8].ctledges[5].bchannel */ FREQ2FBIN(5670, 0),
+ /* Data[8].ctledges[6].bchannel */ FREQ2FBIN(5755, 0),
+ /* Data[8].ctledges[7].bchannel */ FREQ2FBIN(5795, 0)
+ }
+ },
+ .ctlPowerData_5G = {
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ }
+ },
+ {
+ {
+ CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ }
+ },
+ }
+};
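Each of these static tables carries a distinct templateVersion (2 for ar9300_default, 6 for ar9300_x113, 3 for ar9300_h112, 5 for ar9300_x112), which lets a compressed on-card EEPROM image reference a built-in template by id instead of storing the whole structure. A minimal sketch of that lookup over the templates above; array and function names are illustrative:

	/* Sketch: map a template id to one of the static ar9300_eeprom tables above. */
	static const struct ar9300_eeprom *ar9300_template_sketch[] = {
		&ar9300_default,	/* templateVersion 2 */
		&ar9300_h112,		/* templateVersion 3 */
		&ar9300_x112,		/* templateVersion 5 */
		&ar9300_x113,		/* templateVersion 6 */
	};

	static const struct ar9300_eeprom *ar9300_template_find(int id)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(ar9300_template_sketch); i++)
			if (ar9300_template_sketch[i]->templateVersion == id)
				return ar9300_template_sketch[i];
		return NULL;
	}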
+
+static const struct ar9300_eeprom ar9300_h116 = {
+ .eepromVersion = 2,
+ .templateVersion = 4,
+ .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
+ .custData = {"h116-041-f0000"},
+ .baseEepHeader = {
+ .regDmn = { LE16(0), LE16(0x1f) },
+ .txrxMask = 0x33, /* 4 bits tx and 4 bits rx */
+ .opCapFlags = {
+ .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
+ .eepMisc = 0,
+ },
+ .rfSilent = 0,
+ .blueToothOptions = 0,
+ .deviceCap = 0,
+ .deviceType = 5, /* takes lower byte in eeprom location */
+ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
+ .params_for_tuning_caps = {0, 0},
+ .featureEnable = 0x0d,
+ /*
+ * bit0 - enable tx temp comp - disabled
+ * bit1 - enable tx volt comp - disabled
+ * bit2 - enable fastClock - enabled
+ * bit3 - enable doubling - enabled
+ * bit4 - enable internal regulator - disabled
+ * bit5 - enable pa predistortion - disabled
+ */
+ .miscConfiguration = 0, /* bit0 - turn down drivestrength */
+ .eepromWriteEnableGpio = 6,
+ .wlanDisableGpio = 0,
+ .wlanLedGpio = 8,
+ .rxBandSelectGpio = 0xff,
+ .txrxgain = 0x10,
+ .swreg = 0,
+ },
+ .modalHeader2G = {
+ /* ar9300_modal_eep_header 2g */
+ /* 4 idle,t1,t2,b(4 bits per setting) */
+ .antCtrlCommon = LE32(0x110),
+ /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
+ .antCtrlCommon2 = LE32(0x44444),
+
+ /*
+ * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
+ * rx1, rx12, b (2 bits each)
+ */
+ .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) },
+
+ /*
+ * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
+ * for ar9280 (0xa20c/b20c 5:0)
+ */
+ .xatten1DB = {0x1f, 0x1f, 0x1f},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+ * for ar9280 (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0x12, 0x12, 0x12},
+ .tempSlope = 25,
+ .voltSlope = 0,
+
+ /*
+ * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
+ * channels in usual fbin coding format
+ */
+ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
+
+ /*
+ * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
+ * if the register is per chain
+ */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {1, 1, 1},/* 3 chain */
+ .db_stage2 = {1, 1, 1}, /* 3 chain */
+ .db_stage3 = {0, 0, 0},
+ .db_stage4 = {0, 0, 0},
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2c,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0c80C080),
+ .papdRateMaskHt40 = LE32(0x0080C080),
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext1 = {
+ .ant_div_control = 0,
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ },
+ .calFreqPier2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ /* ar9300_cal_data_per_freq_op_loop 2g */
+ .calPierData2G = {
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
+ },
+ .calTarget_freqbin_Cck = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2472, 1),
+ },
+ .calTarget_freqbin_2G = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT20 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTarget_freqbin_2GHT40 = {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2437, 1),
+ FREQ2FBIN(2472, 1)
+ },
+ .calTargetPowerCck = {
+ /* 1L-5L,5S,11L,11S */
+ { {34, 34, 34, 34} },
+ { {34, 34, 34, 34} },
+ },
+ .calTargetPower2G = {
+ /* 6-24,36,48,54 */
+ { {34, 34, 32, 32} },
+ { {34, 34, 32, 32} },
+ { {34, 34, 32, 32} },
+ },
+ .calTargetPower2GHT20 = {
+ { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
+ { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
+ { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
+ },
+ .calTargetPower2GHT40 = {
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
+ { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
+ },
+ .ctlIndex_2G = {
+ 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
+ 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
+ },
+ .ctl_freqbin_2G = {
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2457, 1),
+ FREQ2FBIN(2462, 1)
+ },
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+
+ {
+ FREQ2FBIN(2412, 1),
+ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2462, 1),
+ 0xFF,
+ },
+ {
+ FREQ2FBIN(2422, 1),
+ FREQ2FBIN(2427, 1),
+ FREQ2FBIN(2447, 1),
+ FREQ2FBIN(2452, 1)
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ FREQ2FBIN(2472, 1),
+ 0,
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ },
+
+ {
+ /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
+ /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
+ /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
+ 0
+ },
+
+ {
+ /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
+ /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
+ /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
+ /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
+ }
+ },
+ .ctlPowerData_2G = {
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
+
+ { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
+ },
+ .modalHeader5G = {
+ /* 4 idle,t1,t2,b (4 bits per setting) */
+ .antCtrlCommon = LE32(0x220),
+ /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
+ .antCtrlCommon2 = LE32(0x44444),
+ /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
+ .antCtrlChain = {
+ LE16(0x150), LE16(0x150), LE16(0x150),
+ },
+ /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
+ .xatten1DB = {0x19, 0x19, 0x19},
+
+ /*
+ * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
+		 * for merlin (0xa20c/b20c 16:12)
+ */
+ .xatten1Margin = {0x14, 0x14, 0x14},
+ .tempSlope = 70,
+ .voltSlope = 0,
+ /* spurChans spur channels in usual fbin coding format */
+ .spurChans = {0, 0, 0, 0, 0},
+ /* noiseFloorThreshCh Check if the register is per chain */
+ .noiseFloorThreshCh = {-1, 0, 0},
+ .ob = {3, 3, 3}, /* 3 chain */
+ .db_stage2 = {3, 3, 3}, /* 3 chain */
+ .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
+		.db_stage4 = {3, 3, 3}, /* doesn't exist for 2G */
+ .xpaBiasLvl = 0,
+ .txFrameToDataStart = 0x0e,
+ .txFrameToPaOn = 0x0e,
+ .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
+ .antennaGain = 0,
+ .switchSettling = 0x2d,
+ .adcDesiredSize = -30,
+ .txEndToXpaOff = 0,
+ .txEndToRxOn = 0x2,
+ .txFrameToXpaOn = 0xe,
+ .thresh62 = 28,
+ .papdRateMaskHt20 = LE32(0x0cf0e0e0),
+ .papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .futureModal = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ },
+ },
+ .base_ext2 = {
+ .tempSlopeLow = 35,
+ .tempSlopeHigh = 50,
+ .xatten1DBLow = {0, 0, 0},
+ .xatten1MarginLow = {0, 0, 0},
+ .xatten1DBHigh = {0, 0, 0},
+ .xatten1MarginHigh = {0, 0, 0}
+ },
+ .calFreqPier5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5220, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5785, 0)
+ },
+ .calPierData5G = {
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+ {
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0},
+ },
+
+ },
+ .calTarget_freqbin_5G = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5600, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT20 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTarget_freqbin_5GHT40 = {
+ FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5240, 0),
+ FREQ2FBIN(5320, 0),
+ FREQ2FBIN(5400, 0),
+ FREQ2FBIN(5500, 0),
+ FREQ2FBIN(5700, 0),
+ FREQ2FBIN(5745, 0),
+ FREQ2FBIN(5825, 0)
+ },
+ .calTargetPower5G = {
+ /* 6-24,36,48,54 */
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ { {30, 30, 28, 24} },
+ },
+ .calTargetPower5GHT20 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} },
+ { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} },
+ { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} },
+ { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} },
+ { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} },
+ { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} },
+ { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} },
+ { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} },
+ },
+ .calTargetPower5GHT40 = {
+ /*
+ * 0_8_16,1-3_9-11_17-19,
+ * 4,5,6,7,12,13,14,15,20,21,22,23
+ */
+ { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} },
+ { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} },
+ { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} },
+ { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} },
+ { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} },
+ { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} },
+ { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} },
+ { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} },
+ },
+ .ctlIndex_5G = {
+ 0x10, 0x16, 0x18, 0x40, 0x46,
+ 0x48, 0x30, 0x36, 0x38
+ },
+ .ctl_freqbin_5G = {
+ {
+ /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+ {
+ /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
+ /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
+ /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
+ },
+
+ {
+ /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[3].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[3].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[4].ctlEdges[4].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[5].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[4].ctlEdges[7].bChannel */ 0xFF,
+ },
+
+ {
+ /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
+ /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
+ /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[5].ctlEdges[6].bChannel */ 0xFF,
+ /* Data[5].ctlEdges[7].bChannel */ 0xFF
+ },
+
+ {
+ /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
+ /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
+ /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
+ /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
+ },
+
+ {
+ /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
+ /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
+ /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
+ /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
+ /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
+ /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
+ /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
+ /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
+ },
+
+ {
+ /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
+ /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
+ /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
+ /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
+ /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
+ /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
+ /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
+ /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
+ }
+ },
+ .ctlPowerData_5G = {
+ {
+ {
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
}
},
{
{
- {60, 0}, {60, 1}, {60, 0}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
}
},
{
{
- {60, 0}, {60, 1}, {60, 1}, {60, 0},
- {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
- {60, 0}, {60, 0}, {60, 0}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
+ CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 0}, {60, 0}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 1},
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
}
},
{
{
- {60, 1}, {60, 1}, {60, 0}, {60, 1},
- {60, 1}, {60, 1}, {60, 1}, {60, 0},
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
}
},
{
{
- {60, 1}, {60, 0}, {60, 1}, {60, 1},
- {60, 1}, {60, 1}, {60, 0}, {60, 1},
+ CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
+ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
}
},
}
};
+
+static const struct ar9300_eeprom *ar9300_eep_templates[] = {
+ &ar9300_default,
+ &ar9300_x112,
+ &ar9300_h116,
+ &ar9300_h112,
+ &ar9300_x113,
+};
+
+static const struct ar9300_eeprom *ar9003_eeprom_struct_find_by_id(int id)
+{
+#define N_LOOP (sizeof(ar9300_eep_templates) / sizeof(ar9300_eep_templates[0]))
+ int it;
+
+ for (it = 0; it < N_LOOP; it++)
+ if (ar9300_eep_templates[it]->templateVersion == id)
+ return ar9300_eep_templates[it];
+ return NULL;
+#undef N_LOOP
+}
+
+
static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
{
- if (fbin == AR9300_BCHAN_UNUSED)
+ if (fbin == AR5416_BCHAN_UNUSED)
return fbin;
return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
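
For reference, the fbin coding used throughout these calibration tables stores each channel in a single byte: 2.4 GHz channels as an offset from 2300 MHz, 5 GHz channels as (freq - 4800) / 5, exactly what the FREQ2FBIN() macro and ath9k_hw_fbin2freq() above implement. A minimal standalone sketch of the round trip (illustration only, not part of the patch):

#include <stdio.h>

/* Same coding as FREQ2FBIN()/ath9k_hw_fbin2freq(): 2.4 GHz channels are
 * stored as (freq - 2300), 5 GHz channels as (freq - 4800) / 5, so every
 * calibration pier fits in one byte. */
static unsigned int freq2fbin(unsigned int freq, int is_2ghz)
{
	return is_2ghz ? (freq - 2300) : ((freq - 4800) / 5);
}

static unsigned int fbin2freq(unsigned int fbin, int is_2ghz)
{
	return is_2ghz ? (2300 + fbin) : (4800 + 5 * fbin);
}

int main(void)
{
	/* 2412 MHz -> fbin 112, 5180 MHz -> fbin 76, and back again */
	printf("%u %u\n", freq2fbin(2412, 1), fbin2freq(freq2fbin(2412, 1), 1));
	printf("%u %u\n", freq2fbin(5180, 0), fbin2freq(freq2fbin(5180, 0), 0));
	return 0;
}
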
@@ -636,6 +2988,16 @@ static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
return 0;
}
+static int interpolate(int x, int xa, int xb, int ya, int yb)
+{
+ int bf, factor, plus;
+
+ bf = 2 * (yb - ya) * (x - xa) / (xb - xa);
+ factor = bf / 2;
+ plus = bf % 2;
+ return ya + factor + plus;
+}
+
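
The interpolate() helper added here replaces several open-coded linear interpolations later in the file; the doubled numerator plus the bf % 2 remainder makes the result round to the nearest step (ties away from zero) instead of truncating toward zero. A standalone sketch of the same arithmetic (illustration only):

#include <stdio.h>

/* Same arithmetic as the driver's interpolate(): linear interpolation
 * between (xa, ya) and (xb, yb); bf holds twice the delta so that
 * bf / 2 + bf % 2 rounds to the nearest integer rather than truncating. */
static int interp(int x, int xa, int xb, int ya, int yb)
{
	int bf = 2 * (yb - ya) * (x - xa) / (xb - xa);

	return ya + bf / 2 + bf % 2;
}

int main(void)
{
	/* halfway between 10 and 13: truncation would give 11, this gives 12 */
	printf("%d\n", interp(5, 0, 10, 10, 13));
	return 0;
}
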
static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
enum eeprom_param param)
{
@@ -673,6 +3035,10 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
return le32_to_cpu(pBase->swreg);
case EEP_PAPRD:
return !!(pBase->featureEnable & BIT(5));
+ case EEP_CHAIN_MASK_REDUCE:
+ return (pBase->miscConfiguration >> 0x3) & 0x1;
+ case EEP_ANT_DIV_CTL1:
+ return le32_to_cpu(eep->base_ext1.ant_div_control);
default:
return 0;
}
@@ -711,8 +3077,8 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
int i;
if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) {
- ath_print(common, ATH_DBG_EEPROM,
- "eeprom address not in range\n");
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "eeprom address not in range\n");
return false;
}
@@ -743,11 +3109,41 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
return true;
error:
- ath_print(common, ATH_DBG_EEPROM,
- "unable to read eeprom region at offset %d\n", address);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "unable to read eeprom region at offset %d\n", address);
return false;
}
+static bool ar9300_otp_read_word(struct ath_hw *ah, int addr, u32 *data)
+{
+ REG_READ(ah, AR9300_OTP_BASE + (4 * addr));
+
+ if (!ath9k_hw_wait(ah, AR9300_OTP_STATUS, AR9300_OTP_STATUS_TYPE,
+ AR9300_OTP_STATUS_VALID, 1000))
+ return false;
+
+ *data = REG_READ(ah, AR9300_OTP_READ_DATA);
+ return true;
+}
+
+static bool ar9300_read_otp(struct ath_hw *ah, int address, u8 *buffer,
+ int count)
+{
+ u32 data;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ int offset = 8 * ((address - i) % 4);
+ if (!ar9300_otp_read_word(ah, (address - i) / 4, &data))
+ return false;
+
+ buffer[i] = (data >> offset) & 0xff;
+ }
+
+ return true;
+}
+
+
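
ar9300_read_otp() fills the buffer with count bytes ending at address and walking downwards; each call to ar9300_otp_read_word() returns the 32-bit word at (address - i) / 4, and the byte lane inside that word is picked by the 8 * ((address - i) % 4) shift. A small sketch of just the byte selection, with the register access left out (illustration only):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the byte selection in ar9300_read_otp(): the byte at OTP
 * address addr lives in bits [8 * (addr % 4) + 7 : 8 * (addr % 4)] of
 * the 32-bit word at index addr / 4. */
static uint8_t otp_byte(uint32_t word, int addr)
{
	return (word >> (8 * (addr % 4))) & 0xff;
}

int main(void)
{
	uint32_t word = 0xa1b2c3d4;
	int addr;

	/* addresses 0..3 of the same word select 0xd4, 0xc3, 0xb2, 0xa1 */
	for (addr = 0; addr < 4; addr++)
		printf("addr %d -> 0x%02x\n", addr, (unsigned)otp_byte(word, addr));
	return 0;
}
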
static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference,
int *length, int *major, int *minor)
{
@@ -798,17 +3194,15 @@ static bool ar9300_uncompress_block(struct ath_hw *ah,
length &= 0xff;
if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
- ath_print(common, ATH_DBG_EEPROM,
- "Restore at %d: spot=%d "
- "offset=%d length=%d\n",
- it, spot, offset, length);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Restore at %d: spot=%d offset=%d length=%d\n",
+ it, spot, offset, length);
memcpy(&mptr[spot], &block[it+2], length);
spot += length;
} else if (length > 0) {
- ath_print(common, ATH_DBG_EEPROM,
- "Bad restore at %d: spot=%d "
- "offset=%d length=%d\n",
- it, spot, offset, length);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Bad restore at %d: spot=%d offset=%d length=%d\n",
+ it, spot, offset, length);
return false;
}
}
@@ -824,45 +3218,80 @@ static int ar9300_compress_decision(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
u8 *dptr;
+ const struct ar9300_eeprom *eep = NULL;
switch (code) {
case _CompressNone:
if (length != mdata_size) {
- ath_print(common, ATH_DBG_EEPROM,
- "EEPROM structure size mismatch"
- "memory=%d eeprom=%d\n", mdata_size, length);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "EEPROM structure size mismatch memory=%d eeprom=%d\n",
+ mdata_size, length);
return -1;
}
memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
- ath_print(common, ATH_DBG_EEPROM, "restored eeprom %d:"
- " uncompressed, length %d\n", it, length);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "restored eeprom %d: uncompressed, length %d\n",
+ it, length);
break;
case _CompressBlock:
if (reference == 0) {
dptr = mptr;
} else {
- if (reference != 2) {
- ath_print(common, ATH_DBG_EEPROM,
- "cant find reference eeprom"
- "struct %d\n", reference);
+ eep = ar9003_eeprom_struct_find_by_id(reference);
+ if (eep == NULL) {
+ ath_dbg(common, ATH_DBG_EEPROM,
+					"can't find reference eeprom struct %d\n",
+ reference);
return -1;
}
- memcpy(mptr, &ar9300_default, mdata_size);
+ memcpy(mptr, eep, mdata_size);
}
- ath_print(common, ATH_DBG_EEPROM,
- "restore eeprom %d: block, reference %d,"
- " length %d\n", it, reference, length);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "restore eeprom %d: block, reference %d, length %d\n",
+ it, reference, length);
ar9300_uncompress_block(ah, mptr, mdata_size,
(u8 *) (word + COMP_HDR_LEN), length);
break;
default:
- ath_print(common, ATH_DBG_EEPROM, "unknown compression"
- " code %d\n", code);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "unknown compression code %d\n", code);
return -1;
}
return 0;
}
+typedef bool (*eeprom_read_op)(struct ath_hw *ah, int address, u8 *buffer,
+ int count);
+
+static bool ar9300_check_header(void *data)
+{
+ u32 *word = data;
+ return !(*word == 0 || *word == ~0);
+}
+
+static bool ar9300_check_eeprom_header(struct ath_hw *ah, eeprom_read_op read,
+ int base_addr)
+{
+ u8 header[4];
+
+ if (!read(ah, base_addr, header, 4))
+ return false;
+
+ return ar9300_check_header(header);
+}
+
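
ar9300_check_eeprom_header() reads the first four bytes at a candidate base address and hands them to ar9300_check_header(), which rejects all-zero and all-ones words: erased EEPROM/OTP storage typically reads back as 0xff bytes and an unprogrammed shadow as zeros, so anything else is treated as the start of real data. A trivial sketch of that test (illustration only):

#include <stdint.h>
#include <stdio.h>

/* Same test as ar9300_check_header(): only a first word that is neither
 * all zeros nor all ones is accepted as a plausible data header. */
static int header_looks_valid(uint32_t first_word)
{
	return !(first_word == 0 || first_word == ~0u);
}

int main(void)
{
	printf("%d %d %d\n", header_looks_valid(0), header_looks_valid(~0u),
	       header_looks_valid(0x1b5c0304));   /* made-up header word */
	return 0;
}
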
+static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr,
+ int mdata_size)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u16 *data = (u16 *) mptr;
+ int i;
+
+ for (i = 0; i < mdata_size / 2; i++, data++)
+ ath9k_hw_nvram_read(common, i, data);
+
+ return 0;
+}
/*
* Read the configuration data from the eeprom.
* The data can be put in any specified memory buffer.
@@ -883,6 +3312,10 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
int it;
u16 checksum, mchecksum;
struct ath_common *common = ath9k_hw_common(ah);
+ eeprom_read_op read;
+
+ if (ath9k_hw_use_flash(ah))
+ return ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
word = kzalloc(2048, GFP_KERNEL);
if (!word)
@@ -890,43 +3323,73 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
memcpy(mptr, &ar9300_default, mdata_size);
+ read = ar9300_read_eeprom;
+ if (AR_SREV_9485(ah))
+ cptr = AR9300_BASE_ADDR_4K;
+ else
+ cptr = AR9300_BASE_ADDR;
+ ath_dbg(common, ATH_DBG_EEPROM,
+		"Trying EEPROM access at Address 0x%04x\n", cptr);
+ if (ar9300_check_eeprom_header(ah, read, cptr))
+ goto found;
+
+ cptr = AR9300_BASE_ADDR_512;
+ ath_dbg(common, ATH_DBG_EEPROM,
+		"Trying EEPROM access at Address 0x%04x\n", cptr);
+ if (ar9300_check_eeprom_header(ah, read, cptr))
+ goto found;
+
+ read = ar9300_read_otp;
cptr = AR9300_BASE_ADDR;
+ ath_dbg(common, ATH_DBG_EEPROM,
+		"Trying OTP access at Address 0x%04x\n", cptr);
+ if (ar9300_check_eeprom_header(ah, read, cptr))
+ goto found;
+
+ cptr = AR9300_BASE_ADDR_512;
+ ath_dbg(common, ATH_DBG_EEPROM,
+		"Trying OTP access at Address 0x%04x\n", cptr);
+ if (ar9300_check_eeprom_header(ah, read, cptr))
+ goto found;
+
+ goto fail;
+
+found:
+ ath_dbg(common, ATH_DBG_EEPROM, "Found valid EEPROM data\n");
+
for (it = 0; it < MSTATE; it++) {
- if (!ar9300_read_eeprom(ah, cptr, word, COMP_HDR_LEN))
+ if (!read(ah, cptr, word, COMP_HDR_LEN))
goto fail;
- if ((word[0] == 0 && word[1] == 0 && word[2] == 0 &&
- word[3] == 0) || (word[0] == 0xff && word[1] == 0xff
- && word[2] == 0xff && word[3] == 0xff))
+ if (!ar9300_check_header(word))
break;
ar9300_comp_hdr_unpack(word, &code, &reference,
&length, &major, &minor);
- ath_print(common, ATH_DBG_EEPROM,
- "Found block at %x: code=%d ref=%d"
- "length=%d major=%d minor=%d\n", cptr, code,
- reference, length, major, minor);
- if (length >= 1024) {
- ath_print(common, ATH_DBG_EEPROM,
- "Skipping bad header\n");
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
+ cptr, code, reference, length, major, minor);
+ if ((!AR_SREV_9485(ah) && length >= 1024) ||
+ (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Skipping bad header\n");
cptr -= COMP_HDR_LEN;
continue;
}
osize = length;
- ar9300_read_eeprom(ah, cptr, word,
- COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
+ read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
mchecksum = word[COMP_HDR_LEN + osize] |
(word[COMP_HDR_LEN + osize + 1] << 8);
- ath_print(common, ATH_DBG_EEPROM,
- "checksum %x %x\n", checksum, mchecksum);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "checksum %x %x\n", checksum, mchecksum);
if (checksum == mchecksum) {
ar9300_compress_decision(ah, it, code, reference, mptr,
word, length, mdata_size);
} else {
- ath_print(common, ATH_DBG_EEPROM,
- "skipping block with bad checksum\n");
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "skipping block with bad checksum\n");
}
cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
}
@@ -967,18 +3430,6 @@ static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
return 0;
}
-static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah,
- enum ath9k_hal_freq_band freq_band)
-{
- return 1;
-}
-
-static u32 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- return -EINVAL;
-}
-
static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
{
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
@@ -992,9 +3443,15 @@ static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
{
int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
- REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, (bias & 0x3));
- REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_SPARE,
- ((bias >> 2) & 0x3));
+
+ if (AR_SREV_9485(ah))
+ REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
+ else {
+ REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
+ REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPABIASLVL_MSB,
+ bias >> 2);
+ REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPASHORT2GND, 1);
+ }
}
static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
@@ -1049,11 +3506,25 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
value = ar9003_hw_ant_ctrl_chain_get(ah, 0, is2ghz);
REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_0, AR_SWITCH_TABLE_ALL, value);
- value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
- REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_1, AR_SWITCH_TABLE_ALL, value);
+ if (!AR_SREV_9485(ah)) {
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_1, AR_SWITCH_TABLE_ALL,
+ value);
+
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 2, is2ghz);
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_2, AR_SWITCH_TABLE_ALL,
+ value);
+ }
- value = ar9003_hw_ant_ctrl_chain_get(ah, 2, is2ghz);
- REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_2, AR_SWITCH_TABLE_ALL, value);
+ if (AR_SREV_9485(ah)) {
+ value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1);
+ REG_RMW_FIELD(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_CTRL_ALL,
+ value);
+ REG_RMW_FIELD(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE,
+ value >> 6);
+ REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE,
+ value >> 7);
+ }
}
static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
@@ -1097,28 +3568,177 @@ static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg);
}
+static u16 ar9003_hw_atten_chain_get(struct ath_hw *ah, int chain,
+ struct ath9k_channel *chan)
+{
+ int f[3], t[3];
+ u16 value;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (chain >= 0 && chain < 3) {
+ if (IS_CHAN_2GHZ(chan))
+ return eep->modalHeader2G.xatten1DB[chain];
+ else if (eep->base_ext2.xatten1DBLow[chain] != 0) {
+ t[0] = eep->base_ext2.xatten1DBLow[chain];
+ f[0] = 5180;
+ t[1] = eep->modalHeader5G.xatten1DB[chain];
+ f[1] = 5500;
+ t[2] = eep->base_ext2.xatten1DBHigh[chain];
+ f[2] = 5785;
+ value = ar9003_hw_power_interpolate((s32) chan->channel,
+ f, t, 3);
+ return value;
+ } else
+ return eep->modalHeader5G.xatten1DB[chain];
+ }
+
+ return 0;
+}
+
+
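
ar9003_hw_atten_chain_get() (and the margin variant that follows) only interpolates in 5 GHz when the base_ext2 low/high overrides are non-zero, anchoring the curve at 5180, 5500 and 5785 MHz; otherwise the single modalHeader5G value is used as-is. A sketch of a clamping piecewise-linear lookup standing in for ar9003_hw_power_interpolate() (hypothetical helper, illustration only):

#include <stdio.h>

/* Hypothetical stand-in for ar9003_hw_power_interpolate(): piecewise
 * linear over ascending anchor frequencies, clamped at both ends.
 * Illustration only, not the driver's implementation. */
static int pwr_interp(int x, const int *f, const int *t, int n)
{
	int i;

	if (x <= f[0])
		return t[0];
	if (x >= f[n - 1])
		return t[n - 1];
	for (i = 0; i < n - 1; i++)
		if (x <= f[i + 1])
			return t[i] + (t[i + 1] - t[i]) * (x - f[i]) /
				      (f[i + 1] - f[i]);
	return t[n - 1];
}

int main(void)
{
	int f[3] = { 5180, 5500, 5785 };   /* anchor frequencies in MHz */
	int t[3] = { 10, 25, 20 };         /* example xatten1 settings */

	printf("%d\n", pwr_interp(5320, f, t, 3));
	return 0;
}
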
+static u16 ar9003_hw_atten_chain_get_margin(struct ath_hw *ah, int chain,
+ struct ath9k_channel *chan)
+{
+ int f[3], t[3];
+ u16 value;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (chain >= 0 && chain < 3) {
+ if (IS_CHAN_2GHZ(chan))
+ return eep->modalHeader2G.xatten1Margin[chain];
+ else if (eep->base_ext2.xatten1MarginLow[chain] != 0) {
+ t[0] = eep->base_ext2.xatten1MarginLow[chain];
+ f[0] = 5180;
+ t[1] = eep->modalHeader5G.xatten1Margin[chain];
+ f[1] = 5500;
+ t[2] = eep->base_ext2.xatten1MarginHigh[chain];
+ f[2] = 5785;
+ value = ar9003_hw_power_interpolate((s32) chan->channel,
+ f, t, 3);
+ return value;
+ } else
+ return eep->modalHeader5G.xatten1Margin[chain];
+ }
+
+ return 0;
+}
+
+static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ int i;
+ u16 value;
+ unsigned long ext_atten_reg[3] = {AR_PHY_EXT_ATTEN_CTL_0,
+ AR_PHY_EXT_ATTEN_CTL_1,
+ AR_PHY_EXT_ATTEN_CTL_2,
+ };
+
+	/* Test value. If 0 then attenuation is unused. Don't load anything. */
+ for (i = 0; i < 3; i++) {
+ value = ar9003_hw_atten_chain_get(ah, i, chan);
+ REG_RMW_FIELD(ah, ext_atten_reg[i],
+ AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
+
+ value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
+ REG_RMW_FIELD(ah, ext_atten_reg[i],
+ AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN, value);
+ }
+}
+
+static bool is_pmu_set(struct ath_hw *ah, u32 pmu_reg, int pmu_set)
+{
+ int timeout = 100;
+
+ while (pmu_set != REG_READ(ah, pmu_reg)) {
+ if (timeout-- == 0)
+ return false;
+ REG_WRITE(ah, pmu_reg, pmu_set);
+ udelay(10);
+ }
+
+ return true;
+}
+
static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
{
int internal_regulator =
ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
if (internal_regulator) {
- /* Internal regulator is ON. Write swreg register. */
- int swreg = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
- REG_WRITE(ah, AR_RTC_REG_CONTROL1,
- REG_READ(ah, AR_RTC_REG_CONTROL1) &
- (~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
- REG_WRITE(ah, AR_RTC_REG_CONTROL0, swreg);
- /* Set REG_CONTROL1.SWREG_PROGRAM */
- REG_WRITE(ah, AR_RTC_REG_CONTROL1,
- REG_READ(ah,
- AR_RTC_REG_CONTROL1) |
- AR_RTC_REG_CONTROL1_SWREG_PROGRAM);
+ if (AR_SREV_9485(ah)) {
+ int reg_pmu_set;
+
+ reg_pmu_set = REG_READ(ah, AR_PHY_PMU2) & ~AR_PHY_PMU2_PGM;
+ REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
+ return;
+
+ reg_pmu_set = (5 << 1) | (7 << 4) | (1 << 8) |
+ (2 << 14) | (6 << 17) | (1 << 20) |
+ (3 << 24) | (1 << 28);
+
+ REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU1, reg_pmu_set))
+ return;
+
+ reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0xFFC00000)
+ | (4 << 26);
+ REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
+ return;
+
+ reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0x00200000)
+ | (1 << 21);
+ REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
+ return;
+ } else {
+ /* Internal regulator is ON. Write swreg register. */
+ int swreg = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
+ REG_WRITE(ah, AR_RTC_REG_CONTROL1,
+ REG_READ(ah, AR_RTC_REG_CONTROL1) &
+ (~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
+ REG_WRITE(ah, AR_RTC_REG_CONTROL0, swreg);
+ /* Set REG_CONTROL1.SWREG_PROGRAM */
+ REG_WRITE(ah, AR_RTC_REG_CONTROL1,
+ REG_READ(ah,
+ AR_RTC_REG_CONTROL1) |
+ AR_RTC_REG_CONTROL1_SWREG_PROGRAM);
+ }
} else {
- REG_WRITE(ah, AR_RTC_SLEEP_CLK,
- (REG_READ(ah,
- AR_RTC_SLEEP_CLK) |
- AR_RTC_FORCE_SWREG_PRD));
+ if (AR_SREV_9485(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0);
+ while (REG_READ_FIELD(ah, AR_PHY_PMU2,
+ AR_PHY_PMU2_PGM))
+ udelay(10);
+
+ REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
+ while (!REG_READ_FIELD(ah, AR_PHY_PMU1,
+ AR_PHY_PMU1_PWD))
+ udelay(10);
+ REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0x1);
+ while (!REG_READ_FIELD(ah, AR_PHY_PMU2,
+ AR_PHY_PMU2_PGM))
+ udelay(10);
+ } else
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK,
+ (REG_READ(ah,
+ AR_RTC_SLEEP_CLK) |
+ AR_RTC_FORCE_SWREG_PRD));
+ }
+
+}
+
+static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ u8 tuning_caps_param = eep->baseEepHeader.params_for_tuning_caps[0];
+
+ if (eep->baseEepHeader.featureEnable & 0x40) {
+ tuning_caps_param &= 0x7f;
+ REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPINDAC,
+ tuning_caps_param);
+ REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPOUTDAC,
+ tuning_caps_param);
}
}
@@ -1128,7 +3748,10 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan));
ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
ar9003_hw_drive_strength_apply(ah);
+ ar9003_hw_atten_apply(ah, chan);
ar9003_hw_internal_regulator_apply(ah);
+ if (AR_SREV_9485(ah))
+ ar9003_hw_apply_tuning_caps(ah);
}
static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -1189,7 +3812,7 @@ static int ar9003_hw_power_interpolate(int32_t x,
if (hx == lx)
y = ly;
else /* interpolate */
- y = ly + (((x - lx) * (hy - ly)) / (hx - lx));
+ y = interpolate(x, lx, hx, ly, hy);
} else /* only low is good, use it */
y = ly;
} else if (hhave) /* only high is good, use it */
@@ -1336,19 +3959,19 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
{
#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
/* make sure forced gain is not set */
- REG_WRITE(ah, 0xa458, 0);
+ REG_WRITE(ah, AR_PHY_TX_FORCED_GAIN, 0);
/* Write the OFDM power per rate set */
/* 6 (LSB), 9, 12, 18 (MSB) */
- REG_WRITE(ah, 0xa3c0,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(0),
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
/* 24 (LSB), 36, 48, 54 (MSB) */
- REG_WRITE(ah, 0xa3c4,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(1),
POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
@@ -1357,14 +3980,14 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
/* Write the CCK power per rate set */
/* 1L (LSB), reserved, 2L, 2S (MSB) */
- REG_WRITE(ah, 0xa3c8,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(2),
POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
/* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));
/* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
- REG_WRITE(ah, 0xa3cc,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(3),
POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
@@ -1374,7 +3997,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
/* Write the HT20 power per rate set */
/* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
- REG_WRITE(ah, 0xa3d0,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(4),
POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
@@ -1382,7 +4005,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
);
/* 6 (LSB), 7, 12, 13 (MSB) */
- REG_WRITE(ah, 0xa3d4,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(5),
POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
@@ -1390,7 +4013,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
);
/* 14 (LSB), 15, 20, 21 */
- REG_WRITE(ah, 0xa3e4,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(9),
POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
@@ -1400,7 +4023,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
/* Mixed HT20 and HT40 rates */
/* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
- REG_WRITE(ah, 0xa3e8,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(10),
POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
@@ -1412,7 +4035,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
* correct PAR difference between HT40 and HT20/LEGACY
* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
*/
- REG_WRITE(ah, 0xa3d8,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(6),
POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
@@ -1420,7 +4043,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
);
/* 6 (LSB), 7, 12, 13 (MSB) */
- REG_WRITE(ah, 0xa3dc,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(7),
POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
@@ -1428,7 +4051,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
);
/* 14 (LSB), 15, 20, 21 */
- REG_WRITE(ah, 0xa3ec,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(11),
POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
@@ -1558,22 +4181,9 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
is2GHz) + ht40PowerIncForPdadc;
- while (i < ar9300RateSize) {
- ath_print(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
- i++;
-
- ath_print(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
- i++;
-
- ath_print(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
- i++;
-
- ath_print(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
- i++;
+ for (i = 0; i < ar9300RateSize; i++) {
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
}
}
@@ -1592,18 +4202,17 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
struct ath_common *common = ath9k_hw_common(ah);
if (ichain >= AR9300_MAX_CHAINS) {
- ath_print(common, ATH_DBG_EEPROM,
- "Invalid chain index, must be less than %d\n",
- AR9300_MAX_CHAINS);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Invalid chain index, must be less than %d\n",
+ AR9300_MAX_CHAINS);
return -1;
}
if (mode) { /* 5GHz */
if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
- ath_print(common, ATH_DBG_EEPROM,
- "Invalid 5GHz cal pier index, must "
- "be less than %d\n",
- AR9300_NUM_5G_CAL_PIERS);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Invalid 5GHz cal pier index, must be less than %d\n",
+ AR9300_NUM_5G_CAL_PIERS);
return -1;
}
pCalPier = &(eep->calFreqPier5G[ipier]);
@@ -1611,9 +4220,9 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
is2GHz = 0;
} else {
if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
- ath_print(common, ATH_DBG_EEPROM,
- "Invalid 2GHz cal pier index, must "
- "be less than %d\n", AR9300_NUM_2G_CAL_PIERS);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Invalid 2GHz cal pier index, must be less than %d\n",
+ AR9300_NUM_2G_CAL_PIERS);
return -1;
}
@@ -1637,27 +4246,32 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
{
int tempSlope = 0;
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ int f[3], t[3];
REG_RMW(ah, AR_PHY_TPC_11_B0,
(correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
AR_PHY_TPC_OLPC_GAIN_DELTA);
- REG_RMW(ah, AR_PHY_TPC_11_B1,
- (correction[1] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
- AR_PHY_TPC_OLPC_GAIN_DELTA);
- REG_RMW(ah, AR_PHY_TPC_11_B2,
- (correction[2] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
- AR_PHY_TPC_OLPC_GAIN_DELTA);
+ if (ah->caps.tx_chainmask & BIT(1))
+ REG_RMW(ah, AR_PHY_TPC_11_B1,
+ (correction[1] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
+ if (ah->caps.tx_chainmask & BIT(2))
+ REG_RMW(ah, AR_PHY_TPC_11_B2,
+ (correction[2] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
+ AR_PHY_TPC_OLPC_GAIN_DELTA);
/* enable open loop power control on chip */
REG_RMW(ah, AR_PHY_TPC_6_B0,
(3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
AR_PHY_TPC_6_ERROR_EST_MODE);
- REG_RMW(ah, AR_PHY_TPC_6_B1,
- (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
- AR_PHY_TPC_6_ERROR_EST_MODE);
- REG_RMW(ah, AR_PHY_TPC_6_B2,
- (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
- AR_PHY_TPC_6_ERROR_EST_MODE);
+ if (ah->caps.tx_chainmask & BIT(1))
+ REG_RMW(ah, AR_PHY_TPC_6_B1,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
+ if (ah->caps.tx_chainmask & BIT(2))
+ REG_RMW(ah, AR_PHY_TPC_6_B2,
+ (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
+ AR_PHY_TPC_6_ERROR_EST_MODE);
/*
* enable temperature compensation
@@ -1665,7 +4279,16 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
*/
if (frequency < 4000)
tempSlope = eep->modalHeader2G.tempSlope;
- else
+ else if (eep->base_ext2.tempSlopeLow != 0) {
+ t[0] = eep->base_ext2.tempSlopeLow;
+ f[0] = 5180;
+ t[1] = eep->modalHeader5G.tempSlope;
+ f[1] = 5500;
+ t[2] = eep->base_ext2.tempSlopeHigh;
+ f[2] = 5785;
+ tempSlope = ar9003_hw_power_interpolate((s32) frequency,
+ f, t, 3);
+ } else
tempSlope = eep->modalHeader5G.tempSlope;
REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);
@@ -1753,11 +4376,11 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
/* interpolate */
for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
- ath_print(common, ATH_DBG_EEPROM,
- "ch=%d f=%d low=%d %d h=%d %d\n",
- ichain, frequency, lfrequency[ichain],
- lcorrection[ichain], hfrequency[ichain],
- hcorrection[ichain]);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "ch=%d f=%d low=%d %d h=%d %d\n",
+ ichain, frequency, lfrequency[ichain],
+ lcorrection[ichain], hfrequency[ichain],
+ hcorrection[ichain]);
/* they're the same, so just pick one */
if (hfrequency[ichain] == lfrequency[ichain]) {
correction[ichain] = lcorrection[ichain];
@@ -1769,25 +4392,23 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
/* so is the high frequency, interpolate */
if (hfrequency[ichain] - frequency < 1000) {
- correction[ichain] = lcorrection[ichain] +
- (((frequency - lfrequency[ichain]) *
- (hcorrection[ichain] -
- lcorrection[ichain])) /
- (hfrequency[ichain] - lfrequency[ichain]));
-
- temperature[ichain] = ltemperature[ichain] +
- (((frequency - lfrequency[ichain]) *
- (htemperature[ichain] -
- ltemperature[ichain])) /
- (hfrequency[ichain] - lfrequency[ichain]));
-
- voltage[ichain] =
- lvoltage[ichain] +
- (((frequency -
- lfrequency[ichain]) * (hvoltage[ichain] -
- lvoltage[ichain]))
- / (hfrequency[ichain] -
- lfrequency[ichain]));
+ correction[ichain] = interpolate(frequency,
+ lfrequency[ichain],
+ hfrequency[ichain],
+ lcorrection[ichain],
+ hcorrection[ichain]);
+
+ temperature[ichain] = interpolate(frequency,
+ lfrequency[ichain],
+ hfrequency[ichain],
+ ltemperature[ichain],
+ htemperature[ichain]);
+
+ voltage[ichain] = interpolate(frequency,
+ lfrequency[ichain],
+ hfrequency[ichain],
+ lvoltage[ichain],
+ hvoltage[ichain]);
}
/* only low is good, use it */
else {
@@ -1811,9 +4432,9 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
ar9003_hw_power_control_override(ah, frequency, correction, voltage,
temperature);
- ath_print(common, ATH_DBG_EEPROM,
- "for frequency=%d, calibration correction = %d %d %d\n",
- frequency, correction[0], correction[1], correction[2]);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "for frequency=%d, calibration correction = %d %d %d\n",
+ frequency, correction[0], correction[1], correction[2]);
return 0;
}
@@ -1827,9 +4448,9 @@ static u16 ar9003_hw_get_direct_edge_power(struct ar9300_eeprom *eep,
struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G;
if (is2GHz)
- return ctl_2g[idx].ctlEdges[edge].tPower;
+ return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge]);
else
- return ctl_5g[idx].ctlEdges[edge].tPower;
+ return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge]);
}
static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
@@ -1847,15 +4468,15 @@ static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
if (is2GHz) {
if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq &&
- ctl_2g[idx].ctlEdges[edge - 1].flag)
- return ctl_2g[idx].ctlEdges[edge - 1].tPower;
+ CTL_EDGE_FLAGS(ctl_2g[idx].ctlEdges[edge - 1]))
+ return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge - 1]);
} else {
if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq &&
- ctl_5g[idx].ctlEdges[edge - 1].flag)
- return ctl_5g[idx].ctlEdges[edge - 1].tPower;
+ CTL_EDGE_FLAGS(ctl_5g[idx].ctlEdges[edge - 1]))
+ return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]);
}
- return AR9300_MAX_RATE_POWER;
+ return MAX_RATE_POWER;
}
/*
@@ -1864,7 +4485,7 @@ static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
u16 freq, int idx, bool is2GHz)
{
- u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER;
+ u16 twiceMaxEdgePower = MAX_RATE_POWER;
u8 *ctl_freqbin = is2GHz ?
&eep->ctl_freqbin_2G[idx][0] :
&eep->ctl_freqbin_5G[idx][0];
@@ -1874,7 +4495,7 @@ static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
/* Get the edge power */
for (edge = 0;
- (edge < num_edges) && (ctl_freqbin[edge] != AR9300_BCHAN_UNUSED);
+ (edge < num_edges) && (ctl_freqbin[edge] != AR5416_BCHAN_UNUSED);
edge++) {
/*
* If there's an exact channel match or an inband flag set
@@ -1912,21 +4533,23 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
- u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER;
+ u16 twiceMaxEdgePower = MAX_RATE_POWER;
static const u16 tpScaleReductionTable[5] = {
- 0, 3, 6, 9, AR9300_MAX_RATE_POWER
+ 0, 3, 6, 9, MAX_RATE_POWER
};
int i;
int16_t twiceLargestAntenna;
u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
- u16 ctlModesFor11a[] = {
+ static const u16 ctlModesFor11a[] = {
CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
};
- u16 ctlModesFor11g[] = {
+ static const u16 ctlModesFor11g[] = {
CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT,
CTL_11G_EXT, CTL_2GHT40
};
- u16 numCtlModes, *pCtlMode, ctlMode, freq;
+ u16 numCtlModes;
+ const u16 *pCtlMode;
+ u16 ctlMode, freq;
struct chan_centers centers;
u8 *ctlIndex;
u8 ctlNum;
@@ -2016,11 +4639,10 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
- ath_print(common, ATH_DBG_REGULATORY,
- "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, "
- "EXT_ADDITIVE %d\n",
- ctlMode, numCtlModes, isHt40CtlMode,
- (pCtlMode[ctlMode] & EXT_ADDITIVE));
+ ath_dbg(common, ATH_DBG_REGULATORY,
+ "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, EXT_ADDITIVE %d\n",
+ ctlMode, numCtlModes, isHt40CtlMode,
+ (pCtlMode[ctlMode] & EXT_ADDITIVE));
/* walk through each CTL index stored in EEPROM */
if (is2ghz) {
@@ -2032,12 +4654,10 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
}
for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
- ath_print(common, ATH_DBG_REGULATORY,
- "LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
- "pCtlMode 0x%2.2x ctlIndex 0x%2.2x "
- "chan %dn",
- i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
- chan->channel);
+ ath_dbg(common, ATH_DBG_REGULATORY,
+ "LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n",
+ i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
+ chan->channel);
/*
* compare test group from regulatory
@@ -2076,11 +4696,10 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
- ath_print(common, ATH_DBG_REGULATORY,
- "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d "
- "sP %d minCtlPwr %d\n",
- ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
- scaledPower, minCtlPower);
+ ath_dbg(common, ATH_DBG_REGULATORY,
+ "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
+ ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
+ scaledPower, minCtlPower);
/* Apply ctl mode to correct target power set */
switch (pCtlMode[ctlMode]) {
@@ -2127,40 +4746,101 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
} /* end ctl mode checking */
}
+static inline u8 mcsidx_to_tgtpwridx(unsigned int mcs_idx, u8 base_pwridx)
+{
+ u8 mod_idx = mcs_idx % 8;
+
+ if (mod_idx <= 3)
+ return mod_idx ? (base_pwridx + 1) : base_pwridx;
+ else
+ return base_pwridx + 4 * (mcs_idx / 8) + mod_idx - 2;
+}
+
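
mcsidx_to_tgtpwridx() maps an MCS index onto the compressed 14-entry target-power table: MCS 0/8/16 share the base entry, MCS 1-3/9-11/17-19 share base + 1, and every remaining rate gets its own slot (the same grouping the calTargetPower*HT* comments describe). A quick standalone check of the mapping, with base_pwridx taken as 0 for readability (illustration only):

#include <stdio.h>

/* Same mapping as mcsidx_to_tgtpwridx(): MCS 0/8/16 -> base,
 * 1-3/9-11/17-19 -> base + 1, all other rates get individual entries,
 * filling the 14 slots of a cal_tgt_pow_ht table. */
static unsigned int mcs_to_pwridx(unsigned int mcs, unsigned int base)
{
	unsigned int mod = mcs % 8;

	if (mod <= 3)
		return mod ? base + 1 : base;
	return base + 4 * (mcs / 8) + mod - 2;
}

int main(void)
{
	unsigned int mcs;

	for (mcs = 0; mcs < 24; mcs++)
		printf("MCS %2u -> index %u\n", mcs, mcs_to_pwridx(mcs, 0));
	return 0;
}
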
static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
struct ath9k_channel *chan, u16 cfgCtl,
u8 twiceAntennaReduction,
u8 twiceMaxRegulatoryPower,
- u8 powerLimit)
+ u8 powerLimit, bool test)
{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_modal_eep_header *modal_hdr;
u8 targetPowerValT2[ar9300RateSize];
- unsigned int i = 0;
+ u8 target_power_val_t2_eep[ar9300RateSize];
+ unsigned int i = 0, paprd_scale_factor = 0;
+ u8 pwr_idx, min_pwridx = 0;
ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2);
+
+ if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) {
+ if (IS_CHAN_2GHZ(chan))
+ modal_hdr = &eep->modalHeader2G;
+ else
+ modal_hdr = &eep->modalHeader5G;
+
+ ah->paprd_ratemask =
+ le32_to_cpu(modal_hdr->papdRateMaskHt20) &
+ AR9300_PAPRD_RATE_MASK;
+
+ ah->paprd_ratemask_ht40 =
+ le32_to_cpu(modal_hdr->papdRateMaskHt40) &
+ AR9300_PAPRD_RATE_MASK;
+
+ paprd_scale_factor = ar9003_get_paprd_scale_factor(ah, chan);
+ min_pwridx = IS_CHAN_HT40(chan) ? ALL_TARGET_HT40_0_8_16 :
+ ALL_TARGET_HT20_0_8_16;
+
+ if (!ah->paprd_table_write_done) {
+ memcpy(target_power_val_t2_eep, targetPowerValT2,
+ sizeof(targetPowerValT2));
+ for (i = 0; i < 24; i++) {
+ pwr_idx = mcsidx_to_tgtpwridx(i, min_pwridx);
+ if (ah->paprd_ratemask & (1 << i)) {
+ if (targetPowerValT2[pwr_idx] &&
+ targetPowerValT2[pwr_idx] ==
+ target_power_val_t2_eep[pwr_idx])
+ targetPowerValT2[pwr_idx] -=
+ paprd_scale_factor;
+ }
+ }
+ }
+ memcpy(target_power_val_t2_eep, targetPowerValT2,
+ sizeof(targetPowerValT2));
+ }
+
ar9003_hw_set_power_per_rate_table(ah, chan,
targetPowerValT2, cfgCtl,
twiceAntennaReduction,
twiceMaxRegulatoryPower,
powerLimit);
- while (i < ar9300RateSize) {
- ath_print(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
- i++;
- ath_print(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
- i++;
- ath_print(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
- i++;
- ath_print(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x\n\n", i, targetPowerValT2[i]);
- i++;
+ if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) {
+ for (i = 0; i < ar9300RateSize; i++) {
+ if ((ah->paprd_ratemask & (1 << i)) &&
+ (abs(targetPowerValT2[i] -
+ target_power_val_t2_eep[i]) >
+ paprd_scale_factor)) {
+ ah->paprd_ratemask &= ~(1 << i);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "paprd disabled for mcs %d\n", i);
+ }
+ }
}
- /* Write target power array to registers */
- ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
+ regulatory->max_power_level = 0;
+ for (i = 0; i < ar9300RateSize; i++) {
+ if (targetPowerValT2[i] > regulatory->max_power_level)
+ regulatory->max_power_level = targetPowerValT2[i];
+ }
+
+ if (test)
+ return;
+
+ for (i = 0; i < ar9300RateSize; i++) {
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
+ }
/*
* This is the TX power we send back to driver core,
@@ -2180,8 +4860,24 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
i = ALL_TARGET_HT20_0_8_16; /* ht20 */
ah->txpower_limit = targetPowerValT2[i];
+ regulatory->max_power_level = targetPowerValT2[i];
+ /* Write target power array to registers */
+ ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
ar9003_hw_calibration_apply(ah, chan->channel);
+
+ if (IS_CHAN_2GHZ(chan)) {
+ if (IS_CHAN_HT40(chan))
+ i = ALL_TARGET_HT40_0_8_16;
+ else
+ i = ALL_TARGET_HT20_0_8_16;
+ } else {
+ if (IS_CHAN_HT40(chan))
+ i = ALL_TARGET_HT40_7;
+ else
+ i = ALL_TARGET_HT20_7;
+ }
+ ah->paprd_target_power = targetPowerValT2[i];
}
static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah,
@@ -2204,14 +4900,43 @@ s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah)
return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */
}
+u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (is_2ghz)
+ return eep->modalHeader2G.spurChans;
+ else
+ return eep->modalHeader5G.spurChans;
+}
+
+unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ if (IS_CHAN_2GHZ(chan))
+ return MS(le32_to_cpu(eep->modalHeader2G.papdRateMaskHt20),
+ AR9300_PAPRD_SCALE_1);
+ else {
+ if (chan->channel >= 5700)
+ return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20),
+ AR9300_PAPRD_SCALE_1);
+ else if (chan->channel >= 5400)
+ return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40),
+ AR9300_PAPRD_SCALE_2);
+ else
+ return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40),
+ AR9300_PAPRD_SCALE_1);
+ }
+}
+
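
ar9003_get_paprd_scale_factor() reuses the 32-bit papdRateMask words: bits 24:0 carry the PAPRD rate mask, bits 27:25 the first scale-factor field and bits 30:28 the second, which is what the AR9300_PAPRD_SCALE_* masks and shifts in the header diff below encode. A sketch of the extraction on a host-order value, with a local stand-in for the kernel's MS() shift-and-mask helper (illustration only):

#include <stdint.h>
#include <stdio.h>

#define PAPRD_RATE_MASK  0x01ffffff   /* bits 24:0  - rate mask          */
#define PAPRD_SCALE_1    0x0e000000   /* bits 27:25 - first scale field  */
#define PAPRD_SCALE_1_S  25
#define PAPRD_SCALE_2    0x70000000   /* bits 30:28 - second scale field */
#define PAPRD_SCALE_2_S  28

/* Local stand-in for the kernel's MS() shift-and-mask helper. */
#define MS(val, field)   (((val) & field) >> field##_S)

int main(void)
{
	uint32_t papd_ht20 = 0x0cf0e0e0;   /* 5 GHz HT20 word from the template */

	printf("rate mask 0x%07x, scale1 %u, scale2 %u\n",
	       papd_ht20 & PAPRD_RATE_MASK,
	       (unsigned)MS(papd_ht20, PAPRD_SCALE_1),
	       (unsigned)MS(papd_ht20, PAPRD_SCALE_2));
	return 0;
}
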
const struct eeprom_ops eep_ar9300_ops = {
.check_eeprom = ath9k_hw_ar9300_check_eeprom,
.get_eeprom = ath9k_hw_ar9300_get_eeprom,
.fill_eeprom = ath9k_hw_ar9300_fill_eeprom,
.get_eeprom_ver = ath9k_hw_ar9300_get_eeprom_ver,
.get_eeprom_rev = ath9k_hw_ar9300_get_eeprom_rev,
- .get_num_ant_config = ath9k_hw_ar9300_get_num_ant_config,
- .get_eeprom_antenna_cfg = ath9k_hw_ar9300_get_eeprom_antenna_cfg,
.set_board_values = ath9k_hw_ar9300_set_board_values,
.set_addac = ath9k_hw_ar9300_set_addac,
.set_txpower = ath9k_hw_ar9300_set_txpower,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 3c533bb983c7..afb0b5ee1865 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -20,47 +20,22 @@
/* #define AR9300_NUM_CTLS 21 */
#define AR9300_NUM_CTLS_5G 9
#define AR9300_NUM_CTLS_2G 12
-#define AR9300_CTL_MODE_M 0xF
#define AR9300_NUM_BAND_EDGES_5G 8
#define AR9300_NUM_BAND_EDGES_2G 4
-#define AR9300_NUM_PD_GAINS 4
-#define AR9300_PD_GAINS_IN_MASK 4
-#define AR9300_PD_GAIN_ICEPTS 5
-#define AR9300_EEPROM_MODAL_SPURS 5
-#define AR9300_MAX_RATE_POWER 63
-#define AR9300_NUM_PDADC_VALUES 128
-#define AR9300_NUM_RATES 16
-#define AR9300_BCHAN_UNUSED 0xFF
-#define AR9300_MAX_PWR_RANGE_IN_HALF_DB 64
-#define AR9300_OPFLAGS_11A 0x01
-#define AR9300_OPFLAGS_11G 0x02
-#define AR9300_OPFLAGS_5G_HT40 0x04
-#define AR9300_OPFLAGS_2G_HT40 0x08
-#define AR9300_OPFLAGS_5G_HT20 0x10
-#define AR9300_OPFLAGS_2G_HT20 0x20
#define AR9300_EEPMISC_BIG_ENDIAN 0x01
#define AR9300_EEPMISC_WOW 0x02
#define AR9300_CUSTOMER_DATA_SIZE 20
-#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
#define AR9300_MAX_CHAINS 3
#define AR9300_ANT_16S 25
#define AR9300_FUTURE_MODAL_SZ 6
-#define AR9300_NUM_ANT_CHAIN_FIELDS 7
-#define AR9300_NUM_ANT_COMMON_FIELDS 4
-#define AR9300_SIZE_ANT_CHAIN_FIELD 3
-#define AR9300_SIZE_ANT_COMMON_FIELD 4
-#define AR9300_ANT_CHAIN_MASK 0x7
-#define AR9300_ANT_COMMON_MASK 0xf
-#define AR9300_CHAIN_0_IDX 0
-#define AR9300_CHAIN_1_IDX 1
-#define AR9300_CHAIN_2_IDX 2
-
-#define AR928X_NUM_ANT_CHAIN_FIELDS 6
-#define AR928X_SIZE_ANT_CHAIN_FIELD 2
-#define AR928X_ANT_CHAIN_MASK 0x3
+#define AR9300_PAPRD_RATE_MASK 0x01ffffff
+#define AR9300_PAPRD_SCALE_1 0x0e000000
+#define AR9300_PAPRD_SCALE_1_S 25
+#define AR9300_PAPRD_SCALE_2 0x70000000
+#define AR9300_PAPRD_SCALE_2_S 28
/* Delta from which to start power to pdadc table */
/* This offset is used in both open loop and closed loop power control
@@ -71,14 +46,20 @@
*/
#define AR9300_PWR_TABLE_OFFSET 0
-/* enable flags for voltage and temp compensation */
-#define ENABLE_TEMP_COMPENSATION 0x01
-#define ENABLE_VOLT_COMPENSATION 0x02
/* byte addressable */
#define AR9300_EEPROM_SIZE (16*1024)
-#define FIXED_CCA_THRESHOLD 15
+#define AR9300_BASE_ADDR_4K 0xfff
#define AR9300_BASE_ADDR 0x3ff
+#define AR9300_BASE_ADDR_512 0x1ff
+
+#define AR9300_OTP_BASE 0x14000
+#define AR9300_OTP_STATUS 0x15f18
+#define AR9300_OTP_STATUS_TYPE 0x7
+#define AR9300_OTP_STATUS_VALID 0x4
+#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
+#define AR9300_OTP_STATUS_SM_BUSY 0x1
+#define AR9300_OTP_READ_DATA 0x15f1c
enum targetPowerHTRates {
HT_TARGET_RATE_0_8_16,
@@ -216,7 +197,7 @@ struct ar9300_modal_eep_header {
int8_t tempSlope;
int8_t voltSlope;
/* spur channels in usual fbin coding format */
- u8 spurChans[AR9300_EEPROM_MODAL_SPURS];
+ u8 spurChans[AR_EEPROM_MODAL_SPURS];
/* 3 Check if the register is per chain */
int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
u8 ob[AR9300_MAX_CHAINS];
@@ -236,7 +217,7 @@ struct ar9300_modal_eep_header {
u8 thresh62;
__le32 papdRateMaskHt20;
__le32 papdRateMaskHt40;
- u8 futureModal[24];
+ u8 futureModal[10];
} __packed;
struct ar9300_cal_data_per_freq_op_loop {
@@ -261,17 +242,26 @@ struct cal_tgt_pow_ht {
u8 tPow2x[14];
} __packed;
-struct cal_ctl_edge_pwr {
- u8 tPower:6,
- flag:2;
-} __packed;
-
struct cal_ctl_data_2g {
- struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G];
+ u8 ctlEdges[AR9300_NUM_BAND_EDGES_2G];
} __packed;
struct cal_ctl_data_5g {
- struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G];
+ u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
+} __packed;
+
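
With this change the ctlEdges arrays shrink from a two-field bitfield struct to plain bytes, and the 6-bit tPower / 2-bit flag values are unpacked by the CTL_EDGE_TPOWER() and CTL_EDGE_FLAGS() helpers used in ar9003_eeprom.c above (their definitions live outside this hunk). A sketch of the presumed packing, with hypothetical local macro names (power in bits 5:0, flags in bits 7:6, matching the old cal_ctl_edge_pwr layout):

#include <stdio.h>

/* Presumed layout of a packed CTL edge byte: 6 bits of twice-the-power
 * in the low bits and 2 flag bits on top, i.e. the same information the
 * old cal_ctl_edge_pwr { tPower:6, flag:2 } bitfield carried.
 * Illustration only; macro names here are local, not the driver's. */
#define EDGE_PACK(pwr, fl)  ((unsigned char)(((pwr) & 0x3f) | (((fl) & 0x3) << 6)))
#define EDGE_TPOWER(ctl)    ((ctl) & 0x3f)
#define EDGE_FLAGS(ctl)     (((ctl) >> 6) & 0x03)

int main(void)
{
	unsigned char edge = EDGE_PACK(60, 1);   /* a CTL(60, 1) style entry */

	printf("tPower=%u flags=%u\n",
	       (unsigned)EDGE_TPOWER(edge), (unsigned)EDGE_FLAGS(edge));
	return 0;
}
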
+struct ar9300_BaseExtension_1 {
+ u8 ant_div_control;
+ u8 future[13];
+} __packed;
+
+struct ar9300_BaseExtension_2 {
+ int8_t tempSlopeLow;
+ int8_t tempSlopeHigh;
+ u8 xatten1DBLow[AR9300_MAX_CHAINS];
+ u8 xatten1MarginLow[AR9300_MAX_CHAINS];
+ u8 xatten1DBHigh[AR9300_MAX_CHAINS];
+ u8 xatten1MarginHigh[AR9300_MAX_CHAINS];
} __packed;
struct ar9300_eeprom {
@@ -283,6 +273,7 @@ struct ar9300_eeprom {
struct ar9300_base_eep_hdr baseEepHeader;
struct ar9300_modal_eep_header modalHeader2G;
+ struct ar9300_BaseExtension_1 base_ext1;
u8 calFreqPier2G[AR9300_NUM_2G_CAL_PIERS];
struct ar9300_cal_data_per_freq_op_loop
calPierData2G[AR9300_MAX_CHAINS][AR9300_NUM_2G_CAL_PIERS];
@@ -302,6 +293,7 @@ struct ar9300_eeprom {
u8 ctl_freqbin_2G[AR9300_NUM_CTLS_2G][AR9300_NUM_BAND_EDGES_2G];
struct cal_ctl_data_2g ctlPowerData_2G[AR9300_NUM_CTLS_2G];
struct ar9300_modal_eep_header modalHeader5G;
+ struct ar9300_BaseExtension_2 base_ext2;
u8 calFreqPier5G[AR9300_NUM_5G_CAL_PIERS];
struct ar9300_cal_data_per_freq_op_loop
calPierData5G[AR9300_MAX_CHAINS][AR9300_NUM_5G_CAL_PIERS];
@@ -322,4 +314,8 @@ struct ar9300_eeprom {
s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
+u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
+
+unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
+ struct ath9k_channel *chan);
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index c2a057156bfa..7f5de6e4448b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -17,20 +17,10 @@
#include "hw.h"
#include "ar9003_mac.h"
#include "ar9003_2p2_initvals.h"
+#include "ar9485_initvals.h"
 /* General hardware code for the AR9003 hardware family */
-static bool ar9003_hw_macversion_supported(u32 macversion)
-{
- switch (macversion) {
- case AR_SREV_VERSION_9300:
- return true;
- default:
- break;
- }
- return false;
-}
-
/*
* The AR9003 family uses a new INI format (pre, core, post
* arrays per subsystem). This provides support for the
@@ -38,72 +28,194 @@ static bool ar9003_hw_macversion_supported(u32 macversion)
*/
static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
{
- /* mac */
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
- ar9300_2p2_mac_core,
- ARRAY_SIZE(ar9300_2p2_mac_core), 2);
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9300_2p2_mac_postamble,
- ARRAY_SIZE(ar9300_2p2_mac_postamble), 5);
-
- /* bb */
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9300_2p2_baseband_core,
- ARRAY_SIZE(ar9300_2p2_baseband_core), 2);
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9300_2p2_baseband_postamble,
- ARRAY_SIZE(ar9300_2p2_baseband_postamble), 5);
-
- /* radio */
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9300_2p2_radio_core,
- ARRAY_SIZE(ar9300_2p2_radio_core), 2);
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
- ar9300_2p2_radio_postamble,
- ARRAY_SIZE(ar9300_2p2_radio_postamble), 5);
-
- /* soc */
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9300_2p2_soc_preamble,
- ARRAY_SIZE(ar9300_2p2_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9300_2p2_soc_postamble,
- ARRAY_SIZE(ar9300_2p2_soc_postamble), 5);
-
- /* rx/tx gain */
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9300Common_rx_gain_table_2p2,
- ARRAY_SIZE(ar9300Common_rx_gain_table_2p2), 2);
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
- 5);
-
- /* Load PCIE SERDES settings from INI */
-
- /* Awake Setting */
-
- INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
- ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
- 2);
-
- /* Sleep Setting */
-
- INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9300PciePhy_clkreq_enable_L1_2p2,
- ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p2),
- 2);
-
- /* Fast clock modal settings */
- INIT_INI_ARRAY(&ah->iniModesAdditional,
- ar9300Modes_fast_clock_2p2,
- ARRAY_SIZE(ar9300Modes_fast_clock_2p2),
- 3);
+ if (AR_SREV_9485_11(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9485_1_1_mac_core,
+ ARRAY_SIZE(ar9485_1_1_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9485_1_1_mac_postamble,
+ ARRAY_SIZE(ar9485_1_1_mac_postamble), 5);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1,
+ ARRAY_SIZE(ar9485_1_1), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9485_1_1_baseband_core,
+ ARRAY_SIZE(ar9485_1_1_baseband_core), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9485_1_1_baseband_postamble,
+ ARRAY_SIZE(ar9485_1_1_baseband_postamble), 5);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9485_1_1_radio_core,
+ ARRAY_SIZE(ar9485_1_1_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9485_1_1_radio_postamble,
+ ARRAY_SIZE(ar9485_1_1_radio_postamble), 2);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9485_1_1_soc_preamble,
+ ARRAY_SIZE(ar9485_1_1_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485_common_rx_gain_1_1,
+ ARRAY_SIZE(ar9485_common_rx_gain_1_1), 2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485_modes_lowest_ob_db_tx_gain_1_1,
+ ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
+ 5);
+
+ /* Load PCIE SERDES settings from INI */
+
+ /* Awake Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9485_1_1_pcie_phy_clkreq_disable_L1,
+ ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
+ 2);
+
+ /* Sleep Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9485_1_1_pcie_phy_clkreq_disable_L1,
+ ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
+ 2);
+ } else if (AR_SREV_9485(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9485_1_0_mac_core,
+ ARRAY_SIZE(ar9485_1_0_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9485_1_0_mac_postamble,
+ ARRAY_SIZE(ar9485_1_0_mac_postamble), 5);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_0,
+ ARRAY_SIZE(ar9485_1_0), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9485_1_0_baseband_core,
+ ARRAY_SIZE(ar9485_1_0_baseband_core), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9485_1_0_baseband_postamble,
+ ARRAY_SIZE(ar9485_1_0_baseband_postamble), 5);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9485_1_0_radio_core,
+ ARRAY_SIZE(ar9485_1_0_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9485_1_0_radio_postamble,
+ ARRAY_SIZE(ar9485_1_0_radio_postamble), 2);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9485_1_0_soc_preamble,
+ ARRAY_SIZE(ar9485_1_0_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485Common_rx_gain_1_0,
+ ARRAY_SIZE(ar9485Common_rx_gain_1_0), 2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_lowest_ob_db_tx_gain_1_0,
+ ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
+ 5);
+
+ /* Load PCIE SERDES settings from INI */
+
+ /* Awake Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1,
+ ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1),
+ 2);
+
+ /* Sleep Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1,
+ ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1),
+ 2);
+ } else {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9300_2p2_mac_core,
+ ARRAY_SIZE(ar9300_2p2_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9300_2p2_mac_postamble,
+ ARRAY_SIZE(ar9300_2p2_mac_postamble), 5);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9300_2p2_baseband_core,
+ ARRAY_SIZE(ar9300_2p2_baseband_core), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9300_2p2_baseband_postamble,
+ ARRAY_SIZE(ar9300_2p2_baseband_postamble), 5);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9300_2p2_radio_core,
+ ARRAY_SIZE(ar9300_2p2_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar9300_2p2_radio_postamble,
+ ARRAY_SIZE(ar9300_2p2_radio_postamble), 5);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9300_2p2_soc_preamble,
+ ARRAY_SIZE(ar9300_2p2_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9300_2p2_soc_postamble,
+ ARRAY_SIZE(ar9300_2p2_soc_postamble), 5);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_rx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p2), 2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
+ 5);
+
+ /* Load PCIE SERDES settings from INI */
+
+ /* Awake Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
+ ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
+ 2);
+
+ /* Sleep Setting */
+
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
+ ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
+ 2);
+
+ /* Fast clock modal settings */
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9300Modes_fast_clock_2p2,
+ ARRAY_SIZE(ar9300Modes_fast_clock_2p2),
+ 3);
+ }
}
static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
@@ -111,22 +223,72 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
switch (ar9003_hw_get_tx_gain_idx(ah)) {
case 0:
default:
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
- 5);
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485_modes_lowest_ob_db_tx_gain_1_1,
+ ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
+ 5);
+ else if (AR_SREV_9485(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_lowest_ob_db_tx_gain_1_0,
+ ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
+ 5);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
+ 5);
break;
case 1:
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_high_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2),
- 5);
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_high_ob_db_tx_gain_1_1,
+ ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1),
+ 5);
+ else if (AR_SREV_9485(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_high_ob_db_tx_gain_1_0,
+ ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_0),
+ 5);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_ob_db_tx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2),
+ 5);
break;
case 2:
- INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_low_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2),
- 5);
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_low_ob_db_tx_gain_1_1,
+ ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1),
+ 5);
+ else if (AR_SREV_9485(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_low_ob_db_tx_gain_1_0,
+ ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_0),
+ 5);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_low_ob_db_tx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2),
+ 5);
+ break;
+ case 3:
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_high_power_tx_gain_1_1,
+ ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1),
+ 5);
+ else if (AR_SREV_9485(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9485Modes_high_power_tx_gain_1_0,
+ ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_0),
+ 5);
+ else
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9300Modes_high_power_tx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Modes_high_power_tx_gain_table_2p2),
+ 5);
break;
}
}
@@ -136,16 +298,38 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
switch (ar9003_hw_get_rx_gain_idx(ah)) {
case 0:
default:
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9300Common_rx_gain_table_2p2,
- ARRAY_SIZE(ar9300Common_rx_gain_table_2p2),
- 2);
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485_common_rx_gain_1_1,
+ ARRAY_SIZE(ar9485_common_rx_gain_1_1),
+ 2);
+ else if (AR_SREV_9485(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485Common_rx_gain_1_0,
+ ARRAY_SIZE(ar9485Common_rx_gain_1_0),
+ 2);
+ else
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_rx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Common_rx_gain_table_2p2),
+ 2);
break;
case 1:
- INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9300Common_wo_xlna_rx_gain_table_2p2,
- ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2),
- 2);
+ if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485Common_wo_xlna_rx_gain_1_1,
+ ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
+ 2);
+ else if (AR_SREV_9485(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9485Common_wo_xlna_rx_gain_1_0,
+ ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_0),
+ 2);
+ else
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9300Common_wo_xlna_rx_gain_table_2p2,
+ ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2),
+ 2);
break;
}
}
@@ -216,7 +400,6 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
priv_ops->init_mode_regs = ar9003_hw_init_mode_regs;
priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
- priv_ops->macversion_supported = ar9003_hw_macversion_supported;
ops->config_pci_powersave = ar9003_hw_configpcipowersave;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 3b424ca1ba84..038a0cbfc6e7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -182,8 +182,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
}
if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
- ath_print(common, ATH_DBG_INTERRUPT,
- "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
+ ath_dbg(common, ATH_DBG_INTERRUPT,
+ "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
(void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
@@ -237,73 +237,76 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
struct ath_tx_status *ts)
{
struct ar9003_txs *ads;
+ u32 status;
ads = &ah->ts_ring[ah->ts_tail];
- if ((ads->status8 & AR_TxDone) == 0)
+ status = ACCESS_ONCE(ads->status8);
+ if ((status & AR_TxDone) == 0)
return -EINPROGRESS;
ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
(MS(ads->ds_info, AR_TxRxDesc) != 1)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
- "Tx Descriptor error %x\n", ads->ds_info);
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ "Tx Descriptor error %x\n", ads->ds_info);
memset(ads, 0, sizeof(*ads));
return -EIO;
}
+ if (status & AR_TxOpExceeded)
+ ts->ts_status |= ATH9K_TXERR_XTXOP;
+ ts->ts_rateindex = MS(status, AR_FinalTxIdx);
+ ts->ts_seqnum = MS(status, AR_SeqNum);
+ ts->tid = MS(status, AR_TxTid);
+
ts->qid = MS(ads->ds_info, AR_TxQcuNum);
ts->desc_id = MS(ads->status1, AR_TxDescId);
- ts->ts_seqnum = MS(ads->status8, AR_SeqNum);
ts->ts_tstamp = ads->status4;
ts->ts_status = 0;
ts->ts_flags = 0;
- if (ads->status3 & AR_ExcessiveRetries)
+ status = ACCESS_ONCE(ads->status2);
+ ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
+ ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
+ ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
+ if (status & AR_TxBaStatus) {
+ ts->ts_flags |= ATH9K_TX_BA;
+ ts->ba_low = ads->status5;
+ ts->ba_high = ads->status6;
+ }
+
+ status = ACCESS_ONCE(ads->status3);
+ if (status & AR_ExcessiveRetries)
ts->ts_status |= ATH9K_TXERR_XRETRY;
- if (ads->status3 & AR_Filtered)
+ if (status & AR_Filtered)
ts->ts_status |= ATH9K_TXERR_FILT;
- if (ads->status3 & AR_FIFOUnderrun) {
+ if (status & AR_FIFOUnderrun) {
ts->ts_status |= ATH9K_TXERR_FIFO;
ath9k_hw_updatetxtriglevel(ah, true);
}
- if (ads->status8 & AR_TxOpExceeded)
- ts->ts_status |= ATH9K_TXERR_XTXOP;
- if (ads->status3 & AR_TxTimerExpired)
+ if (status & AR_TxTimerExpired)
ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
-
- if (ads->status3 & AR_DescCfgErr)
+ if (status & AR_DescCfgErr)
ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
- if (ads->status3 & AR_TxDataUnderrun) {
+ if (status & AR_TxDataUnderrun) {
ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
ath9k_hw_updatetxtriglevel(ah, true);
}
- if (ads->status3 & AR_TxDelimUnderrun) {
+ if (status & AR_TxDelimUnderrun) {
ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
ath9k_hw_updatetxtriglevel(ah, true);
}
- if (ads->status2 & AR_TxBaStatus) {
- ts->ts_flags |= ATH9K_TX_BA;
- ts->ba_low = ads->status5;
- ts->ba_high = ads->status6;
- }
+ ts->ts_shortretry = MS(status, AR_RTSFailCnt);
+ ts->ts_longretry = MS(status, AR_DataFailCnt);
+ ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
- ts->ts_rateindex = MS(ads->status8, AR_FinalTxIdx);
-
- ts->ts_rssi = MS(ads->status7, AR_TxRSSICombined);
- ts->ts_rssi_ctl0 = MS(ads->status2, AR_TxRSSIAnt00);
- ts->ts_rssi_ctl1 = MS(ads->status2, AR_TxRSSIAnt01);
- ts->ts_rssi_ctl2 = MS(ads->status2, AR_TxRSSIAnt02);
- ts->ts_rssi_ext0 = MS(ads->status7, AR_TxRSSIAnt10);
- ts->ts_rssi_ext1 = MS(ads->status7, AR_TxRSSIAnt11);
- ts->ts_rssi_ext2 = MS(ads->status7, AR_TxRSSIAnt12);
- ts->ts_shortretry = MS(ads->status3, AR_RTSFailCnt);
- ts->ts_longretry = MS(ads->status3, AR_DataFailCnt);
- ts->ts_virtcol = MS(ads->status3, AR_VirtRetryCnt);
- ts->ts_antenna = 0;
-
- ts->tid = MS(ads->status8, AR_TxTid);
+ status = ACCESS_ONCE(ads->status7);
+ ts->ts_rssi = MS(status, AR_TxRSSICombined);
+ ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
+ ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
+ ts->ts_rssi_ext2 = MS(status, AR_TxRSSIAnt12);
memset(ads, 0, sizeof(*ads));
@@ -319,7 +322,6 @@ static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
if (txpower > ah->txpower_limit)
txpower = ah->txpower_limit;
- txpower += ah->txpower_indexoffset;
if (txpower > 63)
txpower = 63;
@@ -407,12 +409,36 @@ static void ar9003_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
static void ar9003_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
u32 aggrLen)
{
+#define FIRST_DESC_NDELIMS 60
struct ar9003_txc *ads = (struct ar9003_txc *) ds;
ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
- ads->ctl17 &= ~AR_AggrLen;
- ads->ctl17 |= SM(aggrLen, AR_AggrLen);
+ if (ah->ent_mode & AR_ENT_OTP_MPSD) {
+ u32 ctl17, ndelim;
+ /*
+ * Add delimiter when using RTS/CTS with aggregation
+ * and non enterprise AR9003 card
+ */
+ ctl17 = ads->ctl17;
+ ndelim = MS(ctl17, AR_PadDelim);
+
+ if (ndelim < FIRST_DESC_NDELIMS) {
+ aggrLen += (FIRST_DESC_NDELIMS - ndelim) * 4;
+ ndelim = FIRST_DESC_NDELIMS;
+ }
+
+ ctl17 &= ~AR_AggrLen;
+ ctl17 |= SM(aggrLen, AR_AggrLen);
+
+ ctl17 &= ~AR_PadDelim;
+ ctl17 |= SM(ndelim, AR_PadDelim);
+
+ ads->ctl17 = ctl17;
+ } else {
+ ads->ctl17 &= ~AR_AggrLen;
+ ads->ctl17 |= SM(aggrLen, AR_AggrLen);
+ }
}
static void ar9003_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
@@ -587,9 +613,9 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
* possibly be reviewing the last subframe. AR_CRCErr
* is the CRC of the actual data.
*/
- if (rxsp->status11 & AR_CRCErr) {
+ if (rxsp->status11 & AR_CRCErr)
rxs->rs_status |= ATH9K_RXERR_CRC;
- } else if (rxsp->status11 & AR_PHYErr) {
+ else if (rxsp->status11 & AR_PHYErr) {
phyerr = MS(rxsp->status11, AR_PHYErrCode);
/*
* If we reach a point here where AR_PostDelimCRCErr is
@@ -612,11 +638,12 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_phyerr = phyerr;
}
- } else if (rxsp->status11 & AR_DecryptCRCErr) {
+ } else if (rxsp->status11 & AR_DecryptCRCErr)
rxs->rs_status |= ATH9K_RXERR_DECRYPT;
- } else if (rxsp->status11 & AR_MichaelErr) {
+ else if (rxsp->status11 & AR_MichaelErr)
rxs->rs_status |= ATH9K_RXERR_MIC;
- } else if (rxsp->status11 & AR_KeyMiss)
+
+ if (rxsp->status11 & AR_KeyMiss)
rxs->rs_status |= ATH9K_RXERR_DECRYPT;
}
@@ -631,10 +658,10 @@ void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
memset((void *) ah->ts_ring, 0,
ah->ts_size * sizeof(struct ar9003_txs));
- ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
- "TS Start 0x%x End 0x%x Virt %p, Size %d\n",
- ah->ts_paddr_start, ah->ts_paddr_end,
- ah->ts_ring, ah->ts_size);
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ "TS Start 0x%x End 0x%x Virt %p, Size %d\n",
+ ah->ts_paddr_start, ah->ts_paddr_end,
+ ah->ts_ring, ah->ts_size);
REG_WRITE(ah, AR_Q_STATUS_RING_START, ah->ts_paddr_start);
REG_WRITE(ah, AR_Q_STATUS_RING_END, ah->ts_paddr_end);
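
The ar9003_hw_proc_txdesc() rework above snapshots each DMA-written status word once via ACCESS_ONCE() and decodes every bit field from that cached copy instead of re-reading the descriptor per test. A minimal user-space sketch of the same pattern, with a made-up field layout rather than the real AR9003 descriptor format:

#include <stdint.h>
#include <stdio.h>

#define EX_DONE        0x00000001u
#define EX_XRETRY      0x00000002u
#define EX_RATE_IDX    0x000000f0u
#define EX_RATE_IDX_S  4
#define EX_MS(val, field)  (((val) & field) >> field##_S)

static volatile uint32_t dma_status;	/* stands in for the DMA-written word */

static int decode_status(void)
{
	uint32_t status = dma_status;	/* single snapshot read */

	if (!(status & EX_DONE))
		return -1;		/* descriptor still in progress */

	printf("xretry=%u rate_idx=%u\n",
	       (unsigned)!!(status & EX_XRETRY),
	       (unsigned)EX_MS(status, EX_RATE_IDX));
	return 0;
}

int main(void)
{
	dma_status = EX_DONE | (3u << EX_RATE_IDX_S);
	return decode_status();
}
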
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
index 9f2cea70a840..45cc7e80436c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -65,7 +65,7 @@ struct ar9003_rxs {
u32 status9;
u32 status10;
u32 status11;
-} __packed;
+} __packed __aligned(4);
/* Transmit Control Descriptor */
struct ar9003_txc {
@@ -93,7 +93,7 @@ struct ar9003_txc {
u32 ctl21; /* DMA control 21 */
u32 ctl22; /* DMA control 22 */
u32 pad[9]; /* pad to cache line (128 bytes/32 dwords) */
-} __packed;
+} __packed __aligned(4);
struct ar9003_txs {
u32 ds_info;
@@ -105,7 +105,7 @@ struct ar9003_txs {
u32 status6;
u32 status7;
u32 status8;
-} __packed;
+} __packed __aligned(4);
void ar9003_hw_attach_mac_ops(struct ath_hw *hw);
void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size);
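
The __packed __aligned(4) annotations added above keep the descriptor layouts byte-exact while restoring 4-byte alignment, since packing alone drops a structure's alignment to 1 and can force byte-wise accesses on some architectures. A small sketch showing the effect (GCC attribute syntax, simplified two-word struct rather than the real descriptors):

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

struct ex_packed {
	uint32_t a, b;
} __attribute__((packed));

struct ex_packed_aligned {
	uint32_t a, b;
} __attribute__((packed, aligned(4)));

int main(void)
{
	printf("packed: align %zu, packed+aligned(4): align %zu\n",
	       alignof(struct ex_packed), alignof(struct ex_packed_aligned));
	return 0;
}
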
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 716db414c258..356d2fd78822 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -19,45 +19,124 @@
void ar9003_paprd_enable(struct ath_hw *ah, bool val)
{
+ struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
+ struct ath9k_channel *chan = ah->curchan;
+
+ if (val) {
+ ah->paprd_table_write_done = true;
+
+ ah->eep_ops->set_txpower(ah, chan,
+ ath9k_regd_get_ctl(regulatory, chan),
+ chan->chan->max_antenna_gain * 2,
+ chan->chan->max_power * 2,
+ min((u32) MAX_RATE_POWER,
+ (u32) regulatory->power_limit), false);
+ }
+
REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0,
AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B1,
- AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B2,
- AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+ if (ah->caps.tx_chainmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B1,
+ AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
+ if (ah->caps.tx_chainmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B2,
+ AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
}
EXPORT_SYMBOL(ar9003_paprd_enable);
-static void ar9003_paprd_setup_single_table(struct ath_hw *ah)
+static int ar9003_get_training_power_2g(struct ath_hw *ah)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_modal_eep_header *hdr = &eep->modalHeader2G;
+ unsigned int power, scale, delta;
+
+ scale = MS(le32_to_cpu(hdr->papdRateMaskHt20), AR9300_PAPRD_SCALE_1);
+ power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
+ AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
+
+ delta = abs((int) ah->paprd_target_power - (int) power);
+ if (delta > scale)
+ return -1;
+
+ if (delta < 4)
+ power -= 4 - delta;
+
+ return power;
+}
+
+static int ar9003_get_training_power_5g(struct ath_hw *ah)
{
+ struct ath_common *common = ath9k_hw_common(ah);
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- struct ar9300_modal_eep_header *hdr;
- const u32 ctrl0[3] = {
+ struct ar9300_modal_eep_header *hdr = &eep->modalHeader5G;
+ struct ath9k_channel *chan = ah->curchan;
+ unsigned int power, scale, delta;
+
+ if (chan->channel >= 5700)
+ scale = MS(le32_to_cpu(hdr->papdRateMaskHt20),
+ AR9300_PAPRD_SCALE_1);
+ else if (chan->channel >= 5400)
+ scale = MS(le32_to_cpu(hdr->papdRateMaskHt40),
+ AR9300_PAPRD_SCALE_2);
+ else
+ scale = MS(le32_to_cpu(hdr->papdRateMaskHt40),
+ AR9300_PAPRD_SCALE_1);
+
+ if (IS_CHAN_HT40(chan))
+ power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE8,
+ AR_PHY_POWERTX_RATE8_POWERTXHT40_5);
+ else
+ power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE6,
+ AR_PHY_POWERTX_RATE6_POWERTXHT20_5);
+
+ power += scale;
+ delta = abs((int) ah->paprd_target_power - (int) power);
+ if (delta > scale)
+ return -1;
+
+ power += 2 * get_streams(common->tx_chainmask);
+ return power;
+}
+
+static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ static const u32 ctrl0[3] = {
AR_PHY_PAPRD_CTRL0_B0,
AR_PHY_PAPRD_CTRL0_B1,
AR_PHY_PAPRD_CTRL0_B2
};
- const u32 ctrl1[3] = {
+ static const u32 ctrl1[3] = {
AR_PHY_PAPRD_CTRL1_B0,
AR_PHY_PAPRD_CTRL1_B1,
AR_PHY_PAPRD_CTRL1_B2
};
- u32 am_mask, ht40_mask;
+ int training_power;
int i;
- if (ah->curchan && IS_CHAN_5GHZ(ah->curchan))
- hdr = &eep->modalHeader5G;
+ if (IS_CHAN_2GHZ(ah->curchan))
+ training_power = ar9003_get_training_power_2g(ah);
else
- hdr = &eep->modalHeader2G;
-
- am_mask = le32_to_cpu(hdr->papdRateMaskHt20);
- ht40_mask = le32_to_cpu(hdr->papdRateMaskHt40);
+ training_power = ar9003_get_training_power_5g(ah);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK, am_mask);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2PM, AR_PHY_PAPRD_AM2PM_MASK, am_mask);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK, ht40_mask);
-
- for (i = 0; i < 3; i++) {
+ if (training_power < 0) {
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "PAPRD target power delta out of range");
+ return -ERANGE;
+ }
+ ah->paprd_training_power = training_power;
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Training power: %d, Target power: %d\n",
+ ah->paprd_training_power, ah->paprd_target_power);
+
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK,
+ ah->paprd_ratemask);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2PM, AR_PHY_PAPRD_AM2PM_MASK,
+ ah->paprd_ratemask);
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK,
+ ah->paprd_ratemask_ht40);
+
+ for (i = 0; i < ah->caps.max_txchains; i++) {
REG_RMW_FIELD(ah, ctrl0[i],
AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK, 1);
REG_RMW_FIELD(ah, ctrl1[i],
@@ -102,8 +181,14 @@ static void ar9003_paprd_setup_single_table(struct ath_hw *ah)
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
- AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -6);
+ if (AR_SREV_9485(ah))
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
+ -3);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
+ -6);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE,
-15);
@@ -132,6 +217,7 @@ static void ar9003_paprd_setup_single_table(struct ath_hw *ah)
AR_PHY_PAPRD_PRE_POST_SCALING, 185706);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_7_B0,
AR_PHY_PAPRD_PRE_POST_SCALING, 175487);
+ return 0;
}
static void ar9003_paprd_get_gain_table(struct ath_hw *ah)
@@ -586,15 +672,10 @@ void ar9003_paprd_populate_single_table(struct ath_hw *ah,
{
u32 *paprd_table_val = caldata->pa_table[chain];
u32 small_signal_gain = caldata->small_signal_gain[chain];
- u32 training_power;
+ u32 training_power = ah->paprd_training_power;
u32 reg = 0;
int i;
- training_power =
- REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
- AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
- training_power -= 4;
-
if (chain == 0)
reg = AR_PHY_PAPRD_MEM_TAB_B0;
else if (chain == 1)
@@ -620,26 +701,22 @@ void ar9003_paprd_populate_single_table(struct ath_hw *ah,
AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
training_power);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B1,
- AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
- training_power);
+ if (ah->caps.tx_chainmask & BIT(1))
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B1,
+ AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+ training_power);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B2,
- AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
- training_power);
+ if (ah->caps.tx_chainmask & BIT(2))
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B2,
+ AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
+ training_power);
}
EXPORT_SYMBOL(ar9003_paprd_populate_single_table);
int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
{
-
unsigned int i, desired_gain, gain_index;
- unsigned int train_power;
-
- train_power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
- AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
-
- train_power = train_power - 4;
+ unsigned int train_power = ah->paprd_training_power;
desired_gain = ar9003_get_desired_gain(ah, chain, train_power);
@@ -705,7 +782,12 @@ EXPORT_SYMBOL(ar9003_paprd_create_curve);
int ar9003_paprd_init_table(struct ath_hw *ah)
{
- ar9003_paprd_setup_single_table(ah);
+ int ret;
+
+ ret = ar9003_paprd_setup_single_table(ah);
+ if (ret < 0)
+ return ret;
+
ar9003_paprd_get_gain_table(ah);
return 0;
}
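
The training-power helpers introduced above gate PAPRD on the gap between the target power and the currently programmed HT20 power: if the delta exceeds the EEPROM scale factor, calibration is abandoned. A stand-alone sketch of the 2 GHz variant of that check, using made-up numbers (units are half-dB steps, as in the driver):

#include <stdio.h>
#include <stdlib.h>

static int training_power(int target, int power, int scale)
{
	int delta = abs(target - power);

	if (delta > scale)
		return -1;		/* out of range: skip PAPRD setup */
	if (delta < 4)
		power -= 4 - delta;	/* back the training signal off slightly */
	return power;
}

int main(void)
{
	printf("%d\n", training_power(40, 38, 5));	/* in range: prints 36 */
	printf("%d\n", training_power(40, 20, 5));	/* delta 20 > 5: prints -1 */
	return 0;
}
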
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 669b777729b3..eb250d6b8038 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -75,7 +75,10 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
freq = centers.synth_center;
if (freq < 4800) { /* 2 GHz, fractional mode */
- channelSel = CHANSEL_2G(freq);
+ if (AR_SREV_9485(ah))
+ channelSel = CHANSEL_2G_9485(freq);
+ else
+ channelSel = CHANSEL_2G(freq);
/* Set to 2G mode */
bMode = 1;
} else {
@@ -128,24 +131,53 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- u32 spur_freq[4] = { 2420, 2440, 2464, 2480 };
+ static const u32 spur_freq[4] = { 2420, 2440, 2464, 2480 };
int cur_bb_spur, negative = 0, cck_spur_freq;
int i;
+ int range, max_spur_cnts, synth_freq;
+ u8 *spur_fbin_ptr = NULL;
/*
* Need to verify range +/- 10 MHz in control channel, otherwise spur
* is out-of-band and can be ignored.
*/
- for (i = 0; i < 4; i++) {
+ if (AR_SREV_9485(ah)) {
+ spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah,
+ IS_CHAN_2GHZ(chan));
+ if (spur_fbin_ptr[0] == 0) /* No spur */
+ return;
+ max_spur_cnts = 5;
+ if (IS_CHAN_HT40(chan)) {
+ range = 19;
+ if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
+ AR_PHY_GC_DYN2040_PRI_CH) == 0)
+ synth_freq = chan->channel + 10;
+ else
+ synth_freq = chan->channel - 10;
+ } else {
+ range = 10;
+ synth_freq = chan->channel;
+ }
+ } else {
+ range = 10;
+ max_spur_cnts = 4;
+ synth_freq = chan->channel;
+ }
+
+ for (i = 0; i < max_spur_cnts; i++) {
negative = 0;
- cur_bb_spur = spur_freq[i] - chan->channel;
+ if (AR_SREV_9485(ah))
+ cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i],
+ IS_CHAN_2GHZ(chan)) - synth_freq;
+ else
+ cur_bb_spur = spur_freq[i] - synth_freq;
if (cur_bb_spur < 0) {
negative = 1;
cur_bb_spur = -cur_bb_spur;
}
- if (cur_bb_spur < 10) {
+ if (cur_bb_spur < range) {
cck_spur_freq = (int)((cur_bb_spur << 19) / 11);
if (negative == 1)
@@ -487,7 +519,11 @@ void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
break;
}
- REG_WRITE(ah, AR_SELFGEN_MASK, tx);
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
+ REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
+ else
+ REG_WRITE(ah, AR_SELFGEN_MASK, tx);
+
if (tx == 0x5) {
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
@@ -542,10 +578,7 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah,
u32 reg = INI_RA(iniArr, i, 0);
u32 val = INI_RA(iniArr, i, column);
- if (reg >= 0x16000 && reg < 0x17000)
- ath9k_hw_analog_shift_regwrite(ah, reg, val);
- else
- REG_WRITE(ah, reg, val);
+ REG_WRITE(ah, reg, val);
DO_DELAY(regWrites);
}
@@ -614,7 +647,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
channel->max_antenna_gain * 2,
channel->max_power * 2,
min((u32) MAX_RATE_POWER,
- (u32) regulatory->power_limit));
+ (u32) regulatory->power_limit), false);
return 0;
}
@@ -712,28 +745,6 @@ static void ar9003_hw_rfbus_done(struct ath_hw *ah)
REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
}
-/*
- * Set the interrupt and GPIO values so the ISR can disable RF
- * on a switch signal. Assumes GPIO port and interrupt polarity
- * are set prior to call.
- */
-static void ar9003_hw_enable_rfkill(struct ath_hw *ah)
-{
- /* Connect rfsilent_bb_l to baseband */
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
- AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
- /* Set input mux for rfsilent_bb_l to GPIO #0 */
- REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
- AR_GPIO_INPUT_MUX2_RFSILENT);
-
- /*
- * Configure the desired GPIO port for input and
- * enable baseband rf silence.
- */
- ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
- REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
-}
-
static void ar9003_hw_set_diversity(struct ath_hw *ah, bool value)
{
u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
@@ -820,12 +831,12 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
- ath_print(common, ATH_DBG_ANI,
- "** ch %d: ofdm weak signal: %s=>%s\n",
- chan->channel,
- !aniState->ofdmWeakSigDetectOff ?
- "on" : "off",
- on ? "on" : "off");
+ ath_dbg(common, ATH_DBG_ANI,
+ "** ch %d: ofdm weak signal: %s=>%s\n",
+ chan->channel,
+ !aniState->ofdmWeakSigDetectOff ?
+ "on" : "off",
+ on ? "on" : "off");
if (on)
ah->stats.ast_ani_ofdmon++;
else
@@ -838,11 +849,9 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(firstep_table)) {
- ath_print(common, ATH_DBG_ANI,
- "ATH9K_ANI_FIRSTEP_LEVEL: level "
- "out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(firstep_table));
+ ath_dbg(common, ATH_DBG_ANI,
+ "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
+ level, ARRAY_SIZE(firstep_table));
return false;
}
@@ -877,24 +886,22 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2);
if (level != aniState->firstepLevel) {
- ath_print(common, ATH_DBG_ANI,
- "** ch %d: level %d=>%d[def:%d] "
- "firstep[level]=%d ini=%d\n",
- chan->channel,
- aniState->firstepLevel,
- level,
- ATH9K_ANI_FIRSTEP_LVL_NEW,
- value,
- aniState->iniDef.firstep);
- ath_print(common, ATH_DBG_ANI,
- "** ch %d: level %d=>%d[def:%d] "
- "firstep_low[level]=%d ini=%d\n",
- chan->channel,
- aniState->firstepLevel,
- level,
- ATH9K_ANI_FIRSTEP_LVL_NEW,
- value2,
- aniState->iniDef.firstepLow);
+ ath_dbg(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL_NEW,
+ value,
+ aniState->iniDef.firstep);
+ ath_dbg(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->firstepLevel,
+ level,
+ ATH9K_ANI_FIRSTEP_LVL_NEW,
+ value2,
+ aniState->iniDef.firstepLow);
if (level > aniState->firstepLevel)
ah->stats.ast_ani_stepup++;
else if (level < aniState->firstepLevel)
@@ -907,11 +914,9 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
- ath_print(common, ATH_DBG_ANI,
- "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level "
- "out of range (%u > %u)\n",
- level,
- (unsigned) ARRAY_SIZE(cycpwrThr1_table));
+ ath_dbg(common, ATH_DBG_ANI,
+ "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
+ level, ARRAY_SIZE(cycpwrThr1_table));
return false;
}
/*
@@ -945,24 +950,22 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_EXT_CYCPWR_THR1, value2);
if (level != aniState->spurImmunityLevel) {
- ath_print(common, ATH_DBG_ANI,
- "** ch %d: level %d=>%d[def:%d] "
- "cycpwrThr1[level]=%d ini=%d\n",
- chan->channel,
- aniState->spurImmunityLevel,
- level,
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
- value,
- aniState->iniDef.cycpwrThr1);
- ath_print(common, ATH_DBG_ANI,
- "** ch %d: level %d=>%d[def:%d] "
- "cycpwrThr1Ext[level]=%d ini=%d\n",
- chan->channel,
- aniState->spurImmunityLevel,
- level,
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
- value2,
- aniState->iniDef.cycpwrThr1Ext);
+ ath_dbg(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ value,
+ aniState->iniDef.cycpwrThr1);
+ ath_dbg(common, ATH_DBG_ANI,
+ "** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
+ chan->channel,
+ aniState->spurImmunityLevel,
+ level,
+ ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ value2,
+ aniState->iniDef.cycpwrThr1Ext);
if (level > aniState->spurImmunityLevel)
ah->stats.ast_ani_spurup++;
else if (level < aniState->spurImmunityLevel)
@@ -982,11 +985,11 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
AR_PHY_MRC_CCK_MUX_REG, is_on);
if (!is_on != aniState->mrcCCKOff) {
- ath_print(common, ATH_DBG_ANI,
- "** ch %d: MRC CCK: %s=>%s\n",
- chan->channel,
- !aniState->mrcCCKOff ? "on" : "off",
- is_on ? "on" : "off");
+ ath_dbg(common, ATH_DBG_ANI,
+ "** ch %d: MRC CCK: %s=>%s\n",
+ chan->channel,
+ !aniState->mrcCCKOff ? "on" : "off",
+ is_on ? "on" : "off");
if (is_on)
ah->stats.ast_ani_ccklow++;
else
@@ -998,50 +1001,48 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
case ATH9K_ANI_PRESENT:
break;
default:
- ath_print(common, ATH_DBG_ANI,
- "invalid cmd %u\n", cmd);
+ ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
return false;
}
- ath_print(common, ATH_DBG_ANI,
- "ANI parameters: SI=%d, ofdmWS=%s FS=%d "
- "MRCcck=%s listenTime=%d "
- "ofdmErrs=%d cckErrs=%d\n",
- aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff ? "on" : "off",
- aniState->firstepLevel,
- !aniState->mrcCCKOff ? "on" : "off",
- aniState->listenTime,
- aniState->ofdmPhyErrCount,
- aniState->cckPhyErrCount);
+ ath_dbg(common, ATH_DBG_ANI,
+ "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
+ aniState->spurImmunityLevel,
+ !aniState->ofdmWeakSigDetectOff ? "on" : "off",
+ aniState->firstepLevel,
+ !aniState->mrcCCKOff ? "on" : "off",
+ aniState->listenTime,
+ aniState->ofdmPhyErrCount,
+ aniState->cckPhyErrCount);
return true;
}
static void ar9003_hw_do_getnf(struct ath_hw *ah,
int16_t nfarray[NUM_NF_READINGS])
{
- int16_t nf;
-
- nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
- nfarray[0] = sign_extend(nf, 9);
+#define AR_PHY_CH_MINCCA_PWR 0x1FF00000
+#define AR_PHY_CH_MINCCA_PWR_S 20
+#define AR_PHY_CH_EXT_MINCCA_PWR 0x01FF0000
+#define AR_PHY_CH_EXT_MINCCA_PWR_S 16
- nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
- nfarray[1] = sign_extend(nf, 9);
-
- nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
- nfarray[2] = sign_extend(nf, 9);
-
- if (!IS_CHAN_HT40(ah->curchan))
- return;
+ int16_t nf;
+ int i;
- nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
- nfarray[3] = sign_extend(nf, 9);
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (ah->rxchainmask & BIT(i)) {
+ nf = MS(REG_READ(ah, ah->nf_regs[i]),
+ AR_PHY_CH_MINCCA_PWR);
+ nfarray[i] = sign_extend32(nf, 8);
- nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
- nfarray[4] = sign_extend(nf, 9);
+ if (IS_CHAN_HT40(ah->curchan)) {
+ u8 ext_idx = AR9300_MAX_CHAINS + i;
- nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
- nfarray[5] = sign_extend(nf, 9);
+ nf = MS(REG_READ(ah, ah->nf_regs[ext_idx]),
+ AR_PHY_CH_EXT_MINCCA_PWR);
+ nfarray[ext_idx] = sign_extend32(nf, 8);
+ }
+ }
+ }
}
static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
@@ -1070,13 +1071,13 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
aniState = &ah->curchan->ani;
iniDef = &aniState->iniDef;
- ath_print(common, ATH_DBG_ANI,
- "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
- ah->hw_version.macVersion,
- ah->hw_version.macRev,
- ah->opmode,
- chan->channel,
- chan->channelFlags);
+ ath_dbg(common, ATH_DBG_ANI,
+ "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ah->hw_version.macVersion,
+ ah->hw_version.macRev,
+ ah->opmode,
+ chan->channel,
+ chan->channelFlags);
val = REG_READ(ah, AR_PHY_SFCORR);
iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
@@ -1113,10 +1114,55 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK;
}
+static void ar9003_hw_set_radar_params(struct ath_hw *ah,
+ struct ath_hw_radar_conf *conf)
+{
+ u32 radar_0 = 0, radar_1 = 0;
+
+ if (!conf) {
+ REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
+ return;
+ }
+
+ radar_0 |= AR_PHY_RADAR_0_ENA | AR_PHY_RADAR_0_FFT_ENA;
+ radar_0 |= SM(conf->fir_power, AR_PHY_RADAR_0_FIRPWR);
+ radar_0 |= SM(conf->radar_rssi, AR_PHY_RADAR_0_RRSSI);
+ radar_0 |= SM(conf->pulse_height, AR_PHY_RADAR_0_HEIGHT);
+ radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
+ radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
+
+ radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
+ radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
+ radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
+ radar_1 |= SM(conf->pulse_inband_step, AR_PHY_RADAR_1_RELSTEP_THRESH);
+ radar_1 |= SM(conf->radar_inband, AR_PHY_RADAR_1_RELPWR_THRESH);
+
+ REG_WRITE(ah, AR_PHY_RADAR_0, radar_0);
+ REG_WRITE(ah, AR_PHY_RADAR_1, radar_1);
+ if (conf->ext_channel)
+ REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
+ else
+ REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
+}
+
+static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
+{
+ struct ath_hw_radar_conf *conf = &ah->radar_conf;
+
+ conf->fir_power = -28;
+ conf->radar_rssi = 0;
+ conf->pulse_height = 10;
+ conf->pulse_rssi = 24;
+ conf->pulse_inband = 8;
+ conf->pulse_maxlen = 255;
+ conf->pulse_inband_step = 12;
+ conf->radar_inband = 8;
+}
+
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
- const u32 ar9300_cca_regs[6] = {
+ static const u32 ar9300_cca_regs[6] = {
AR_PHY_CCA_0,
AR_PHY_CCA_1,
AR_PHY_CCA_2,
@@ -1136,13 +1182,14 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
priv_ops->set_delta_slope = ar9003_hw_set_delta_slope;
priv_ops->rfbus_req = ar9003_hw_rfbus_req;
priv_ops->rfbus_done = ar9003_hw_rfbus_done;
- priv_ops->enable_rfkill = ar9003_hw_enable_rfkill;
priv_ops->set_diversity = ar9003_hw_set_diversity;
priv_ops->ani_control = ar9003_hw_ani_control;
priv_ops->do_getnf = ar9003_hw_do_getnf;
priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
+ priv_ops->set_radar_params = ar9003_hw_set_radar_params;
ar9003_hw_set_nf_limits(ah);
+ ar9003_hw_set_radar_conf(ah);
memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
}
@@ -1165,7 +1212,7 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
AR_PHY_WATCHDOG_IDLE_ENABLE));
- ath_print(common, ATH_DBG_RESET, "Disabled BB Watchdog\n");
+ ath_dbg(common, ATH_DBG_RESET, "Disabled BB Watchdog\n");
return;
}
@@ -1201,9 +1248,9 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
AR_PHY_WATCHDOG_IDLE_MASK |
(AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2)));
- ath_print(common, ATH_DBG_RESET,
- "Enabled BB Watchdog timeout (%u ms)\n",
- idle_tmo_ms);
+ ath_dbg(common, ATH_DBG_RESET,
+ "Enabled BB Watchdog timeout (%u ms)\n",
+ idle_tmo_ms);
}
void ar9003_hw_bb_watchdog_read(struct ath_hw *ah)
@@ -1231,37 +1278,35 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
return;
status = ah->bb_watchdog_last_status;
- ath_print(common, ATH_DBG_RESET,
- "\n==== BB update: BB status=0x%08x ====\n", status);
- ath_print(common, ATH_DBG_RESET,
- "** BB state: wd=%u det=%u rdar=%u rOFDM=%d "
- "rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n",
- MS(status, AR_PHY_WATCHDOG_INFO),
- MS(status, AR_PHY_WATCHDOG_DET_HANG),
- MS(status, AR_PHY_WATCHDOG_RADAR_SM),
- MS(status, AR_PHY_WATCHDOG_RX_OFDM_SM),
- MS(status, AR_PHY_WATCHDOG_RX_CCK_SM),
- MS(status, AR_PHY_WATCHDOG_TX_OFDM_SM),
- MS(status, AR_PHY_WATCHDOG_TX_CCK_SM),
- MS(status, AR_PHY_WATCHDOG_AGC_SM),
- MS(status,AR_PHY_WATCHDOG_SRCH_SM));
-
- ath_print(common, ATH_DBG_RESET,
- "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
- REG_READ(ah, AR_PHY_WATCHDOG_CTL_1),
- REG_READ(ah, AR_PHY_WATCHDOG_CTL_2));
- ath_print(common, ATH_DBG_RESET,
- "** BB mode: BB_gen_controls=0x%08x **\n",
- REG_READ(ah, AR_PHY_GEN_CTRL));
+ ath_dbg(common, ATH_DBG_RESET,
+ "\n==== BB update: BB status=0x%08x ====\n", status);
+ ath_dbg(common, ATH_DBG_RESET,
+ "** BB state: wd=%u det=%u rdar=%u rOFDM=%d rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n",
+ MS(status, AR_PHY_WATCHDOG_INFO),
+ MS(status, AR_PHY_WATCHDOG_DET_HANG),
+ MS(status, AR_PHY_WATCHDOG_RADAR_SM),
+ MS(status, AR_PHY_WATCHDOG_RX_OFDM_SM),
+ MS(status, AR_PHY_WATCHDOG_RX_CCK_SM),
+ MS(status, AR_PHY_WATCHDOG_TX_OFDM_SM),
+ MS(status, AR_PHY_WATCHDOG_TX_CCK_SM),
+ MS(status, AR_PHY_WATCHDOG_AGC_SM),
+ MS(status, AR_PHY_WATCHDOG_SRCH_SM));
+
+ ath_dbg(common, ATH_DBG_RESET,
+ "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
+ REG_READ(ah, AR_PHY_WATCHDOG_CTL_1),
+ REG_READ(ah, AR_PHY_WATCHDOG_CTL_2));
+ ath_dbg(common, ATH_DBG_RESET,
+ "** BB mode: BB_gen_controls=0x%08x **\n",
+ REG_READ(ah, AR_PHY_GEN_CTRL));
#define PCT(_field) (common->cc_survey._field * 100 / common->cc_survey.cycles)
if (common->cc_survey.cycles)
- ath_print(common, ATH_DBG_RESET,
- "** BB busy times: rx_clear=%d%%, "
- "rx_frame=%d%%, tx_frame=%d%% **\n",
- PCT(rx_busy), PCT(rx_frame), PCT(tx_frame));
+ ath_dbg(common, ATH_DBG_RESET,
+ "** BB busy times: rx_clear=%d%%, rx_frame=%d%%, tx_frame=%d%% **\n",
+ PCT(rx_busy), PCT(rx_frame), PCT(tx_frame));
- ath_print(common, ATH_DBG_RESET,
- "==== BB update: done ====\n\n");
+ ath_dbg(common, ATH_DBG_RESET,
+ "==== BB update: done ====\n\n");
}
EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
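
The reworked ar9003_hw_do_getnf() above reads each active chain's 9-bit minCCApwr field and converts it with sign_extend32(nf, 8), where 8 is the index of the sign bit. A stand-alone re-implementation of that conversion, for illustration only:

#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32_ex(uint32_t value, int sign_bit_index)
{
	uint32_t m = 1u << sign_bit_index;

	return (int32_t)((value ^ m) - m);	/* branch-free sign extension */
}

int main(void)
{
	printf("%d\n", (int)sign_extend32_ex(0x1a5, 8));	/* bit 8 set: -91 */
	printf("%d\n", (int)sign_extend32_ex(0x05a, 8));	/* bit 8 clear: 90 */
	return 0;
}
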
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 3394dfe52b42..8bdda2cf9dd7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -260,7 +260,13 @@
#define AR_PHY_CCA_0 (AR_AGC_BASE + 0x1c)
#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20)
#define AR_PHY_RESTART (AR_AGC_BASE + 0x24)
+
#define AR_PHY_MC_GAIN_CTRL (AR_AGC_BASE + 0x28)
+#define AR_ANT_DIV_CTRL_ALL 0x7e000000
+#define AR_ANT_DIV_CTRL_ALL_S 25
+#define AR_ANT_DIV_ENABLE 0x1000000
+#define AR_ANT_DIV_ENABLE_S 24
+
#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
@@ -271,7 +277,11 @@
#define AR_PHY_RX_GAIN_BOUNDS_2 (AR_AGC_BASE + 0x48)
#define AR_PHY_RSSI_0 (AR_AGC_BASE + 0x180)
#define AR_PHY_SPUR_CCK_REP0 (AR_AGC_BASE + 0x184)
+
#define AR_PHY_CCK_DETECT (AR_AGC_BASE + 0x1c0)
+#define AR_FAST_DIV_ENABLE 0x2000
+#define AR_FAST_DIV_ENABLE_S 13
+
#define AR_PHY_DAG_CTRLCCK (AR_AGC_BASE + 0x1c4)
#define AR_PHY_IQCORR_CTRL_CCK (AR_AGC_BASE + 0x1c8)
@@ -476,6 +486,8 @@
#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
+#define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
+
#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
@@ -536,10 +548,18 @@
#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300)
+#define AR_PHY_TX_IQCAL_START_9485 (AR_SM_BASE + 0x3c4)
+#define AR_PHY_TX_IQCAL_START_DO_CAL_9485 0x80000000
+#define AR_PHY_TX_IQCAL_START_DO_CAL_9485_S 31
+#define AR_PHY_TX_IQCAL_CONTROL_1_9485 (AR_SM_BASE + 0x3c8)
+#define AR_PHY_TX_IQCAL_STATUS_B0_9485 (AR_SM_BASE + 0x3f0)
+
#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448)
#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440)
#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c)
-#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B0 (AR_SM_BASE + 0x450)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \
+ (AR_SREV_9485(ah) ? \
+ 0x3d0 : 0x450) + ((_i) << 2))
#define AR_PHY_WATCHDOG_STATUS (AR_SM_BASE + 0x5c0)
#define AR_PHY_WATCHDOG_CTL_1 (AR_SM_BASE + 0x5c4)
@@ -568,7 +588,7 @@
#define AR_PHY_65NM_CH0_BIAS2 0x160c4
#define AR_PHY_65NM_CH0_BIAS4 0x160cc
#define AR_PHY_65NM_CH0_RXTX4 0x1610c
-#define AR_PHY_65NM_CH0_THERM 0x16290
+#define AR_PHY_65NM_CH0_THERM (AR_SREV_9485(ah) ? 0x1628c : 0x16290)
#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
@@ -584,6 +604,24 @@
#define AR_PHY_65NM_CH2_RXTX1 0x16900
#define AR_PHY_65NM_CH2_RXTX2 0x16904
+#define AR_CH0_TOP2 (AR_SREV_9485(ah) ? 0x00016284 : 0x0001628c)
+#define AR_CH0_TOP2_XPABIASLVL 0xf000
+#define AR_CH0_TOP2_XPABIASLVL_S 12
+
+#define AR_CH0_XTAL (AR_SREV_9485(ah) ? 0x16290 : 0x16294)
+#define AR_CH0_XTAL_CAPINDAC 0x7f000000
+#define AR_CH0_XTAL_CAPINDAC_S 24
+#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000
+#define AR_CH0_XTAL_CAPOUTDAC_S 17
+
+#define AR_PHY_PMU1 0x16c40
+#define AR_PHY_PMU1_PWD 0x1
+#define AR_PHY_PMU1_PWD_S 0
+
+#define AR_PHY_PMU2 0x16c44
+#define AR_PHY_PMU2_PGM 0x00200000
+#define AR_PHY_PMU2_PGM_S 21
+
#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT 0x00380000
#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT_S 19
#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT 0x00c00000
@@ -683,6 +721,7 @@
#define AR_PHY_TPCGR1_FORCED_DAC_GAIN_S 1
#define AR_PHY_TPCGR1_FORCE_DAC_GAIN 0x00000001
#define AR_PHY_TXGAIN_FORCE 0x00000001
+#define AR_PHY_TXGAIN_FORCE_S 0
#define AR_PHY_TXGAIN_FORCED_PADVGNRA 0x00003c00
#define AR_PHY_TXGAIN_FORCED_PADVGNRA_S 10
#define AR_PHY_TXGAIN_FORCED_PADVGNRB 0x0003c000
@@ -725,8 +764,13 @@
#define AR_PHY_TX_IQCAL_START_DO_CAL_S 0
#define AR_PHY_TX_IQCAL_STATUS_FAILED 0x00000001
-#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x00003fff
-#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 0
+#define AR_PHY_CALIBRATED_GAINS_0 0x3e
+#define AR_PHY_CALIBRATED_GAINS_0_S 1
+
+#define AR_PHY_TX_IQCAL_CORR_COEFF_00_COEFF_TABLE 0x00003fff
+#define AR_PHY_TX_IQCAL_CORR_COEFF_00_COEFF_TABLE_S 0
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x0fffc000
+#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 14
#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
@@ -785,7 +829,7 @@
#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
-#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B1 (AR_SM1_BASE + 0x450)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM_BASE + 0x450 + ((_i) << 2))
/*
* Channel 2 Register Map
@@ -838,7 +882,7 @@
#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
#define AR_PHY_PDADC_TAB_2 (AR_SM2_BASE + 0x240)
#define AR_PHY_TX_IQCAL_STATUS_B2 (AR_SM2_BASE + 0x48c)
-#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B2 (AR_SM2_BASE + 0x450)
+#define AR_PHY_TX_IQCAL_CORR_COEFF_B2(_i) (AR_SM2_BASE + 0x450 + ((_i) << 2))
#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001
@@ -945,7 +989,9 @@
#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000
#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17
-#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + 0x490)
+#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + \
+ (AR_SREV_9485(ah) ? \
+ 0x580 : 0x490))
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING 0x0000007e
@@ -961,11 +1007,15 @@
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12
-#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + 0x494)
+#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + \
+ (AR_SREV_9485(ah) ? \
+ 0x584 : 0x494))
#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF
#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0
-#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + 0x498)
+#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + \
+ (AR_SREV_9485(ah) ? \
+ 0x588 : 0x498))
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP 0x00000fc0
@@ -981,7 +1031,9 @@
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29
-#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + 0x49c)
+#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + \
+ (AR_SREV_9485(ah) ? \
+ 0x58c : 0x49c))
#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000
#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16
#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA 0x0000f000
@@ -1040,6 +1092,14 @@
#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0 0x3F
#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0_S 0
+#define AR_PHY_POWERTX_RATE6 (AR_SM_BASE + 0x1d4)
+#define AR_PHY_POWERTX_RATE6_POWERTXHT20_5 0x3F00
+#define AR_PHY_POWERTX_RATE6_POWERTXHT20_5_S 8
+
+#define AR_PHY_POWERTX_RATE8 (AR_SM_BASE + 0x1dc)
+#define AR_PHY_POWERTX_RATE8_POWERTXHT40_5 0x3F00
+#define AR_PHY_POWERTX_RATE8_POWERTXHT40_5_S 8
+
void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
#endif /* AR9003_PHY_H */
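
The register additions above follow the usual ath9k convention of pairing each field mask with a shift named MASK_S, consumed by the MS()/SM() helpers. A plain-variable sketch of a read-modify-write using that convention (the field values mirror AR_CH0_TOP2_XPABIASLVL, mask 0xf000 with shift 12; the EX_ names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define EX_XPABIASLVL    0xf000u
#define EX_XPABIASLVL_S  12

#define EX_SM(val, field)  (((val) << field##_S) & field)	/* shift value into field */
#define EX_MS(reg, field)  (((reg) & field) >> field##_S)	/* extract field from reg */

int main(void)
{
	uint32_t reg = 0x12340678;

	reg = (reg & ~EX_XPABIASLVL) | EX_SM(0xa, EX_XPABIASLVL);
	printf("reg=0x%08x xpabiaslvl=%u\n", reg, (unsigned)EX_MS(reg, EX_XPABIASLVL));
	return 0;
}
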
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
new file mode 100644
index 000000000000..71cc0a3a29fb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -0,0 +1,2086 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9485_H
+#define INITVALS_9485_H
+
+static const u32 ar9485Common_1_0[][2] = {
+ /* Addr allmodes */
+ {0x00007010, 0x00000022},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9485_1_0_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10212e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000580c},
+};
+
+static const u32 ar9485Common_wo_xlna_rx_gain_1_0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x01800082},
+ {0x0000a014, 0x01820181},
+ {0x0000a018, 0x01840183},
+ {0x0000a01c, 0x01880185},
+ {0x0000a020, 0x018a0189},
+ {0x0000a024, 0x02850284},
+ {0x0000a028, 0x02890288},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x21212128},
+ {0x0000a098, 0x171c1c1c},
+ {0x0000a09c, 0x02020212},
+ {0x0000a0a0, 0x00000202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x111f1100},
+ {0x0000a0c8, 0x111d111e},
+ {0x0000a0cc, 0x111b111c},
+ {0x0000a0d0, 0x22032204},
+ {0x0000a0d4, 0x22012202},
+ {0x0000a0d8, 0x221f2200},
+ {0x0000a0dc, 0x221d221e},
+ {0x0000a0e0, 0x33013302},
+ {0x0000a0e4, 0x331f3300},
+ {0x0000a0e8, 0x4402331e},
+ {0x0000a0ec, 0x44004401},
+ {0x0000a0f0, 0x441e441f},
+ {0x0000a0f4, 0x55015502},
+ {0x0000a0f8, 0x551f5500},
+ {0x0000a0fc, 0x6602551e},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
+static const u32 ar9485Modes_high_power_tx_gain_1_0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
+};
+
+static const u32 ar9485_1_0[][2] = {
+ /* Addr allmodes */
+ {0x0000a580, 0x00000000},
+ {0x0000a584, 0x00000000},
+ {0x0000a588, 0x00000000},
+ {0x0000a58c, 0x00000000},
+ {0x0000a590, 0x00000000},
+ {0x0000a594, 0x00000000},
+ {0x0000a598, 0x00000000},
+ {0x0000a59c, 0x00000000},
+ {0x0000a5a0, 0x00000000},
+ {0x0000a5a4, 0x00000000},
+ {0x0000a5a8, 0x00000000},
+ {0x0000a5ac, 0x00000000},
+ {0x0000a5b0, 0x00000000},
+ {0x0000a5b4, 0x00000000},
+ {0x0000a5b8, 0x00000000},
+ {0x0000a5bc, 0x00000000},
+};
+
+static const u32 ar9485_1_0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73800000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x00016048, 0x6c92426e},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x6db6db6c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f081e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd28b3330},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480040},
+ {0x000160c0, 0x006db6db},
+ {0x000160c4, 0x0186db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6de6fbe0},
+ {0x000160d0, 0xf7dfcf3c},
+ {0x00016100, 0x04cb0001},
+ {0x00016104, 0xfff80015},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x00008040},
+ {0x00016180, 0x08453333},
+ {0x00016184, 0x18e82f01},
+ {0x00016188, 0x00000000},
+ {0x0001618c, 0x00000000},
+ {0x00016240, 0x08400000},
+ {0x00016244, 0x1bf90f00},
+ {0x00016248, 0x00000000},
+ {0x0001624c, 0x00000000},
+ {0x00016280, 0x01000015},
+ {0x00016284, 0x00d30000},
+ {0x00016288, 0x00318000},
+ {0x0001628c, 0x50000000},
+ {0x00016290, 0x4b96210f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016c40, 0x1319c178},
+ {0x00016c44, 0x10000000},
+};
+
+static const u32 ar9485Modes_lowest_ob_db_tx_gain_1_0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
+};
+
+static const u32 ar9485_1_0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x52440bbe},
+ {0x000098bc, 0x00000002},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0x00000000},
+ {0x00009c08, 0x03200000},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009d1c, 0x00000000},
+ {0x00009e08, 0x0038233c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0a6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x80be4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a210, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d0011ce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000000},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a5c4, 0x3fad9d74},
+ {0x0000a5c8, 0x0048060a},
+ {0x0000a5cc, 0x00000637},
+ {0x0000a760, 0x03020100},
+ {0x0000a764, 0x09080504},
+ {0x0000a768, 0x0d0c0b0a},
+ {0x0000a76c, 0x13121110},
+ {0x0000a770, 0x31301514},
+ {0x0000a774, 0x35343332},
+ {0x0000a778, 0x00000036},
+ {0x0000a780, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+};
+
+static const u32 ar9485Modes_high_ob_db_tx_gain_1_0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
+};
+
+static const u32 ar9485Common_rx_gain_1_0[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x01800082},
+ {0x0000a014, 0x01820181},
+ {0x0000a018, 0x01840183},
+ {0x0000a01c, 0x01880185},
+ {0x0000a020, 0x018a0189},
+ {0x0000a024, 0x02850284},
+ {0x0000a028, 0x02890288},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x21212128},
+ {0x0000a098, 0x171c1c1c},
+ {0x0000a09c, 0x02020212},
+ {0x0000a0a0, 0x00000202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x111f1100},
+ {0x0000a0c8, 0x111d111e},
+ {0x0000a0cc, 0x111b111c},
+ {0x0000a0d0, 0x22032204},
+ {0x0000a0d4, 0x22012202},
+ {0x0000a0d8, 0x221f2200},
+ {0x0000a0dc, 0x221d221e},
+ {0x0000a0e0, 0x33013302},
+ {0x0000a0e4, 0x331f3300},
+ {0x0000a0e8, 0x4402331e},
+ {0x0000a0ec, 0x44004401},
+ {0x0000a0f0, 0x441e441f},
+ {0x0000a0f4, 0x55015502},
+ {0x0000a0f8, 0x551f5500},
+ {0x0000a0fc, 0x6602551e},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
+static const u32 ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10252e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000580c},
+};
+
+static const u32 ar9485_1_0_pcie_phy_clkreq_enable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10253e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000580c},
+};
+
+static const u32 ar9485_1_0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00004090, 0x00aa10aa},
+ {0x000040a4, 0x00a0c9c9},
+ {0x00007048, 0x00000004},
+};
+
+static const u32 ar9485_fast_clock_1_0_baseband_postamble[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00009e00, 0x03721821, 0x03721821},
+ {0x0000a230, 0x0000400b, 0x00004016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9485_1_0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
+ {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9485Modes_low_ob_db_tx_gain_1_0[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
+ {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
+};
+
+static const u32 ar9485_1_0_pcie_phy_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10213e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000580c},
+};
+
+static const u32 ar9485_1_0_radio_postamble[][2] = {
+ /* Addr allmodes */
+ {0x0001609c, 0x0b283f31},
+ {0x000160ac, 0x24611800},
+ {0x000160b0, 0x03284f3e},
+ {0x0001610c, 0x00170000},
+ {0x00016140, 0x10804008},
+};
+
+static const u32 ar9485_1_0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9ca00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xa248105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9485_1_1_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9ca00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xa248105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9485_1_1_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x52440bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0x00000000},
+ {0x00009c08, 0x03200000},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009d1c, 0x00000000},
+ {0x00009e08, 0x0038233c},
+ {0x00009e24, 0x9927b515},
+ {0x00009e28, 0x12ef0200},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x80be4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a210, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x000000ff},
+ {0x0000a3a8, 0x3b3b3b3b},
+ {0x0000a3ac, 0x2f2f2f2f},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739cf},
+ {0x0000a418, 0x2d0019ce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000000},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a5c4, 0xbfad9d74},
+ {0x0000a5c8, 0x0048060a},
+ {0x0000a5cc, 0x00000637},
+ {0x0000a760, 0x03020100},
+ {0x0000a764, 0x09080504},
+ {0x0000a768, 0x0d0c0b0a},
+ {0x0000a76c, 0x13121110},
+ {0x0000a770, 0x31301514},
+ {0x0000a774, 0x35343332},
+ {0x0000a778, 0x00000036},
+ {0x0000a780, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+};
+
+static const u32 ar9485Common_1_1[][2] = {
+ /* Addr allmodes */
+ {0x00007010, 0x00000022},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9485_1_1_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
+ {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_1_1_radio_postamble[][2] = {
+ /* Addr allmodes */
+ {0x0001609c, 0x0b283f31},
+ {0x000160ac, 0x24611800},
+ {0x000160b0, 0x03284f3e},
+ {0x0001610c, 0x00170000},
+ {0x00016140, 0x10804008},
+};
+
+static const u32 ar9485_1_1_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9485_1_1_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73800000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x4db6db8c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f081e},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd28b3330},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480040},
+ {0x000160c0, 0x006db6db},
+ {0x000160c4, 0x0186db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6de6fbe0},
+ {0x000160d0, 0xf7dfcf3c},
+ {0x00016100, 0x04cb0001},
+ {0x00016104, 0xfff80015},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x00008040},
+ {0x00016240, 0x08400000},
+ {0x00016244, 0x1bf90f00},
+ {0x00016248, 0x00000000},
+ {0x0001624c, 0x00000000},
+ {0x00016280, 0x01000015},
+ {0x00016284, 0x00d30000},
+ {0x00016288, 0x00318000},
+ {0x0001628c, 0x50000000},
+ {0x00016290, 0x4b96210f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016c40, 0x13188278},
+ {0x00016c44, 0x12000000},
+};
+
+static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10052e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a580, 0x00000000},
+ {0x0000a584, 0x00000000},
+ {0x0000a588, 0x00000000},
+ {0x0000a58c, 0x00000000},
+ {0x0000a590, 0x00000000},
+ {0x0000a594, 0x00000000},
+ {0x0000a598, 0x00000000},
+ {0x0000a59c, 0x00000000},
+ {0x0000a5a0, 0x00000000},
+ {0x0000a5a4, 0x00000000},
+ {0x0000a5a8, 0x00000000},
+ {0x0000a5ac, 0x00000000},
+ {0x0000a5b0, 0x00000000},
+ {0x0000a5b4, 0x00000000},
+ {0x0000a5b8, 0x00000000},
+ {0x0000a5bc, 0x00000000},
+};
+
+static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
+ {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
+ {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
+ {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
+ {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
+ {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10013e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485_1_1_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00004014, 0xba280400},
+ {0x00004090, 0x00aa10aa},
+ {0x000040a4, 0x00a0c9c9},
+ {0x00007010, 0x00000022},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007048, 0x00000002},
+};
+
+static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+ {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+ {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+ {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00009e00, 0x03721821, 0x03721821},
+ {0x0000a230, 0x0000400b, 0x00004016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10012e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485_common_rx_gain_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x01800082},
+ {0x0000a014, 0x01820181},
+ {0x0000a018, 0x01840183},
+ {0x0000a01c, 0x01880185},
+ {0x0000a020, 0x018a0189},
+ {0x0000a024, 0x02850284},
+ {0x0000a028, 0x02890288},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x21212128},
+ {0x0000a098, 0x171c1c1c},
+ {0x0000a09c, 0x02020212},
+ {0x0000a0a0, 0x00000202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x111f1100},
+ {0x0000a0c8, 0x111d111e},
+ {0x0000a0cc, 0x111b111c},
+ {0x0000a0d0, 0x22032204},
+ {0x0000a0d4, 0x22012202},
+ {0x0000a0d8, 0x221f2200},
+ {0x0000a0dc, 0x221d221e},
+ {0x0000a0e0, 0x33013302},
+ {0x0000a0e4, 0x331f3300},
+ {0x0000a0e8, 0x4402331e},
+ {0x0000a0ec, 0x44004401},
+ {0x0000a0f0, 0x441e441f},
+ {0x0000a0f4, 0x55015502},
+ {0x0000a0f8, 0x551f5500},
+ {0x0000a0fc, 0x6602551e},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
+static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x10053e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x24242428},
+ {0x0000a098, 0x171e1e1e},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 170d44a35ccb..099bd4183ad0 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -86,33 +86,19 @@ struct ath_config {
/**
* enum buffer_type - Buffer type flags
*
- * @BUF_HT: Send this buffer using HT capabilities
* @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
* @BUF_AGGR: Indicates whether the buffer can be aggregated
* (used in aggregation scheduling)
- * @BUF_RETRY: Indicates whether the buffer is retried
* @BUF_XRETRY: To denote excessive retries of the buffer
*/
enum buffer_type {
- BUF_HT = BIT(1),
- BUF_AMPDU = BIT(2),
- BUF_AGGR = BIT(3),
- BUF_RETRY = BIT(4),
- BUF_XRETRY = BIT(5),
+ BUF_AMPDU = BIT(0),
+ BUF_AGGR = BIT(1),
+ BUF_XRETRY = BIT(2),
};
-#define bf_nframes bf_state.bfs_nframes
-#define bf_al bf_state.bfs_al
-#define bf_frmlen bf_state.bfs_frmlen
-#define bf_retries bf_state.bfs_retries
-#define bf_seqno bf_state.bfs_seqno
-#define bf_tidno bf_state.bfs_tidno
-#define bf_keyix bf_state.bfs_keyix
-#define bf_keytype bf_state.bfs_keytype
-#define bf_isht(bf) (bf->bf_state.bf_type & BUF_HT)
#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
-#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
#define ATH_TXSTATUS_RING_SIZE 64
@@ -148,7 +134,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
WME_AC_VO)
-#define ADDBA_EXCHANGE_ATTEMPTS 10
#define ATH_AGGR_DELIM_SZ 4
#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
/* number of delimiters for encryption padding */
@@ -177,8 +162,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
/* returns delimiter padding required given the packet length */
#define ATH_AGGR_GET_NDELIM(_len) \
- (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
- (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
+ (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
+ DIV_ROUND_UP(ATH_AGGR_MINPLEN - (_len), ATH_AGGR_DELIM_SZ))
#define BAW_WITHIN(_start, _bawsz, _seqno) \
((((_seqno) - (_start)) & 4095) < (_bawsz))
@@ -195,12 +180,13 @@ enum ATH_AGGR_STATUS {
#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
- int axq_class;
- u32 axq_qnum;
+ int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
+ u32 axq_qnum; /* ath9k hardware queue number */
u32 *axq_link;
struct list_head axq_q;
spinlock_t axq_lock;
u32 axq_depth;
+ u32 axq_ampdu_depth;
bool stopped;
bool axq_tx_inprogress;
struct list_head axq_acq;
@@ -208,27 +194,29 @@ struct ath_txq {
struct list_head txq_fifo_pending;
u8 txq_headidx;
u8 txq_tailidx;
+ int pending_frames;
};
struct ath_atx_ac {
+ struct ath_txq *txq;
int sched;
- int qnum;
struct list_head list;
struct list_head tid_q;
};
+struct ath_frame_info {
+ int framelen;
+ u32 keyix;
+ enum ath9k_key_type keytype;
+ u8 retries;
+ u16 seqno;
+};
+
struct ath_buf_state {
- int bfs_nframes;
- u16 bfs_al;
- u16 bfs_frmlen;
- int bfs_seqno;
- int bfs_tidno;
- int bfs_retries;
u8 bf_type;
u8 bfs_paprd;
unsigned long bfs_paprd_timestamp;
- u32 bfs_keyix;
- enum ath9k_key_type bfs_keytype;
+ enum ath9k_internal_frame_type bfs_ftype;
};
struct ath_buf {
@@ -241,10 +229,8 @@ struct ath_buf {
dma_addr_t bf_daddr; /* physical addr of desc */
dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */
bool bf_stale;
- bool bf_tx_aborted;
u16 bf_flags;
struct ath_buf_state bf_state;
- struct ath_wiphy *aphy;
};
struct ath_atx_tid {
@@ -265,12 +251,14 @@ struct ath_atx_tid {
};
struct ath_node {
- struct ath_common *common;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ struct list_head list; /* for sc->nodes */
+ struct ieee80211_sta *sta; /* station struct we're part of */
+#endif
struct ath_atx_tid tid[WME_NUM_TID];
struct ath_atx_ac ac[WME_NUM_AC];
u16 maxampdu;
u8 mpdudensity;
- int last_rssi;
};
#define AGGR_CLEANUP BIT(1)
@@ -279,6 +267,7 @@ struct ath_node {
struct ath_tx_control {
struct ath_txq *txq;
+ struct ath_node *an;
int if_id;
enum ath9k_internal_frame_type frame_type;
u8 paprd;
@@ -288,15 +277,19 @@ struct ath_tx_control {
#define ATH_TX_XRETRY 0x02
#define ATH_TX_BAR 0x04
+/**
+ * @txq_map: Index is mac80211 queue number. This is
+ * not necessarily the same as the hardware queue number
+ * (axq_qnum).
+ */
struct ath_tx {
u16 seq_no;
u32 txqsetup;
- int hwq_map[WME_NUM_AC];
spinlock_t txbuflock;
struct list_head txbuf;
struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
struct ath_descdma txdma;
- int pending_frames[WME_NUM_AC];
+ struct ath_txq *txq_map[WME_NUM_AC];
};
struct ath_rx_edma {
@@ -310,12 +303,13 @@ struct ath_rx {
u8 rxotherant;
u32 *rxlink;
unsigned int rxfilter;
- spinlock_t pcu_lock;
spinlock_t rxbuflock;
struct list_head rxbuf;
struct ath_descdma rxdma;
struct ath_buf *rx_bufptr;
struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
+
+ struct sk_buff *frag;
};
int ath_startrecv(struct ath_softc *sc);
@@ -327,8 +321,7 @@ void ath_rx_cleanup(struct ath_softc *sc);
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
-int ath_tx_setup(struct ath_softc *sc, int haltype);
-void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
+bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
void ath_draintxq(struct ath_softc *sc,
struct ath_txq *txq, bool retry_tx);
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
@@ -342,7 +335,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl);
void ath_tx_tasklet(struct ath_softc *sc);
void ath_tx_edma_tasklet(struct ath_softc *sc);
-void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
u16 tid, u16 *ssn);
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
@@ -354,10 +346,10 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
struct ath_vif {
int av_bslot;
+ bool is_bslot_active;
__le64 tsf_adjust; /* TSF adjustment for staggered beacons */
enum nl80211_iftype av_opmode;
struct ath_buf *av_bcbuf;
- struct ath_tx_control av_btxctl;
u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */
};
@@ -377,7 +369,7 @@ struct ath_vif {
#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
struct ath_beacon_config {
- u16 beacon_interval;
+ int beacon_interval;
u16 listen_interval;
u16 dtim_period;
u16 bmiss_timeout;
@@ -396,7 +388,6 @@ struct ath_beacon {
u32 ast_be_xmit;
u64 bc_tstamp;
struct ieee80211_vif *bslot[ATH_BCBUF];
- struct ath_wiphy *bslot_aphy[ATH_BCBUF];
int slottime;
int slotupdate;
struct ath9k_tx_queue_info beacon_qi;
@@ -407,9 +398,10 @@ struct ath_beacon {
void ath_beacon_tasklet(unsigned long data);
void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
-int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif);
+int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
int ath_beaconq_config(struct ath_softc *sc);
+void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
/*******/
/* ANI */
@@ -456,26 +448,21 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
#define ATH_LED_PIN_DEF 1
#define ATH_LED_PIN_9287 8
-#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */
-#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
-
-enum ath_led_type {
- ATH_LED_RADIO,
- ATH_LED_ASSOC,
- ATH_LED_TX,
- ATH_LED_RX
-};
-
-struct ath_led {
- struct ath_softc *sc;
- struct led_classdev led_cdev;
- enum ath_led_type led_type;
- char name[32];
- bool registered;
-};
+#define ATH_LED_PIN_9485 6
+#ifdef CONFIG_MAC80211_LEDS
void ath_init_leds(struct ath_softc *sc);
void ath_deinit_leds(struct ath_softc *sc);
+#else
+static inline void ath_init_leds(struct ath_softc *sc)
+{
+}
+
+static inline void ath_deinit_leds(struct ath_softc *sc)
+{
+}
+#endif
+
/* Antenna diversity/combining */
#define ATH_ANT_RX_CURRENT_SHIFT 4
@@ -544,7 +531,6 @@ struct ath_ant_comb {
#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
#define ATH_MAX_SW_RETRIES 10
#define ATH_CHAN_MAX 255
-#define IEEE80211_WEP_NKID 4 /* number of key ids */
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
#define ATH_RATE_DUMMY_MARKER 0
@@ -563,6 +549,7 @@ struct ath_ant_comb {
#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
#define SC_OP_BT_SCAN BIT(13)
#define SC_OP_ANI_RUN BIT(14)
+#define SC_OP_ENABLE_APM BIT(15)
/* Powersave flags */
#define PS_WAIT_FOR_BEACON BIT(0)
@@ -571,27 +558,28 @@ struct ath_ant_comb {
#define PS_WAIT_FOR_TX_ACK BIT(3)
#define PS_BEACON_SYNC BIT(4)
-struct ath_wiphy;
struct ath_rate_table;
+struct ath9k_vif_iter_data {
+ const u8 *hw_macaddr; /* phy's hardware address, set
+ * before starting iteration for
+ * valid bssid mask.
+ */
+ u8 mask[ETH_ALEN]; /* bssid mask */
+ int naps; /* number of AP vifs */
+ int nmeshes; /* number of mesh vifs */
+ int nstations; /* number of station vifs */
+ int nwds; /* number of WDS vifs */
+ int nadhocs; /* number of adhoc vifs */
+ int nothers; /* number of vifs not specified above. */
+};
+
struct ath_softc {
struct ieee80211_hw *hw;
struct device *dev;
- spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */
- struct ath_wiphy *pri_wiphy;
- struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may
- * have NULL entries */
- int num_sec_wiphy; /* number of sec_wiphy pointers in the array */
int chan_idx;
int chan_is_ht;
- struct ath_wiphy *next_wiphy;
- struct work_struct chan_work;
- int wiphy_select_failures;
- unsigned long wiphy_select_first_fail;
- struct delayed_work wiphy_work;
- unsigned long wiphy_scheduler_int;
- int wiphy_scheduler_index;
struct survey_info *cur_survey;
struct survey_info survey[ATH9K_NUM_CHANNELS];
@@ -600,22 +588,24 @@ struct ath_softc {
struct ath_hw *sc_ah;
void __iomem *mem;
int irq;
- spinlock_t sc_resetlock;
spinlock_t sc_serial_rw;
spinlock_t sc_pm_lock;
+ spinlock_t sc_pcu_lock;
struct mutex mutex;
struct work_struct paprd_work;
struct work_struct hw_check_work;
struct completion paprd_complete;
+ unsigned int hw_busy_count;
+
u32 intrstatus;
u32 sc_flags; /* SC_OP_* */
u16 ps_flags; /* PS_* */
u16 curtxpow;
- u8 nbcnvifs;
- u16 nvifs;
bool ps_enabled;
bool ps_idle;
+ short nbcnvifs;
+ short nvifs;
unsigned long ps_usecount;
struct ath_config config;
@@ -624,23 +614,24 @@ struct ath_softc {
struct ath_beacon beacon;
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
- struct ath_led radio_led;
- struct ath_led assoc_led;
- struct ath_led tx_led;
- struct ath_led rx_led;
- struct delayed_work ath_led_blink_work;
- int led_on_duration;
- int led_off_duration;
- int led_on_cnt;
- int led_off_cnt;
+#ifdef CONFIG_MAC80211_LEDS
+ bool led_registered;
+ char led_name[32];
+ struct led_classdev led_cdev;
+#endif
- int beacon_interval;
+ struct ath9k_hw_cal_data caldata;
+ int last_rssi;
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath9k_debug debug;
+ spinlock_t nodes_lock;
+ struct list_head nodes; /* basically, stations */
+ unsigned int tx_complete_poll_work_seen;
#endif
struct ath_beacon_config cur_beacon_conf;
struct delayed_work tx_complete_work;
+ struct delayed_work hw_pll_work;
struct ath_btcoex btcoex;
struct ath_descdma txsdma;
@@ -648,25 +639,8 @@ struct ath_softc {
struct ath_ant_comb ant_comb;
};
-struct ath_wiphy {
- struct ath_softc *sc; /* shared for all virtual wiphys */
- struct ieee80211_hw *hw;
- struct ath9k_hw_cal_data caldata;
- enum ath_wiphy_state {
- ATH_WIPHY_INACTIVE,
- ATH_WIPHY_ACTIVE,
- ATH_WIPHY_PAUSING,
- ATH_WIPHY_PAUSED,
- ATH_WIPHY_SCAN,
- } state;
- bool idle;
- int chan_idx;
- int chan_is_ht;
-};
-
void ath9k_tasklet(unsigned long data);
int ath_reset(struct ath_softc *sc, bool retry_tx);
-int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
int ath_cabq_update(struct ath_softc *);
static inline void ath_read_cachesize(struct ath_common *common, int *csz)
@@ -675,24 +649,23 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
}
extern struct ieee80211_ops ath9k_ops;
-extern struct pm_qos_request_list ath9k_pm_qos_req;
-extern int modparam_nohwcrypt;
+extern int ath9k_modparam_nohwcrypt;
extern int led_blink;
+extern bool is_ath9k_unloaded;
irqreturn_t ath_isr(int irq, void *dev);
+void ath9k_init_crypto(struct ath_softc *sc);
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
const struct ath_bus_ops *bus_ops);
void ath9k_deinit_device(struct ath_softc *sc);
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
-void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
- struct ath9k_channel *ichan);
-void ath_update_chainmask(struct ath_softc *sc, int is_ht);
int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *hchan);
void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
+bool ath9k_uses_beacons(int type);
#ifdef CONFIG_PCI
int ath_pci_init(void);
@@ -713,27 +686,15 @@ static inline void ath_ahb_exit(void) {};
void ath9k_ps_wakeup(struct ath_softc *sc);
void ath9k_ps_restore(struct ath_softc *sc);
+u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
+
void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-int ath9k_wiphy_add(struct ath_softc *sc);
-int ath9k_wiphy_del(struct ath_wiphy *aphy);
-void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb);
-int ath9k_wiphy_pause(struct ath_wiphy *aphy);
-int ath9k_wiphy_unpause(struct ath_wiphy *aphy);
-int ath9k_wiphy_select(struct ath_wiphy *aphy);
-void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int);
-void ath9k_wiphy_chan_work(struct work_struct *work);
-bool ath9k_wiphy_started(struct ath_softc *sc);
-void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
- struct ath_wiphy *selected);
-bool ath9k_wiphy_scanning(struct ath_softc *sc);
-void ath9k_wiphy_work(struct work_struct *work);
-bool ath9k_all_wiphys_idle(struct ath_softc *sc);
-void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle);
-
-void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
-bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
void ath_start_rfkill_poll(struct ath_softc *sc);
extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ath9k_vif_iter_data *iter_data);
+
#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 19891e7d49ae..6d2a545fc35e 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -28,7 +28,7 @@ int ath_beaconq_config(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_tx_queue_info qi, qi_be;
- int qnum;
+ struct ath_txq *txq;
ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
@@ -38,16 +38,16 @@ int ath_beaconq_config(struct ath_softc *sc)
qi.tqi_cwmax = 0;
} else {
/* Adhoc mode; important thing is to use 2x cwmin. */
- qnum = sc->tx.hwq_map[WME_AC_BE];
- ath9k_hw_get_txq_props(ah, qnum, &qi_be);
+ txq = sc->tx.txq_map[WME_AC_BE];
+ ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
qi.tqi_aifs = qi_be.tqi_aifs;
qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
qi.tqi_cwmax = qi_be.tqi_cwmax;
}
if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to update h/w beacon queue parameters\n");
+ ath_err(common,
+ "Unable to update h/w beacon queue parameters\n");
return 0;
} else {
ath9k_hw_resettxqueue(ah, sc->beacon.beaconq);
@@ -103,17 +103,35 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
series[0].Tries = 1;
series[0].Rate = rate;
- series[0].ChSel = common->tx_chainmask;
+ series[0].ChSel = ath_txchainmask_reduction(sc,
+ common->tx_chainmask, series[0].Rate);
series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0;
ath9k_hw_set11n_ratescenario(ah, ds, ds, 0, ctsrate, ctsduration,
series, 4, 0);
}
+static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_tx_control txctl;
+
+ memset(&txctl, 0, sizeof(struct ath_tx_control));
+ txctl.txq = sc->beacon.cabq;
+
+ ath_dbg(common, ATH_DBG_XMIT,
+ "transmitting CABQ packet, skb: %p\n", skb);
+
+ if (ath_tx_start(hw, skb, &txctl) != 0) {
+ ath_dbg(common, ATH_DBG_XMIT, "CABQ TX failed\n");
+ dev_kfree_skb_any(skb);
+ }
+}
+
static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_buf *bf;
struct ath_vif *avp;
@@ -122,13 +140,10 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info;
int cabq_depth;
- if (aphy->state != ATH_WIPHY_ACTIVE)
- return NULL;
-
avp = (void *)vif->drv_priv;
cabq = sc->beacon.cabq;
- if (avp->av_bcbuf == NULL)
+ if ((avp->av_bcbuf == NULL) || !avp->is_bslot_active)
return NULL;
/* Release the old beacon first */
@@ -169,8 +184,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
dev_kfree_skb_any(skb);
bf->bf_mpdu = NULL;
bf->bf_buf_addr = 0;
- ath_print(common, ATH_DBG_FATAL,
- "dma_mapping_error on beaconing\n");
+ ath_err(common, "dma_mapping_error on beaconing\n");
return NULL;
}
@@ -190,8 +204,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
if (skb && cabq_depth) {
if (sc->nvifs > 1) {
- ath_print(common, ATH_DBG_BEACON,
- "Flushing previous cabq traffic\n");
+ ath_dbg(common, ATH_DBG_BEACON,
+ "Flushing previous cabq traffic\n");
ath_draintxq(sc, cabq, false);
}
}
@@ -206,13 +220,13 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
return bf;
}
-int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
+int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp;
struct ath_buf *bf;
struct sk_buff *skb;
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
__le64 tstamp;
avp = (void *)vif->drv_priv;
@@ -225,9 +239,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
struct ath_buf, list);
list_del(&avp->av_bcbuf->list);
- if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
- sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC ||
- sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
+ if (ath9k_uses_beacons(vif->type)) {
int slot;
/*
* Assign the vif to a beacon xmit slot. As
@@ -237,6 +249,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
for (slot = 0; slot < ATH_BCBUF; slot++)
if (sc->beacon.bslot[slot] == NULL) {
avp->av_bslot = slot;
+ avp->is_bslot_active = false;
/* NB: keep looking for a double slot */
if (slot == 0 || !sc->beacon.bslot[slot-1])
@@ -244,7 +257,6 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
}
BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL);
sc->beacon.bslot[avp->av_bslot] = vif;
- sc->beacon.bslot_aphy[avp->av_bslot] = aphy;
sc->nbcnvifs++;
}
}
@@ -262,10 +274,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
/* NB: the beacon data buffer must be 32-bit aligned. */
skb = ieee80211_beacon_get(sc->hw, vif);
- if (skb == NULL) {
- ath_print(common, ATH_DBG_BEACON, "cannot get skb\n");
+ if (skb == NULL)
return -ENOMEM;
- }
tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
sc->beacon.bc_tstamp = le64_to_cpu(tstamp);
@@ -274,7 +284,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
u64 tsfadjust;
int intval;
- intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL;
+ intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
/*
* Calculate the TSF offset for this beacon slot, i.e., the
@@ -287,10 +297,9 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
tsfadjust = intval * avp->av_bslot / ATH_BCBUF;
avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
- ath_print(common, ATH_DBG_BEACON,
- "stagger beacons, bslot %d intval "
- "%u tsfadjust %llu\n",
- avp->av_bslot, intval, (unsigned long long)tsfadjust);
+ ath_dbg(common, ATH_DBG_BEACON,
+ "stagger beacons, bslot %d intval %u tsfadjust %llu\n",
+ avp->av_bslot, intval, (unsigned long long)tsfadjust);
((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
avp->tsf_adjust;
@@ -304,10 +313,10 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
dev_kfree_skb_any(skb);
bf->bf_mpdu = NULL;
bf->bf_buf_addr = 0;
- ath_print(common, ATH_DBG_FATAL,
- "dma_mapping_error on beacon alloc\n");
+ ath_err(common, "dma_mapping_error on beacon alloc\n");
return -ENOMEM;
}
+ avp->is_bslot_active = true;
return 0;
}
@@ -319,7 +328,6 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
if (avp->av_bslot != -1) {
sc->beacon.bslot[avp->av_bslot] = NULL;
- sc->beacon.bslot_aphy[avp->av_bslot] = NULL;
sc->nbcnvifs--;
}
@@ -341,11 +349,11 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
void ath_beacon_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_buf *bf = NULL;
struct ieee80211_vif *vif;
- struct ath_wiphy *aphy;
int slot;
u32 bfaddr, bc = 0, tsftu;
u64 tsf;
@@ -362,13 +370,14 @@ void ath_beacon_tasklet(unsigned long data)
sc->beacon.bmisscnt++;
if (sc->beacon.bmisscnt < BSTUCK_THRESH) {
- ath_print(common, ATH_DBG_BSTUCK,
- "missed %u consecutive beacons\n",
- sc->beacon.bmisscnt);
+ ath_dbg(common, ATH_DBG_BSTUCK,
+ "missed %u consecutive beacons\n",
+ sc->beacon.bmisscnt);
+ ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
ath9k_hw_bstuck_nfcal(ah);
} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
- ath_print(common, ATH_DBG_BSTUCK,
- "beacon is officially stuck\n");
+ ath_dbg(common, ATH_DBG_BSTUCK,
+ "beacon is officially stuck\n");
sc->sc_flags |= SC_OP_TSF_RESET;
ath_reset(sc, true);
}
@@ -377,9 +386,9 @@ void ath_beacon_tasklet(unsigned long data)
}
if (sc->beacon.bmisscnt != 0) {
- ath_print(common, ATH_DBG_BSTUCK,
- "resume beacon xmit after %u misses\n",
- sc->beacon.bmisscnt);
+ ath_dbg(common, ATH_DBG_BSTUCK,
+ "resume beacon xmit after %u misses\n",
+ sc->beacon.bmisscnt);
sc->beacon.bmisscnt = 0;
}
@@ -389,7 +398,7 @@ void ath_beacon_tasklet(unsigned long data)
* on the tsf to safeguard against missing an swba.
*/
- intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL;
+ intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
tsf = ath9k_hw_gettsf64(ah);
tsftu = TSF_TO_TU(tsf>>32, tsf);
@@ -403,15 +412,14 @@ void ath_beacon_tasklet(unsigned long data)
*/
slot = ATH_BCBUF - slot - 1;
vif = sc->beacon.bslot[slot];
- aphy = sc->beacon.bslot_aphy[slot];
- ath_print(common, ATH_DBG_BEACON,
- "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
- slot, tsf, tsftu, intval, vif);
+ ath_dbg(common, ATH_DBG_BEACON,
+ "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
+ slot, tsf, tsftu, intval, vif);
bfaddr = 0;
if (vif) {
- bf = ath_beacon_generate(aphy->hw, vif);
+ bf = ath_beacon_generate(sc->hw, vif);
if (bf != NULL) {
bfaddr = bf->bf_daddr;
bc = 1;
@@ -443,16 +451,6 @@ void ath_beacon_tasklet(unsigned long data)
sc->beacon.updateslot = OK;
}
if (bfaddr != 0) {
- /*
- * Stop any current dma and put the new frame(s) on the queue.
- * This should never fail since we check above that no frames
- * are still pending on the queue.
- */
- if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) {
- ath_print(common, ATH_DBG_FATAL,
- "beacon queue %u did not stop?\n", sc->beacon.beaconq);
- }
-
/* NB: cabq traffic should already be queued and primed */
ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
ath9k_hw_txstart(ah, sc->beacon.beaconq);
@@ -503,7 +501,7 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
/* Set the computed AP beacon timers */
- ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_disable_interrupts(ah);
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
ath9k_hw_set_interrupts(ah, ah->imask);
@@ -536,8 +534,8 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
/* No need to configure beacon if we are not associated */
if (!common->curaid) {
- ath_print(common, ATH_DBG_BEACON,
- "STA is not yet associated..skipping beacon config\n");
+ ath_dbg(common, ATH_DBG_BEACON,
+ "STA is not yet associated..skipping beacon config\n");
return;
}
@@ -549,8 +547,6 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
* last beacon we received (which may be none).
*/
dtimperiod = conf->dtim_period;
- if (dtimperiod <= 0) /* NB: 0 if not known */
- dtimperiod = 1;
dtimcount = conf->dtim_count;
if (dtimcount >= dtimperiod) /* NB: sanity check */
dtimcount = 0;
@@ -558,8 +554,6 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
cfpcount = 0;
sleepduration = conf->listen_interval * intval;
- if (sleepduration <= 0)
- sleepduration = intval;
/*
* Pull nexttbtt forward to reflect the current
@@ -630,23 +624,22 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
- ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
- ath_print(common, ATH_DBG_BEACON,
- "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
- bs.bs_bmissthreshold, bs.bs_sleepduration,
- bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+ ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
+ ath_dbg(common, ATH_DBG_BEACON,
+ "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
+ bs.bs_bmissthreshold, bs.bs_sleepduration,
+ bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
/* Set the computed STA beacon timers */
- ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_disable_interrupts(ah);
ath9k_hw_set_sta_beacon_timers(ah, &bs);
ah->imask |= ATH9K_INT_BMISS;
ath9k_hw_set_interrupts(ah, ah->imask);
}
static void ath_beacon_config_adhoc(struct ath_softc *sc,
- struct ath_beacon_config *conf,
- struct ieee80211_vif *vif)
+ struct ath_beacon_config *conf)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -670,9 +663,9 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
nexttbtt += intval;
} while (nexttbtt < tsftu);
- ath_print(common, ATH_DBG_BEACON,
- "IBSS nexttbtt %u intval %u (%u)\n",
- nexttbtt, intval, conf->beacon_interval);
+ ath_dbg(common, ATH_DBG_BEACON,
+ "IBSS nexttbtt %u intval %u (%u)\n",
+ nexttbtt, intval, conf->beacon_interval);
/*
* In IBSS mode enable the beacon timers but only enable SWBA interrupts
@@ -686,7 +679,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
/* Set the computed ADHOC beacon timers */
- ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_disable_interrupts(ah);
ath9k_beacon_init(sc, nexttbtt, intval);
sc->beacon.bmisscnt = 0;
ath9k_hw_set_interrupts(ah, ah->imask);
@@ -701,19 +694,18 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
/* Setup the beacon configuration parameters */
if (vif) {
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
-
iftype = vif->type;
-
cur_conf->beacon_interval = bss_conf->beacon_int;
cur_conf->dtim_period = bss_conf->dtim_period;
- cur_conf->listen_interval = 1;
- cur_conf->dtim_count = 1;
- cur_conf->bmiss_timeout =
- ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
} else {
iftype = sc->sc_ah->opmode;
}
+ cur_conf->listen_interval = 1;
+ cur_conf->dtim_count = 1;
+ cur_conf->bmiss_timeout =
+ ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+
/*
* It looks like mac80211 may end up using beacon interval of zero in
* some cases (at least for mesh point). Avoid getting into an
@@ -723,22 +715,63 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
if (cur_conf->beacon_interval == 0)
cur_conf->beacon_interval = 100;
+ /*
+ * We don't parse the dtim period from mac80211 during driver
+ * initialization, as it breaks association with hidden-ssid
+ * APs and causes latency in roaming
+ */
+ if (cur_conf->dtim_period == 0)
+ cur_conf->dtim_period = 1;
+
switch (iftype) {
case NL80211_IFTYPE_AP:
ath_beacon_config_ap(sc, cur_conf);
break;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
- ath_beacon_config_adhoc(sc, cur_conf, vif);
+ ath_beacon_config_adhoc(sc, cur_conf);
break;
case NL80211_IFTYPE_STATION:
ath_beacon_config_sta(sc, cur_conf);
break;
default:
- ath_print(common, ATH_DBG_CONFIG,
- "Unsupported beaconing mode\n");
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Unsupported beaconing mode\n");
return;
}
sc->sc_flags |= SC_OP_BEACONS;
}
+
+void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_vif *avp;
+ int slot;
+ bool found = false;
+
+ ath9k_ps_wakeup(sc);
+ if (status) {
+ for (slot = 0; slot < ATH_BCBUF; slot++) {
+ if (sc->beacon.bslot[slot]) {
+ avp = (void *)sc->beacon.bslot[slot]->drv_priv;
+ if (avp->is_bslot_active) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (found) {
+ /* Re-enable beaconing */
+ ah->imask |= ATH9K_INT_SWBA;
+ ath9k_hw_set_interrupts(ah, ah->imask);
+ }
+ } else {
+ /* Disable SWBA interrupt */
+ ah->imask &= ~ATH9K_INT_SWBA;
+ ath9k_hw_set_interrupts(ah, ah->imask);
+ tasklet_kill(&sc->bcon_tasklet);
+ ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
+ }
+ ath9k_ps_restore(sc);
+}
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 6a92e57fddf0..d33bf204c995 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -35,29 +35,6 @@ struct ath_btcoex_config {
bool bt_hold_rx_clear;
};
-static const u16 ath_subsysid_tbl[] = {
- AR9280_COEX2WIRE_SUBSYSID,
- AT9285_COEX3WIRE_SA_SUBSYSID,
- AT9285_COEX3WIRE_DA_SUBSYSID
-};
-
-/*
- * Checks the subsystem id of the device to see if it
- * supports btcoex
- */
-bool ath9k_hw_btcoex_supported(struct ath_hw *ah)
-{
- int i;
-
- if (!ah->hw_version.subsysid)
- return false;
-
- for (i = 0; i < ARRAY_SIZE(ath_subsysid_tbl); i++)
- if (ah->hw_version.subsysid == ath_subsysid_tbl[i])
- return true;
-
- return false;
-}
void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
{
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 1ee5a15ccbb1..588dfd464dd1 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -49,7 +49,6 @@ struct ath_btcoex_hw {
u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
};
-bool ath9k_hw_btcoex_supported(struct ath_hw *ah);
void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah);
void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 6d509484b5f6..8649581fa4dd 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -97,12 +97,12 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
if (h[i].privNF > limit->max) {
high_nf_mid = true;
- ath_print(common, ATH_DBG_CALIBRATE,
- "NFmid[%d] (%d) > MAX (%d), %s\n",
- i, h[i].privNF, limit->max,
- (cal->nfcal_interference ?
- "not corrected (due to interference)" :
- "correcting to MAX"));
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "NFmid[%d] (%d) > MAX (%d), %s\n",
+ i, h[i].privNF, limit->max,
+ (cal->nfcal_interference ?
+ "not corrected (due to interference)" :
+ "correcting to MAX"));
/*
* Normally we limit the average noise floor by the
@@ -180,18 +180,18 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
return true;
if (currCal->calState != CAL_DONE) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "Calibration state incorrect, %d\n",
- currCal->calState);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Calibration state incorrect, %d\n",
+ currCal->calState);
return true;
}
if (!(ah->supp_cals & currCal->calData->calType))
return true;
- ath_print(common, ATH_DBG_CALIBRATE,
- "Resetting Cal %d state for channel %u\n",
- currCal->calData->calType, conf->channel->center_freq);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Resetting Cal %d state for channel %u\n",
+ currCal->calData->calType, conf->channel->center_freq);
ah->caldata->CalValid &= ~currCal->calData->calType;
currCal->calState = CAL_WAITING;
@@ -262,7 +262,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* since 250us often results in NF load timeout and causes deaf
* condition during stress testing 12/12/2009
*/
- for (j = 0; j < 1000; j++) {
+ for (j = 0; j < 10000; j++) {
if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
AR_PHY_AGC_CONTROL_NF) == 0)
break;
@@ -278,10 +278,10 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* here, the baseband nf cal will just be capped by our present
* noisefloor until the next calibration timer.
*/
- if (j == 1000) {
- ath_print(common, ATH_DBG_ANY, "Timeout while waiting for nf "
- "to load: AR_PHY_AGC_CONTROL=0x%x\n",
- REG_READ(ah, AR_PHY_AGC_CONTROL));
+ if (j == 10000) {
+ ath_dbg(common, ATH_DBG_ANY,
+ "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
+ REG_READ(ah, AR_PHY_AGC_CONTROL));
return;
}
@@ -318,19 +318,19 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
if (!nf[i])
continue;
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF calibrated [%s] [chain %d] is %d\n",
- (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "NF calibrated [%s] [chain %d] is %d\n",
+ (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
if (nf[i] > ATH9K_NF_TOO_HIGH) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF[%d] (%d) > MAX (%d), correcting to MAX",
- i, nf[i], ATH9K_NF_TOO_HIGH);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "NF[%d] (%d) > MAX (%d), correcting to MAX\n",
+ i, nf[i], ATH9K_NF_TOO_HIGH);
nf[i] = limit->max;
} else if (nf[i] < limit->min) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF[%d] (%d) < MIN (%d), correcting to NOM",
- i, nf[i], limit->min);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "NF[%d] (%d) < MIN (%d), correcting to NOM\n",
+ i, nf[i], limit->min);
nf[i] = limit->nominal;
}
}
@@ -347,8 +347,8 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
chan->channelFlags &= (~CHANNEL_CW_INT);
if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "NF did not complete in calibration window\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "NF did not complete in calibration window\n");
return false;
}
@@ -357,10 +357,9 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
nf = nfarray[0];
if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
&& nf > nfThresh) {
- ath_print(common, ATH_DBG_CALIBRATE,
- "noise floor failed detected; "
- "detected %d, threshold %d\n",
- nf, nfThresh);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "noise floor failed detected; detected %d, threshold %d\n",
+ nf, nfThresh);
chan->channelFlags |= CHANNEL_CW_INT;
}
@@ -383,9 +382,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
s16 default_nf;
int i, j;
- if (!ah->caldata)
- return;
-
+ ah->caldata->channel = chan->channel;
+ ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
h = ah->caldata->nfCalHist;
default_nf = ath9k_hw_get_default_nf(ah, chan);
for (i = 0; i < NUM_NF_READINGS; i++) {
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index f43a2d98421c..615e68276e72 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -107,12 +107,10 @@ static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
/*
* Update internal channel flags.
*/
-void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
- struct ath9k_channel *ichan)
+void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
{
- struct ieee80211_channel *chan = hw->conf.channel;
- struct ieee80211_conf *conf = &hw->conf;
-
ichan->channel = chan->center_freq;
ichan->chan = chan;
@@ -124,9 +122,8 @@ void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
}
- if (conf_is_ht(conf))
- ichan->chanmode = ath9k_get_extchanmode(chan,
- conf->channel_type);
+ if (channel_type != NL80211_CHAN_NO_HT)
+ ichan->chanmode = ath9k_get_extchanmode(chan, channel_type);
}
EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
@@ -142,7 +139,7 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
chan_idx = curchan->hw_value;
channel = &ah->channels[chan_idx];
- ath9k_cmn_update_ichannel(hw, channel);
+ ath9k_cmn_update_ichannel(channel, curchan, hw->conf.channel_type);
return channel;
}
@@ -183,8 +180,8 @@ void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
AR_STOMP_NONE_WLAN_WGHT);
break;
default:
- ath_print(common, ATH_DBG_BTCOEX,
- "Invalid Stomptype\n");
+ ath_dbg(common, ATH_DBG_BTCOEX,
+ "Invalid Stomptype\n");
break;
}
@@ -192,6 +189,17 @@ void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
}
EXPORT_SYMBOL(ath9k_cmn_btcoex_bt_stomp);
+void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
+ u16 new_txpow, u16 *txpower)
+{
+ if (cur_txpow != new_txpow) {
+ ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
+ /* read back in case value is clamped */
+ *txpower = ath9k_hw_regulatory(ah)->power_limit;
+ }
+}
+EXPORT_SYMBOL(ath9k_cmn_update_txpow);
+
static int __init ath9k_cmn_init(void)
{
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index fea3b3315391..b2f7b5f89097 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -17,24 +17,22 @@
#include <net/mac80211.h>
#include "../ath.h"
-#include "../debug.h"
#include "hw.h"
#include "hw-ops.h"
/* Common header for Atheros 802.11n base driver cores */
-#define IEEE80211_WEP_NKID 4
-
#define WME_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define WME_MAX_BA WME_BA_BMP_SIZE
#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
-#define WME_AC_BE 0
-#define WME_AC_BK 1
-#define WME_AC_VI 2
-#define WME_AC_VO 3
+/* These must match mac80211 skb queue mapping numbers */
+#define WME_AC_VO 0
+#define WME_AC_VI 1
+#define WME_AC_BE 2
+#define WME_AC_BK 3
#define WME_NUM_AC 4
#define ATH_RSSI_DUMMY_MARKER 0x127
@@ -62,10 +60,13 @@ enum ath_stomp_type {
int ath9k_cmn_padpos(__le16 frame_control);
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
-void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
- struct ath9k_channel *ichan);
+void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type);
struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
struct ath_hw *ah);
int ath9k_cmn_count_streams(unsigned int chainmask, int max);
void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
enum ath_stomp_type stomp_type);
+void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
+ u16 new_txpow, u16 *txpower);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 43e71a944cb1..8df5a92a20f1 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -15,6 +15,7 @@
*/
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include "ath9k.h"
@@ -24,14 +25,25 @@
#define REG_READ_D(_ah, _reg) \
ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
-static struct dentry *ath9k_debugfs_root;
-
static int ath9k_debugfs_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
+static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ u8 *buf = file->private_data;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static int ath9k_debugfs_release_buf(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ return 0;
+}
+
#ifdef CONFIG_ATH_DEBUG
static ssize_t read_file_debug(struct file *file, char __user *user_buf,
@@ -383,41 +395,40 @@ static const struct file_operations fops_interrupt = {
.llseek = default_llseek,
};
-static const char * ath_wiphy_state_str(enum ath_wiphy_state state)
+static const char *channel_type_str(enum nl80211_channel_type t)
{
- switch (state) {
- case ATH_WIPHY_INACTIVE:
- return "INACTIVE";
- case ATH_WIPHY_ACTIVE:
- return "ACTIVE";
- case ATH_WIPHY_PAUSING:
- return "PAUSING";
- case ATH_WIPHY_PAUSED:
- return "PAUSED";
- case ATH_WIPHY_SCAN:
- return "SCAN";
+ switch (t) {
+ case NL80211_CHAN_NO_HT:
+ return "no ht";
+ case NL80211_CHAN_HT20:
+ return "ht20";
+ case NL80211_CHAN_HT40MINUS:
+ return "ht40-";
+ case NL80211_CHAN_HT40PLUS:
+ return "ht40+";
+ default:
+ return "???";
}
- return "?";
}
static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
- struct ath_wiphy *aphy = sc->pri_wiphy;
- struct ieee80211_channel *chan = aphy->hw->conf.channel;
+ struct ieee80211_channel *chan = sc->hw->conf.channel;
+ struct ieee80211_conf *conf = &(sc->hw->conf);
char buf[512];
unsigned int len = 0;
- int i;
u8 addr[ETH_ALEN];
u32 tmp;
len += snprintf(buf + len, sizeof(buf) - len,
- "primary: %s (%s chan=%d ht=%d)\n",
- wiphy_name(sc->pri_wiphy->hw->wiphy),
- ath_wiphy_state_str(sc->pri_wiphy->state),
+ "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
+ wiphy_name(sc->hw->wiphy),
ieee80211_frequency_to_channel(chan->center_freq),
- aphy->chan_is_ht);
+ chan->center_freq,
+ conf->channel_type,
+ channel_type_str(conf->channel_type));
put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
@@ -459,156 +470,82 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
else
len += snprintf(buf + len, sizeof(buf) - len, "\n");
- /* Put variable-length stuff down here, and check for overflows. */
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy == NULL)
- continue;
- chan = aphy->hw->conf.channel;
- len += snprintf(buf + len, sizeof(buf) - len,
- "secondary: %s (%s chan=%d ht=%d)\n",
- wiphy_name(aphy->hw->wiphy),
- ath_wiphy_state_str(aphy->state),
- ieee80211_frequency_to_channel(chan->center_freq),
- aphy->chan_is_ht);
- }
if (len > sizeof(buf))
len = sizeof(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
-static struct ath_wiphy * get_wiphy(struct ath_softc *sc, const char *name)
-{
- int i;
- if (strcmp(name, wiphy_name(sc->pri_wiphy->hw->wiphy)) == 0)
- return sc->pri_wiphy;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy && strcmp(name, wiphy_name(aphy->hw->wiphy)) == 0)
- return aphy;
- }
- return NULL;
-}
-
-static int del_wiphy(struct ath_softc *sc, const char *name)
-{
- struct ath_wiphy *aphy = get_wiphy(sc, name);
- if (!aphy)
- return -ENOENT;
- return ath9k_wiphy_del(aphy);
-}
-
-static int pause_wiphy(struct ath_softc *sc, const char *name)
-{
- struct ath_wiphy *aphy = get_wiphy(sc, name);
- if (!aphy)
- return -ENOENT;
- return ath9k_wiphy_pause(aphy);
-}
-
-static int unpause_wiphy(struct ath_softc *sc, const char *name)
-{
- struct ath_wiphy *aphy = get_wiphy(sc, name);
- if (!aphy)
- return -ENOENT;
- return ath9k_wiphy_unpause(aphy);
-}
-
-static int select_wiphy(struct ath_softc *sc, const char *name)
-{
- struct ath_wiphy *aphy = get_wiphy(sc, name);
- if (!aphy)
- return -ENOENT;
- return ath9k_wiphy_select(aphy);
-}
-
-static int schedule_wiphy(struct ath_softc *sc, const char *msec)
-{
- ath9k_wiphy_set_scheduler(sc, simple_strtoul(msec, NULL, 0));
- return 0;
-}
-
-static ssize_t write_file_wiphy(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[50];
- size_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
- if (len > 0 && buf[len - 1] == '\n')
- buf[len - 1] = '\0';
-
- if (strncmp(buf, "add", 3) == 0) {
- int res = ath9k_wiphy_add(sc);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "del=", 4) == 0) {
- int res = del_wiphy(sc, buf + 4);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "pause=", 6) == 0) {
- int res = pause_wiphy(sc, buf + 6);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "unpause=", 8) == 0) {
- int res = unpause_wiphy(sc, buf + 8);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "select=", 7) == 0) {
- int res = select_wiphy(sc, buf + 7);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "schedule=", 9) == 0) {
- int res = schedule_wiphy(sc, buf + 9);
- if (res < 0)
- return res;
- } else
- return -EOPNOTSUPP;
-
- return count;
-}
-
static const struct file_operations fops_wiphy = {
.read = read_file_wiphy,
- .write = write_file_wiphy,
.open = ath9k_debugfs_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
+#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
#define PR(str, elem) \
do { \
len += snprintf(buf + len, size - len, \
"%s%13u%11u%10u%10u\n", str, \
- sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BE]].elem, \
- sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BK]].elem, \
- sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VI]].elem, \
- sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VO]].elem); \
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem, \
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem, \
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem, \
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem); \
+ if (len >= size) \
+ goto done; \
+} while (0)
+
+#define PRX(str, elem) \
+do { \
+ len += snprintf(buf + len, size - len, \
+ "%s%13u%11u%10u%10u\n", str, \
+ (unsigned int)(sc->tx.txq_map[WME_AC_BE]->elem), \
+ (unsigned int)(sc->tx.txq_map[WME_AC_BK]->elem), \
+ (unsigned int)(sc->tx.txq_map[WME_AC_VI]->elem), \
+ (unsigned int)(sc->tx.txq_map[WME_AC_VO]->elem)); \
+ if (len >= size) \
+ goto done; \
} while(0)
+#define PRQLE(str, elem) \
+do { \
+ len += snprintf(buf + len, size - len, \
+ "%s%13i%11i%10i%10i\n", str, \
+ list_empty(&sc->tx.txq_map[WME_AC_BE]->elem), \
+ list_empty(&sc->tx.txq_map[WME_AC_BK]->elem), \
+ list_empty(&sc->tx.txq_map[WME_AC_VI]->elem), \
+ list_empty(&sc->tx.txq_map[WME_AC_VO]->elem)); \
+ if (len >= size) \
+ goto done; \
+} while (0)
+
static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
char *buf;
- unsigned int len = 0, size = 2048;
+ unsigned int len = 0, size = 8000;
+ int i;
ssize_t retval = 0;
+ char tmp[32];
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO");
+ len += sprintf(buf, "Num-Tx-Queues: %i tx-queues-setup: 0x%x"
+ " poll-work-seen: %u\n"
+ "%30s %10s%10s%10s\n\n",
+ ATH9K_NUM_TX_QUEUES, sc->tx.txqsetup,
+ sc->tx_complete_poll_work_seen,
+ "BE", "BK", "VI", "VO");
PR("MPDUs Queued: ", queued);
PR("MPDUs Completed: ", completed);
PR("Aggregates: ", a_aggr);
- PR("AMPDUs Queued: ", a_queued);
+ PR("AMPDUs Queued HW:", a_queued_hw);
+ PR("AMPDUs Queued SW:", a_queued_sw);
PR("AMPDUs Completed:", a_completed);
PR("AMPDUs Retried: ", a_retries);
PR("AMPDUs XRetried: ", a_xretries);
@@ -620,6 +557,223 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
PR("DELIM Underrun: ", delim_underrun);
PR("TX-Pkts-All: ", tx_pkts_all);
PR("TX-Bytes-All: ", tx_bytes_all);
+ PR("hw-put-tx-buf: ", puttxbuf);
+ PR("hw-tx-start: ", txstart);
+ PR("hw-tx-proc-desc: ", txprocdesc);
+ len += snprintf(buf + len, size - len,
+ "%s%11p%11p%10p%10p\n", "txq-memory-address:",
+ sc->tx.txq_map[WME_AC_BE],
+ sc->tx.txq_map[WME_AC_BK],
+ sc->tx.txq_map[WME_AC_VI],
+ sc->tx.txq_map[WME_AC_VO]);
+ if (len >= size)
+ goto done;
+
+ PRX("axq-qnum: ", axq_qnum);
+ PRX("axq-depth: ", axq_depth);
+ PRX("axq-ampdu_depth: ", axq_ampdu_depth);
+ PRX("axq-stopped ", stopped);
+ PRX("tx-in-progress ", axq_tx_inprogress);
+ PRX("pending-frames ", pending_frames);
+ PRX("txq_headidx: ", txq_headidx);
+ PRX("txq_tailidx: ", txq_tailidx);
+
+ PRQLE("axq_q empty: ", axq_q);
+ PRQLE("axq_acq empty: ", axq_acq);
+ PRQLE("txq_fifo_pending: ", txq_fifo_pending);
+ for (i = 0; i < ATH_TXFIFO_DEPTH; i++) {
+ snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i);
+ PRQLE(tmp, txq_fifo[i]);
+ }
+
+ /* Print out more detailed queue-info */
+ for (i = 0; i <= WME_AC_BK; i++) {
+ struct ath_txq *txq = &(sc->tx.txq[i]);
+ struct ath_atx_ac *ac;
+ struct ath_atx_tid *tid;
+ if (len >= size)
+ goto done;
+ spin_lock_bh(&txq->axq_lock);
+ if (!list_empty(&txq->axq_acq)) {
+ ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac,
+ list);
+ len += snprintf(buf + len, size - len,
+ "txq[%i] first-ac: %p sched: %i\n",
+ i, ac, ac->sched);
+ if (list_empty(&ac->tid_q) || (len >= size))
+ goto done_for;
+ tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
+ list);
+ len += snprintf(buf + len, size - len,
+ " first-tid: %p sched: %i paused: %i\n",
+ tid, tid->sched, tid->paused);
+ }
+ done_for:
+ spin_unlock_bh(&txq->axq_lock);
+ }
+
+done:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static ssize_t read_file_stations(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 64000;
+ struct ath_node *an = NULL;
+ ssize_t retval = 0;
+ int q;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len += snprintf(buf + len, size - len,
+ "Stations:\n"
+ " tid: addr sched paused buf_q-empty an ac\n"
+ " ac: addr sched tid_q-empty txq\n");
+
+ spin_lock(&sc->nodes_lock);
+ list_for_each_entry(an, &sc->nodes, list) {
+ len += snprintf(buf + len, size - len,
+ "%pM\n", an->sta->addr);
+ if (len >= size)
+ goto done;
+
+ for (q = 0; q < WME_NUM_TID; q++) {
+ struct ath_atx_tid *tid = &(an->tid[q]);
+ len += snprintf(buf + len, size - len,
+ " tid: %p %s %s %i %p %p\n",
+ tid, tid->sched ? "sched" : "idle",
+ tid->paused ? "paused" : "running",
+ list_empty(&tid->buf_q),
+ tid->an, tid->ac);
+ if (len >= size)
+ goto done;
+ }
+
+ for (q = 0; q < WME_NUM_AC; q++) {
+ struct ath_atx_ac *ac = &(an->ac[q]);
+ len += snprintf(buf + len, size - len,
+ " ac: %p %s %i %p\n",
+ ac, ac->sched ? "sched" : "idle",
+ list_empty(&ac->tid_q), ac->txq);
+ if (len >= size)
+ goto done;
+ }
+ }
+
+done:
+ spin_unlock(&sc->nodes_lock);
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static ssize_t read_file_misc(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_hw *hw = sc->hw;
+ char *buf;
+ unsigned int len = 0, size = 8000;
+ ssize_t retval = 0;
+ const char *tmp;
+ unsigned int reg;
+ struct ath9k_vif_iter_data iter_data;
+
+ ath9k_calculate_iter_data(hw, NULL, &iter_data);
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ switch (sc->sc_ah->opmode) {
+ case NL80211_IFTYPE_ADHOC:
+ tmp = "ADHOC";
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ tmp = "MESH";
+ break;
+ case NL80211_IFTYPE_AP:
+ tmp = "AP";
+ break;
+ case NL80211_IFTYPE_STATION:
+ tmp = "STATION";
+ break;
+ default:
+ tmp = "???";
+ break;
+ }
+
+ len += snprintf(buf + len, size - len,
+ "curbssid: %pM\n"
+ "OP-Mode: %s(%i)\n"
+ "Beacon-Timer-Register: 0x%x\n",
+ common->curbssid,
+ tmp, (int)(sc->sc_ah->opmode),
+ REG_READ(ah, AR_BEACON_PERIOD));
+
+ reg = REG_READ(ah, AR_TIMER_MODE);
+ len += snprintf(buf + len, size - len, "Timer-Mode-Register: 0x%x (",
+ reg);
+ if (reg & AR_TBTT_TIMER_EN)
+ len += snprintf(buf + len, size - len, "TBTT ");
+ if (reg & AR_DBA_TIMER_EN)
+ len += snprintf(buf + len, size - len, "DBA ");
+ if (reg & AR_SWBA_TIMER_EN)
+ len += snprintf(buf + len, size - len, "SWBA ");
+ if (reg & AR_HCF_TIMER_EN)
+ len += snprintf(buf + len, size - len, "HCF ");
+ if (reg & AR_TIM_TIMER_EN)
+ len += snprintf(buf + len, size - len, "TIM ");
+ if (reg & AR_DTIM_TIMER_EN)
+ len += snprintf(buf + len, size - len, "DTIM ");
+ len += snprintf(buf + len, size - len, ")\n");
+
+ reg = sc->sc_ah->imask;
+ len += snprintf(buf + len, size - len, "imask: 0x%x (", reg);
+ if (reg & ATH9K_INT_SWBA)
+ len += snprintf(buf + len, size - len, "SWBA ");
+ if (reg & ATH9K_INT_BMISS)
+ len += snprintf(buf + len, size - len, "BMISS ");
+ if (reg & ATH9K_INT_CST)
+ len += snprintf(buf + len, size - len, "CST ");
+ if (reg & ATH9K_INT_RX)
+ len += snprintf(buf + len, size - len, "RX ");
+ if (reg & ATH9K_INT_RXHP)
+ len += snprintf(buf + len, size - len, "RXHP ");
+ if (reg & ATH9K_INT_RXLP)
+ len += snprintf(buf + len, size - len, "RXLP ");
+ if (reg & ATH9K_INT_BB_WATCHDOG)
+ len += snprintf(buf + len, size - len, "BB_WATCHDOG ");
+ /* other IRQs could be added here if desired. */
+ len += snprintf(buf + len, size - len, ")\n");
+
+ len += snprintf(buf + len, size - len,
+ "VIF Counts: AP: %i STA: %i MESH: %i WDS: %i"
+ " ADHOC: %i OTHER: %i nvifs: %hi beacon-vifs: %hi\n",
+ iter_data.naps, iter_data.nstations, iter_data.nmeshes,
+ iter_data.nwds, iter_data.nadhocs, iter_data.nothers,
+ sc->nvifs, sc->nbcnvifs);
+
+ len += snprintf(buf + len, size - len,
+ "Calculated-BSSID-Mask: %pM\n",
+ iter_data.mask);
if (len > size)
len = size;
@@ -630,33 +784,35 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
return retval;
}
-void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_buf *bf, struct ath_tx_status *ts)
+void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, struct ath_txq *txq)
{
- TX_STAT_INC(txq->axq_qnum, tx_pkts_all);
- sc->debug.stats.txstats[txq->axq_qnum].tx_bytes_all += bf->bf_mpdu->len;
+ int qnum = txq->axq_qnum;
+
+ TX_STAT_INC(qnum, tx_pkts_all);
+ sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
if (bf_isampdu(bf)) {
if (bf_isxretried(bf))
- TX_STAT_INC(txq->axq_qnum, a_xretries);
+ TX_STAT_INC(qnum, a_xretries);
else
- TX_STAT_INC(txq->axq_qnum, a_completed);
+ TX_STAT_INC(qnum, a_completed);
} else {
- TX_STAT_INC(txq->axq_qnum, completed);
+ TX_STAT_INC(qnum, completed);
}
if (ts->ts_status & ATH9K_TXERR_FIFO)
- TX_STAT_INC(txq->axq_qnum, fifo_underrun);
+ TX_STAT_INC(qnum, fifo_underrun);
if (ts->ts_status & ATH9K_TXERR_XTXOP)
- TX_STAT_INC(txq->axq_qnum, xtxop);
+ TX_STAT_INC(qnum, xtxop);
if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
- TX_STAT_INC(txq->axq_qnum, timer_exp);
+ TX_STAT_INC(qnum, timer_exp);
if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
- TX_STAT_INC(txq->axq_qnum, desc_cfg_err);
+ TX_STAT_INC(qnum, desc_cfg_err);
if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
- TX_STAT_INC(txq->axq_qnum, data_underrun);
+ TX_STAT_INC(qnum, data_underrun);
if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
- TX_STAT_INC(txq->axq_qnum, delim_underrun);
+ TX_STAT_INC(qnum, delim_underrun);
}
static const struct file_operations fops_xmit = {
@@ -666,6 +822,20 @@ static const struct file_operations fops_xmit = {
.llseek = default_llseek,
};
+static const struct file_operations fops_stations = {
+ .read = read_file_stations,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations fops_misc = {
+ .read = read_file_misc,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -871,16 +1041,49 @@ static const struct file_operations fops_regval = {
.llseek = default_llseek,
};
+#define REGDUMP_LINE_SIZE 20
+
+static int open_file_regdump(struct inode *inode, struct file *file)
+{
+ struct ath_softc *sc = inode->i_private;
+ unsigned int len = 0;
+ u8 *buf;
+ int i;
+ unsigned long num_regs, regdump_len, max_reg_offset;
+
+ max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500;
+ num_regs = max_reg_offset / 4 + 1;
+ regdump_len = num_regs * REGDUMP_LINE_SIZE + 1;
+ buf = vmalloc(regdump_len);
+ if (!buf)
+ return -ENOMEM;
+
+ ath9k_ps_wakeup(sc);
+ for (i = 0; i < num_regs; i++)
+ len += scnprintf(buf + len, regdump_len - len,
+ "0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2));
+ ath9k_ps_restore(sc);
+
+ file->private_data = buf;
+
+ return 0;
+}
+
+static const struct file_operations fops_regdump = {
+ .open = open_file_regdump,
+ .read = ath9k_debugfs_read_buf,
+ .release = ath9k_debugfs_release_buf,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek, /* read accesses f_pos */
+};
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
- if (!ath9k_debugfs_root)
- return -ENOENT;
-
- sc->debug.debugfs_phy = debugfs_create_dir(wiphy_name(sc->hw->wiphy),
- ath9k_debugfs_root);
+ sc->debug.debugfs_phy = debugfs_create_dir("ath9k",
+ sc->hw->wiphy->debugfsdir);
if (!sc->debug.debugfs_phy)
return -ENOMEM;
@@ -906,6 +1109,14 @@ int ath9k_init_debug(struct ath_hw *ah)
sc, &fops_xmit))
goto err;
+ if (!debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_stations))
+ goto err;
+
+ if (!debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_misc))
+ goto err;
+
if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy,
sc, &fops_recv))
goto err;
@@ -930,32 +1141,14 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca))
goto err;
+ if (!debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_regdump))
+ goto err;
+
sc->debug.regidx = 0;
return 0;
err:
- ath9k_exit_debug(ah);
- return -ENOMEM;
-}
-
-void ath9k_exit_debug(struct ath_hw *ah)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
debugfs_remove_recursive(sc->debug.debugfs_phy);
-}
-
-int ath9k_debug_create_root(void)
-{
- ath9k_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!ath9k_debugfs_root)
- return -ENOENT;
-
- return 0;
-}
-
-void ath9k_debug_remove_root(void)
-{
- debugfs_remove(ath9k_debugfs_root);
- ath9k_debugfs_root = NULL;
+ sc->debug.debugfs_phy = NULL;
+ return -ENOMEM;
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index bb0823242ba0..59338de0ce19 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -89,7 +89,8 @@ struct ath_interrupt_stats {
* @queued: Total MPDUs (non-aggr) queued
* @completed: Total MPDUs (non-aggr) completed
* @a_aggr: Total no. of aggregates queued
- * @a_queued: Total AMPDUs queued
+ * @a_queued_hw: Total AMPDUs queued to hardware
+ * @a_queued_sw: Total AMPDUs queued to software queues
* @a_completed: Total AMPDUs completed
* @a_retries: No. of AMPDUs retried (SW)
* @a_xretries: No. of AMPDUs dropped due to xretries
@@ -102,6 +103,9 @@ struct ath_interrupt_stats {
* @desc_cfg_err: Descriptor configuration errors
* @data_urn: TX data underrun errors
* @delim_urn: TX delimiter underrun errors
+ * @puttxbuf: Number of times hardware was given txbuf to write.
+ * @txstart: Number of times hardware was told to start tx.
+ * @txprocdesc: Number of times tx descriptor was processed
*/
struct ath_tx_stats {
u32 tx_pkts_all;
@@ -109,7 +113,8 @@ struct ath_tx_stats {
u32 queued;
u32 completed;
u32 a_aggr;
- u32 a_queued;
+ u32 a_queued_hw;
+ u32 a_queued_sw;
u32 a_completed;
u32 a_retries;
u32 a_xretries;
@@ -119,6 +124,9 @@ struct ath_tx_stats {
u32 desc_cfg_err;
u32 data_underrun;
u32 delim_underrun;
+ u32 puttxbuf;
+ u32 txstart;
+ u32 txprocdesc;
};
/**
@@ -164,13 +172,10 @@ struct ath9k_debug {
};
int ath9k_init_debug(struct ath_hw *ah);
-void ath9k_exit_debug(struct ath_hw *ah);
-int ath9k_debug_create_root(void);
-void ath9k_debug_remove_root(void);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
-void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_buf *bf, struct ath_tx_status *ts);
+void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, struct ath_txq *txq);
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
#else
@@ -180,28 +185,15 @@ static inline int ath9k_init_debug(struct ath_hw *ah)
return 0;
}
-static inline void ath9k_exit_debug(struct ath_hw *ah)
-{
-}
-
-static inline int ath9k_debug_create_root(void)
-{
- return 0;
-}
-
-static inline void ath9k_debug_remove_root(void)
-{
-}
-
static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
enum ath9k_int status)
{
}
static inline void ath_debug_stat_tx(struct ath_softc *sc,
- struct ath_txq *txq,
struct ath_buf *bf,
- struct ath_tx_status *ts)
+ struct ath_tx_status *ts,
+ struct ath_txq *txq)
{
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index 1266333f586d..8c18bed3a558 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -89,6 +89,38 @@ bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
return false;
}
+void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
+ int eep_start_loc, int size)
+{
+ int i = 0, j, addr;
+ u32 addrdata[8];
+ u32 data[8];
+
+ for (addr = 0; addr < size; addr++) {
+ addrdata[i] = AR5416_EEPROM_OFFSET +
+ ((addr + eep_start_loc) << AR5416_EEPROM_S);
+ i++;
+ if (i == 8) {
+ REG_READ_MULTI(ah, addrdata, data, i);
+
+ for (j = 0; j < i; j++) {
+ *eep_data = data[j];
+ eep_data++;
+ }
+ i = 0;
+ }
+ }
+
+ if (i != 0) {
+ REG_READ_MULTI(ah, addrdata, data, i);
+
+ for (j = 0; j < i; j++) {
+ *eep_data = data[j];
+ eep_data++;
+ }
+ }
+}
+
bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data)
{
return common->bus_ops->eeprom_read(common, off, data);
@@ -234,22 +266,22 @@ void ath9k_hw_get_target_powers(struct ath_hw *ah,
u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
bool is2GHz, int num_band_edges)
{
- u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
+ u16 twiceMaxEdgePower = MAX_RATE_POWER;
int i;
for (i = 0; (i < num_band_edges) &&
(pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) {
- twiceMaxEdgePower = pRdEdgesPower[i].tPower;
+ twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i].ctl);
break;
} else if ((i > 0) &&
(freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
is2GHz))) {
if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel,
is2GHz) < freq &&
- pRdEdgesPower[i - 1].flag) {
+ CTL_EDGE_FLAGS(pRdEdgesPower[i - 1].ctl)) {
twiceMaxEdgePower =
- pRdEdgesPower[i - 1].tPower;
+ CTL_EDGE_TPOWER(pRdEdgesPower[i - 1].ctl);
}
break;
}
@@ -273,12 +305,225 @@ void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
break;
default:
- ath_print(common, ATH_DBG_EEPROM,
- "Invalid chainmask configuration\n");
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Invalid chainmask configuration\n");
break;
}
}
+void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ void *pRawDataSet,
+ u8 *bChans, u16 availPiers,
+ u16 tPdGainOverlap,
+ u16 *pPdGainBoundaries, u8 *pPDADCValues,
+ u16 numXpdGains)
+{
+ int i, j, k;
+ int16_t ss;
+ u16 idxL = 0, idxR = 0, numPiers;
+ static u8 vpdTableL[AR5416_NUM_PD_GAINS]
+ [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
+ static u8 vpdTableR[AR5416_NUM_PD_GAINS]
+ [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
+ static u8 vpdTableI[AR5416_NUM_PD_GAINS]
+ [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
+
+ u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
+ u8 minPwrT4[AR5416_NUM_PD_GAINS];
+ u8 maxPwrT4[AR5416_NUM_PD_GAINS];
+ int16_t vpdStep;
+ int16_t tmpVal;
+ u16 sizeCurrVpdTable, maxIndex, tgtIndex;
+ bool match;
+ int16_t minDelta = 0;
+ struct chan_centers centers;
+ int pdgain_boundary_default;
+ struct cal_data_per_freq *data_def = pRawDataSet;
+ struct cal_data_per_freq_4k *data_4k = pRawDataSet;
+ struct cal_data_per_freq_ar9287 *data_9287 = pRawDataSet;
+ bool eeprom_4k = AR_SREV_9285(ah) || AR_SREV_9271(ah);
+ int intercepts;
+
+ if (AR_SREV_9287(ah))
+ intercepts = AR9287_PD_GAIN_ICEPTS;
+ else
+ intercepts = AR5416_PD_GAIN_ICEPTS;
+
+ memset(&minPwrT4, 0, AR5416_NUM_PD_GAINS);
+ ath9k_hw_get_channel_centers(ah, chan, &centers);
+
+ for (numPiers = 0; numPiers < availPiers; numPiers++) {
+ if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
+ break;
+ }
+
+ match = ath9k_hw_get_lower_upper_index((u8)FREQ2FBIN(centers.synth_center,
+ IS_CHAN_2GHZ(chan)),
+ bChans, numPiers, &idxL, &idxR);
+
+ if (match) {
+ if (AR_SREV_9287(ah)) {
+ /* FIXME: array overrun? */
+ for (i = 0; i < numXpdGains; i++) {
+ minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
+ maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
+ ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ data_9287[idxL].pwrPdg[i],
+ data_9287[idxL].vpdPdg[i],
+ intercepts,
+ vpdTableI[i]);
+ }
+ } else if (eeprom_4k) {
+ for (i = 0; i < numXpdGains; i++) {
+ minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
+ maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
+ ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ data_4k[idxL].pwrPdg[i],
+ data_4k[idxL].vpdPdg[i],
+ intercepts,
+ vpdTableI[i]);
+ }
+ } else {
+ for (i = 0; i < numXpdGains; i++) {
+ minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
+ maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
+ ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ data_def[idxL].pwrPdg[i],
+ data_def[idxL].vpdPdg[i],
+ intercepts,
+ vpdTableI[i]);
+ }
+ }
+ } else {
+ for (i = 0; i < numXpdGains; i++) {
+ if (AR_SREV_9287(ah)) {
+ pVpdL = data_9287[idxL].vpdPdg[i];
+ pPwrL = data_9287[idxL].pwrPdg[i];
+ pVpdR = data_9287[idxR].vpdPdg[i];
+ pPwrR = data_9287[idxR].pwrPdg[i];
+ } else if (eeprom_4k) {
+ pVpdL = data_4k[idxL].vpdPdg[i];
+ pPwrL = data_4k[idxL].pwrPdg[i];
+ pVpdR = data_4k[idxR].vpdPdg[i];
+ pPwrR = data_4k[idxR].pwrPdg[i];
+ } else {
+ pVpdL = data_def[idxL].vpdPdg[i];
+ pPwrL = data_def[idxL].pwrPdg[i];
+ pVpdR = data_def[idxR].vpdPdg[i];
+ pPwrR = data_def[idxR].pwrPdg[i];
+ }
+
+ minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
+
+ maxPwrT4[i] =
+ min(pPwrL[intercepts - 1],
+ pPwrR[intercepts - 1]);
+
+
+ ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ pPwrL, pVpdL,
+ intercepts,
+ vpdTableL[i]);
+ ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ pPwrR, pVpdR,
+ intercepts,
+ vpdTableR[i]);
+
+ for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
+ vpdTableI[i][j] =
+ (u8)(ath9k_hw_interpolate((u16)
+ FREQ2FBIN(centers.
+ synth_center,
+ IS_CHAN_2GHZ
+ (chan)),
+ bChans[idxL], bChans[idxR],
+ vpdTableL[i][j], vpdTableR[i][j]));
+ }
+ }
+ }
+
+ k = 0;
+
+ for (i = 0; i < numXpdGains; i++) {
+ if (i == (numXpdGains - 1))
+ pPdGainBoundaries[i] =
+ (u16)(maxPwrT4[i] / 2);
+ else
+ pPdGainBoundaries[i] =
+ (u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4);
+
+ pPdGainBoundaries[i] =
+ min((u16)MAX_RATE_POWER, pPdGainBoundaries[i]);
+
+ if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) {
+ minDelta = pPdGainBoundaries[0] - 23;
+ pPdGainBoundaries[0] = 23;
+ } else {
+ minDelta = 0;
+ }
+
+ if (i == 0) {
+ if (AR_SREV_9280_20_OR_LATER(ah))
+ ss = (int16_t)(0 - (minPwrT4[i] / 2));
+ else
+ ss = 0;
+ } else {
+ ss = (int16_t)((pPdGainBoundaries[i - 1] -
+ (minPwrT4[i] / 2)) -
+ tPdGainOverlap + 1 + minDelta);
+ }
+ vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
+ vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
+
+ while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
+ tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
+ pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
+ ss++;
+ }
+
+ sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
+ tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap -
+ (minPwrT4[i] / 2));
+ maxIndex = (tgtIndex < sizeCurrVpdTable) ?
+ tgtIndex : sizeCurrVpdTable;
+
+ while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
+ pPDADCValues[k++] = vpdTableI[i][ss++];
+ }
+
+ vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
+ vpdTableI[i][sizeCurrVpdTable - 2]);
+ vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
+
+ if (tgtIndex >= maxIndex) {
+ while ((ss <= tgtIndex) &&
+ (k < (AR5416_NUM_PDADC_VALUES - 1))) {
+ tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
+ (ss - maxIndex + 1) * vpdStep));
+ pPDADCValues[k++] = (u8)((tmpVal > 255) ?
+ 255 : tmpVal);
+ ss++;
+ }
+ }
+ }
+
+ if (eeprom_4k)
+ pdgain_boundary_default = 58;
+ else
+ pdgain_boundary_default = pPdGainBoundaries[i - 1];
+
+ while (i < AR5416_PD_GAINS_IN_MASK) {
+ pPdGainBoundaries[i] = pdgain_boundary_default;
+ i++;
+ }
+
+ while (k < AR5416_NUM_PDADC_VALUES) {
+ pPDADCValues[k] = pPDADCValues[k - 1];
+ k++;
+ }
+}
+
int ath9k_hw_eeprom_init(struct ath_hw *ah)
{
int status;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index dacb45e1b906..bd82447f5b78 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -17,12 +17,12 @@
#ifndef EEPROM_H
#define EEPROM_H
+#define AR_EEPROM_MODAL_SPURS 5
+
#include "../ath.h"
#include <net/cfg80211.h>
#include "ar9003_eeprom.h"
-#define AH_USE_EEPROM 0x1
-
#ifdef __BIG_ENDIAN
#define AR5416_EEPROM_MAGIC 0x5aa5
#else
@@ -149,8 +149,6 @@
#define AR5416_NUM_PD_GAINS 4
#define AR5416_PD_GAINS_IN_MASK 4
#define AR5416_PD_GAIN_ICEPTS 5
-#define AR5416_EEPROM_MODAL_SPURS 5
-#define AR5416_MAX_RATE_POWER 63
#define AR5416_NUM_PDADC_VALUES 128
#define AR5416_BCHAN_UNUSED 0xFF
#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
@@ -175,8 +173,6 @@
#define AR5416_EEP4K_NUM_CTLS 12
#define AR5416_EEP4K_NUM_BAND_EDGES 4
#define AR5416_EEP4K_NUM_PD_GAINS 2
-#define AR5416_EEP4K_PD_GAINS_IN_MASK 4
-#define AR5416_EEP4K_PD_GAIN_ICEPTS 5
#define AR5416_EEP4K_MAX_CHAINS 1
#define AR9280_TX_GAIN_TABLE_SIZE 22
@@ -198,41 +194,30 @@
#define AR9287_NUM_2G_40_TARGET_POWERS 3
#define AR9287_NUM_CTLS 12
#define AR9287_NUM_BAND_EDGES 4
-#define AR9287_NUM_PD_GAINS 4
-#define AR9287_PD_GAINS_IN_MASK 4
#define AR9287_PD_GAIN_ICEPTS 1
-#define AR9287_EEPROM_MODAL_SPURS 5
-#define AR9287_MAX_RATE_POWER 63
-#define AR9287_NUM_PDADC_VALUES 128
-#define AR9287_NUM_RATES 16
-#define AR9287_BCHAN_UNUSED 0xFF
-#define AR9287_MAX_PWR_RANGE_IN_HALF_DB 64
-#define AR9287_OPFLAGS_11A 0x01
-#define AR9287_OPFLAGS_11G 0x02
-#define AR9287_OPFLAGS_2G_HT40 0x08
-#define AR9287_OPFLAGS_2G_HT20 0x20
-#define AR9287_OPFLAGS_5G_HT40 0x04
-#define AR9287_OPFLAGS_5G_HT20 0x10
#define AR9287_EEPMISC_BIG_ENDIAN 0x01
#define AR9287_EEPMISC_WOW 0x02
#define AR9287_MAX_CHAINS 2
#define AR9287_ANT_16S 32
-#define AR9287_custdatasize 20
-
-#define AR9287_NUM_ANT_CHAIN_FIELDS 6
-#define AR9287_NUM_ANT_COMMON_FIELDS 4
-#define AR9287_SIZE_ANT_CHAIN_FIELD 2
-#define AR9287_SIZE_ANT_COMMON_FIELD 4
-#define AR9287_ANT_CHAIN_MASK 0x3
-#define AR9287_ANT_COMMON_MASK 0xf
-#define AR9287_CHAIN_0_IDX 0
-#define AR9287_CHAIN_1_IDX 1
+
#define AR9287_DATA_SZ 32
#define AR9287_PWR_TABLE_OFFSET_DB -5
#define AR9287_CHECKSUM_LOCATION (AR9287_EEP_START_LOC + 1)
+#define CTL_EDGE_TPOWER(_ctl) ((_ctl) & 0x3f)
+#define CTL_EDGE_FLAGS(_ctl) (((_ctl) >> 6) & 0x03)
+
+#define LNA_CTL_BUF_MODE BIT(0)
+#define LNA_CTL_ISEL_LO BIT(1)
+#define LNA_CTL_ISEL_HI BIT(2)
+#define LNA_CTL_BUF_IN BIT(3)
+#define LNA_CTL_FEM_BAND BIT(4)
+#define LNA_CTL_LOCAL_BIAS BIT(5)
+#define LNA_CTL_FORCE_XPA BIT(6)
+#define LNA_CTL_USE_ANT1 BIT(7)
+
enum eeprom_param {
EEP_NFTHRESH_5,
EEP_NFTHRESH_2,
@@ -268,6 +253,7 @@ enum eeprom_param {
EEP_PAPRD,
EEP_MODAL_VER,
EEP_ANT_DIV_CTL1,
+ EEP_CHAIN_MASK_REDUCE
};
enum ar5416_rates {
@@ -378,15 +364,12 @@ struct modal_eep_header {
u8 xatten2Margin[AR5416_MAX_CHAINS];
u8 ob_ch1;
u8 db_ch1;
- u8 useAnt1:1,
- force_xpaon:1,
- local_bias:1,
- femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1;
+ u8 lna_ctl;
u8 miscBits;
u16 xpaBiasLvlFreq[3];
u8 futureModal[6];
- struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS];
+ struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS];
} __packed;
struct calDataPerFreqOpLoop {
@@ -454,7 +437,7 @@ struct modal_eep_4k_header {
u8 db2_4:4, reserved:4;
#endif
u8 futureModal[4];
- struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS];
+ struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS];
} __packed;
struct base_eep_ar9287_header {
@@ -512,7 +495,7 @@ struct modal_eep_ar9287_header {
u8 ob_qam;
u8 ob_pal_off;
u8 futureModal[30];
- struct spur_chan spurChans[AR9287_EEPROM_MODAL_SPURS];
+ struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS];
} __packed;
struct cal_data_per_freq {
@@ -521,8 +504,8 @@ struct cal_data_per_freq {
} __packed;
struct cal_data_per_freq_4k {
- u8 pwrPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_EEP4K_PD_GAIN_ICEPTS];
- u8 vpdPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_EEP4K_PD_GAIN_ICEPTS];
+ u8 pwrPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
+ u8 vpdPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
} __packed;
struct cal_target_power_leg {
@@ -535,18 +518,10 @@ struct cal_target_power_ht {
u8 tPow2x[8];
} __packed;
-
-#ifdef __BIG_ENDIAN_BITFIELD
struct cal_ctl_edges {
u8 bChannel;
- u8 flag:2, tPower:6;
+ u8 ctl;
} __packed;
-#else
-struct cal_ctl_edges {
- u8 bChannel;
- u8 tPower:6, flag:2;
-} __packed;
-#endif
struct cal_data_op_loop_ar9287 {
u8 pwrPdg[2][5];
@@ -556,8 +531,8 @@ struct cal_data_op_loop_ar9287 {
} __packed;
struct cal_data_per_freq_ar9287 {
- u8 pwrPdg[AR9287_NUM_PD_GAINS][AR9287_PD_GAIN_ICEPTS];
- u8 vpdPdg[AR9287_NUM_PD_GAINS][AR9287_PD_GAIN_ICEPTS];
+ u8 pwrPdg[AR5416_NUM_PD_GAINS][AR9287_PD_GAIN_ICEPTS];
+ u8 vpdPdg[AR5416_NUM_PD_GAINS][AR9287_PD_GAIN_ICEPTS];
} __packed;
union cal_data_per_freq_ar9287_u {
@@ -672,15 +647,12 @@ struct eeprom_ops {
bool (*fill_eeprom)(struct ath_hw *hw);
int (*get_eeprom_ver)(struct ath_hw *hw);
int (*get_eeprom_rev)(struct ath_hw *hw);
- u8 (*get_num_ant_config)(struct ath_hw *hw,
- enum ath9k_hal_freq_band band);
- u32 (*get_eeprom_antenna_cfg)(struct ath_hw *hw,
- struct ath9k_channel *chan);
void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
void (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan,
u16 cfgCtl, u8 twiceAntennaReduction,
- u8 twiceMaxRegulatoryPower, u8 powerLimit);
+ u8 twiceMaxRegulatoryPower, u8 powerLimit,
+ bool test);
u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
};
@@ -693,6 +665,8 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
u16 *indexL, u16 *indexR);
bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data);
+void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
+ int eep_start_loc, int size);
void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
u8 *pVpdList, u16 numIntercepts,
u8 *pRetVpdList);
@@ -713,6 +687,14 @@ u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah);
int ath9k_hw_eeprom_init(struct ath_hw *ah);
+void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
+ struct ath9k_channel *chan,
+ void *pRawDataSet,
+ u8 *bChans, u16 availPiers,
+ u16 tPdGainOverlap,
+ u16 *pPdGainBoundaries, u8 *pPDADCValues,
+ u16 numXpdGains);
+
#define ar5416_get_ntxchains(_txchainmask) \
(((_txchainmask >> 2) & 1) + \
((_txchainmask >> 1) & 1) + (_txchainmask & 1))
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 4fa4d8e28c64..bc77a308c901 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -27,33 +27,52 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF);
}
-static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
-{
#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
+
+static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
+{
struct ath_common *common = ath9k_hw_common(ah);
u16 *eep_data = (u16 *)&ah->eeprom.map4k;
- int addr, eep_start_loc = 0;
-
- eep_start_loc = 64;
-
- if (!ath9k_hw_use_flash(ah)) {
- ath_print(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
- }
+ int addr, eep_start_loc = 64;
for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
- ath_print(common, ATH_DBG_EEPROM,
- "Unable to read eeprom region\n");
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Unable to read eeprom region\n");
return false;
}
eep_data++;
}
return true;
-#undef SIZE_EEPROM_4K
}
+static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.map4k;
+
+ ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, 64, SIZE_EEPROM_4K);
+
+ return true;
+}
+
+static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_use_flash(ah)) {
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Reading from EEPROM, not flash\n");
+ }
+
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ return __ath9k_hw_usb_4k_fill_eeprom(ah);
+ else
+ return __ath9k_hw_4k_fill_eeprom(ah);
+}
+
+#undef SIZE_EEPROM_4K
+
static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
{
#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
@@ -69,13 +88,12 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
if (!ath9k_hw_use_flash(ah)) {
if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET,
&magic)) {
- ath_print(common, ATH_DBG_FATAL,
- "Reading Magic # failed\n");
+ ath_err(common, "Reading Magic # failed\n");
return false;
}
- ath_print(common, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -90,16 +108,15 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
eepdata++;
}
} else {
- ath_print(common, ATH_DBG_FATAL,
- "Invalid EEPROM Magic. "
- "endianness mismatch.\n");
+ ath_err(common,
+ "Invalid EEPROM Magic. Endianness mismatch.\n");
return -EINVAL;
}
}
}
- ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
- need_swap ? "True" : "False");
+ ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ need_swap ? "True" : "False");
if (need_swap)
el = swab16(ah->eeprom.map4k.baseEepHeader.length);
@@ -120,8 +137,8 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
u32 integer;
u16 word;
- ath_print(common, ATH_DBG_EEPROM,
- "EEPROM Endianness is not native.. Changing\n");
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "EEPROM Endianness is not native. Changing\n");
word = swab16(eep->baseEepHeader.length);
eep->baseEepHeader.length = word;
@@ -155,7 +172,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
eep->modalHeader.antCtrlChain[i] = integer;
}
- for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) {
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
word = swab16(eep->modalHeader.spurChans[i].spurChan);
eep->modalHeader.spurChans[i].spurChan = word;
}
@@ -163,9 +180,8 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
- ath_print(common, ATH_DBG_FATAL,
- "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
- sum, ah->eep_ops->get_eeprom_ver(ah));
+ ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
+ sum, ah->eep_ops->get_eeprom_ver(ah));
return -EINVAL;
}
@@ -230,173 +246,6 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
}
}
-static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hw *ah,
- struct ath9k_channel *chan,
- struct cal_data_per_freq_4k *pRawDataSet,
- u8 *bChans, u16 availPiers,
- u16 tPdGainOverlap,
- u16 *pPdGainBoundaries, u8 *pPDADCValues,
- u16 numXpdGains)
-{
-#define TMP_VAL_VPD_TABLE \
- ((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep));
- int i, j, k;
- int16_t ss;
- u16 idxL = 0, idxR = 0, numPiers;
- static u8 vpdTableL[AR5416_EEP4K_NUM_PD_GAINS]
- [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
- static u8 vpdTableR[AR5416_EEP4K_NUM_PD_GAINS]
- [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
- static u8 vpdTableI[AR5416_EEP4K_NUM_PD_GAINS]
- [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
-
- u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
- u8 minPwrT4[AR5416_EEP4K_NUM_PD_GAINS];
- u8 maxPwrT4[AR5416_EEP4K_NUM_PD_GAINS];
- int16_t vpdStep;
- int16_t tmpVal;
- u16 sizeCurrVpdTable, maxIndex, tgtIndex;
- bool match;
- int16_t minDelta = 0;
- struct chan_centers centers;
-#define PD_GAIN_BOUNDARY_DEFAULT 58;
-
- memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS);
- ath9k_hw_get_channel_centers(ah, chan, &centers);
-
- for (numPiers = 0; numPiers < availPiers; numPiers++) {
- if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
- break;
- }
-
- match = ath9k_hw_get_lower_upper_index(
- (u8)FREQ2FBIN(centers.synth_center,
- IS_CHAN_2GHZ(chan)), bChans, numPiers,
- &idxL, &idxR);
-
- if (match) {
- for (i = 0; i < numXpdGains; i++) {
- minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
- maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
- ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- pRawDataSet[idxL].pwrPdg[i],
- pRawDataSet[idxL].vpdPdg[i],
- AR5416_EEP4K_PD_GAIN_ICEPTS,
- vpdTableI[i]);
- }
- } else {
- for (i = 0; i < numXpdGains; i++) {
- pVpdL = pRawDataSet[idxL].vpdPdg[i];
- pPwrL = pRawDataSet[idxL].pwrPdg[i];
- pVpdR = pRawDataSet[idxR].vpdPdg[i];
- pPwrR = pRawDataSet[idxR].pwrPdg[i];
-
- minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
-
- maxPwrT4[i] =
- min(pPwrL[AR5416_EEP4K_PD_GAIN_ICEPTS - 1],
- pPwrR[AR5416_EEP4K_PD_GAIN_ICEPTS - 1]);
-
-
- ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- pPwrL, pVpdL,
- AR5416_EEP4K_PD_GAIN_ICEPTS,
- vpdTableL[i]);
- ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- pPwrR, pVpdR,
- AR5416_EEP4K_PD_GAIN_ICEPTS,
- vpdTableR[i]);
-
- for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
- vpdTableI[i][j] =
- (u8)(ath9k_hw_interpolate((u16)
- FREQ2FBIN(centers.
- synth_center,
- IS_CHAN_2GHZ
- (chan)),
- bChans[idxL], bChans[idxR],
- vpdTableL[i][j], vpdTableR[i][j]));
- }
- }
- }
-
- k = 0;
-
- for (i = 0; i < numXpdGains; i++) {
- if (i == (numXpdGains - 1))
- pPdGainBoundaries[i] =
- (u16)(maxPwrT4[i] / 2);
- else
- pPdGainBoundaries[i] =
- (u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4);
-
- pPdGainBoundaries[i] =
- min((u16)AR5416_MAX_RATE_POWER, pPdGainBoundaries[i]);
-
- if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) {
- minDelta = pPdGainBoundaries[0] - 23;
- pPdGainBoundaries[0] = 23;
- } else {
- minDelta = 0;
- }
-
- if (i == 0) {
- if (AR_SREV_9280_20_OR_LATER(ah))
- ss = (int16_t)(0 - (minPwrT4[i] / 2));
- else
- ss = 0;
- } else {
- ss = (int16_t)((pPdGainBoundaries[i - 1] -
- (minPwrT4[i] / 2)) -
- tPdGainOverlap + 1 + minDelta);
- }
- vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
- vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
-
- while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
- tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
- pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
- ss++;
- }
-
- sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
- tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap -
- (minPwrT4[i] / 2));
- maxIndex = (tgtIndex < sizeCurrVpdTable) ?
- tgtIndex : sizeCurrVpdTable;
-
- while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1)))
- pPDADCValues[k++] = vpdTableI[i][ss++];
-
- vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
- vpdTableI[i][sizeCurrVpdTable - 2]);
- vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
-
- if (tgtIndex >= maxIndex) {
- while ((ss <= tgtIndex) &&
- (k < (AR5416_NUM_PDADC_VALUES - 1))) {
- tmpVal = (int16_t) TMP_VAL_VPD_TABLE;
- pPDADCValues[k++] = (u8)((tmpVal > 255) ?
- 255 : tmpVal);
- ss++;
- }
- }
- }
-
- while (i < AR5416_EEP4K_PD_GAINS_IN_MASK) {
- pPdGainBoundaries[i] = PD_GAIN_BOUNDARY_DEFAULT;
- i++;
- }
-
- while (k < AR5416_NUM_PDADC_VALUES) {
- pPDADCValues[k] = pPDADCValues[k - 1];
- k++;
- }
-
- return;
-#undef TMP_VAL_VPD_TABLE
-}
-
static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
struct ath9k_channel *chan,
int16_t *pTxPowerIndexOffset)
@@ -407,7 +256,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
u8 *pCalBChans = NULL;
u16 pdGainOverlap_t2;
static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
- u16 gainBoundaries[AR5416_EEP4K_PD_GAINS_IN_MASK];
+ u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
u16 numPiers, i, j;
u16 numXpdGain, xpdMask;
u16 xpdGainValues[AR5416_EEP4K_NUM_PD_GAINS] = { 0, 0 };
@@ -429,12 +278,12 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
numXpdGain = 0;
- for (i = 1; i <= AR5416_EEP4K_PD_GAINS_IN_MASK; i++) {
- if ((xpdMask >> (AR5416_EEP4K_PD_GAINS_IN_MASK - i)) & 1) {
+ for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
+ if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
if (numXpdGain >= AR5416_EEP4K_NUM_PD_GAINS)
break;
xpdGainValues[numXpdGain] =
- (u16)(AR5416_EEP4K_PD_GAINS_IN_MASK - i);
+ (u16)(AR5416_PD_GAINS_IN_MASK - i);
numXpdGain++;
}
}
@@ -458,7 +307,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
if (pEepData->baseEepHeader.txMask & (1 << i)) {
pRawDataset = pEepData->calPierData2G[i];
- ath9k_hw_get_4k_gain_boundaries_pdadcs(ah, chan,
+ ath9k_hw_get_gain_boundaries_pdadcs(ah, chan,
pRawDataset, pCalBChans,
numPiers, pdGainOverlap_t2,
gainBoundaries,
@@ -488,21 +337,20 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
((pdadcValues[4 * j + 3] & 0xFF) << 24);
REG_WRITE(ah, regOffset, reg32);
- ath_print(common, ATH_DBG_EEPROM,
- "PDADC (%d,%4x): %4.4x %8.8x\n",
- i, regChainOffset, regOffset,
- reg32);
- ath_print(common, ATH_DBG_EEPROM,
- "PDADC: Chain %d | "
- "PDADC %3d Value %3d | "
- "PDADC %3d Value %3d | "
- "PDADC %3d Value %3d | "
- "PDADC %3d Value %3d |\n",
- i, 4 * j, pdadcValues[4 * j],
- 4 * j + 1, pdadcValues[4 * j + 1],
- 4 * j + 2, pdadcValues[4 * j + 2],
- 4 * j + 3,
- pdadcValues[4 * j + 3]);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "PDADC (%d,%4x): %4.4x %8.8x\n",
+ i, regChainOffset, regOffset,
+ reg32);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "PDADC: Chain %d | "
+ "PDADC %3d Value %3d | "
+ "PDADC %3d Value %3d | "
+ "PDADC %3d Value %3d | "
+ "PDADC %3d Value %3d |\n",
+ i, 4 * j, pdadcValues[4 * j],
+ 4 * j + 1, pdadcValues[4 * j + 1],
+ 4 * j + 2, pdadcValues[4 * j + 2],
+ 4 * j + 3, pdadcValues[4 * j + 3]);
regOffset += 4;
}
@@ -532,14 +380,16 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
int i;
int16_t twiceLargestAntenna;
u16 twiceMinEdgePower;
- u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
+ u16 twiceMaxEdgePower = MAX_RATE_POWER;
u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
- u16 numCtlModes, *pCtlMode, ctlMode, freq;
+ u16 numCtlModes;
+ const u16 *pCtlMode;
+ u16 ctlMode, freq;
struct chan_centers centers;
struct cal_ctl_data_4k *rep;
struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
static const u16 tpScaleReductionTable[5] =
- { 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
+ { 0, 3, 6, 9, MAX_RATE_POWER };
struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
0, { 0, 0, 0, 0}
};
@@ -550,10 +400,10 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
0, {0, 0, 0, 0}
};
- u16 ctlModesFor11g[] =
- { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT,
- CTL_2GHT40
- };
+ static const u16 ctlModesFor11g[] = {
+ CTL_11B, CTL_11G, CTL_2GHT20,
+ CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
+ };
ath9k_hw_get_channel_centers(ah, chan, &centers);
@@ -615,7 +465,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
ah->eep_ops->get_eeprom_rev(ah) <= 2)
- twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
pEepData->ctlIndex[i]; i++) {
@@ -726,7 +576,7 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
u16 cfgCtl,
u8 twiceAntennaReduction,
u8 twiceMaxRegulatoryPower,
- u8 powerLimit)
+ u8 powerLimit, bool test)
{
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
@@ -751,15 +601,20 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset);
+ regulatory->max_power_level = 0;
for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
- if (ratesArray[i] > AR5416_MAX_RATE_POWER)
- ratesArray[i] = AR5416_MAX_RATE_POWER;
+ if (ratesArray[i] > MAX_RATE_POWER)
+ ratesArray[i] = MAX_RATE_POWER;
+
+ if (ratesArray[i] > regulatory->max_power_level)
+ regulatory->max_power_level = ratesArray[i];
}
+ if (test)
+ return;
/* Update regulatory */
-
i = rate6mb;
if (IS_CHAN_HT40(chan))
i = rateHt40_0;
@@ -934,8 +789,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
pModal = &eep->modalHeader;
txRxAttenLocal = 23;
- REG_WRITE(ah, AR_PHY_SWITCH_COM,
- ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);
/* Single chain for 4K EEPROM*/
ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal);
@@ -1151,21 +1005,6 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
}
}
-static u32 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
- struct modal_eep_4k_header *pModal = &eep->modalHeader;
-
- return pModal->antCtrlCommon;
-}
-
-static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah,
- enum ath9k_hal_freq_band freq_band)
-{
- return 1;
-}
-
static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
{
#define EEP_MAP4K_SPURCHAN \
@@ -1174,17 +1013,17 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
u16 spur_val = AR_NO_SPUR;
- ath_print(common, ATH_DBG_ANI,
- "Getting spur idx %d is2Ghz. %d val %x\n",
- i, is2GHz, ah->config.spurchans[i][is2GHz]);
+ ath_dbg(common, ATH_DBG_ANI,
+ "Getting spur idx:%d is2Ghz:%d val:%x\n",
+ i, is2GHz, ah->config.spurchans[i][is2GHz]);
switch (ah->config.spurmode) {
case SPUR_DISABLE:
break;
case SPUR_ENABLE_IOCTL:
spur_val = ah->config.spurchans[i][is2GHz];
- ath_print(common, ATH_DBG_ANI,
- "Getting spur val from new loc. %d\n", spur_val);
+ ath_dbg(common, ATH_DBG_ANI,
+ "Getting spur val from new loc. %d\n", spur_val);
break;
case SPUR_ENABLE_EEPROM:
spur_val = EEP_MAP4K_SPURCHAN;
@@ -1202,8 +1041,6 @@ const struct eeprom_ops eep_4k_ops = {
.fill_eeprom = ath9k_hw_4k_fill_eeprom,
.get_eeprom_ver = ath9k_hw_4k_get_eeprom_ver,
.get_eeprom_rev = ath9k_hw_4k_get_eeprom_rev,
- .get_num_ant_config = ath9k_hw_4k_get_num_ant_config,
- .get_eeprom_antenna_cfg = ath9k_hw_4k_get_eeprom_antenna_cfg,
.set_board_values = ath9k_hw_4k_set_board_values,
.set_addac = ath9k_hw_4k_set_addac,
.set_txpower = ath9k_hw_4k_set_txpower,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 195406db3bd8..8cd8333cc086 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -17,7 +17,7 @@
#include "hw.h"
#include "ar9002_phy.h"
-#define NUM_EEP_WORDS (sizeof(struct ar9287_eeprom) / sizeof(u16))
+#define SIZE_EEPROM_AR9287 (sizeof(struct ar9287_eeprom) / sizeof(u16))
static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah)
{
@@ -29,29 +29,19 @@ static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF;
}
-static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
+static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
{
struct ar9287_eeprom *eep = &ah->eeprom.map9287;
struct ath_common *common = ath9k_hw_common(ah);
u16 *eep_data;
- int addr, eep_start_loc;
+ int addr, eep_start_loc = AR9287_EEP_START_LOC;
eep_data = (u16 *)eep;
- if (AR9287_HTC_DEVID(ah))
- eep_start_loc = AR9287_HTC_EEP_START_LOC;
- else
- eep_start_loc = AR9287_EEP_START_LOC;
-
- if (!ath9k_hw_use_flash(ah)) {
- ath_print(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
- }
-
- for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
+ for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
eep_data)) {
- ath_print(common, ATH_DBG_EEPROM,
- "Unable to read eeprom region\n");
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Unable to read eeprom region\n");
return false;
}
eep_data++;
@@ -60,6 +50,31 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
return true;
}
+static bool __ath9k_hw_usb_ar9287_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.map9287;
+
+ ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
+ AR9287_HTC_EEP_START_LOC,
+ SIZE_EEPROM_AR9287);
+ return true;
+}
+
+static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_use_flash(ah)) {
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Reading from EEPROM, not flash\n");
+ }
+
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ return __ath9k_hw_usb_ar9287_fill_eeprom(ah);
+ else
+ return __ath9k_hw_ar9287_fill_eeprom(ah);
+}
+
static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
{
u32 sum = 0, el, integer;
@@ -72,13 +87,12 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
if (!ath9k_hw_use_flash(ah)) {
if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET,
&magic)) {
- ath_print(common, ATH_DBG_FATAL,
- "Reading Magic # failed\n");
+ ath_err(common, "Reading Magic # failed\n");
return false;
}
- ath_print(common, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -87,22 +101,21 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
need_swap = true;
eepdata = (u16 *)(&ah->eeprom);
- for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
+ for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
temp = swab16(*eepdata);
*eepdata = temp;
eepdata++;
}
} else {
- ath_print(common, ATH_DBG_FATAL,
- "Invalid EEPROM Magic. "
- "Endianness mismatch.\n");
+ ath_err(common,
+ "Invalid EEPROM Magic. Endianness mismatch.\n");
return -EINVAL;
}
}
}
- ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
- need_swap ? "True" : "False");
+ ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ need_swap ? "True" : "False");
if (need_swap)
el = swab16(ah->eeprom.map9287.baseEepHeader.length);
@@ -152,7 +165,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
eep->modalHeader.antCtrlChain[i] = integer;
}
- for (i = 0; i < AR9287_EEPROM_MODAL_SPURS; i++) {
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
word = swab16(eep->modalHeader.spurChans[i].spurChan);
eep->modalHeader.spurChans[i].spurChan = word;
}
@@ -160,9 +173,8 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR9287_EEP_VER
|| ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
- ath_print(common, ATH_DBG_FATAL,
- "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
- sum, ah->eep_ops->get_eeprom_ver(ah));
+ ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
+ sum, ah->eep_ops->get_eeprom_ver(ah));
return -EINVAL;
}
@@ -223,163 +235,6 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
}
}
-static void ath9k_hw_get_ar9287_gain_boundaries_pdadcs(struct ath_hw *ah,
- struct ath9k_channel *chan,
- struct cal_data_per_freq_ar9287 *pRawDataSet,
- u8 *bChans, u16 availPiers,
- u16 tPdGainOverlap,
- u16 *pPdGainBoundaries,
- u8 *pPDADCValues,
- u16 numXpdGains)
-{
-#define TMP_VAL_VPD_TABLE \
- ((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep));
-
- int i, j, k;
- int16_t ss;
- u16 idxL = 0, idxR = 0, numPiers;
- u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
- u8 minPwrT4[AR9287_NUM_PD_GAINS];
- u8 maxPwrT4[AR9287_NUM_PD_GAINS];
- int16_t vpdStep;
- int16_t tmpVal;
- u16 sizeCurrVpdTable, maxIndex, tgtIndex;
- bool match;
- int16_t minDelta = 0;
- struct chan_centers centers;
- static u8 vpdTableL[AR5416_EEP4K_NUM_PD_GAINS]
- [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
- static u8 vpdTableR[AR5416_EEP4K_NUM_PD_GAINS]
- [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
- static u8 vpdTableI[AR5416_EEP4K_NUM_PD_GAINS]
- [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
-
- memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS);
- ath9k_hw_get_channel_centers(ah, chan, &centers);
-
- for (numPiers = 0; numPiers < availPiers; numPiers++) {
- if (bChans[numPiers] == AR9287_BCHAN_UNUSED)
- break;
- }
-
- match = ath9k_hw_get_lower_upper_index(
- (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)),
- bChans, numPiers, &idxL, &idxR);
-
- if (match) {
- for (i = 0; i < numXpdGains; i++) {
- minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
- maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
- ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- pRawDataSet[idxL].pwrPdg[i],
- pRawDataSet[idxL].vpdPdg[i],
- AR9287_PD_GAIN_ICEPTS,
- vpdTableI[i]);
- }
- } else {
- for (i = 0; i < numXpdGains; i++) {
- pVpdL = pRawDataSet[idxL].vpdPdg[i];
- pPwrL = pRawDataSet[idxL].pwrPdg[i];
- pVpdR = pRawDataSet[idxR].vpdPdg[i];
- pPwrR = pRawDataSet[idxR].pwrPdg[i];
-
- minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
-
- maxPwrT4[i] = min(pPwrL[AR9287_PD_GAIN_ICEPTS - 1],
- pPwrR[AR9287_PD_GAIN_ICEPTS - 1]);
-
- ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- pPwrL, pVpdL,
- AR9287_PD_GAIN_ICEPTS,
- vpdTableL[i]);
- ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- pPwrR, pVpdR,
- AR9287_PD_GAIN_ICEPTS,
- vpdTableR[i]);
-
- for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
- vpdTableI[i][j] = (u8)(ath9k_hw_interpolate(
- (u16)FREQ2FBIN(centers. synth_center,
- IS_CHAN_2GHZ(chan)),
- bChans[idxL], bChans[idxR],
- vpdTableL[i][j], vpdTableR[i][j]));
- }
- }
- }
-
- k = 0;
-
- for (i = 0; i < numXpdGains; i++) {
- if (i == (numXpdGains - 1))
- pPdGainBoundaries[i] =
- (u16)(maxPwrT4[i] / 2);
- else
- pPdGainBoundaries[i] =
- (u16)((maxPwrT4[i] + minPwrT4[i+1]) / 4);
-
- pPdGainBoundaries[i] = min((u16)AR5416_MAX_RATE_POWER,
- pPdGainBoundaries[i]);
-
-
- minDelta = 0;
-
- if (i == 0) {
- if (AR_SREV_9280_20_OR_LATER(ah))
- ss = (int16_t)(0 - (minPwrT4[i] / 2));
- else
- ss = 0;
- } else {
- ss = (int16_t)((pPdGainBoundaries[i-1] -
- (minPwrT4[i] / 2)) -
- tPdGainOverlap + 1 + minDelta);
- }
-
- vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
- vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
-
- while ((ss < 0) && (k < (AR9287_NUM_PDADC_VALUES - 1))) {
- tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
- pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
- ss++;
- }
-
- sizeCurrVpdTable = (u8)((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
- tgtIndex = (u8)(pPdGainBoundaries[i] +
- tPdGainOverlap - (minPwrT4[i] / 2));
- maxIndex = (tgtIndex < sizeCurrVpdTable) ?
- tgtIndex : sizeCurrVpdTable;
-
- while ((ss < maxIndex) && (k < (AR9287_NUM_PDADC_VALUES - 1)))
- pPDADCValues[k++] = vpdTableI[i][ss++];
-
- vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
- vpdTableI[i][sizeCurrVpdTable - 2]);
- vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
-
- if (tgtIndex > maxIndex) {
- while ((ss <= tgtIndex) &&
- (k < (AR9287_NUM_PDADC_VALUES - 1))) {
- tmpVal = (int16_t) TMP_VAL_VPD_TABLE;
- pPDADCValues[k++] =
- (u8)((tmpVal > 255) ? 255 : tmpVal);
- ss++;
- }
- }
- }
-
- while (i < AR9287_PD_GAINS_IN_MASK) {
- pPdGainBoundaries[i] = pPdGainBoundaries[i-1];
- i++;
- }
-
- while (k < AR9287_NUM_PDADC_VALUES) {
- pPDADCValues[k] = pPDADCValues[k-1];
- k++;
- }
-
-#undef TMP_VAL_VPD_TABLE
-}
-
static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah,
struct ath9k_channel *chan,
struct cal_data_op_loop_ar9287 *pRawDatasetOpLoop,
@@ -392,7 +247,7 @@ static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah,
ath9k_hw_get_channel_centers(ah, chan, &centers);
for (numPiers = 0; numPiers < availPiers; numPiers++) {
- if (pCalChans[numPiers] == AR9287_BCHAN_UNUSED)
+ if (pCalChans[numPiers] == AR5416_BCHAN_UNUSED)
break;
}
@@ -458,11 +313,11 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop;
u8 *pCalBChans = NULL;
u16 pdGainOverlap_t2;
- u8 pdadcValues[AR9287_NUM_PDADC_VALUES];
- u16 gainBoundaries[AR9287_PD_GAINS_IN_MASK];
+ u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
+ u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
u16 numPiers = 0, i, j;
u16 numXpdGain, xpdMask;
- u16 xpdGainValues[AR9287_NUM_PD_GAINS] = {0, 0, 0, 0};
+ u16 xpdGainValues[AR5416_NUM_PD_GAINS] = {0, 0, 0, 0};
u32 reg32, regOffset, regChainOffset, regval;
int16_t modalIdx, diff = 0;
struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
@@ -490,12 +345,12 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
numXpdGain = 0;
/* Calculate the value of xpdgains from the xpdGain Mask */
- for (i = 1; i <= AR9287_PD_GAINS_IN_MASK; i++) {
- if ((xpdMask >> (AR9287_PD_GAINS_IN_MASK - i)) & 1) {
- if (numXpdGain >= AR9287_NUM_PD_GAINS)
+ for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
+ if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
+ if (numXpdGain >= AR5416_NUM_PD_GAINS)
break;
xpdGainValues[numXpdGain] =
- (u16)(AR9287_PD_GAINS_IN_MASK-i);
+ (u16)(AR5416_PD_GAINS_IN_MASK-i);
numXpdGain++;
}
}
@@ -528,7 +383,7 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
(struct cal_data_per_freq_ar9287 *)
pEepData->calPierData2G[i];
- ath9k_hw_get_ar9287_gain_boundaries_pdadcs(ah, chan,
+ ath9k_hw_get_gain_boundaries_pdadcs(ah, chan,
pRawDataset,
pCalBChans, numPiers,
pdGainOverlap_t2,
@@ -564,13 +419,13 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
(int32_t)AR9287_PWR_TABLE_OFFSET_DB);
diff *= 2;
- for (j = 0; j < ((u16)AR9287_NUM_PDADC_VALUES-diff); j++)
+ for (j = 0; j < ((u16)AR5416_NUM_PDADC_VALUES-diff); j++)
pdadcValues[j] = pdadcValues[j+diff];
- for (j = (u16)(AR9287_NUM_PDADC_VALUES-diff);
- j < AR9287_NUM_PDADC_VALUES; j++)
+ for (j = (u16)(AR5416_NUM_PDADC_VALUES-diff);
+ j < AR5416_NUM_PDADC_VALUES; j++)
pdadcValues[j] =
- pdadcValues[AR9287_NUM_PDADC_VALUES-diff];
+ pdadcValues[AR5416_NUM_PDADC_VALUES-diff];
}
if (!ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
@@ -613,9 +468,9 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
- u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
+ u16 twiceMaxEdgePower = MAX_RATE_POWER;
static const u16 tpScaleReductionTable[5] =
- { 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
+ { 0, 3, 6, 9, MAX_RATE_POWER };
int i;
int16_t twiceLargestAntenna;
struct cal_ctl_data_ar9287 *rep;
@@ -626,13 +481,13 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
struct cal_target_power_ht targetPowerHt20,
targetPowerHt40 = {0, {0, 0, 0, 0} };
u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
- u16 ctlModesFor11g[] = {CTL_11B,
- CTL_11G,
- CTL_2GHT20,
- CTL_11B_EXT,
- CTL_11G_EXT,
- CTL_2GHT40};
- u16 numCtlModes = 0, *pCtlMode = NULL, ctlMode, freq;
+ static const u16 ctlModesFor11g[] = {
+ CTL_11B, CTL_11G, CTL_2GHT20,
+ CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
+ };
+ u16 numCtlModes = 0;
+ const u16 *pCtlMode = NULL;
+ u16 ctlMode, freq;
struct chan_centers centers;
int tx_chainmask;
u16 twiceMinEdgePower;
@@ -853,7 +708,7 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
struct ath9k_channel *chan, u16 cfgCtl,
u8 twiceAntennaReduction,
u8 twiceMaxRegulatoryPower,
- u8 powerLimit)
+ u8 powerLimit, bool test)
{
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
@@ -877,12 +732,26 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
ath9k_hw_set_ar9287_power_cal_table(ah, chan, &txPowerIndexOffset);
+ regulatory->max_power_level = 0;
for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
- if (ratesArray[i] > AR9287_MAX_RATE_POWER)
- ratesArray[i] = AR9287_MAX_RATE_POWER;
+ if (ratesArray[i] > MAX_RATE_POWER)
+ ratesArray[i] = MAX_RATE_POWER;
+
+ if (ratesArray[i] > regulatory->max_power_level)
+ regulatory->max_power_level = ratesArray[i];
}
+ if (test)
+ return;
+
+ if (IS_CHAN_2GHZ(chan))
+ i = rate1l;
+ else
+ i = rate6mb;
+
+ regulatory->max_power_level = ratesArray[i];
+
if (AR_SREV_9280_20_OR_LATER(ah)) {
for (i = 0; i < Ar5416RateSize; i++)
ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2;
@@ -971,17 +840,6 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
| ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
| ATH9K_POW_SM(ratesArray[rateDupCck], 0));
}
-
- if (IS_CHAN_2GHZ(chan))
- i = rate1l;
- else
- i = rate6mb;
-
- if (AR_SREV_9280_20_OR_LATER(ah))
- regulatory->max_power_level =
- ratesArray[i] + AR9287_PWR_TABLE_OFFSET_DB * 2;
- else
- regulatory->max_power_level = ratesArray[i];
}
static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah,
@@ -1023,8 +881,7 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
antWrites[j++] = (u16)(pModal->antCtrlChain[i] & 0x3);
}
- REG_WRITE(ah, AR_PHY_SWITCH_COM,
- ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);
for (i = 0; i < AR9287_MAX_CHAINS; i++) {
regChainOffset = i * 0x1000;
@@ -1125,21 +982,6 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
pModal->xpaBiasLvl);
}
-static u8 ath9k_hw_ar9287_get_num_ant_config(struct ath_hw *ah,
- enum ath9k_hal_freq_band freq_band)
-{
- return 1;
-}
-
-static u32 ath9k_hw_ar9287_get_eeprom_antenna_cfg(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- struct ar9287_eeprom *eep = &ah->eeprom.map9287;
- struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
-
- return pModal->antCtrlCommon;
-}
-
static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
u16 i, bool is2GHz)
{
@@ -1149,17 +991,17 @@ static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
struct ath_common *common = ath9k_hw_common(ah);
u16 spur_val = AR_NO_SPUR;
- ath_print(common, ATH_DBG_ANI,
- "Getting spur idx %d is2Ghz. %d val %x\n",
- i, is2GHz, ah->config.spurchans[i][is2GHz]);
+ ath_dbg(common, ATH_DBG_ANI,
+ "Getting spur idx:%d is2Ghz:%d val:%x\n",
+ i, is2GHz, ah->config.spurchans[i][is2GHz]);
switch (ah->config.spurmode) {
case SPUR_DISABLE:
break;
case SPUR_ENABLE_IOCTL:
spur_val = ah->config.spurchans[i][is2GHz];
- ath_print(common, ATH_DBG_ANI,
- "Getting spur val from new loc. %d\n", spur_val);
+ ath_dbg(common, ATH_DBG_ANI,
+ "Getting spur val from new loc. %d\n", spur_val);
break;
case SPUR_ENABLE_EEPROM:
spur_val = EEP_MAP9287_SPURCHAN;
@@ -1177,8 +1019,6 @@ const struct eeprom_ops eep_ar9287_ops = {
.fill_eeprom = ath9k_hw_ar9287_fill_eeprom,
.get_eeprom_ver = ath9k_hw_ar9287_get_eeprom_ver,
.get_eeprom_rev = ath9k_hw_ar9287_get_eeprom_rev,
- .get_num_ant_config = ath9k_hw_ar9287_get_num_ant_config,
- .get_eeprom_antenna_cfg = ath9k_hw_ar9287_get_eeprom_antenna_cfg,
.set_board_values = ath9k_hw_ar9287_set_board_values,
.set_addac = ath9k_hw_ar9287_set_addac,
.set_txpower = ath9k_hw_ar9287_set_txpower,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 76b4d65472dd..fccd87df7300 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -86,9 +86,10 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF);
}
-static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
-{
#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
+
+static bool __ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
+{
struct ath_common *common = ath9k_hw_common(ah);
u16 *eep_data = (u16 *)&ah->eeprom.def;
int addr, ar5416_eep_start_loc = 0x100;
@@ -96,16 +97,41 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) {
if (!ath9k_hw_nvram_read(common, addr + ar5416_eep_start_loc,
eep_data)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Unable to read eeprom region\n");
+ ath_err(ath9k_hw_common(ah),
+ "Unable to read eeprom region\n");
return false;
}
eep_data++;
}
return true;
-#undef SIZE_EEPROM_DEF
}
+static bool __ath9k_hw_usb_def_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.def;
+
+ ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
+ 0x100, SIZE_EEPROM_DEF);
+ return true;
+}
+
+static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_use_flash(ah)) {
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Reading from EEPROM, not flash\n");
+ }
+
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ return __ath9k_hw_usb_def_fill_eeprom(ah);
+ else
+ return __ath9k_hw_def_fill_eeprom(ah);
+}
+
+#undef SIZE_EEPROM_DEF
+
static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
{
struct ar5416_eeprom_def *eep =
@@ -117,13 +143,13 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
int i, addr, size;
if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
- ath_print(common, ATH_DBG_FATAL, "Reading Magic # failed\n");
+ ath_err(common, "Reading Magic # failed\n");
return false;
}
if (!ath9k_hw_use_flash(ah)) {
- ath_print(common, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -139,16 +165,15 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
eepdata++;
}
} else {
- ath_print(common, ATH_DBG_FATAL,
- "Invalid EEPROM Magic. "
- "Endianness mismatch.\n");
+ ath_err(common,
+ "Invalid EEPROM Magic. Endianness mismatch.\n");
return -EINVAL;
}
}
}
- ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
- need_swap ? "True" : "False");
+ ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ need_swap ? "True" : "False");
if (need_swap)
el = swab16(ah->eeprom.def.baseEepHeader.length);
@@ -169,8 +194,8 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
u32 integer, j;
u16 word;
- ath_print(common, ATH_DBG_EEPROM,
- "EEPROM Endianness is not native.. Changing.\n");
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "EEPROM Endianness is not native.. Changing.\n");
word = swab16(eep->baseEepHeader.length);
eep->baseEepHeader.length = word;
@@ -207,7 +232,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
pModal->antCtrlChain[i] = integer;
}
- for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) {
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
word = swab16(pModal->spurChans[i].spurChan);
pModal->spurChans[i].spurChan = word;
}
@@ -216,18 +241,21 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
- ath_print(common, ATH_DBG_FATAL,
- "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
+ ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
sum, ah->eep_ops->get_eeprom_ver(ah));
return -EINVAL;
}
/* Enable fixup for AR_AN_TOP2 if necessary */
- if (AR_SREV_9280_20_OR_LATER(ah) &&
- (eep->baseEepHeader.version & 0xff) > 0x0a &&
- eep->baseEepHeader.pwdclkind == 0)
+ if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
+ ((eep->baseEepHeader.version & 0xff) > 0x0a) &&
+ (eep->baseEepHeader.pwdclkind == 0))
ah->need_an_top2_fixup = 1;
+ if ((common->bus_ops->ath_bus_type == ATH_USB) &&
+ (AR_SREV_9280(ah)))
+ eep->modalHeader[0].xpaBiasLvl = 0;
+
return 0;
}
@@ -376,8 +404,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;
- REG_WRITE(ah, AR_PHY_SWITCH_COM,
- ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon & 0xffff);
for (i = 0; i < AR5416_MAX_CHAINS; i++) {
if (AR_SREV_9280(ah)) {
@@ -451,9 +478,10 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
AR_AN_TOP2_LOCALBIAS,
AR_AN_TOP2_LOCALBIAS_S,
- pModal->local_bias);
+ !!(pModal->lna_ctl &
+ LNA_CTL_LOCAL_BIAS));
REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
- pModal->force_xpaon);
+ !!(pModal->lna_ctl & LNA_CTL_FORCE_XPA));
}
REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
@@ -589,168 +617,6 @@ static void ath9k_hw_def_set_addac(struct ath_hw *ah,
#undef XPA_LVL_FREQ
}
-static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
- struct ath9k_channel *chan,
- struct cal_data_per_freq *pRawDataSet,
- u8 *bChans, u16 availPiers,
- u16 tPdGainOverlap,
- u16 *pPdGainBoundaries, u8 *pPDADCValues,
- u16 numXpdGains)
-{
- int i, j, k;
- int16_t ss;
- u16 idxL = 0, idxR = 0, numPiers;
- static u8 vpdTableL[AR5416_NUM_PD_GAINS]
- [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
- static u8 vpdTableR[AR5416_NUM_PD_GAINS]
- [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
- static u8 vpdTableI[AR5416_NUM_PD_GAINS]
- [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
-
- u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
- u8 minPwrT4[AR5416_NUM_PD_GAINS];
- u8 maxPwrT4[AR5416_NUM_PD_GAINS];
- int16_t vpdStep;
- int16_t tmpVal;
- u16 sizeCurrVpdTable, maxIndex, tgtIndex;
- bool match;
- int16_t minDelta = 0;
- struct chan_centers centers;
-
- memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS);
- ath9k_hw_get_channel_centers(ah, chan, &centers);
-
- for (numPiers = 0; numPiers < availPiers; numPiers++) {
- if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
- break;
- }
-
- match = ath9k_hw_get_lower_upper_index((u8)FREQ2FBIN(centers.synth_center,
- IS_CHAN_2GHZ(chan)),
- bChans, numPiers, &idxL, &idxR);
-
- if (match) {
- for (i = 0; i < numXpdGains; i++) {
- minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
- maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
- ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- pRawDataSet[idxL].pwrPdg[i],
- pRawDataSet[idxL].vpdPdg[i],
- AR5416_PD_GAIN_ICEPTS,
- vpdTableI[i]);
- }
- } else {
- for (i = 0; i < numXpdGains; i++) {
- pVpdL = pRawDataSet[idxL].vpdPdg[i];
- pPwrL = pRawDataSet[idxL].pwrPdg[i];
- pVpdR = pRawDataSet[idxR].vpdPdg[i];
- pPwrR = pRawDataSet[idxR].pwrPdg[i];
-
- minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
-
- maxPwrT4[i] =
- min(pPwrL[AR5416_PD_GAIN_ICEPTS - 1],
- pPwrR[AR5416_PD_GAIN_ICEPTS - 1]);
-
-
- ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- pPwrL, pVpdL,
- AR5416_PD_GAIN_ICEPTS,
- vpdTableL[i]);
- ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
- pPwrR, pVpdR,
- AR5416_PD_GAIN_ICEPTS,
- vpdTableR[i]);
-
- for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
- vpdTableI[i][j] =
- (u8)(ath9k_hw_interpolate((u16)
- FREQ2FBIN(centers.
- synth_center,
- IS_CHAN_2GHZ
- (chan)),
- bChans[idxL], bChans[idxR],
- vpdTableL[i][j], vpdTableR[i][j]));
- }
- }
- }
-
- k = 0;
-
- for (i = 0; i < numXpdGains; i++) {
- if (i == (numXpdGains - 1))
- pPdGainBoundaries[i] =
- (u16)(maxPwrT4[i] / 2);
- else
- pPdGainBoundaries[i] =
- (u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4);
-
- pPdGainBoundaries[i] =
- min((u16)AR5416_MAX_RATE_POWER, pPdGainBoundaries[i]);
-
- if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) {
- minDelta = pPdGainBoundaries[0] - 23;
- pPdGainBoundaries[0] = 23;
- } else {
- minDelta = 0;
- }
-
- if (i == 0) {
- if (AR_SREV_9280_20_OR_LATER(ah))
- ss = (int16_t)(0 - (minPwrT4[i] / 2));
- else
- ss = 0;
- } else {
- ss = (int16_t)((pPdGainBoundaries[i - 1] -
- (minPwrT4[i] / 2)) -
- tPdGainOverlap + 1 + minDelta);
- }
- vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
- vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
-
- while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
- tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
- pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
- ss++;
- }
-
- sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
- tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap -
- (minPwrT4[i] / 2));
- maxIndex = (tgtIndex < sizeCurrVpdTable) ?
- tgtIndex : sizeCurrVpdTable;
-
- while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
- pPDADCValues[k++] = vpdTableI[i][ss++];
- }
-
- vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
- vpdTableI[i][sizeCurrVpdTable - 2]);
- vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
-
- if (tgtIndex >= maxIndex) {
- while ((ss <= tgtIndex) &&
- (k < (AR5416_NUM_PDADC_VALUES - 1))) {
- tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
- (ss - maxIndex + 1) * vpdStep));
- pPDADCValues[k++] = (u8)((tmpVal > 255) ?
- 255 : tmpVal);
- ss++;
- }
- }
- }
-
- while (i < AR5416_PD_GAINS_IN_MASK) {
- pPdGainBoundaries[i] = pPdGainBoundaries[i - 1];
- i++;
- }
-
- while (k < AR5416_NUM_PDADC_VALUES) {
- pPDADCValues[k] = pPDADCValues[k - 1];
- k++;
- }
-}
-
static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah,
u16 *gb,
u16 numXpdGain,
@@ -783,7 +649,7 @@ static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah,
/* Because of a hardware limitation, ensure the gain boundary
* is not larger than (63 - overlap)
*/
- gb_limit = (u16)(AR5416_MAX_RATE_POWER - pdGainOverlap_t2);
+ gb_limit = (u16)(MAX_RATE_POWER - pdGainOverlap_t2);
for (k = 0; k < numXpdGain; k++)
gb[k] = (u16)min(gb_limit, gb[k]);
@@ -917,7 +783,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
ath9k_olc_get_pdadcs(ah, pcdacIdx,
txPower/2, pdadcValues);
} else {
- ath9k_hw_get_def_gain_boundaries_pdadcs(ah,
+ ath9k_hw_get_gain_boundaries_pdadcs(ah,
chan, pRawDataset,
pCalBChans, numPiers,
pdGainOverlap_t2,
@@ -965,20 +831,19 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
((pdadcValues[4 * j + 3] & 0xFF) << 24);
REG_WRITE(ah, regOffset, reg32);
- ath_print(common, ATH_DBG_EEPROM,
- "PDADC (%d,%4x): %4.4x %8.8x\n",
- i, regChainOffset, regOffset,
- reg32);
- ath_print(common, ATH_DBG_EEPROM,
- "PDADC: Chain %d | PDADC %3d "
- "Value %3d | PDADC %3d Value %3d | "
- "PDADC %3d Value %3d | PDADC %3d "
- "Value %3d |\n",
- i, 4 * j, pdadcValues[4 * j],
- 4 * j + 1, pdadcValues[4 * j + 1],
- 4 * j + 2, pdadcValues[4 * j + 2],
- 4 * j + 3,
- pdadcValues[4 * j + 3]);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "PDADC (%d,%4x): %4.4x %8.8x\n",
+ i, regChainOffset, regOffset,
+ reg32);
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "PDADC: Chain %d | PDADC %3d "
+ "Value %3d | PDADC %3d Value %3d | "
+ "PDADC %3d Value %3d | PDADC %3d "
+ "Value %3d |\n",
+ i, 4 * j, pdadcValues[4 * j],
+ 4 * j + 1, pdadcValues[4 * j + 1],
+ 4 * j + 2, pdadcValues[4 * j + 2],
+ 4 * j + 3, pdadcValues[4 * j + 3]);
regOffset += 4;
}
@@ -1003,9 +868,9 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
- u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
+ u16 twiceMaxEdgePower = MAX_RATE_POWER;
static const u16 tpScaleReductionTable[5] =
- { 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
+ { 0, 3, 6, 9, MAX_RATE_POWER };
int i;
int16_t twiceLargestAntenna;
@@ -1021,13 +886,16 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
0, {0, 0, 0, 0}
};
u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
- u16 ctlModesFor11a[] =
- { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 };
- u16 ctlModesFor11g[] =
- { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT,
- CTL_2GHT40
- };
- u16 numCtlModes, *pCtlMode, ctlMode, freq;
+ static const u16 ctlModesFor11a[] = {
+ CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
+ };
+ static const u16 ctlModesFor11g[] = {
+ CTL_11B, CTL_11G, CTL_2GHT20,
+ CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
+ };
+ u16 numCtlModes;
+ const u16 *pCtlMode;
+ u16 ctlMode, freq;
struct chan_centers centers;
int tx_chainmask;
u16 twiceMinEdgePower;
@@ -1062,15 +930,19 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
case 1:
break;
case 2:
- scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
+ else
+ scaledPower = 0;
break;
case 3:
- scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
+ scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
+ else
+ scaledPower = 0;
break;
}
- scaledPower = max((u16)0, scaledPower);
-
if (IS_CHAN_2GHZ(chan)) {
numCtlModes = ARRAY_SIZE(ctlModesFor11g) -
SUB_NUM_CTL_MODES_AT_2G_40;
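
The tx-chain hunk above trades the unconditional subtraction plus max((u16)0, scaledPower) for an explicit bounds check. Since scaledPower is unsigned, subtracting a reduction larger than its value wraps around to a huge number, and the later max() against zero can never bring it back, so the clamp has to happen before the subtraction. A small standalone illustration (the reduction value below is a stand-in, not the driver's constant):

#include <stdint.h>
#include <stdio.h>

#define REDUCE_BY_TWO_CHAIN 6		/* example value only */

static uint16_t clamp_sub_broken(uint16_t power)
{
	power -= REDUCE_BY_TWO_CHAIN;	/* wraps to a huge value if power < 6 */
	/* mirrors max((u16)0, power): an unsigned value is never below zero,
	 * so the wrap is not undone */
	return power > 0 ? power : 0;
}

static uint16_t clamp_sub_fixed(uint16_t power)
{
	if (power > REDUCE_BY_TWO_CHAIN)
		return power - REDUCE_BY_TWO_CHAIN;
	return 0;
}

int main(void)
{
	printf("broken: %u\n", clamp_sub_broken(4));	/* 65534, not 0 */
	printf("fixed : %u\n", clamp_sub_fixed(4));	/* 0 */
	return 0;
}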
@@ -1143,7 +1015,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
ah->eep_ops->get_eeprom_rev(ah) <= 2)
- twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
if ((((cfgCtl & ~CTL_MODE_M) |
@@ -1258,7 +1130,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
u16 cfgCtl,
u8 twiceAntennaReduction,
u8 twiceMaxRegulatoryPower,
- u8 powerLimit)
+ u8 powerLimit, bool test)
{
#define RT_AR_DELTA(x) (ratesArray[x] - cck_ofdm_delta)
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
@@ -1285,12 +1157,44 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset);
+ regulatory->max_power_level = 0;
for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
- if (ratesArray[i] > AR5416_MAX_RATE_POWER)
- ratesArray[i] = AR5416_MAX_RATE_POWER;
+ if (ratesArray[i] > MAX_RATE_POWER)
+ ratesArray[i] = MAX_RATE_POWER;
+ if (ratesArray[i] > regulatory->max_power_level)
+ regulatory->max_power_level = ratesArray[i];
+ }
+
+ if (!test) {
+ i = rate6mb;
+
+ if (IS_CHAN_HT40(chan))
+ i = rateHt40_0;
+ else if (IS_CHAN_HT20(chan))
+ i = rateHt20_0;
+
+ regulatory->max_power_level = ratesArray[i];
}
+ switch(ar5416_get_ntxchains(ah->txchainmask)) {
+ case 1:
+ break;
+ case 2:
+ regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
+ break;
+ case 3:
+ regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
+ break;
+ default:
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_EEPROM,
+ "Invalid chainmask configuration\n");
+ break;
+ }
+
+ if (test)
+ return;
+
if (AR_SREV_9280_20_OR_LATER(ah)) {
for (i = 0; i < Ar5416RateSize; i++) {
int8_t pwr_table_offset;
@@ -1387,62 +1291,6 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
| ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0));
-
- i = rate6mb;
-
- if (IS_CHAN_HT40(chan))
- i = rateHt40_0;
- else if (IS_CHAN_HT20(chan))
- i = rateHt20_0;
-
- if (AR_SREV_9280_20_OR_LATER(ah))
- regulatory->max_power_level =
- ratesArray[i] + AR5416_PWR_TABLE_OFFSET_DB * 2;
- else
- regulatory->max_power_level = ratesArray[i];
-
- switch(ar5416_get_ntxchains(ah->txchainmask)) {
- case 1:
- break;
- case 2:
- regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
- break;
- case 3:
- regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
- break;
- default:
- ath_print(ath9k_hw_common(ah), ATH_DBG_EEPROM,
- "Invalid chainmask configuration\n");
- break;
- }
-}
-
-static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
- enum ath9k_hal_freq_band freq_band)
-{
- struct ar5416_eeprom_def *eep = &ah->eeprom.def;
- struct modal_eep_header *pModal =
- &(eep->modalHeader[freq_band]);
- struct base_eep_header *pBase = &eep->baseEepHeader;
- u8 num_ant_config;
-
- num_ant_config = 1;
-
- if (pBase->version >= 0x0E0D)
- if (pModal->useAnt1)
- num_ant_config += 1;
-
- return num_ant_config;
-}
-
-static u32 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah,
- struct ath9k_channel *chan)
-{
- struct ar5416_eeprom_def *eep = &ah->eeprom.def;
- struct modal_eep_header *pModal =
- &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
-
- return pModal->antCtrlCommon;
}
static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
@@ -1453,17 +1301,17 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
u16 spur_val = AR_NO_SPUR;
- ath_print(common, ATH_DBG_ANI,
- "Getting spur idx %d is2Ghz. %d val %x\n",
- i, is2GHz, ah->config.spurchans[i][is2GHz]);
+ ath_dbg(common, ATH_DBG_ANI,
+ "Getting spur idx:%d is2Ghz:%d val:%x\n",
+ i, is2GHz, ah->config.spurchans[i][is2GHz]);
switch (ah->config.spurmode) {
case SPUR_DISABLE:
break;
case SPUR_ENABLE_IOCTL:
spur_val = ah->config.spurchans[i][is2GHz];
- ath_print(common, ATH_DBG_ANI,
- "Getting spur val from new loc. %d\n", spur_val);
+ ath_dbg(common, ATH_DBG_ANI,
+ "Getting spur val from new loc. %d\n", spur_val);
break;
case SPUR_ENABLE_EEPROM:
spur_val = EEP_DEF_SPURCHAN;
@@ -1481,8 +1329,6 @@ const struct eeprom_ops eep_def_ops = {
.fill_eeprom = ath9k_hw_def_fill_eeprom,
.get_eeprom_ver = ath9k_hw_def_get_eeprom_ver,
.get_eeprom_rev = ath9k_hw_def_get_eeprom_rev,
- .get_num_ant_config = ath9k_hw_def_get_num_ant_config,
- .get_eeprom_antenna_cfg = ath9k_hw_def_get_eeprom_antenna_cfg,
.set_board_values = ath9k_hw_def_set_board_values,
.set_addac = ath9k_hw_def_set_addac,
.set_txpower = ath9k_hw_def_set_txpower,
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 4a9a68bba324..0fb8f8ac275a 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -20,121 +20,31 @@
/* LED functions */
/********************************/
-static void ath_led_blink_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- ath_led_blink_work.work);
-
- if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
- return;
-
- if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
- (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- else
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
-
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work,
- (sc->sc_flags & SC_OP_LED_ON) ?
- msecs_to_jiffies(sc->led_off_duration) :
- msecs_to_jiffies(sc->led_on_duration));
-
- sc->led_on_duration = sc->led_on_cnt ?
- max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
- ATH_LED_ON_DURATION_IDLE;
- sc->led_off_duration = sc->led_off_cnt ?
- max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
- ATH_LED_OFF_DURATION_IDLE;
- sc->led_on_cnt = sc->led_off_cnt = 0;
- if (sc->sc_flags & SC_OP_LED_ON)
- sc->sc_flags &= ~SC_OP_LED_ON;
- else
- sc->sc_flags |= SC_OP_LED_ON;
-}
-
+#ifdef CONFIG_MAC80211_LEDS
static void ath_led_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
- struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
- struct ath_softc *sc = led->sc;
-
- switch (brightness) {
- case LED_OFF:
- if (led->led_type == ATH_LED_ASSOC ||
- led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (led->led_type == ATH_LED_RADIO));
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- if (led->led_type == ATH_LED_RADIO)
- sc->sc_flags &= ~SC_OP_LED_ON;
- } else {
- sc->led_off_cnt++;
- }
- break;
- case LED_FULL:
- if (led->led_type == ATH_LED_ASSOC) {
- sc->sc_flags |= SC_OP_LED_ASSOCIATED;
- if (led_blink)
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work, 0);
- } else if (led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- sc->sc_flags |= SC_OP_LED_ON;
- } else {
- sc->led_on_cnt++;
- }
- break;
- default:
- break;
- }
-}
-
-static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
- char *trigger)
-{
- int ret;
-
- led->sc = sc;
- led->led_cdev.name = led->name;
- led->led_cdev.default_trigger = trigger;
- led->led_cdev.brightness_set = ath_led_brightness;
-
- ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
- if (ret)
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Failed to register led:%s", led->name);
- else
- led->registered = 1;
- return ret;
-}
-
-static void ath_unregister_led(struct ath_led *led)
-{
- if (led->registered) {
- led_classdev_unregister(&led->led_cdev);
- led->registered = 0;
- }
+ struct ath_softc *sc = container_of(led_cdev, struct ath_softc, led_cdev);
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, (brightness == LED_OFF));
}
void ath_deinit_leds(struct ath_softc *sc)
{
- ath_unregister_led(&sc->assoc_led);
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- ath_unregister_led(&sc->tx_led);
- ath_unregister_led(&sc->rx_led);
- ath_unregister_led(&sc->radio_led);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+ if (!sc->led_registered)
+ return;
+
+ ath_led_brightness(&sc->led_cdev, LED_OFF);
+ led_classdev_unregister(&sc->led_cdev);
}
void ath_init_leds(struct ath_softc *sc)
{
- char *trigger;
int ret;
if (AR_SREV_9287(sc->sc_ah))
sc->sc_ah->led_pin = ATH_LED_PIN_9287;
+ else if (AR_SREV_9485(sc->sc_ah))
+ sc->sc_ah->led_pin = ATH_LED_PIN_9485;
else
sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
@@ -144,48 +54,22 @@ void ath_init_leds(struct ath_softc *sc)
/* LED off, active low */
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
- if (led_blink)
- INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
-
- trigger = ieee80211_get_radio_led_name(sc->hw);
- snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
- "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->radio_led, trigger);
- sc->radio_led.led_type = ATH_LED_RADIO;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_assoc_led_name(sc->hw);
- snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
- "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->assoc_led, trigger);
- sc->assoc_led.led_type = ATH_LED_ASSOC;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_tx_led_name(sc->hw);
- snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
- "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->tx_led, trigger);
- sc->tx_led.led_type = ATH_LED_TX;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_rx_led_name(sc->hw);
- snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
- "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->rx_led, trigger);
- sc->rx_led.led_type = ATH_LED_RX;
- if (ret)
- goto fail;
-
- return;
-
-fail:
- if (led_blink)
- cancel_delayed_work_sync(&sc->ath_led_blink_work);
- ath_deinit_leds(sc);
+ if (!led_blink)
+ sc->led_cdev.default_trigger =
+ ieee80211_get_radio_led_name(sc->hw);
+
+ snprintf(sc->led_name, sizeof(sc->led_name),
+ "ath9k-%s", wiphy_name(sc->hw->wiphy));
+ sc->led_cdev.name = sc->led_name;
+ sc->led_cdev.brightness_set = ath_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &sc->led_cdev);
+ if (ret < 0)
+ return;
+
+ sc->led_registered = true;
}
+#endif
/*******************/
/* Rfkill */
@@ -201,8 +85,7 @@ static bool ath_is_rfkill_set(struct ath_softc *sc)
void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
bool blocked = !!ath_is_rfkill_set(sc);
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
@@ -236,13 +119,13 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
/* Detect if colocated bt started scanning */
if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
- "BT scan detected");
+ ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ "BT scan detected\n");
sc->sc_flags |= (SC_OP_BT_SCAN |
SC_OP_BT_PRIORITY_DETECTED);
} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
- "BT priority traffic detected");
+ ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ "BT priority traffic detected\n");
sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
}
@@ -259,7 +142,7 @@ static void ath9k_gen_timer_start(struct ath_hw *ah,
ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
- ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_disable_interrupts(ah);
ah->imask |= ATH9K_INT_GENTIMER;
ath9k_hw_set_interrupts(ah, ah->imask);
}
@@ -273,7 +156,7 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
/* if no timer is enabled, turn off interrupt mask */
if (timer_table->timer_mask.val == 0) {
- ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_disable_interrupts(ah);
ah->imask &= ~ATH9K_INT_GENTIMER;
ath9k_hw_set_interrupts(ah, ah->imask);
}
@@ -310,10 +193,8 @@ static void ath_btcoex_period_timer(unsigned long data)
timer_period = is_btscan ? btcoex->btscan_no_stomp :
btcoex->btcoex_no_stomp;
- ath9k_gen_timer_start(ah,
- btcoex->no_stomp_timer,
- (ath9k_hw_gettsf32(ah) +
- timer_period), timer_period * 10);
+ ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, 0,
+ timer_period * 10);
btcoex->hw_timer_enabled = true;
}
@@ -333,8 +214,8 @@ static void ath_btcoex_no_stomp_timer(void *arg)
struct ath_common *common = ath9k_hw_common(ah);
bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
- ath_print(common, ATH_DBG_BTCOEX,
- "no stomp timer running\n");
+ ath_dbg(common, ATH_DBG_BTCOEX,
+ "no stomp timer running\n");
spin_lock_bh(&btcoex->btcoex_lock);
@@ -380,8 +261,8 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah;
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Starting btcoex timers");
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "Starting btcoex timers\n");
/* make sure duty cycle timer is also stopped when resuming */
if (btcoex->hw_timer_enabled)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index dfb6560dab92..f1b8af64569c 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -28,10 +28,7 @@ MODULE_FIRMWARE(FIRMWARE_AR9271);
static struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
{ USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */
- { USB_DEVICE(0x0cf3, 0x7010) }, /* Atheros */
- { USB_DEVICE(0x0cf3, 0x7015) }, /* Atheros */
{ USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */
- { USB_DEVICE(0x0846, 0x9018) }, /* Netgear WNDA3200 */
{ USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
{ USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
{ USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
@@ -40,9 +37,24 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */
{ USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */
{ USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
- { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */
{ USB_DEVICE(0x040D, 0x3801) }, /* VIA */
- { USB_DEVICE(0x1668, 0x1200) }, /* Verizon */
+ { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
+
+ { USB_DEVICE(0x0cf3, 0x7015),
+ .driver_info = AR9287_USB }, /* Atheros */
+ { USB_DEVICE(0x1668, 0x1200),
+ .driver_info = AR9287_USB }, /* Verizon */
+
+ { USB_DEVICE(0x0cf3, 0x7010),
+ .driver_info = AR9280_USB }, /* Atheros */
+ { USB_DEVICE(0x0846, 0x9018),
+ .driver_info = AR9280_USB }, /* Netgear WNDA3200 */
+ { USB_DEVICE(0x083A, 0xA704),
+ .driver_info = AR9280_USB }, /* SMC Networks */
+
+ { USB_DEVICE(0x0cf3, 0x20ff),
+ .driver_info = STORAGE_DEVICE },
+
{ },
};
@@ -144,16 +156,36 @@ static void hif_usb_tx_cb(struct urb *urb)
case -ENODEV:
case -ESHUTDOWN:
/*
- * The URB has been killed, free the SKBs
- * and return.
+ * The URB has been killed, free the SKBs.
*/
ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
- return;
+
+ /*
+ * If the URBs are being flushed, no need to add this
+ * URB to the free list.
+ */
+ spin_lock(&hif_dev->tx.tx_lock);
+ if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
+ spin_unlock(&hif_dev->tx.tx_lock);
+ return;
+ }
+ spin_unlock(&hif_dev->tx.tx_lock);
+
+ /*
+ * In the stop() case, this URB has to be added to
+ * the free list.
+ */
+ goto add_free;
default:
break;
}
- /* Check if TX has been stopped */
+ /*
+ * Check if TX has been stopped, this is needed because
+ * this CB could have been invoked just after the TX lock
+ * was released in hif_stop() and kill_urb() hasn't been
+ * called yet.
+ */
spin_lock(&hif_dev->tx.tx_lock);
if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
spin_unlock(&hif_dev->tx.tx_lock);
@@ -190,8 +222,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
struct tx_buf *tx_buf = NULL;
struct sk_buff *nskb = NULL;
int ret = 0, i;
- u16 *hdr, tx_skb_cnt = 0;
+ u16 tx_skb_cnt = 0;
u8 *buf;
+ __le16 *hdr;
if (hif_dev->tx.tx_skb_cnt == 0)
return 0;
@@ -216,9 +249,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
buf = tx_buf->buf;
buf += tx_buf->offset;
- hdr = (u16 *)buf;
- *hdr++ = nskb->len;
- *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
+ hdr = (__le16 *)buf;
+ *hdr++ = cpu_to_le16(nskb->len);
+ *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
buf += 4;
memcpy(buf, nskb->data, nskb->len);
tx_buf->len = nskb->len + 4;
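
The hunk above types the 4-byte stream-mode header as __le16 and fills it through cpu_to_le16(), so the on-wire layout no longer depends on host byte order. Outside the kernel the same framing can be produced with plain byte stores; the sketch below does that, with STREAM_TAG a placeholder rather than the real ATH_USB_TX_STREAM_MODE_TAG value:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define STREAM_TAG 0x1234		/* placeholder tag, not the driver's constant */

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = (uint8_t)(v & 0xff);
	p[1] = (uint8_t)(v >> 8);
}

/* Prepend the 4-byte stream header and copy the payload behind it. */
static size_t build_stream_frame(uint8_t *out, const uint8_t *payload, uint16_t len)
{
	put_le16(out, len);		/* bytes 0-1: payload length, little-endian */
	put_le16(out + 2, STREAM_TAG);	/* bytes 2-3: stream-mode tag, little-endian */
	memcpy(out + 4, payload, len);
	return (size_t)len + 4;
}

int main(void)
{
	uint8_t frame[64];
	const uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };
	size_t n = build_stream_frame(frame, payload, sizeof(payload));

	printf("frame length %zu, header %02x %02x %02x %02x\n",
	       n, frame[0], frame[1], frame[2], frame[3]);
	return 0;
}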
@@ -305,6 +338,7 @@ static void hif_usb_start(void *hif_handle, u8 pipe_id)
static void hif_usb_stop(void *hif_handle, u8 pipe_id)
{
struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
unsigned long flags;
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
@@ -312,6 +346,12 @@ static void hif_usb_stop(void *hif_handle, u8 pipe_id)
hif_dev->tx.tx_skb_cnt = 0;
hif_dev->tx.flags |= HIF_USB_TX_STOP;
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
+ /* The pending URBs have to be canceled. */
+ list_for_each_entry_safe(tx_buf, tx_buf_tmp,
+ &hif_dev->tx.tx_pending, list) {
+ usb_kill_urb(tx_buf->urb);
+ }
}
static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb,
@@ -353,9 +393,9 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
struct sk_buff *skb)
{
struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
- int index = 0, i = 0, chk_idx, len = skb->len;
- int rx_remain_len = 0, rx_pkt_len = 0;
- u16 pkt_len, pkt_tag, pool_index = 0;
+ int index = 0, i = 0, len = skb->len;
+ int rx_remain_len, rx_pkt_len;
+ u16 pool_index = 0;
u8 *ptr;
spin_lock(&hif_dev->rx_lock);
@@ -389,64 +429,64 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
spin_unlock(&hif_dev->rx_lock);
while (index < len) {
+ u16 pkt_len;
+ u16 pkt_tag;
+ u16 pad_len;
+ int chk_idx;
+
ptr = (u8 *) skb->data;
pkt_len = ptr[index] + (ptr[index+1] << 8);
pkt_tag = ptr[index+2] + (ptr[index+3] << 8);
- if (pkt_tag == ATH_USB_RX_STREAM_MODE_TAG) {
- u16 pad_len;
-
- pad_len = 4 - (pkt_len & 0x3);
- if (pad_len == 4)
- pad_len = 0;
-
- chk_idx = index;
- index = index + 4 + pkt_len + pad_len;
-
- if (index > MAX_RX_BUF_SIZE) {
- spin_lock(&hif_dev->rx_lock);
- hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
- hif_dev->rx_transfer_len =
- MAX_RX_BUF_SIZE - chk_idx - 4;
- hif_dev->rx_pad_len = pad_len;
-
- nskb = __dev_alloc_skb(pkt_len + 32,
- GFP_ATOMIC);
- if (!nskb) {
- dev_err(&hif_dev->udev->dev,
- "ath9k_htc: RX memory allocation"
- " error\n");
- spin_unlock(&hif_dev->rx_lock);
- goto err;
- }
- skb_reserve(nskb, 32);
- RX_STAT_INC(skb_allocated);
-
- memcpy(nskb->data, &(skb->data[chk_idx+4]),
- hif_dev->rx_transfer_len);
-
- /* Record the buffer pointer */
- hif_dev->remain_skb = nskb;
+ if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
+ RX_STAT_INC(skb_dropped);
+ return;
+ }
+
+ pad_len = 4 - (pkt_len & 0x3);
+ if (pad_len == 4)
+ pad_len = 0;
+
+ chk_idx = index;
+ index = index + 4 + pkt_len + pad_len;
+
+ if (index > MAX_RX_BUF_SIZE) {
+ spin_lock(&hif_dev->rx_lock);
+ hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
+ hif_dev->rx_transfer_len =
+ MAX_RX_BUF_SIZE - chk_idx - 4;
+ hif_dev->rx_pad_len = pad_len;
+
+ nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: RX memory allocation error\n");
spin_unlock(&hif_dev->rx_lock);
- } else {
- nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
- if (!nskb) {
- dev_err(&hif_dev->udev->dev,
- "ath9k_htc: RX memory allocation"
- " error\n");
- goto err;
- }
- skb_reserve(nskb, 32);
- RX_STAT_INC(skb_allocated);
-
- memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
- skb_put(nskb, pkt_len);
- skb_pool[pool_index++] = nskb;
+ goto err;
}
+ skb_reserve(nskb, 32);
+ RX_STAT_INC(skb_allocated);
+
+ memcpy(nskb->data, &(skb->data[chk_idx+4]),
+ hif_dev->rx_transfer_len);
+
+ /* Record the buffer pointer */
+ hif_dev->remain_skb = nskb;
+ spin_unlock(&hif_dev->rx_lock);
} else {
- RX_STAT_INC(skb_dropped);
- return;
+ nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
+ if (!nskb) {
+ dev_err(&hif_dev->udev->dev,
+ "ath9k_htc: RX memory allocation error\n");
+ goto err;
+ }
+ skb_reserve(nskb, 32);
+ RX_STAT_INC(skb_allocated);
+
+ memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
+ skb_put(nskb, pkt_len);
+ skb_pool[pool_index++] = nskb;
}
}
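
The flattened RX path above walks the bulk-in buffer record by record: a little-endian length, a stream tag, the payload, then padding up to the next 4-byte boundary, and a record with an unexpected tag aborts the walk. The userspace sketch below mirrors that loop (again with a placeholder tag) and leaves out the remain_skb handling the driver needs for records that straddle two URBs:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define STREAM_TAG 0x1234		/* placeholder, not ATH_USB_RX_STREAM_MODE_TAG */

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/* Walk a stream buffer and report each payload; returns the record count. */
static int walk_stream(const uint8_t *buf, size_t len)
{
	size_t index = 0;
	int count = 0;

	while (index + 4 <= len) {
		uint16_t pkt_len = get_le16(buf + index);
		uint16_t pkt_tag = get_le16(buf + index + 2);
		uint16_t pad_len = 4 - (pkt_len & 0x3);

		if (pkt_tag != STREAM_TAG)
			break;			/* drop the rest, as the driver does */

		if (pad_len == 4)
			pad_len = 0;

		printf("record %d: %u payload bytes\n", count, pkt_len);
		index += 4 + pkt_len + pad_len;
		count++;
	}
	return count;
}

int main(void)
{
	/* one 3-byte record: length, tag, payload, one pad byte */
	const uint8_t buf[] = { 0x03, 0x00, 0x34, 0x12, 0xaa, 0xbb, 0xcc, 0x00 };

	printf("records: %d\n", walk_stream(buf, sizeof(buf)));
	return 0;
}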
@@ -461,7 +501,7 @@ err:
static void ath9k_hif_usb_rx_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
- struct hif_device_usb *hif_dev = (struct hif_device_usb *)
+ struct hif_device_usb *hif_dev =
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int ret;
@@ -508,7 +548,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct sk_buff *nskb;
- struct hif_device_usb *hif_dev = (struct hif_device_usb *)
+ struct hif_device_usb *hif_dev =
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
int ret;
@@ -578,6 +618,7 @@ free:
static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
{
struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
+ unsigned long flags;
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_buf, list) {
@@ -588,6 +629,10 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
kfree(tx_buf);
}
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
+ hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_pending, list) {
usb_kill_urb(tx_buf->urb);
@@ -776,7 +821,8 @@ static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
}
-static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
+static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev,
+ u32 drv_info)
{
int transfer, err;
const void *data = hif_dev->firmware->data;
@@ -807,18 +853,10 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
}
kfree(buf);
- switch (hif_dev->device_id) {
- case 0x7010:
- case 0x7015:
- case 0x9018:
- case 0xA704:
- case 0x1200:
+ if (IS_AR7010_DEVICE(drv_info))
firm_offset = AR7010_FIRMWARE_TEXT;
- break;
- default:
+ else
firm_offset = AR9271_FIRMWARE_TEXT;
- break;
- }
/*
* Issue FW download complete command to firmware.
@@ -836,7 +874,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
return 0;
}
-static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
+static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, u32 drv_info)
{
int ret, idx;
struct usb_host_interface *alt = &hif_dev->interface->altsetting[0];
@@ -852,7 +890,7 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
}
/* Download firmware */
- ret = ath9k_hif_usb_download_fw(hif_dev);
+ ret = ath9k_hif_usb_download_fw(hif_dev, drv_info);
if (ret) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: Firmware - %s download failed\n",
@@ -879,14 +917,12 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
if (ret) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: Unable to allocate URBs\n");
- goto err_urb;
+ goto err_fw_download;
}
return 0;
err_fw_download:
- ath9k_hif_usb_dealloc_urbs(hif_dev);
-err_urb:
release_firmware(hif_dev->firmware);
err_fw_req:
hif_dev->firmware = NULL;
@@ -900,6 +936,61 @@ static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
release_firmware(hif_dev->firmware);
}
+/*
+ * An exact copy of the function from zd1211rw.
+ */
+static int send_eject_command(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct usb_host_interface *iface_desc = &interface->altsetting[0];
+ struct usb_endpoint_descriptor *endpoint;
+ unsigned char *cmd;
+ u8 bulk_out_ep;
+ int r;
+
+ /* Find bulk out endpoint */
+ for (r = 1; r >= 0; r--) {
+ endpoint = &iface_desc->endpoint[r].desc;
+ if (usb_endpoint_dir_out(endpoint) &&
+ usb_endpoint_xfer_bulk(endpoint)) {
+ bulk_out_ep = endpoint->bEndpointAddress;
+ break;
+ }
+ }
+ if (r == -1) {
+ dev_err(&udev->dev,
+ "ath9k_htc: Could not find bulk out endpoint\n");
+ return -ENODEV;
+ }
+
+ cmd = kzalloc(31, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENODEV;
+
+ /* USB bulk command block */
+ cmd[0] = 0x55; /* bulk command signature */
+ cmd[1] = 0x53; /* bulk command signature */
+ cmd[2] = 0x42; /* bulk command signature */
+ cmd[3] = 0x43; /* bulk command signature */
+ cmd[14] = 6; /* command length */
+
+ cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */
+ cmd[19] = 0x2; /* eject disc */
+
+ dev_info(&udev->dev, "Ejecting storage device...\n");
+ r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
+ cmd, 31, NULL, 2000);
+ kfree(cmd);
+ if (r)
+ return r;
+
+ /* At this point, the device disconnects and reconnects with the real
+ * ID numbers. */
+
+ usb_set_intfdata(interface, NULL);
+ return 0;
+}
+
static int ath9k_hif_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
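
send_eject_command() above fills a raw 31-byte buffer whose offsets follow the USB mass-storage Bulk-Only Command Block Wrapper: bytes 0-3 carry the 'USBC' signature, byte 14 the CDB length, and bytes 15 onward the SCSI CDB, here START STOP UNIT with the LOEJ bit set so the emulated disc is ejected and the dongle re-enumerates with its real IDs. The sketch below builds the same bytes through a named struct so the magic offsets are easier to read; the field names are descriptive, not taken from a kernel header:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct bulk_cbw {			/* 31-byte Bulk-Only Command Block Wrapper */
	uint8_t signature[4];		/* 'U' 'S' 'B' 'C' = 0x55 0x53 0x42 0x43 */
	uint8_t tag[4];			/* opaque tag echoed back in the status wrapper */
	uint8_t data_len[4];		/* no data phase for this command */
	uint8_t flags;			/* direction; 0 = host to device */
	uint8_t lun;
	uint8_t cb_length;		/* length of the CDB that follows */
	uint8_t cdb[16];
} __attribute__((packed));

int main(void)
{
	struct bulk_cbw cbw;
	uint8_t raw[31];

	memset(&cbw, 0, sizeof(cbw));
	memcpy(cbw.signature, "USBC", 4);
	cbw.cb_length = 6;		/* START STOP UNIT is a 6-byte CDB */
	cbw.cdb[0] = 0x1b;		/* SCSI START STOP UNIT */
	cbw.cdb[4] = 0x02;		/* LOEJ=1, START=0: eject the medium */

	memcpy(raw, &cbw, sizeof(raw));
	printf("cmd[14]=%d cmd[15]=0x%02x cmd[19]=0x%02x\n",
	       raw[14], raw[15], raw[19]);
	return 0;
}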
@@ -907,6 +998,9 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
struct hif_device_usb *hif_dev;
int ret = 0;
+ if (id->driver_info == STORAGE_DEVICE)
+ return send_eject_command(interface);
+
hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
if (!hif_dev) {
ret = -ENOMEM;
@@ -931,23 +1025,15 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
/* Find out which firmware to load */
- switch(hif_dev->device_id) {
- case 0x7010:
- case 0x7015:
- case 0x9018:
- case 0xA704:
- case 0x1200:
+ if (IS_AR7010_DEVICE(id->driver_info))
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
hif_dev->fw_name = FIRMWARE_AR7010_1_1;
else
hif_dev->fw_name = FIRMWARE_AR7010;
- break;
- default:
+ else
hif_dev->fw_name = FIRMWARE_AR9271;
- break;
- }
- ret = ath9k_hif_usb_dev_init(hif_dev);
+ ret = ath9k_hif_usb_dev_init(hif_dev, id->driver_info);
if (ret) {
ret = -EINVAL;
goto err_hif_init_usb;
@@ -955,7 +1041,7 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
ret = ath9k_htc_hw_init(hif_dev->htc_handle,
&hif_dev->udev->dev, hif_dev->device_id,
- hif_dev->udev->product);
+ hif_dev->udev->product, id->driver_info);
if (ret) {
ret = -EINVAL;
goto err_htc_hw_init;
@@ -998,18 +1084,18 @@ static void ath9k_hif_usb_reboot(struct usb_device *udev)
static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
{
struct usb_device *udev = interface_to_usbdev(interface);
- struct hif_device_usb *hif_dev =
- (struct hif_device_usb *) usb_get_intfdata(interface);
-
- if (hif_dev) {
- ath9k_htc_hw_deinit(hif_dev->htc_handle,
- (udev->state == USB_STATE_NOTATTACHED) ? true : false);
- ath9k_htc_hw_free(hif_dev->htc_handle);
- ath9k_hif_usb_dev_deinit(hif_dev);
- usb_set_intfdata(interface, NULL);
- }
+ struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
+ bool unplugged = (udev->state == USB_STATE_NOTATTACHED) ? true : false;
+
+ if (!hif_dev)
+ return;
+
+ ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
+ ath9k_htc_hw_free(hif_dev->htc_handle);
+ ath9k_hif_usb_dev_deinit(hif_dev);
+ usb_set_intfdata(interface, NULL);
- if (hif_dev->flags & HIF_USB_START)
+ if (!unplugged && (hif_dev->flags & HIF_USB_START))
ath9k_hif_usb_reboot(udev);
kfree(hif_dev);
@@ -1021,8 +1107,14 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
static int ath9k_hif_usb_suspend(struct usb_interface *interface,
pm_message_t message)
{
- struct hif_device_usb *hif_dev =
- (struct hif_device_usb *) usb_get_intfdata(interface);
+ struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
+
+ /*
+ * The device has to be set to FULLSLEEP mode in case no
+ * interface is up.
+ */
+ if (!(hif_dev->flags & HIF_USB_START))
+ ath9k_htc_suspend(hif_dev->htc_handle);
ath9k_hif_usb_dealloc_urbs(hif_dev);
@@ -1031,8 +1123,8 @@ static int ath9k_hif_usb_suspend(struct usb_interface *interface,
static int ath9k_hif_usb_resume(struct usb_interface *interface)
{
- struct hif_device_usb *hif_dev =
- (struct hif_device_usb *) usb_get_intfdata(interface);
+ struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
+ struct htc_target *htc_handle = hif_dev->htc_handle;
int ret;
ret = ath9k_hif_usb_alloc_urbs(hif_dev);
@@ -1040,7 +1132,8 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
return ret;
if (hif_dev->firmware) {
- ret = ath9k_hif_usb_download_fw(hif_dev);
+ ret = ath9k_hif_usb_download_fw(hif_dev,
+ htc_handle->drv_priv->ah->hw_version.usbdev);
if (ret)
goto fail_resume;
} else {
@@ -1050,7 +1143,7 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
mdelay(100);
- ret = ath9k_htc_resume(hif_dev->htc_handle);
+ ret = ath9k_htc_resume(htc_handle);
if (ret)
goto fail_resume;
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 2daf97b11c08..7b9d863d4035 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -17,6 +17,8 @@
#ifndef HTC_USB_H
#define HTC_USB_H
+#define IS_AR7010_DEVICE(_v) (((_v) == AR9280_USB) || ((_v) == AR9287_USB))
+
#define AR9271_FIRMWARE 0x501000
#define AR9271_FIRMWARE_TEXT 0x903000
#define AR7010_FIRMWARE_TEXT 0x906000
@@ -62,6 +64,7 @@ struct tx_buf {
};
#define HIF_USB_TX_STOP BIT(0)
+#define HIF_USB_TX_FLUSH BIT(1)
struct hif_usb_tx {
u8 flags;
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 75ecf6a30d25..753a245c5ad1 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -32,6 +32,7 @@
#include "wmi.h"
#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
+#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
@@ -78,7 +79,7 @@ struct tx_frame_hdr {
u8 node_idx;
u8 vif_idx;
u8 tidno;
- u32 flags; /* ATH9K_HTC_TX_* */
+ __be32 flags; /* ATH9K_HTC_TX_* */
u8 key_type;
u8 keyix;
u8 reserved[26];
@@ -204,8 +205,50 @@ struct ath9k_htc_target_stats {
__be32 ht_tx_xretries;
} __packed;
+#define ATH9K_HTC_MAX_VIF 2
+#define ATH9K_HTC_MAX_BCN_VIF 2
+
+#define INC_VIF(_priv, _type) do { \
+ switch (_type) { \
+ case NL80211_IFTYPE_STATION: \
+ _priv->num_sta_vif++; \
+ break; \
+ case NL80211_IFTYPE_ADHOC: \
+ _priv->num_ibss_vif++; \
+ break; \
+ case NL80211_IFTYPE_AP: \
+ _priv->num_ap_vif++; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+#define DEC_VIF(_priv, _type) do { \
+ switch (_type) { \
+ case NL80211_IFTYPE_STATION: \
+ _priv->num_sta_vif--; \
+ break; \
+ case NL80211_IFTYPE_ADHOC: \
+ _priv->num_ibss_vif--; \
+ break; \
+ case NL80211_IFTYPE_AP: \
+ _priv->num_ap_vif--; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
struct ath9k_htc_vif {
u8 index;
+ u16 seq_no;
+ bool beacon_configured;
+};
+
+struct ath9k_vif_iter_data {
+ const u8 *hw_macaddr;
+ u8 mask[ETH_ALEN];
};
#define ATH9K_HTC_MAX_STA 8
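
The INC_VIF()/DEC_VIF() helpers added above are multi-statement macros wrapped in do { ... } while (0), the usual idiom for making a macro expand to exactly one statement. A tiny standalone example of why the wrapper matters (the macro names here are invented for illustration):

#include <stdio.h>

/* Without the do/while(0) wrapper, a multi-statement macro misbehaves when
 * used as the body of an if/else without braces: the second statement falls
 * outside the if and the else no longer pairs up. */
#define BUMP_BAD(x)  (x)++; printf("bumped\n")
#define BUMP_GOOD(x) do { (x)++; printf("bumped\n"); } while (0)

int main(void)
{
	int n = 0;

	if (n > 0)
		BUMP_GOOD(n);	/* expands to a single statement, else stays attached */
	else
		printf("not bumped, n=%d\n", n);

	return 0;
}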
@@ -310,10 +353,8 @@ struct ath_led {
struct htc_beacon_config {
u16 beacon_interval;
- u16 listen_interval;
u16 dtim_period;
u16 bmiss_timeout;
- u8 dtim_count;
};
struct ath_btcoex {
@@ -331,17 +372,14 @@ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv);
#define OP_INVALID BIT(0)
#define OP_SCANNING BIT(1)
-#define OP_FULL_RESET BIT(2)
-#define OP_LED_ASSOCIATED BIT(3)
-#define OP_LED_ON BIT(4)
-#define OP_PREAMBLE_SHORT BIT(5)
-#define OP_PROTECT_ENABLE BIT(6)
-#define OP_ASSOCIATED BIT(7)
-#define OP_ENABLE_BEACON BIT(8)
-#define OP_LED_DEINIT BIT(9)
-#define OP_UNPLUGGED BIT(10)
-#define OP_BT_PRIORITY_DETECTED BIT(11)
-#define OP_BT_SCAN BIT(12)
+#define OP_LED_ASSOCIATED BIT(2)
+#define OP_LED_ON BIT(3)
+#define OP_ENABLE_BEACON BIT(4)
+#define OP_LED_DEINIT BIT(5)
+#define OP_BT_PRIORITY_DETECTED BIT(6)
+#define OP_BT_SCAN BIT(7)
+#define OP_ANI_RUNNING BIT(8)
+#define OP_TSF_RESET BIT(9)
struct ath9k_htc_priv {
struct device *dev;
@@ -360,15 +398,24 @@ struct ath9k_htc_priv {
enum htc_endpoint_id data_vi_ep;
enum htc_endpoint_id data_vo_ep;
+ u8 vif_slot;
+ u8 mon_vif_idx;
+ u8 sta_slot;
+ u8 vif_sta_pos[ATH9K_HTC_MAX_VIF];
+ u8 num_ibss_vif;
+ u8 num_sta_vif;
+ u8 num_ap_vif;
+
u16 op_flags;
u16 curtxpow;
u16 txpowlimit;
u16 nvifs;
u16 nstations;
- u16 seq_no;
u32 bmiss_cnt;
+ bool rearm_ani;
+ bool reconfig_beacon;
- struct ath9k_hw_cal_data caldata[38];
+ struct ath9k_hw_cal_data caldata;
spinlock_t beacon_lock;
@@ -378,14 +425,15 @@ struct ath9k_htc_priv {
struct ieee80211_vif *vif;
struct htc_beacon_config cur_beacon_conf;
unsigned int rxfilter;
- struct tasklet_struct wmi_tasklet;
+ struct tasklet_struct swba_tasklet;
struct tasklet_struct rx_tasklet;
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
struct ath9k_htc_rx rx;
struct tasklet_struct tx_tasklet;
struct sk_buff_head tx_queue;
- struct delayed_work ath9k_ani_work;
+ struct delayed_work ani_work;
struct work_struct ps_work;
+ struct work_struct fatal_work;
struct mutex htc_pm_lock;
unsigned long ps_usecount;
@@ -420,9 +468,12 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
common->bus_ops->read_cachesize(common, csz);
}
+void ath9k_htc_reset(struct ath9k_htc_priv *priv);
+
void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv);
void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif);
+void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv);
void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
@@ -432,9 +483,12 @@ void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
enum htc_endpoint_id ep_id, bool txok);
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv);
void ath9k_htc_station_work(struct work_struct *work);
void ath9k_htc_aggr_work(struct work_struct *work);
-void ath9k_ani_work(struct work_struct *work);;
+void ath9k_htc_ani_work(struct work_struct *work);
+void ath9k_htc_start_ani(struct ath9k_htc_priv *priv);
+void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv);
int ath9k_tx_init(struct ath9k_htc_priv *priv);
void ath9k_tx_tasklet(unsigned long data);
@@ -455,15 +509,22 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv);
void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
void ath9k_ps_work(struct work_struct *work);
+bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
+ enum ath9k_power_mode mode);
void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
+void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_htc_radio_enable(struct ieee80211_hw *hw);
+void ath9k_htc_radio_disable(struct ieee80211_hw *hw);
+void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv);
void ath9k_init_leds(struct ath9k_htc_priv *priv);
void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
- u16 devid, char *product);
+ u16 devid, char *product, u32 drv_info);
void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
#ifdef CONFIG_PM
+void ath9k_htc_suspend(struct htc_target *htc_handle);
int ath9k_htc_resume(struct htc_target *htc_handle);
#endif
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 1b72aa482ac7..8d1d8792436d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -123,11 +123,12 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
- ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
- ath_print(common, ATH_DBG_BEACON,
- "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
- bs.bs_bmissthreshold, bs.bs_sleepduration,
- bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
+ ath_dbg(common, ATH_DBG_CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
+ intval, tsf, tsftu);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
+ bs.bs_bmissthreshold, bs.bs_sleepduration,
+ bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
/* Set the computed STA beacon timers */
@@ -138,25 +139,81 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
}
+static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
+ struct htc_beacon_config *bss_conf)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ enum ath9k_int imask = 0;
+ u32 nexttbtt, intval, tsftu;
+ __be32 htc_imask = 0;
+ int ret;
+ u8 cmd_rsp;
+ u64 tsf;
+
+ intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ intval /= ATH9K_HTC_MAX_BCN_VIF;
+ nexttbtt = intval;
+
+ if (priv->op_flags & OP_TSF_RESET) {
+ intval |= ATH9K_BEACON_RESET_TSF;
+ priv->op_flags &= ~OP_TSF_RESET;
+ } else {
+ /*
+ * Pull nexttbtt forward to reflect the current TSF.
+ */
+ tsf = ath9k_hw_gettsf64(priv->ah);
+ tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
+ do {
+ nexttbtt += intval;
+ } while (nexttbtt < tsftu);
+ }
+
+ intval |= ATH9K_BEACON_ENA;
+
+ if (priv->op_flags & OP_ENABLE_BEACON)
+ imask |= ATH9K_INT_SWBA;
+
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "AP Beacon config, intval: %d, nexttbtt: %u imask: 0x%x\n",
+ bss_conf->beacon_interval, nexttbtt, imask);
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
+ priv->bmiss_cnt = 0;
+ htc_imask = cpu_to_be32(imask);
+ WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+}
+
static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
struct htc_beacon_config *bss_conf)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
enum ath9k_int imask = 0;
- u32 nexttbtt, intval;
+ u32 nexttbtt, intval, tsftu;
__be32 htc_imask = 0;
int ret;
u8 cmd_rsp;
+ u64 tsf;
intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
nexttbtt = intval;
+
+ /*
+ * Pull nexttbtt forward to reflect the current TSF.
+ */
+ tsf = ath9k_hw_gettsf64(priv->ah);
+ tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
+ do {
+ nexttbtt += intval;
+ } while (nexttbtt < tsftu);
+
intval |= ATH9K_BEACON_ENA;
if (priv->op_flags & OP_ENABLE_BEACON)
imask |= ATH9K_INT_SWBA;
- ath_print(common, ATH_DBG_BEACON,
- "IBSS Beacon config, intval: %d, imask: 0x%x\n",
- bss_conf->beacon_interval, imask);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "IBSS Beacon config, intval: %d, nexttbtt: %u, imask: 0x%x\n",
+ bss_conf->beacon_interval, nexttbtt, imask);
WMI_CMD(WMI_DISABLE_INTR_CMDID);
ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
@@ -207,9 +264,9 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
struct ieee80211_hdr *hdr =
(struct ieee80211_hdr *) beacon->data;
- priv->seq_no += 0x10;
+ avp->seq_no += 0x10;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(priv->seq_no);
+ hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
}
tx_ctl.type = ATH9K_HTC_NORMAL;
@@ -246,40 +303,133 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
qi.tqi_cwmax = qi_be.tqi_cwmax;
if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Unable to update beacon queue %u!\n", qnum);
+ ath_err(ath9k_hw_common(ah),
+ "Unable to update beacon queue %u!\n", qnum);
} else {
ath9k_hw_resettxqueue(ah, priv->beaconq);
}
}
+static void ath9k_htc_beacon_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ bool *beacon_configured = (bool *)data;
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ avp->beacon_configured)
+ *beacon_configured = true;
+}
+
+static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ bool beacon_configured;
+
+ /*
+ * Changing the beacon interval when multiple AP interfaces
+ * are configured will affect beacon transmission of all
+ * of them.
+ */
+ if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+ (priv->num_ap_vif > 1) &&
+ (vif->type == NL80211_IFTYPE_AP) &&
+ (cur_conf->beacon_interval != bss_conf->beacon_int)) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Changing beacon interval of multiple AP interfaces !\n");
+ return false;
+ }
+
+ /*
+ * If the HW is operating in AP mode, any new station interfaces that
+ * are added cannot change the beacon parameters.
+ */
+ if (priv->num_ap_vif &&
+ (vif->type != NL80211_IFTYPE_AP)) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "HW in AP mode, cannot set STA beacon parameters\n");
+ return false;
+ }
+
+ /*
+ * The beacon parameters are configured only for the first
+ * station interface.
+ */
+ if ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
+ (priv->num_sta_vif > 1) &&
+ (vif->type == NL80211_IFTYPE_STATION)) {
+ beacon_configured = false;
+ ieee80211_iterate_active_interfaces_atomic(priv->hw,
+ ath9k_htc_beacon_iter,
+ &beacon_configured);
+
+ if (beacon_configured) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Beacon already configured for a station interface\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
+
+ if (!ath9k_htc_check_beacon_config(priv, vif))
+ return;
cur_conf->beacon_interval = bss_conf->beacon_int;
if (cur_conf->beacon_interval == 0)
cur_conf->beacon_interval = 100;
cur_conf->dtim_period = bss_conf->dtim_period;
- cur_conf->listen_interval = 1;
- cur_conf->dtim_count = 1;
cur_conf->bmiss_timeout =
ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
switch (vif->type) {
case NL80211_IFTYPE_STATION:
ath9k_htc_beacon_config_sta(priv, cur_conf);
+ avp->beacon_configured = true;
break;
case NL80211_IFTYPE_ADHOC:
ath9k_htc_beacon_config_adhoc(priv, cur_conf);
break;
+ case NL80211_IFTYPE_AP:
+ ath9k_htc_beacon_config_ap(priv, cur_conf);
+ break;
+ default:
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Unsupported beaconing mode\n");
+ return;
+ }
+}
+
+void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+
+ switch (priv->ah->opmode) {
+ case NL80211_IFTYPE_STATION:
+ ath9k_htc_beacon_config_sta(priv, cur_conf);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ath9k_htc_beacon_config_adhoc(priv, cur_conf);
+ break;
+ case NL80211_IFTYPE_AP:
+ ath9k_htc_beacon_config_ap(priv, cur_conf);
+ break;
default:
- ath_print(common, ATH_DBG_CONFIG,
- "Unsupported beaconing mode\n");
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Unsupported beaconing mode\n");
return;
}
}
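Note on the beacon changes above: both the new ath9k_htc_beacon_config_ap() and the reworked adhoc path advance nexttbtt past the current TSF before programming the hardware. A minimal standalone sketch of that catch-up loop follows; it is not part of the patch, and the FUDGE value and simplified TSF-to-TU conversion are assumptions for illustration only.

	#include <stdint.h>
	#include <stdio.h>

	#define FUDGE 8	/* slack in TU, standing in for the driver's fudge factor */

	static uint32_t tsf_to_tu(uint64_t tsf_us)
	{
		return (uint32_t)(tsf_us >> 10);	/* 1 TU = 1024 microseconds */
	}

	int main(void)
	{
		uint32_t intval = 100;			/* beacon interval in TU */
		uint32_t nexttbtt = intval;		/* first candidate TBTT */
		uint64_t tsf_us = 5000000;		/* pretend current TSF: 5 s */
		uint32_t tsftu = tsf_to_tu(tsf_us) + FUDGE;

		/* Pull nexttbtt forward until it lies beyond the current TSF. */
		while (nexttbtt < tsftu)
			nexttbtt += intval;

		printf("tsftu=%u, next TBTT=%u TU\n", tsftu, nexttbtt);
		return 0;
	}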
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 50eec9a3b88c..7e630a81b453 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -1,3 +1,19 @@
+/*
+ * Copyright (c) 2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
#include "htc.h"
/******************/
@@ -20,13 +36,13 @@ static void ath_detect_bt_priority(struct ath9k_htc_priv *priv)
priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
/* Detect if colocated bt started scanning */
if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "BT scan detected");
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "BT scan detected\n");
priv->op_flags |= (OP_BT_SCAN |
OP_BT_PRIORITY_DETECTED);
} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "BT priority traffic detected");
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "BT priority traffic detected\n");
priv->op_flags |= OP_BT_PRIORITY_DETECTED;
}
@@ -83,8 +99,8 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
struct ath_common *common = ath9k_hw_common(ah);
bool is_btscan = priv->op_flags & OP_BT_SCAN;
- ath_print(common, ATH_DBG_BTCOEX,
- "time slice work for bt and wlan\n");
+ ath_dbg(common, ATH_DBG_BTCOEX,
+ "time slice work for bt and wlan\n");
if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_NONE);
@@ -114,8 +130,7 @@ void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
struct ath_btcoex *btcoex = &priv->btcoex;
struct ath_hw *ah = priv->ah;
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Starting btcoex work");
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, "Starting btcoex work\n");
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
@@ -132,3 +147,315 @@ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv)
cancel_delayed_work_sync(&priv->coex_period_work);
cancel_delayed_work_sync(&priv->duty_cycle_work);
}
+
+/*******/
+/* LED */
+/*******/
+
+static void ath9k_led_blink_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
+ ath9k_led_blink_work.work);
+
+ if (!(priv->op_flags & OP_LED_ASSOCIATED))
+ return;
+
+ if ((priv->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
+ (priv->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
+ else
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
+ (priv->op_flags & OP_LED_ON) ? 1 : 0);
+
+ ieee80211_queue_delayed_work(priv->hw,
+ &priv->ath9k_led_blink_work,
+ (priv->op_flags & OP_LED_ON) ?
+ msecs_to_jiffies(priv->led_off_duration) :
+ msecs_to_jiffies(priv->led_on_duration));
+
+ priv->led_on_duration = priv->led_on_cnt ?
+ max((ATH_LED_ON_DURATION_IDLE - priv->led_on_cnt), 25) :
+ ATH_LED_ON_DURATION_IDLE;
+ priv->led_off_duration = priv->led_off_cnt ?
+ max((ATH_LED_OFF_DURATION_IDLE - priv->led_off_cnt), 10) :
+ ATH_LED_OFF_DURATION_IDLE;
+ priv->led_on_cnt = priv->led_off_cnt = 0;
+
+ if (priv->op_flags & OP_LED_ON)
+ priv->op_flags &= ~OP_LED_ON;
+ else
+ priv->op_flags |= OP_LED_ON;
+}
+
+static void ath9k_led_brightness_work(struct work_struct *work)
+{
+ struct ath_led *led = container_of(work, struct ath_led,
+ brightness_work.work);
+ struct ath9k_htc_priv *priv = led->priv;
+
+ switch (led->brightness) {
+ case LED_OFF:
+ if (led->led_type == ATH_LED_ASSOC ||
+ led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
+ (led->led_type == ATH_LED_RADIO));
+ priv->op_flags &= ~OP_LED_ASSOCIATED;
+ if (led->led_type == ATH_LED_RADIO)
+ priv->op_flags &= ~OP_LED_ON;
+ } else {
+ priv->led_off_cnt++;
+ }
+ break;
+ case LED_FULL:
+ if (led->led_type == ATH_LED_ASSOC) {
+ priv->op_flags |= OP_LED_ASSOCIATED;
+ ieee80211_queue_delayed_work(priv->hw,
+ &priv->ath9k_led_blink_work, 0);
+ } else if (led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
+ priv->op_flags |= OP_LED_ON;
+ } else {
+ priv->led_on_cnt++;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath9k_led_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
+ struct ath9k_htc_priv *priv = led->priv;
+
+ led->brightness = brightness;
+ if (!(priv->op_flags & OP_LED_DEINIT))
+ ieee80211_queue_delayed_work(priv->hw,
+ &led->brightness_work, 0);
+}
+
+void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->radio_led.brightness_work);
+ cancel_delayed_work_sync(&priv->assoc_led.brightness_work);
+ cancel_delayed_work_sync(&priv->tx_led.brightness_work);
+ cancel_delayed_work_sync(&priv->rx_led.brightness_work);
+}
+
+static int ath9k_register_led(struct ath9k_htc_priv *priv, struct ath_led *led,
+ char *trigger)
+{
+ int ret;
+
+ led->priv = priv;
+ led->led_cdev.name = led->name;
+ led->led_cdev.default_trigger = trigger;
+ led->led_cdev.brightness_set = ath9k_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_cdev);
+ if (ret)
+ ath_err(ath9k_hw_common(priv->ah),
+ "Failed to register led:%s", led->name);
+ else
+ led->registered = 1;
+
+ INIT_DELAYED_WORK(&led->brightness_work, ath9k_led_brightness_work);
+
+ return ret;
+}
+
+static void ath9k_unregister_led(struct ath_led *led)
+{
+ if (led->registered) {
+ led_classdev_unregister(&led->led_cdev);
+ led->registered = 0;
+ }
+}
+
+void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
+{
+ priv->op_flags |= OP_LED_DEINIT;
+ ath9k_unregister_led(&priv->assoc_led);
+ priv->op_flags &= ~OP_LED_ASSOCIATED;
+ ath9k_unregister_led(&priv->tx_led);
+ ath9k_unregister_led(&priv->rx_led);
+ ath9k_unregister_led(&priv->radio_led);
+}
+
+void ath9k_init_leds(struct ath9k_htc_priv *priv)
+{
+ char *trigger;
+ int ret;
+
+ if (AR_SREV_9287(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_9287;
+ else if (AR_SREV_9271(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_9271;
+ else if (AR_DEVID_7010(priv->ah))
+ priv->ah->led_pin = ATH_LED_PIN_7010;
+ else
+ priv->ah->led_pin = ATH_LED_PIN_DEF;
+
+ /* Configure gpio 1 for output */
+ ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
+
+ INIT_DELAYED_WORK(&priv->ath9k_led_blink_work, ath9k_led_blink_work);
+
+ trigger = ieee80211_get_radio_led_name(priv->hw);
+ snprintf(priv->radio_led.name, sizeof(priv->radio_led.name),
+ "ath9k-%s::radio", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->radio_led, trigger);
+ priv->radio_led.led_type = ATH_LED_RADIO;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_assoc_led_name(priv->hw);
+ snprintf(priv->assoc_led.name, sizeof(priv->assoc_led.name),
+ "ath9k-%s::assoc", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->assoc_led, trigger);
+ priv->assoc_led.led_type = ATH_LED_ASSOC;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_tx_led_name(priv->hw);
+ snprintf(priv->tx_led.name, sizeof(priv->tx_led.name),
+ "ath9k-%s::tx", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->tx_led, trigger);
+ priv->tx_led.led_type = ATH_LED_TX;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_rx_led_name(priv->hw);
+ snprintf(priv->rx_led.name, sizeof(priv->rx_led.name),
+ "ath9k-%s::rx", wiphy_name(priv->hw->wiphy));
+ ret = ath9k_register_led(priv, &priv->rx_led, trigger);
+ priv->rx_led.led_type = ATH_LED_RX;
+ if (ret)
+ goto fail;
+
+ priv->op_flags &= ~OP_LED_DEINIT;
+
+ return;
+
+fail:
+ cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+ ath9k_deinit_leds(priv);
+}
+
+/*******************/
+/* Rfkill */
+/*******************/
+
+static bool ath_is_rfkill_set(struct ath9k_htc_priv *priv)
+{
+ return ath9k_hw_gpio_get(priv->ah, priv->ah->rfkill_gpio) ==
+ priv->ah->rfkill_polarity;
+}
+
+void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ bool blocked = !!ath_is_rfkill_set(priv);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+}
+
+void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
+{
+ if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ wiphy_rfkill_start_polling(priv->hw->wiphy);
+}
+
+void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ret;
+ u8 cmd_rsp;
+
+ if (!ah->curchan)
+ ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+
+ /* Reset the HW */
+ ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
+ if (ret) {
+ ath_err(common,
+ "Unable to reset hardware; reset status %d (freq %u MHz)\n",
+ ret, ah->curchan->channel);
+ }
+
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
+
+ /* Start RX */
+ WMI_CMD(WMI_START_RECV_CMDID);
+ ath9k_host_rx_init(priv);
+
+ /* Start TX */
+ htc_start(priv->htc);
+ spin_lock_bh(&priv->tx_lock);
+ priv->tx_queues_stop = false;
+ spin_unlock_bh(&priv->tx_lock);
+ ieee80211_wake_queues(hw);
+
+ WMI_CMD(WMI_ENABLE_INTR_CMDID);
+
+ /* Enable LED */
+ ath9k_hw_cfg_output(ah, ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+}
+
+void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
+{
+ struct ath9k_htc_priv *priv = hw->priv;
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ret;
+ u8 cmd_rsp;
+
+ ath9k_htc_ps_wakeup(priv);
+
+ /* Disable LED */
+ ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+
+ /* Stop TX */
+ ieee80211_stop_queues(hw);
+ htc_stop(priv->htc);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ skb_queue_purge(&priv->tx_queue);
+
+ /* Stop RX */
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+
+ /*
+ * The MIB counters have to be disabled here,
+ * since the target doesn't do it.
+ */
+ ath9k_hw_disable_mib_counters(ah);
+
+ if (!ah->curchan)
+ ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+
+ /* Reset the HW */
+ ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
+ if (ret) {
+ ath_err(common,
+ "Unable to reset hardware; reset status %d (freq %u MHz)\n",
+ ret, ah->curchan->channel);
+ }
+
+ /* Disable the PHY */
+ ath9k_hw_phy_disable(ah);
+
+ ath9k_htc_ps_restore(priv);
+ ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
+}
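The LED handling moved into htc_drv_gpio.c above keeps the existing behaviour of ath9k_led_blink_work(): the on/off durations shrink with recent TX/RX activity and are clamped to a minimum. A standalone sketch of that duration calculation, with assumed stand-ins for the ATH_LED_*_DURATION_IDLE constants (not part of the patch):

	#include <stdio.h>

	#define LED_ON_IDLE_MS	350	/* assumed stand-in for ATH_LED_ON_DURATION_IDLE */
	#define LED_OFF_IDLE_MS	250	/* assumed stand-in for ATH_LED_OFF_DURATION_IDLE */

	static int max_int(int a, int b)
	{
		return a > b ? a : b;
	}

	int main(void)
	{
		int led_on_cnt = 120, led_off_cnt = 40;	/* activity since last blink */
		int on_ms, off_ms;

		/* More traffic shortens the "on" phase, clamped at 25 ms. */
		on_ms = led_on_cnt ?
			max_int(LED_ON_IDLE_MS - led_on_cnt, 25) : LED_ON_IDLE_MS;
		/* Likewise for the "off" phase, clamped at 10 ms. */
		off_ms = led_off_cnt ?
			max_int(LED_OFF_IDLE_MS - led_off_cnt, 10) : LED_OFF_IDLE_MS;

		printf("blink: %d ms on, %d ms off\n", on_ms, off_ms);
		return 0;
	}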
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 7c8a38d04561..fc67c937e172 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -142,9 +142,6 @@ static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
{
ath9k_htc_exit_debug(priv->ah);
ath9k_hw_deinit(priv->ah);
- tasklet_kill(&priv->wmi_tasklet);
- tasklet_kill(&priv->rx_tasklet);
- tasklet_kill(&priv->tx_tasklet);
kfree(priv->ah);
priv->ah = NULL;
}
@@ -181,7 +178,8 @@ static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv,
return htc_connect_service(priv->htc, &req, ep_id);
}
-static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
+static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid,
+ u32 drv_info)
{
int ret;
@@ -245,17 +243,10 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
* the HIF layer, shouldn't matter much.
*/
- switch(devid) {
- case 0x7010:
- case 0x7015:
- case 0x9018:
- case 0xA704:
- case 0x1200:
+ if (IS_AR7010_DEVICE(drv_info))
priv->htc->credits = 45;
- break;
- default:
+ else
priv->htc->credits = 33;
- }
ret = htc_init(priv->htc);
if (ret)
@@ -294,21 +285,49 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
(u8 *) &val, sizeof(val),
100);
if (unlikely(r)) {
- ath_print(common, ATH_DBG_WMI,
- "REGISTER READ FAILED: (0x%04x, %d)\n",
- reg_offset, r);
+ ath_dbg(common, ATH_DBG_WMI,
+ "REGISTER READ FAILED: (0x%04x, %d)\n",
+ reg_offset, r);
return -EIO;
}
return be32_to_cpu(val);
}
+static void ath9k_multi_regread(void *hw_priv, u32 *addr,
+ u32 *val, u16 count)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ __be32 tmpaddr[8];
+ __be32 tmpval[8];
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ tmpaddr[i] = cpu_to_be32(addr[i]);
+ }
+
+ ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+ (u8 *)tmpaddr, sizeof(u32) * count,
+ (u8 *)tmpval, sizeof(u32) * count,
+ 100);
+ if (unlikely(ret)) {
+ ath_dbg(common, ATH_DBG_WMI,
+ "Multiple REGISTER READ FAILED (count: %d)\n", count);
+ }
+
+ for (i = 0; i < count; i++) {
+ val[i] = be32_to_cpu(tmpval[i]);
+ }
+}
+
static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
{
struct ath_hw *ah = (struct ath_hw *) hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
- __be32 buf[2] = {
+ const __be32 buf[2] = {
cpu_to_be32(reg_offset),
cpu_to_be32(val),
};
@@ -319,9 +338,9 @@ static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
(u8 *) &val, sizeof(val),
100);
if (unlikely(r)) {
- ath_print(common, ATH_DBG_WMI,
- "REGISTER WRITE FAILED:(0x%04x, %d)\n",
- reg_offset, r);
+ ath_dbg(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED:(0x%04x, %d)\n",
+ reg_offset, r);
}
}
@@ -351,9 +370,9 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
(u8 *) &rsp_status, sizeof(rsp_status),
100);
if (unlikely(r)) {
- ath_print(common, ATH_DBG_WMI,
- "REGISTER WRITE FAILED, multi len: %d\n",
- priv->wmi->multi_write_idx);
+ ath_dbg(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED, multi len: %d\n",
+ priv->wmi->multi_write_idx);
}
priv->wmi->multi_write_idx = 0;
}
@@ -401,9 +420,9 @@ static void ath9k_regwrite_flush(void *hw_priv)
(u8 *) &rsp_status, sizeof(rsp_status),
100);
if (unlikely(r)) {
- ath_print(common, ATH_DBG_WMI,
- "REGISTER WRITE FAILED, multi len: %d\n",
- priv->wmi->multi_write_idx);
+ ath_dbg(common, ATH_DBG_WMI,
+ "REGISTER WRITE FAILED, multi len: %d\n",
+ priv->wmi->multi_write_idx);
}
priv->wmi->multi_write_idx = 0;
}
@@ -413,6 +432,7 @@ static void ath9k_regwrite_flush(void *hw_priv)
static const struct ath_ops ath9k_common_ops = {
.read = ath9k_regread,
+ .multi_read = ath9k_multi_regread,
.write = ath9k_regwrite,
.enable_write_buffer = ath9k_enable_regwrite_buffer,
.write_flush = ath9k_regwrite_flush,
@@ -475,9 +495,9 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv,
tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, 2);
rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, 2);
- ath_print(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
if (tx_streams != rx_streams) {
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
@@ -501,37 +521,31 @@ static int ath9k_init_queues(struct ath9k_htc_priv *priv)
priv->beaconq = ath9k_hw_beaconq_setup(priv->ah);
if (priv->beaconq == -1) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup BEACON xmit queue\n");
+ ath_err(common, "Unable to setup BEACON xmit queue\n");
goto err;
}
priv->cabq = ath9k_htc_cabq_setup(priv);
if (priv->cabq == -1) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup CAB xmit queue\n");
+ ath_err(common, "Unable to setup CAB xmit queue\n");
goto err;
}
if (!ath9k_htc_txq_setup(priv, WME_AC_BE)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BE traffic\n");
+ ath_err(common, "Unable to setup xmit queue for BE traffic\n");
goto err;
}
if (!ath9k_htc_txq_setup(priv, WME_AC_BK)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BK traffic\n");
+ ath_err(common, "Unable to setup xmit queue for BK traffic\n");
goto err;
}
if (!ath9k_htc_txq_setup(priv, WME_AC_VI)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VI traffic\n");
+ ath_err(common, "Unable to setup xmit queue for VI traffic\n");
goto err;
}
if (!ath9k_htc_txq_setup(priv, WME_AC_VO)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VO traffic\n");
+ ath_err(common, "Unable to setup xmit queue for VO traffic\n");
goto err;
}
@@ -549,9 +563,9 @@ static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
/* Get the hardware key cache size. */
common->keymax = priv->ah->caps.keycache_size;
if (common->keymax > ATH_KEYMAX) {
- ath_print(common, ATH_DBG_ANY,
- "Warning, using only %u entries in %u key cache\n",
- ATH_KEYMAX, common->keymax);
+ ath_dbg(common, ATH_DBG_ANY,
+ "Warning, using only %u entries in %u key cache\n",
+ ATH_KEYMAX, common->keymax);
common->keymax = ATH_KEYMAX;
}
@@ -627,7 +641,8 @@ static void ath9k_init_btcoex(struct ath9k_htc_priv *priv)
}
static int ath9k_init_priv(struct ath9k_htc_priv *priv,
- u16 devid, char *product)
+ u16 devid, char *product,
+ u32 drv_info)
{
struct ath_hw *ah = NULL;
struct ath_common *common;
@@ -641,6 +656,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
ah->hw_version.devid = devid;
ah->hw_version.subsysid = 0; /* FIXME */
+ ah->hw_version.usbdev = drv_info;
+ ah->ah_flags |= AH_USE_EEPROM;
priv->ah = ah;
common = ath9k_hw_common(ah);
@@ -656,13 +673,15 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
spin_lock_init(&priv->tx_lock);
mutex_init(&priv->mutex);
mutex_init(&priv->htc_pm_lock);
- tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet,
+ tasklet_init(&priv->swba_tasklet, ath9k_swba_tasklet,
(unsigned long)priv);
tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
(unsigned long)priv);
- tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv);
- INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
+ tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet,
+ (unsigned long)priv);
+ INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work);
INIT_WORK(&priv->ps_work, ath9k_ps_work);
+ INIT_WORK(&priv->fatal_work, ath9k_fatal_work);
/*
* Cache line size is used to size and align various
@@ -673,16 +692,15 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
ret = ath9k_hw_init(ah);
if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to initialize hardware; "
- "initialization status: %d\n", ret);
+ ath_err(common,
+ "Unable to initialize hardware; initialization status: %d\n",
+ ret);
goto err_hw;
}
ret = ath9k_htc_init_debug(ah);
if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to create debugfs files\n");
+ ath_err(common, "Unable to create debugfs files\n");
goto err_debug;
}
@@ -762,16 +780,17 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
}
static int ath9k_init_device(struct ath9k_htc_priv *priv,
- u16 devid, char *product)
+ u16 devid, char *product, u32 drv_info)
{
struct ieee80211_hw *hw = priv->hw;
struct ath_common *common;
struct ath_hw *ah;
int error = 0;
struct ath_regulatory *reg;
+ char hw_name[64];
/* Bring up device */
- error = ath9k_init_priv(priv, devid, product);
+ error = ath9k_init_priv(priv, devid, product, drv_info);
if (error != 0)
goto err_init;
@@ -809,6 +828,22 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
goto err_world;
}
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, "
+ "BE:%d, BK:%d, VI:%d, VO:%d\n",
+ priv->wmi_cmd_ep,
+ priv->beacon_ep,
+ priv->cab_ep,
+ priv->uapsd_ep,
+ priv->mgmt_ep,
+ priv->data_be_ep,
+ priv->data_bk_ep,
+ priv->data_vi_ep,
+ priv->data_vo_ep);
+
+ ath9k_hw_name(priv->ah, hw_name, sizeof(hw_name));
+ wiphy_info(hw->wiphy, "%s\n", hw_name);
+
ath9k_init_leds(priv);
ath9k_start_rfkill_poll(priv);
@@ -829,7 +864,7 @@ err_init:
}
int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
- u16 devid, char *product)
+ u16 devid, char *product, u32 drv_info)
{
struct ieee80211_hw *hw;
struct ath9k_htc_priv *priv;
@@ -856,14 +891,11 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
goto err_free;
}
- ret = ath9k_init_htc_services(priv, devid);
+ ret = ath9k_init_htc_services(priv, devid, drv_info);
if (ret)
goto err_init;
- /* The device may have been unplugged earlier. */
- priv->op_flags &= ~OP_UNPLUGGED;
-
- ret = ath9k_init_device(priv, devid, product);
+ ret = ath9k_init_device(priv, devid, product, drv_info);
if (ret)
goto err_init;
@@ -882,7 +914,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
/* Check if the device has been yanked out. */
if (hotunplug)
- htc_handle->drv_priv->op_flags |= OP_UNPLUGGED;
+ htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED;
ath9k_deinit_device(htc_handle->drv_priv);
ath9k_deinit_wmi(htc_handle->drv_priv);
@@ -891,16 +923,23 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
}
#ifdef CONFIG_PM
+
+void ath9k_htc_suspend(struct htc_target *htc_handle)
+{
+ ath9k_htc_setpower(htc_handle->drv_priv, ATH9K_PM_FULL_SLEEP);
+}
+
int ath9k_htc_resume(struct htc_target *htc_handle)
{
+ struct ath9k_htc_priv *priv = htc_handle->drv_priv;
int ret;
- ret = ath9k_htc_wait_for_target(htc_handle->drv_priv);
+ ret = ath9k_htc_wait_for_target(priv);
if (ret)
return ret;
- ret = ath9k_init_htc_services(htc_handle->drv_priv,
- htc_handle->drv_priv->ah->hw_version.devid);
+ ret = ath9k_init_htc_services(priv, priv->ah->hw_version.devid,
+ priv->ah->hw_version.usbdev);
return ret;
}
#endif
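The new ath9k_multi_regread() added to htc_drv_init.c above batches several register reads into a single WMI round trip, converting the address array to big-endian on the way out and the returned values back to host order. A standalone sketch of that marshalling, where wmi_reg_read_batch() is a made-up stand-in for ath9k_wmi_cmd() and htonl/ntohl stand in for cpu_to_be32/be32_to_cpu (none of this is part of the patch):

	#include <stdint.h>
	#include <arpa/inet.h>	/* htonl/ntohl as a portable big-endian conversion */

	#define MAX_BATCH 8

	/* Fake target: returns addr | 1 as each "register value". */
	static int wmi_reg_read_batch(const uint32_t *be_addr, uint32_t *be_val, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			be_val[i] = htonl(ntohl(be_addr[i]) | 0x1);
		return 0;
	}

	static int multi_regread(const uint32_t *addr, uint32_t *val, int count)
	{
		uint32_t tmpaddr[MAX_BATCH], tmpval[MAX_BATCH];
		int i;

		if (count > MAX_BATCH)
			return -1;
		for (i = 0; i < count; i++)
			tmpaddr[i] = htonl(addr[i]);	/* host -> wire order */
		if (wmi_reg_read_batch(tmpaddr, tmpval, count))
			return -1;
		for (i = 0; i < count; i++)
			val[i] = ntohl(tmpval[i]);	/* wire -> host order */
		return 0;
	}

	int main(void)
	{
		uint32_t addr[2] = { 0x7800, 0x9818 };
		uint32_t val[2];

		return multi_regread(addr, val, 2);
	}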
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a3be8da755d..db8c0c044e9e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -24,17 +24,6 @@ static struct dentry *ath9k_debugfs_root;
/* Utilities */
/*************/
-static void ath_update_txpow(struct ath9k_htc_priv *priv)
-{
- struct ath_hw *ah = priv->ah;
-
- if (priv->curtxpow != priv->txpowlimit) {
- ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit);
- /* read back in case value is clamped */
- priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
- }
-}
-
/* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */
static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
struct ath9k_channel *ichan)
@@ -63,8 +52,8 @@ static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
return mode;
}
-static bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
- enum ath9k_power_mode mode)
+bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
+ enum ath9k_power_mode mode)
{
bool ret;
@@ -116,6 +105,130 @@ void ath9k_ps_work(struct work_struct *work)
ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
}
+static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath9k_htc_priv *priv = data;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ if ((vif->type == NL80211_IFTYPE_AP) && bss_conf->enable_beacon)
+ priv->reconfig_beacon = true;
+
+ if (bss_conf->assoc) {
+ priv->rearm_ani = true;
+ priv->reconfig_beacon = true;
+ }
+}
+
+static void ath9k_htc_vif_reconfig(struct ath9k_htc_priv *priv)
+{
+ priv->rearm_ani = false;
+ priv->reconfig_beacon = false;
+
+ ieee80211_iterate_active_interfaces_atomic(priv->hw,
+ ath9k_htc_vif_iter, priv);
+ if (priv->rearm_ani)
+ ath9k_htc_start_ani(priv);
+
+ if (priv->reconfig_beacon) {
+ ath9k_htc_ps_wakeup(priv);
+ ath9k_htc_beacon_reconfig(priv);
+ ath9k_htc_ps_restore(priv);
+ }
+}
+
+static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath9k_vif_iter_data *iter_data = data;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+}
+
+static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_vif_iter_data iter_data;
+
+ /*
+ * Use the hardware MAC address as reference, the hardware uses it
+ * together with the BSSID mask when matching addresses.
+ */
+ iter_data.hw_macaddr = common->macaddr;
+ memset(&iter_data.mask, 0xff, ETH_ALEN);
+
+ if (vif)
+ ath9k_htc_bssid_iter(&iter_data, vif->addr, vif);
+
+ /* Get list of all active MAC addresses */
+ ieee80211_iterate_active_interfaces_atomic(priv->hw, ath9k_htc_bssid_iter,
+ &iter_data);
+
+ memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+ ath_hw_setbssidmask(common);
+}
+
+static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv)
+{
+ if (priv->num_ibss_vif)
+ priv->ah->opmode = NL80211_IFTYPE_ADHOC;
+ else if (priv->num_ap_vif)
+ priv->ah->opmode = NL80211_IFTYPE_AP;
+ else
+ priv->ah->opmode = NL80211_IFTYPE_STATION;
+
+ ath9k_hw_setopmode(priv->ah);
+}
+
+void ath9k_htc_reset(struct ath9k_htc_priv *priv)
+{
+ struct ath_hw *ah = priv->ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_channel *channel = priv->hw->conf.channel;
+ struct ath9k_hw_cal_data *caldata = NULL;
+ enum htc_phymode mode;
+ __be16 htc_mode;
+ u8 cmd_rsp;
+ int ret;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ ath9k_htc_stop_ani(priv);
+ ieee80211_stop_queues(priv->hw);
+ htc_stop(priv->htc);
+ WMI_CMD(WMI_DISABLE_INTR_CMDID);
+ WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
+ WMI_CMD(WMI_STOP_RECV_CMDID);
+
+ caldata = &priv->caldata;
+ ret = ath9k_hw_reset(ah, ah->curchan, caldata, false);
+ if (ret) {
+ ath_err(common,
+ "Unable to reset device (%u Mhz) reset status %d\n",
+ channel->center_freq, ret);
+ }
+
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
+
+ WMI_CMD(WMI_START_RECV_CMDID);
+ ath9k_host_rx_init(priv);
+
+ mode = ath9k_htc_get_curmode(priv, ah->curchan);
+ htc_mode = cpu_to_be16(mode);
+ WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
+
+ WMI_CMD(WMI_ENABLE_INTR_CMDID);
+ htc_start(priv->htc);
+ ath9k_htc_vif_reconfig(priv);
+ ieee80211_wake_queues(priv->hw);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
struct ieee80211_hw *hw,
struct ath9k_channel *hchan)
@@ -123,9 +236,9 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &common->hw->conf;
- bool fastcc = true;
+ bool fastcc;
struct ieee80211_channel *channel = hw->conf.channel;
- struct ath9k_hw_cal_data *caldata;
+ struct ath9k_hw_cal_data *caldata = NULL;
enum htc_phymode mode;
__be16 htc_mode;
u8 cmd_rsp;
@@ -134,8 +247,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
if (priv->op_flags & OP_INVALID)
return -EIO;
- if (priv->op_flags & OP_FULL_RESET)
- fastcc = false;
+ fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
ath9k_htc_ps_wakeup(priv);
htc_stop(priv->htc);
@@ -143,22 +255,24 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
WMI_CMD(WMI_STOP_RECV_CMDID);
- ath_print(common, ATH_DBG_CONFIG,
- "(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n",
- priv->ah->curchan->channel,
- channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
- fastcc);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n",
+ priv->ah->curchan->channel,
+ channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
+ fastcc);
- caldata = &priv->caldata[channel->hw_value];
+ if (!fastcc)
+ caldata = &priv->caldata;
ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to reset channel (%u Mhz) "
- "reset status %d\n", channel->center_freq, ret);
+ ath_err(common,
+ "Unable to reset channel (%u Mhz) reset status %d\n",
+ channel->center_freq, ret);
goto err;
}
- ath_update_txpow(priv);
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
WMI_CMD(WMI_START_RECV_CMDID);
if (ret)
@@ -178,51 +292,151 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
htc_start(priv->htc);
- priv->op_flags &= ~OP_FULL_RESET;
+ if (!(priv->op_flags & OP_SCANNING) &&
+ !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ ath9k_htc_vif_reconfig(priv);
+
err:
ath9k_htc_ps_restore(priv);
return ret;
}
-static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
+/*
+ * Monitor mode handling is a tad complicated because the firmware requires
+ * an interface to be created exclusively, while mac80211 doesn't associate
+ * an interface with the mode.
+ *
+ * So, for now, only one monitor interface can be configured.
+ */
+static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_htc_target_vif hvif;
int ret = 0;
u8 cmd_rsp;
- if (priv->nvifs > 0)
- return -ENOBUFS;
+ memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
+ memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
+ hvif.index = priv->mon_vif_idx;
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
+ priv->nvifs--;
+ priv->vif_slot &= ~(1 << priv->mon_vif_idx);
+}
+
+static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
+{
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_vif hvif;
+ struct ath9k_htc_target_sta tsta;
+ int ret = 0, sta_idx;
+ u8 cmd_rsp;
+
+ if ((priv->nvifs >= ATH9K_HTC_MAX_VIF) ||
+ (priv->nstations >= ATH9K_HTC_MAX_STA)) {
+ ret = -ENOBUFS;
+ goto err_vif;
+ }
+
+ sta_idx = ffz(priv->sta_slot);
+ if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA)) {
+ ret = -ENOBUFS;
+ goto err_vif;
+ }
+ /*
+ * Add an interface.
+ */
memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
- priv->ah->opmode = NL80211_IFTYPE_MONITOR;
- hvif.index = priv->nvifs;
+ hvif.index = ffz(priv->vif_slot);
WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
if (ret)
- return ret;
+ goto err_vif;
+
+ /*
+ * Assign the monitor interface index as a special case here.
+ * This is needed when the interface is brought down.
+ */
+ priv->mon_vif_idx = hvif.index;
+ priv->vif_slot |= (1 << hvif.index);
+
+ /*
+ * Set the hardware mode to monitor only if there are no
+ * other interfaces.
+ */
+ if (!priv->nvifs)
+ priv->ah->opmode = NL80211_IFTYPE_MONITOR;
priv->nvifs++;
+
+ /*
+ * Associate a station with the interface for packet injection.
+ */
+ memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
+
+ memcpy(&tsta.macaddr, common->macaddr, ETH_ALEN);
+
+ tsta.is_vif_sta = 1;
+ tsta.sta_index = sta_idx;
+ tsta.vif_index = hvif.index;
+ tsta.maxampdu = 0xffff;
+
+ WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
+ if (ret) {
+ ath_err(common, "Unable to add station entry for monitor mode\n");
+ goto err_sta;
+ }
+
+ priv->sta_slot |= (1 << sta_idx);
+ priv->nstations++;
+ priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx;
+ priv->ah->is_monitoring = true;
+
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Attached a monitor interface at idx: %d, sta idx: %d\n",
+ priv->mon_vif_idx, sta_idx);
+
return 0;
+
+err_sta:
+ /*
+ * Remove the interface from the target.
+ */
+ __ath9k_htc_remove_monitor_interface(priv);
+err_vif:
+ ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n");
+
+ return ret;
}
static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
- struct ath9k_htc_target_vif hvif;
int ret = 0;
- u8 cmd_rsp;
+ u8 cmd_rsp, sta_idx;
- memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
- memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
- hvif.index = 0; /* Should do for now */
- WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
- priv->nvifs--;
+ __ath9k_htc_remove_monitor_interface(priv);
- return ret;
+ sta_idx = priv->vif_sta_pos[priv->mon_vif_idx];
+
+ WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
+ if (ret) {
+ ath_err(common, "Unable to remove station entry for monitor mode\n");
+ return ret;
+ }
+
+ priv->sta_slot &= ~(1 << sta_idx);
+ priv->nstations--;
+ priv->ah->is_monitoring = false;
+
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Removed a monitor interface at idx: %d, sta idx: %d\n",
+ priv->mon_vif_idx, sta_idx);
+
+ return 0;
}
static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
@@ -233,12 +447,16 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
struct ath9k_htc_target_sta tsta;
struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
struct ath9k_htc_sta *ista;
- int ret;
+ int ret, sta_idx;
u8 cmd_rsp;
if (priv->nstations >= ATH9K_HTC_MAX_STA)
return -ENOBUFS;
+ sta_idx = ffz(priv->sta_slot);
+ if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA))
+ return -ENOBUFS;
+
memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
if (sta) {
@@ -248,13 +466,13 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
tsta.associd = common->curaid;
tsta.is_vif_sta = 0;
tsta.valid = true;
- ista->index = priv->nstations;
+ ista->index = sta_idx;
} else {
memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
tsta.is_vif_sta = 1;
}
- tsta.sta_index = priv->nstations;
+ tsta.sta_index = sta_idx;
tsta.vif_index = avp->index;
tsta.maxampdu = 0xffff;
if (sta && sta->ht_cap.ht_supported)
@@ -263,17 +481,27 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
if (ret) {
if (sta)
- ath_print(common, ATH_DBG_FATAL,
- "Unable to add station entry for: %pM\n", sta->addr);
+ ath_err(common,
+ "Unable to add station entry for: %pM\n",
+ sta->addr);
return ret;
}
- if (sta)
- ath_print(common, ATH_DBG_CONFIG,
- "Added a station entry for: %pM (idx: %d)\n",
- sta->addr, tsta.sta_index);
+ if (sta) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Added a station entry for: %pM (idx: %d)\n",
+ sta->addr, tsta.sta_index);
+ } else {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Added a station entry for VIF %d (idx: %d)\n",
+ avp->index, tsta.sta_index);
+ }
+ priv->sta_slot |= (1 << sta_idx);
priv->nstations++;
+ if (!sta)
+ priv->vif_sta_pos[avp->index] = sta_idx;
+
return 0;
}
@@ -282,6 +510,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
struct ieee80211_sta *sta)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
struct ath9k_htc_sta *ista;
int ret;
u8 cmd_rsp, sta_idx;
@@ -290,28 +519,35 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
ista = (struct ath9k_htc_sta *) sta->drv_priv;
sta_idx = ista->index;
} else {
- sta_idx = 0;
+ sta_idx = priv->vif_sta_pos[avp->index];
}
WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
if (ret) {
if (sta)
- ath_print(common, ATH_DBG_FATAL,
- "Unable to remove station entry for: %pM\n",
- sta->addr);
+ ath_err(common,
+ "Unable to remove station entry for: %pM\n",
+ sta->addr);
return ret;
}
- if (sta)
- ath_print(common, ATH_DBG_CONFIG,
- "Removed a station entry for: %pM (idx: %d)\n",
- sta->addr, sta_idx);
+ if (sta) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Removed a station entry for: %pM (idx: %d)\n",
+ sta->addr, sta_idx);
+ } else {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Removed a station entry for VIF %d (idx: %d)\n",
+ avp->index, sta_idx);
+ }
+ priv->sta_slot &= ~(1 << sta_idx);
priv->nstations--;
+
return 0;
}
-static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
{
struct ath9k_htc_cap_target tcap;
int ret;
@@ -390,8 +626,8 @@ static int ath9k_htc_send_rate_cmd(struct ath9k_htc_priv *priv,
WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, trate);
if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to initialize Rate information on target\n");
+ ath_err(common,
+ "Unable to initialize Rate information on target\n");
}
return ret;
@@ -408,9 +644,9 @@ static void ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
ath9k_htc_setup_rate(priv, sta, &trate);
ret = ath9k_htc_send_rate_cmd(priv, &trate);
if (!ret)
- ath_print(common, ATH_DBG_CONFIG,
- "Updated target sta: %pM, rate caps: 0x%X\n",
- sta->addr, be32_to_cpu(trate.capflags));
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Updated target sta: %pM, rate caps: 0x%X\n",
+ sta->addr, be32_to_cpu(trate.capflags));
}
static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv,
@@ -435,9 +671,9 @@ static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv,
ret = ath9k_htc_send_rate_cmd(priv, &trate);
if (!ret)
- ath_print(common, ATH_DBG_CONFIG,
- "Updated target sta: %pM, rate caps: 0x%X\n",
- bss_conf->bssid, be32_to_cpu(trate.capflags));
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Updated target sta: %pM, rate caps: 0x%X\n",
+ bss_conf->bssid, be32_to_cpu(trate.capflags));
}
static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv,
@@ -464,14 +700,14 @@ static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv,
WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
if (ret)
- ath_print(common, ATH_DBG_CONFIG,
- "Unable to %s TX aggregation for (%pM, %d)\n",
- (aggr.aggr_enable) ? "start" : "stop", sta->addr, tid);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Unable to %s TX aggregation for (%pM, %d)\n",
+ (aggr.aggr_enable) ? "start" : "stop", sta->addr, tid);
else
- ath_print(common, ATH_DBG_CONFIG,
- "%s TX aggregation for (%pM, %d)\n",
- (aggr.aggr_enable) ? "Starting" : "Stopping",
- sta->addr, tid);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "%s TX aggregation for (%pM, %d)\n",
+ (aggr.aggr_enable) ? "Starting" : "Stopping",
+ sta->addr, tid);
spin_lock_bh(&priv->tx_lock);
ista->tid_state[tid] = (aggr.aggr_enable && !ret) ? AGGR_START : AGGR_STOP;
@@ -689,7 +925,7 @@ void ath9k_htc_debug_remove_root(void)
/* ANI */
/*******/
-static void ath_start_ani(struct ath9k_htc_priv *priv)
+void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
unsigned long timestamp = jiffies_to_msecs(jiffies);
@@ -698,15 +934,22 @@ static void ath_start_ani(struct ath9k_htc_priv *priv)
common->ani.shortcal_timer = timestamp;
common->ani.checkani_timer = timestamp;
- ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+ priv->op_flags |= OP_ANI_RUNNING;
+
+ ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
}
-void ath9k_ani_work(struct work_struct *work)
+void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
+{
+ cancel_delayed_work_sync(&priv->ani_work);
+ priv->op_flags &= ~OP_ANI_RUNNING;
+}
+
+void ath9k_htc_ani_work(struct work_struct *work)
{
struct ath9k_htc_priv *priv =
- container_of(work, struct ath9k_htc_priv,
- ath9k_ani_work.work);
+ container_of(work, struct ath9k_htc_priv, ani_work.work);
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
bool longcal = false;
@@ -715,7 +958,8 @@ void ath9k_ani_work(struct work_struct *work)
unsigned int timestamp = jiffies_to_msecs(jiffies);
u32 cal_interval, short_cal_interval;
- short_cal_interval = ATH_STA_SHORT_CALINTERVAL;
+ short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
+ ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
/* Only calibrate if awake */
if (ah->power_mode != ATH9K_PM_AWAKE)
@@ -724,7 +968,7 @@ void ath9k_ani_work(struct work_struct *work)
/* Long calibration runs independently of short calibration. */
if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
longcal = true;
- ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
+ ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
common->ani.longcal_timer = timestamp;
}
@@ -733,8 +977,8 @@ void ath9k_ani_work(struct work_struct *work)
if ((timestamp - common->ani.shortcal_timer) >=
short_cal_interval) {
shortcal = true;
- ath_print(common, ATH_DBG_ANI,
- "shortcal @%lu\n", jiffies);
+ ath_dbg(common, ATH_DBG_ANI,
+ "shortcal @%lu\n", jiffies);
common->ani.shortcal_timer = timestamp;
common->ani.resetcal_timer = timestamp;
}
@@ -784,326 +1028,15 @@ set_timer:
if (!common->ani.caldone)
cal_interval = min(cal_interval, (u32)short_cal_interval);
- ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
+ ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
msecs_to_jiffies(cal_interval));
}
-/*******/
-/* LED */
-/*******/
-
-static void ath9k_led_blink_work(struct work_struct *work)
-{
- struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
- ath9k_led_blink_work.work);
-
- if (!(priv->op_flags & OP_LED_ASSOCIATED))
- return;
-
- if ((priv->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
- (priv->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
- ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
- else
- ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
- (priv->op_flags & OP_LED_ON) ? 1 : 0);
-
- ieee80211_queue_delayed_work(priv->hw,
- &priv->ath9k_led_blink_work,
- (priv->op_flags & OP_LED_ON) ?
- msecs_to_jiffies(priv->led_off_duration) :
- msecs_to_jiffies(priv->led_on_duration));
-
- priv->led_on_duration = priv->led_on_cnt ?
- max((ATH_LED_ON_DURATION_IDLE - priv->led_on_cnt), 25) :
- ATH_LED_ON_DURATION_IDLE;
- priv->led_off_duration = priv->led_off_cnt ?
- max((ATH_LED_OFF_DURATION_IDLE - priv->led_off_cnt), 10) :
- ATH_LED_OFF_DURATION_IDLE;
- priv->led_on_cnt = priv->led_off_cnt = 0;
-
- if (priv->op_flags & OP_LED_ON)
- priv->op_flags &= ~OP_LED_ON;
- else
- priv->op_flags |= OP_LED_ON;
-}
-
-static void ath9k_led_brightness_work(struct work_struct *work)
-{
- struct ath_led *led = container_of(work, struct ath_led,
- brightness_work.work);
- struct ath9k_htc_priv *priv = led->priv;
-
- switch (led->brightness) {
- case LED_OFF:
- if (led->led_type == ATH_LED_ASSOC ||
- led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
- (led->led_type == ATH_LED_RADIO));
- priv->op_flags &= ~OP_LED_ASSOCIATED;
- if (led->led_type == ATH_LED_RADIO)
- priv->op_flags &= ~OP_LED_ON;
- } else {
- priv->led_off_cnt++;
- }
- break;
- case LED_FULL:
- if (led->led_type == ATH_LED_ASSOC) {
- priv->op_flags |= OP_LED_ASSOCIATED;
- ieee80211_queue_delayed_work(priv->hw,
- &priv->ath9k_led_blink_work, 0);
- } else if (led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
- priv->op_flags |= OP_LED_ON;
- } else {
- priv->led_on_cnt++;
- }
- break;
- default:
- break;
- }
-}
-
-static void ath9k_led_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
- struct ath9k_htc_priv *priv = led->priv;
-
- led->brightness = brightness;
- if (!(priv->op_flags & OP_LED_DEINIT))
- ieee80211_queue_delayed_work(priv->hw,
- &led->brightness_work, 0);
-}
-
-static void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv)
-{
- cancel_delayed_work_sync(&priv->radio_led.brightness_work);
- cancel_delayed_work_sync(&priv->assoc_led.brightness_work);
- cancel_delayed_work_sync(&priv->tx_led.brightness_work);
- cancel_delayed_work_sync(&priv->rx_led.brightness_work);
-}
-
-static int ath9k_register_led(struct ath9k_htc_priv *priv, struct ath_led *led,
- char *trigger)
-{
- int ret;
-
- led->priv = priv;
- led->led_cdev.name = led->name;
- led->led_cdev.default_trigger = trigger;
- led->led_cdev.brightness_set = ath9k_led_brightness;
-
- ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_cdev);
- if (ret)
- ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
- "Failed to register led:%s", led->name);
- else
- led->registered = 1;
-
- INIT_DELAYED_WORK(&led->brightness_work, ath9k_led_brightness_work);
-
- return ret;
-}
-
-static void ath9k_unregister_led(struct ath_led *led)
-{
- if (led->registered) {
- led_classdev_unregister(&led->led_cdev);
- led->registered = 0;
- }
-}
-
-void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
-{
- priv->op_flags |= OP_LED_DEINIT;
- ath9k_unregister_led(&priv->assoc_led);
- priv->op_flags &= ~OP_LED_ASSOCIATED;
- ath9k_unregister_led(&priv->tx_led);
- ath9k_unregister_led(&priv->rx_led);
- ath9k_unregister_led(&priv->radio_led);
-}
-
-void ath9k_init_leds(struct ath9k_htc_priv *priv)
-{
- char *trigger;
- int ret;
-
- if (AR_SREV_9287(priv->ah))
- priv->ah->led_pin = ATH_LED_PIN_9287;
- else if (AR_SREV_9271(priv->ah))
- priv->ah->led_pin = ATH_LED_PIN_9271;
- else if (AR_DEVID_7010(priv->ah))
- priv->ah->led_pin = ATH_LED_PIN_7010;
- else
- priv->ah->led_pin = ATH_LED_PIN_DEF;
-
- /* Configure gpio 1 for output */
- ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- /* LED off, active low */
- ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
-
- INIT_DELAYED_WORK(&priv->ath9k_led_blink_work, ath9k_led_blink_work);
-
- trigger = ieee80211_get_radio_led_name(priv->hw);
- snprintf(priv->radio_led.name, sizeof(priv->radio_led.name),
- "ath9k-%s::radio", wiphy_name(priv->hw->wiphy));
- ret = ath9k_register_led(priv, &priv->radio_led, trigger);
- priv->radio_led.led_type = ATH_LED_RADIO;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_assoc_led_name(priv->hw);
- snprintf(priv->assoc_led.name, sizeof(priv->assoc_led.name),
- "ath9k-%s::assoc", wiphy_name(priv->hw->wiphy));
- ret = ath9k_register_led(priv, &priv->assoc_led, trigger);
- priv->assoc_led.led_type = ATH_LED_ASSOC;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_tx_led_name(priv->hw);
- snprintf(priv->tx_led.name, sizeof(priv->tx_led.name),
- "ath9k-%s::tx", wiphy_name(priv->hw->wiphy));
- ret = ath9k_register_led(priv, &priv->tx_led, trigger);
- priv->tx_led.led_type = ATH_LED_TX;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_rx_led_name(priv->hw);
- snprintf(priv->rx_led.name, sizeof(priv->rx_led.name),
- "ath9k-%s::rx", wiphy_name(priv->hw->wiphy));
- ret = ath9k_register_led(priv, &priv->rx_led, trigger);
- priv->rx_led.led_type = ATH_LED_RX;
- if (ret)
- goto fail;
-
- priv->op_flags &= ~OP_LED_DEINIT;
-
- return;
-
-fail:
- cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
- ath9k_deinit_leds(priv);
-}
-
-/*******************/
-/* Rfkill */
-/*******************/
-
-static bool ath_is_rfkill_set(struct ath9k_htc_priv *priv)
-{
- return ath9k_hw_gpio_get(priv->ah, priv->ah->rfkill_gpio) ==
- priv->ah->rfkill_polarity;
-}
-
-static void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw)
-{
- struct ath9k_htc_priv *priv = hw->priv;
- bool blocked = !!ath_is_rfkill_set(priv);
-
- wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
-}
-
-void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
-{
- if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- wiphy_rfkill_start_polling(priv->hw->wiphy);
-}
-
-static void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
-{
- struct ath9k_htc_priv *priv = hw->priv;
- struct ath_hw *ah = priv->ah;
- struct ath_common *common = ath9k_hw_common(ah);
- int ret;
- u8 cmd_rsp;
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
-
- /* Reset the HW */
- ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to reset hardware; reset status %d "
- "(freq %u MHz)\n", ret, ah->curchan->channel);
- }
-
- ath_update_txpow(priv);
-
- /* Start RX */
- WMI_CMD(WMI_START_RECV_CMDID);
- ath9k_host_rx_init(priv);
-
- /* Start TX */
- htc_start(priv->htc);
- spin_lock_bh(&priv->tx_lock);
- priv->tx_queues_stop = false;
- spin_unlock_bh(&priv->tx_lock);
- ieee80211_wake_queues(hw);
-
- WMI_CMD(WMI_ENABLE_INTR_CMDID);
-
- /* Enable LED */
- ath9k_hw_cfg_output(ah, ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(ah, ah->led_pin, 0);
-}
-
-static void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
-{
- struct ath9k_htc_priv *priv = hw->priv;
- struct ath_hw *ah = priv->ah;
- struct ath_common *common = ath9k_hw_common(ah);
- int ret;
- u8 cmd_rsp;
-
- ath9k_htc_ps_wakeup(priv);
-
- /* Disable LED */
- ath9k_hw_set_gpio(ah, ah->led_pin, 1);
- ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
-
- WMI_CMD(WMI_DISABLE_INTR_CMDID);
-
- /* Stop TX */
- ieee80211_stop_queues(hw);
- htc_stop(priv->htc);
- WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
- skb_queue_purge(&priv->tx_queue);
-
- /* Stop RX */
- WMI_CMD(WMI_STOP_RECV_CMDID);
-
- /*
- * The MIB counters have to be disabled here,
- * since the target doesn't do it.
- */
- ath9k_hw_disable_mib_counters(ah);
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
-
- /* Reset the HW */
- ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to reset hardware; reset status %d "
- "(freq %u MHz)\n", ret, ah->curchan->channel);
- }
-
- /* Disable the PHY */
- ath9k_hw_phy_disable(ah);
-
- ath9k_htc_ps_restore(priv);
- ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
-}
-
/**********************/
/* mac80211 Callbacks */
/**********************/
-static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
struct ath9k_htc_priv *priv = hw->priv;
@@ -1116,7 +1049,7 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
padsize = padpos & 3;
if (padsize && skb->len > padpos) {
if (skb_headroom(skb) < padsize)
- return -1;
+ goto fail_tx;
skb_push(skb, padsize);
memmove(skb->data, skb->data + padsize, padpos);
}
@@ -1124,24 +1057,23 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
ret = ath9k_htc_tx_start(priv, skb);
if (ret != 0) {
if (ret == -ENOMEM) {
- ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
- "Stopping TX queues\n");
+ ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Stopping TX queues\n");
ieee80211_stop_queues(hw);
spin_lock_bh(&priv->tx_lock);
priv->tx_queues_stop = true;
spin_unlock_bh(&priv->tx_lock);
} else {
- ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
- "Tx failed");
+ ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Tx failed\n");
}
goto fail_tx;
}
- return 0;
+ return;
fail_tx:
dev_kfree_skb_any(skb);
- return 0;
}
static int ath9k_htc_start(struct ieee80211_hw *hw)
@@ -1158,9 +1090,9 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- ath_print(common, ATH_DBG_CONFIG,
- "Starting driver with initial channel: %d MHz\n",
- curchan->center_freq);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Starting driver with initial channel: %d MHz\n",
+ curchan->center_freq);
/* Ensure that HW is awake before flushing RX */
ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
@@ -1169,20 +1101,18 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
/* setup initial channel */
init_channel = ath9k_cmn_get_curchannel(hw, ah);
- /* Reset SERDES registers */
- ath9k_hw_configpcipowersave(ah, 0, 0);
-
ath9k_hw_htc_resetinit(ah);
ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to reset hardware; reset status %d "
- "(freq %u MHz)\n", ret, curchan->center_freq);
+ ath_err(common,
+ "Unable to reset hardware; reset status %d (freq %u MHz)\n",
+ ret, curchan->center_freq);
mutex_unlock(&priv->mutex);
return ret;
}
- ath_update_txpow(priv);
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
mode = ath9k_htc_get_curmode(priv, init_channel);
htc_mode = cpu_to_be16(mode);
@@ -1192,6 +1122,11 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
ath9k_host_rx_init(priv);
+ ret = ath9k_htc_update_cap_target(priv);
+ if (ret)
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Failed to update capability in target\n");
+
priv->op_flags &= ~OP_INVALID;
htc_start(priv->htc);
@@ -1223,32 +1158,33 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
if (priv->op_flags & OP_INVALID) {
- ath_print(common, ATH_DBG_ANY, "Device not present\n");
+ ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
mutex_unlock(&priv->mutex);
return;
}
- /* Cancel all the running timers/work .. */
- cancel_work_sync(&priv->ps_work);
- cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
- ath9k_led_stop_brightness(priv);
-
ath9k_htc_ps_wakeup(priv);
htc_stop(priv->htc);
WMI_CMD(WMI_DISABLE_INTR_CMDID);
WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
WMI_CMD(WMI_STOP_RECV_CMDID);
+
+ tasklet_kill(&priv->swba_tasklet);
+ tasklet_kill(&priv->rx_tasklet);
+ tasklet_kill(&priv->tx_tasklet);
+
skb_queue_purge(&priv->tx_queue);
- /* Remove monitor interface here */
- if (ah->opmode == NL80211_IFTYPE_MONITOR) {
- if (ath9k_htc_remove_monitor_interface(priv))
- ath_print(common, ATH_DBG_FATAL,
- "Unable to remove monitor interface\n");
- else
- ath_print(common, ATH_DBG_CONFIG,
- "Monitor interface removed\n");
- }
+ mutex_unlock(&priv->mutex);
+
+ /* Cancel all the running timers/work .. */
+ cancel_work_sync(&priv->fatal_work);
+ cancel_work_sync(&priv->ps_work);
+ cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+ ath9k_htc_stop_ani(priv);
+ ath9k_led_stop_brightness(priv);
+
+ mutex_lock(&priv->mutex);
if (ah->btcoex_hw.enabled) {
ath9k_hw_btcoex_disable(ah);
@@ -1256,15 +1192,18 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
ath_htc_cancel_btcoex_work(priv);
}
+ /* Remove a monitor interface if it's present. */
+ if (priv->ah->is_monitoring)
+ ath9k_htc_remove_monitor_interface(priv);
+
ath9k_hw_phy_disable(ah);
ath9k_hw_disable(ah);
- ath9k_hw_configpcipowersave(ah, 1, 1);
ath9k_htc_ps_restore(priv);
ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
priv->op_flags |= OP_INVALID;
- ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
+ ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
mutex_unlock(&priv->mutex);
}
@@ -1280,10 +1219,24 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
- /* Only one interface for now */
- if (priv->nvifs > 0) {
- ret = -ENOBUFS;
- goto out;
+ if (priv->nvifs >= ATH9K_HTC_MAX_VIF) {
+ mutex_unlock(&priv->mutex);
+ return -ENOBUFS;
+ }
+
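+ /* An IBSS interface cannot coexist with interfaces of any other type. */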
+ if (priv->num_ibss_vif ||
+ (priv->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
+ ath_err(common, "IBSS coexistence with other modes is not allowed\n");
+ mutex_unlock(&priv->mutex);
+ return -ENOBUFS;
+ }
+
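+ /* AP and IBSS interfaces count against the beaconing-interface limit. */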
+ if (((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_ADHOC)) &&
+ ((priv->num_ap_vif + priv->num_ibss_vif) >= ATH9K_HTC_MAX_BCN_VIF)) {
+ ath_err(common, "Max. number of beaconing interfaces reached\n");
+ mutex_unlock(&priv->mutex);
+ return -ENOBUFS;
}
ath9k_htc_ps_wakeup(priv);
@@ -1297,41 +1250,49 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_ADHOC:
hvif.opmode = cpu_to_be32(HTC_M_IBSS);
break;
+ case NL80211_IFTYPE_AP:
+ hvif.opmode = cpu_to_be32(HTC_M_HOSTAP);
+ break;
default:
- ath_print(common, ATH_DBG_FATAL,
+ ath_err(common,
"Interface type %d not yet supported\n", vif->type);
ret = -EOPNOTSUPP;
goto out;
}
- ath_print(common, ATH_DBG_CONFIG,
- "Attach a VIF of type: %d\n", vif->type);
-
- priv->ah->opmode = vif->type;
-
/* Index starts from zero on the target */
- avp->index = hvif.index = priv->nvifs;
+ avp->index = hvif.index = ffz(priv->vif_slot);
hvif.rtsthreshold = cpu_to_be16(2304);
WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
if (ret)
goto out;
- priv->nvifs++;
-
/*
* We need a node in target to tx mgmt frames
* before association.
*/
ret = ath9k_htc_add_station(priv, vif, NULL);
- if (ret)
+ if (ret) {
+ WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
goto out;
+ }
- ret = ath9k_htc_update_cap_target(priv);
- if (ret)
- ath_print(common, ATH_DBG_CONFIG, "Failed to update"
- " capability in target \n");
+ ath9k_htc_set_bssid_mask(priv, vif);
+ priv->vif_slot |= (1 << avp->index);
+ priv->nvifs++;
priv->vif = vif;
+
+ INC_VIF(priv, vif->type);
+ ath9k_htc_set_opmode(priv);
+
+ if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+ !(priv->op_flags & OP_ANI_RUNNING))
+ ath9k_htc_start_ani(priv);
+
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index);
+
out:
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1349,8 +1310,6 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
int ret = 0;
u8 cmd_rsp;
- ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
-
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
@@ -1359,10 +1318,27 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
hvif.index = avp->index;
WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
priv->nvifs--;
+ priv->vif_slot &= ~(1 << avp->index);
ath9k_htc_remove_station(priv, vif, NULL);
priv->vif = NULL;
+ DEC_VIF(priv, vif->type);
+ ath9k_htc_set_opmode(priv);
+
+ /*
+ * Stop ANI only if there are no associated station interfaces.
+ */
+ if ((vif->type == NL80211_IFTYPE_AP) && (priv->num_ap_vif == 0)) {
+ priv->rearm_ani = false;
+ ieee80211_iterate_active_interfaces_atomic(priv->hw,
+ ath9k_htc_vif_iter, priv);
+ if (!priv->rearm_ani)
+ ath9k_htc_stop_ani(priv);
+ }
+
+ ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface at idx: %d\n", avp->index);
+
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
}
@@ -1386,30 +1362,44 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&priv->htc_pm_lock);
if (enable_radio) {
- ath_print(common, ATH_DBG_CONFIG,
- "not-idle: enabling radio\n");
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "not-idle: enabling radio\n");
ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
ath9k_htc_radio_enable(hw);
}
}
+ /*
+ * Monitor interface should be added before
+ * IEEE80211_CONF_CHANGE_CHANNEL is handled.
+ */
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if ((conf->flags & IEEE80211_CONF_MONITOR) &&
+ !priv->ah->is_monitoring)
+ ath9k_htc_add_monitor_interface(priv);
+ else if (priv->ah->is_monitoring)
+ ath9k_htc_remove_monitor_interface(priv);
+ }
+
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
- ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
- curchan->center_freq);
+ ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
+ curchan->center_freq);
- ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]);
+ ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
+ hw->conf.channel,
+ hw->conf.channel_type);
if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to set channel\n");
+ ath_err(common, "Unable to set channel\n");
mutex_unlock(&priv->mutex);
return -EINVAL;
}
}
+
if (changed & IEEE80211_CONF_CHANGE_PS) {
if (conf->flags & IEEE80211_CONF_PS) {
ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
@@ -1421,15 +1411,10 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
- if (conf->flags & IEEE80211_CONF_MONITOR) {
- if (ath9k_htc_add_monitor_interface(priv))
- ath_print(common, ATH_DBG_FATAL,
- "Failed to set monitor mode\n");
- else
- ath_print(common, ATH_DBG_CONFIG,
- "HW opmode set to Monitor mode\n");
- }
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ priv->txpowlimit = 2 * conf->power_level;
+ ath9k_cmn_update_txpow(priv->ah, priv->curtxpow,
+ priv->txpowlimit, &priv->curtxpow);
}
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
@@ -1440,8 +1425,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
}
mutex_unlock(&priv->htc_pm_lock);
- ath_print(common, ATH_DBG_CONFIG,
- "idle: disabling radio\n");
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "idle: disabling radio\n");
ath9k_htc_radio_disable(hw);
}
@@ -1478,8 +1463,8 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
rfilt = ath9k_htc_calcrxfilter(priv);
ath9k_hw_setrxfilter(priv->ah, rfilt);
- ath_print(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
- "Set HW RX filter: 0x%x\n", rfilt);
+ ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
+ "Set HW RX filter: 0x%x\n", rfilt);
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1542,15 +1527,14 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
qnum = get_hw_qnum(queue, priv->hwq_map);
- ath_print(common, ATH_DBG_CONFIG,
- "Configure tx [queue/hwq] [%d/%d], "
- "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
- queue, qnum, params->aifs, params->cw_min,
- params->cw_max, params->txop);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Configure tx [queue/hwq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+ queue, qnum, params->aifs, params->cw_min,
+ params->cw_max, params->txop);
ret = ath_htc_txq_update(priv, qnum, &qi);
if (ret) {
- ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
+ ath_err(common, "TXQ Update failed\n");
goto out;
}
@@ -1578,7 +1562,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
return -ENOSPC;
mutex_lock(&priv->mutex);
- ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n");
+ ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
ath9k_htc_ps_wakeup(priv);
switch (cmd) {
@@ -1617,66 +1601,81 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
+ bool set_assoc;
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
+ /*
+ * Set the HW AID/BSSID only for the first station interface
+ * or in IBSS mode.
+ */
+ set_assoc = !!((priv->ah->opmode == NL80211_IFTYPE_ADHOC) ||
+ ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
+ (priv->num_sta_vif == 1)));
+
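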
if (changed & BSS_CHANGED_ASSOC) {
- common->curaid = bss_conf->assoc ?
- bss_conf->aid : 0;
- ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
- bss_conf->assoc);
-
- if (bss_conf->assoc) {
- priv->op_flags |= OP_ASSOCIATED;
- ath_start_ani(priv);
- } else {
- priv->op_flags &= ~OP_ASSOCIATED;
- cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ if (set_assoc) {
+ ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+ bss_conf->assoc);
+
+ common->curaid = bss_conf->assoc ?
+ bss_conf->aid : 0;
+
+ if (bss_conf->assoc)
+ ath9k_htc_start_ani(priv);
+ else
+ ath9k_htc_stop_ani(priv);
}
}
if (changed & BSS_CHANGED_BSSID) {
- /* Set BSSID */
- memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
- ath9k_hw_write_associd(ah);
+ if (set_assoc) {
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ ath9k_hw_write_associd(ah);
- ath_print(common, ATH_DBG_CONFIG,
- "BSSID: %pM aid: 0x%x\n",
- common->curbssid, common->curaid);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "BSSID: %pM aid: 0x%x\n",
+ common->curbssid, common->curaid);
+ }
}
- if ((changed & BSS_CHANGED_BEACON_INT) ||
- (changed & BSS_CHANGED_BEACON) ||
- ((changed & BSS_CHANGED_BEACON_ENABLED) &&
- bss_conf->enable_beacon)) {
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Beacon enabled for BSS: %pM\n", bss_conf->bssid);
priv->op_flags |= OP_ENABLE_BEACON;
ath9k_htc_beacon_config(priv, vif);
}
- if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
- !bss_conf->enable_beacon) {
- priv->op_flags &= ~OP_ENABLE_BEACON;
- ath9k_htc_beacon_config(priv, vif);
- }
-
- if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
- bss_conf->use_short_preamble);
- if (bss_conf->use_short_preamble)
- priv->op_flags |= OP_PREAMBLE_SHORT;
- else
- priv->op_flags &= ~OP_PREAMBLE_SHORT;
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) {
+ /*
+ * Disable SWBA interrupt only if there are no
+ * AP/IBSS interfaces.
+ */
+ if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Beacon disabled for BSS: %pM\n",
+ bss_conf->bssid);
+ priv->op_flags &= ~OP_ENABLE_BEACON;
+ ath9k_htc_beacon_config(priv, vif);
+ }
}
- if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
- bss_conf->use_cts_prot);
- if (bss_conf->use_cts_prot &&
- hw->conf.channel->band != IEEE80211_BAND_5GHZ)
- priv->op_flags |= OP_PROTECT_ENABLE;
- else
- priv->op_flags &= ~OP_PROTECT_ENABLE;
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ /*
+ * Reset the HW TSF for the first AP interface.
+ */
+ if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
+ (priv->nvifs == 1) &&
+ (priv->num_ap_vif == 1) &&
+ (vif->type == NL80211_IFTYPE_AP)) {
+ priv->op_flags |= OP_TSF_RESET;
+ }
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Beacon interval changed for BSS: %pM\n",
+ bss_conf->bssid);
+ ath9k_htc_beacon_config(priv, vif);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1735,12 +1734,14 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta,
- u16 tid, u16 *ssn)
+ u16 tid, u16 *ssn, u8 buf_size)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath9k_htc_sta *ista;
int ret = 0;
+ mutex_lock(&priv->mutex);
+
switch (action) {
case IEEE80211_AMPDU_RX_START:
break;
@@ -1762,10 +1763,11 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
spin_unlock_bh(&priv->tx_lock);
break;
default:
- ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
- "Unknown AMPDU action\n");
+ ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n");
}
+ mutex_unlock(&priv->mutex);
+
return ret;
}
@@ -1778,8 +1780,7 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
priv->op_flags |= OP_SCANNING;
spin_unlock_bh(&priv->beacon_lock);
cancel_work_sync(&priv->ps_work);
- if (priv->op_flags & OP_ASSOCIATED)
- cancel_delayed_work_sync(&priv->ath9k_ani_work);
+ ath9k_htc_stop_ani(priv);
mutex_unlock(&priv->mutex);
}
@@ -1788,15 +1789,11 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
struct ath9k_htc_priv *priv = hw->priv;
mutex_lock(&priv->mutex);
- ath9k_htc_ps_wakeup(priv);
spin_lock_bh(&priv->beacon_lock);
priv->op_flags &= ~OP_SCANNING;
spin_unlock_bh(&priv->beacon_lock);
- priv->op_flags |= OP_FULL_RESET;
- if (priv->op_flags & OP_ASSOCIATED) {
- ath9k_htc_beacon_config(priv, priv->vif);
- ath_start_ani(priv);
- }
+ ath9k_htc_ps_wakeup(priv);
+ ath9k_htc_vif_reconfig(priv);
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 29d80ca78393..4a4f27ba96af 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -20,8 +20,15 @@
/* TX */
/******/
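+/*
+ * Map the driver's WME access-category index onto the hardware
+ * TX queue subtype expected by the target.
+ */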
+static const int subtype_txq_to_hwq[] = {
+ [WME_AC_BE] = ATH_TXQ_AC_BE,
+ [WME_AC_BK] = ATH_TXQ_AC_BK,
+ [WME_AC_VI] = ATH_TXQ_AC_VI,
+ [WME_AC_VO] = ATH_TXQ_AC_VO,
+};
+
#define ATH9K_HTC_INIT_TXQ(subtype) do { \
- qi.tqi_subtype = subtype; \
+ qi.tqi_subtype = subtype_txq_to_hwq[subtype]; \
qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; \
qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; \
qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; \
@@ -62,8 +69,8 @@ int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
qi.tqi_readyTime = qinfo->tqi_readyTime;
if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Unable to update hardware queue %u!\n", qnum);
+ ath_err(ath9k_hw_common(ah),
+ "Unable to update hardware queue %u!\n", qnum);
error = -EIO;
} else {
ath9k_hw_resettxqueue(ah, qnum);
@@ -77,7 +84,9 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = tx_info->control.sta;
+ struct ieee80211_vif *vif = tx_info->control.vif;
struct ath9k_htc_sta *ista;
+ struct ath9k_htc_vif *avp;
struct ath9k_htc_tx_ctl tx_ctl;
enum htc_endpoint_id epid;
u16 qnum;
@@ -88,24 +97,38 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *) skb->data;
fc = hdr->frame_control;
- if (tx_info->control.vif &&
- (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv)
- vif_idx = ((struct ath9k_htc_vif *)
- tx_info->control.vif->drv_priv)->index;
- else
- vif_idx = priv->nvifs;
+ /*
+ * Find out on which interface this packet has to be
+ * sent out.
+ */
+ if (vif) {
+ avp = (struct ath9k_htc_vif *) vif->drv_priv;
+ vif_idx = avp->index;
+ } else {
+ if (!priv->ah->is_monitoring) {
+ ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "VIF is null, but no monitor interface !\n");
+ return -EINVAL;
+ }
+
+ vif_idx = priv->mon_vif_idx;
+ }
+ /*
+ * Find out which station this packet is destined for.
+ */
if (sta) {
ista = (struct ath9k_htc_sta *) sta->drv_priv;
sta_idx = ista->index;
} else {
- sta_idx = 0;
+ sta_idx = priv->vif_sta_pos[vif_idx];
}
memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
if (ieee80211_is_data(fc)) {
struct tx_frame_hdr tx_hdr;
+ u32 flags = 0;
u8 *qc;
memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
@@ -129,13 +152,14 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
/* Check for RTS protection */
if (priv->hw->wiphy->rts_threshold != (u32) -1)
if (skb->len > priv->hw->wiphy->rts_threshold)
- tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS;
+ flags |= ATH9K_HTC_TX_RTSCTS;
/* CTS-to-self */
- if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) &&
- (priv->op_flags & OP_PROTECT_ENABLE))
- tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY;
+ if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
+ (vif && vif->bss_conf.use_cts_prot))
+ flags |= ATH9K_HTC_TX_CTSONLY;
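+ /* The target expects the TX flags field in big-endian byte order. */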
+ tx_hdr.flags = cpu_to_be32(flags);
tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
@@ -208,6 +232,7 @@ static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
void ath9k_tx_tasklet(unsigned long data)
{
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ieee80211_vif *vif;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
@@ -219,12 +244,16 @@ void ath9k_tx_tasklet(unsigned long data)
hdr = (struct ieee80211_hdr *) skb->data;
fc = hdr->frame_control;
tx_info = IEEE80211_SKB_CB(skb);
+ vif = tx_info->control.vif;
memset(&tx_info->status, 0, sizeof(tx_info->status));
+ if (!vif)
+ goto send_mac80211;
+
rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+ sta = ieee80211_find_sta(vif, hdr->addr1);
if (!sta) {
rcu_read_unlock();
ieee80211_tx_status(priv->hw, skb);
@@ -244,7 +273,7 @@ void ath9k_tx_tasklet(unsigned long data)
ista = (struct ath9k_htc_sta *)sta->drv_priv;
if (ath9k_htc_check_tx_aggr(priv, ista, tid)) {
- ieee80211_start_tx_ba_session(sta, tid);
+ ieee80211_start_tx_ba_session(sta, tid, 0);
spin_lock_bh(&priv->tx_lock);
ista->tid_state[tid] = AGGR_PROGRESS;
spin_unlock_bh(&priv->tx_lock);
@@ -254,6 +283,7 @@ void ath9k_tx_tasklet(unsigned long data)
rcu_read_unlock();
+ send_mac80211:
/* Send status to mac80211 */
ieee80211_tx_status(priv->hw, skb);
}
@@ -263,8 +293,8 @@ void ath9k_tx_tasklet(unsigned long data)
if (priv->tx_queues_stop) {
priv->tx_queues_stop = false;
spin_unlock_bh(&priv->tx_lock);
- ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
- "Waking up TX queues\n");
+ ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ "Waking up TX queues\n");
ieee80211_wake_queues(priv->hw);
return;
}
@@ -289,8 +319,7 @@ void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
(ep_id == priv->data_vo_ep)) {
skb_pull(skb, sizeof(struct tx_frame_hdr));
} else {
- ath_print(common, ATH_DBG_FATAL,
- "Unsupported TX EPID: %d\n", ep_id);
+ ath_err(common, "Unsupported TX EPID: %d\n", ep_id);
dev_kfree_skb_any(skb);
return;
}
@@ -330,9 +359,8 @@ bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype)
return false;
if (qnum >= ARRAY_SIZE(priv->hwq_map)) {
- ath_print(common, ATH_DBG_FATAL,
- "qnum %u out of range, max %u!\n",
- qnum, (unsigned int)ARRAY_SIZE(priv->hwq_map));
+ ath_err(common, "qnum %u out of range, max %zu!\n",
+ qnum, ARRAY_SIZE(priv->hwq_map));
ath9k_hw_releasetxqueue(ah, qnum);
return false;
}
@@ -379,7 +407,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
*/
if (((ah->opmode != NL80211_IFTYPE_AP) &&
(priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
- (ah->opmode == NL80211_IFTYPE_MONITOR))
+ ah->is_monitoring)
rfilt |= ATH9K_RX_FILTER_PROM;
if (priv->rxfilter & FIF_CONTROL)
@@ -391,8 +419,13 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
else
rfilt |= ATH9K_RX_FILTER_BEACON;
- if (conf_is_ht(&priv->hw->conf))
+ if (conf_is_ht(&priv->hw->conf)) {
rfilt |= ATH9K_RX_FILTER_COMP_BAR;
+ rfilt |= ATH9K_RX_FILTER_UNCOMP_BA_BAR;
+ }
+
+ if (priv->rxfilter & FIF_PSPOLL)
+ rfilt |= ATH9K_RX_FILTER_PSPOLL;
return rfilt;
@@ -405,20 +438,12 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
{
struct ath_hw *ah = priv->ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
u32 rfilt, mfilt[2];
/* configure rx filter */
rfilt = ath9k_htc_calcrxfilter(priv);
ath9k_hw_setrxfilter(ah, rfilt);
- /* configure bssid mask */
- ath_hw_setbssidmask(common);
-
- /* configure operational mode */
- ath9k_hw_setopmode(ah);
-
/* calculate and install multicast filter */
mfilt[0] = mfilt[1] = ~0;
ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
@@ -483,8 +508,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
__le16 fc;
if (skb->len <= HTC_RX_FRAME_HEADER_SIZE) {
- ath_print(common, ATH_DBG_FATAL,
- "Corrupted RX frame, dropping\n");
+ ath_err(common, "Corrupted RX frame, dropping\n");
goto rx_next;
}
@@ -492,10 +516,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
if (be16_to_cpu(rxstatus->rs_datalen) -
(skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Corrupted RX data len, dropping "
- "(dlen: %d, skblen: %d)\n",
- rxstatus->rs_datalen, skb->len);
+ ath_err(common,
+ "Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
+ rxstatus->rs_datalen, skb->len);
goto rx_next;
}
@@ -571,31 +594,29 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
rxbuf->rxstatus.rs_flags);
- if (priv->op_flags & OP_ASSOCIATED) {
- if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
- !rxbuf->rxstatus.rs_moreaggr)
- ATH_RSSI_LPF(priv->rx.last_rssi,
- rxbuf->rxstatus.rs_rssi);
+ if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
+ !rxbuf->rxstatus.rs_moreaggr)
+ ATH_RSSI_LPF(priv->rx.last_rssi,
+ rxbuf->rxstatus.rs_rssi);
- last_rssi = priv->rx.last_rssi;
+ last_rssi = priv->rx.last_rssi;
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
- ATH_RSSI_EP_MULTIPLIER);
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
+ ATH_RSSI_EP_MULTIPLIER);
- if (rxbuf->rxstatus.rs_rssi < 0)
- rxbuf->rxstatus.rs_rssi = 0;
+ if (rxbuf->rxstatus.rs_rssi < 0)
+ rxbuf->rxstatus.rs_rssi = 0;
- if (ieee80211_is_beacon(fc))
- priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
- }
+ if (ieee80211_is_beacon(fc))
+ priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
rx_status->band = hw->conf.channel->band;
rx_status->freq = hw->conf.channel->center_freq;
rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
rx_status->antenna = rxbuf->rxstatus.rs_antenna;
- rx_status->flag |= RX_FLAG_TSFT;
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
return true;
@@ -678,8 +699,8 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
spin_unlock(&priv->rx.rxbuflock);
if (rxbuf == NULL) {
- ath_print(common, ATH_DBG_ANY,
- "No free RX buffer\n");
+ ath_dbg(common, ATH_DBG_ANY,
+ "No free RX buffer\n");
goto err;
}
@@ -721,8 +742,7 @@ int ath9k_rx_init(struct ath9k_htc_priv *priv)
for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
if (rxbuf == NULL) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to allocate RX buffers\n");
+ ath_err(common, "Unable to allocate RX buffers\n");
goto err;
}
list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 861ec9269309..c41ab8c30161 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -462,9 +462,10 @@ void ath9k_htc_hw_free(struct htc_target *htc)
}
int ath9k_htc_hw_init(struct htc_target *target,
- struct device *dev, u16 devid, char *product)
+ struct device *dev, u16 devid,
+ char *product, u32 drv_info)
{
- if (ath9k_htc_probe_device(target, dev, devid, product)) {
+ if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) {
printk(KERN_ERR "Failed to initialize the device\n");
return -ENODEV;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
index 07b6509d5896..ecd018798c47 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.h
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -77,20 +77,6 @@ struct htc_config_pipe_msg {
u8 credits;
} __packed;
-struct htc_packet {
- void *pktcontext;
- u8 *buf;
- u8 *buf_payload;
- u32 buflen;
- u32 payload_len;
-
- int endpoint;
- int status;
-
- void *context;
- u32 reserved;
-};
-
struct htc_ep_callbacks {
void *priv;
void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
@@ -123,11 +109,6 @@ struct htc_endpoint {
#define HTC_CONTROL_BUFFER_SIZE \
(HTC_MAX_CONTROL_MESSAGE_LENGTH + sizeof(struct htc_frame_hdr))
-struct htc_control_buf {
- struct htc_packet htc_pkt;
- u8 buf[HTC_CONTROL_BUFFER_SIZE];
-};
-
#define HTC_OP_START_WAIT BIT(0)
#define HTC_OP_CONFIG_PIPE_CREDITS BIT(1)
@@ -239,7 +220,8 @@ struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
struct device *dev);
void ath9k_htc_hw_free(struct htc_target *htc);
int ath9k_htc_hw_init(struct htc_target *target,
- struct device *dev, u16 devid, char *product);
+ struct device *dev, u16 devid, char *product,
+ u32 drv_info);
void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug);
#endif /* HTC_HST_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 0a4ad348b699..c8f254fe0f0b 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -223,11 +223,6 @@ static inline void ath9k_hw_rfbus_done(struct ath_hw *ah)
return ath9k_hw_private_ops(ah)->rfbus_done(ah);
}
-static inline void ath9k_enable_rfkill(struct ath_hw *ah)
-{
- return ath9k_hw_private_ops(ah)->enable_rfkill(ah);
-}
-
static inline void ath9k_hw_restore_chainmask(struct ath_hw *ah)
{
if (!ath9k_hw_private_ops(ah)->restore_chainmask)
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 6ebc68bca91f..338b07502f1a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -54,13 +54,6 @@ static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
ath9k_hw_private_ops(ah)->init_mode_regs(ah);
}
-static bool ath9k_hw_macversion_supported(struct ath_hw *ah)
-{
- struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
-
- return priv_ops->macversion_supported(ah->hw_version.macVersion);
-}
-
static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -129,9 +122,9 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
udelay(AH_TIME_QUANTUM);
}
- ath_print(ath9k_hw_common(ah), ATH_DBG_ANY,
- "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
- timeout, reg, REG_READ(ah, reg), mask, val);
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_ANY,
+ "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
+ timeout, reg, REG_READ(ah, reg), mask, val);
return false;
}
@@ -211,8 +204,8 @@ u16 ath9k_hw_computetxtime(struct ath_hw *ah,
}
break;
default:
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Unknown phy %u (rate ix %u)\n", phy, rateix);
+ ath_err(ath9k_hw_common(ah),
+ "Unknown phy %u (rate ix %u)\n", phy, rateix);
txTime = 0;
break;
}
@@ -284,11 +277,9 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
static void ath9k_hw_disablepcie(struct ath_hw *ah)
{
- if (AR_SREV_9100(ah))
+ if (!AR_SREV_5416(ah))
return;
- ENABLE_REGWRITE_BUFFER(ah);
-
REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
@@ -300,8 +291,6 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
-
- REGWRITE_BUFFER_FLUSH(ah);
}
/* This should work for all families including legacy */
@@ -310,10 +299,9 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
u32 regAddr[2] = { AR_STA_ID0 };
u32 regHold[2];
- u32 patternData[4] = { 0x55555555,
- 0xaaaaaaaa,
- 0x66666666,
- 0x99999999 };
+ static const u32 patternData[4] = {
+ 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999
+ };
int i, j, loop_max;
if (!AR_SREV_9300_20_OR_LATER(ah)) {
@@ -332,11 +320,9 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
REG_WRITE(ah, addr, wrData);
rdData = REG_READ(ah, addr);
if (rdData != wrData) {
- ath_print(common, ATH_DBG_FATAL,
- "address test failed "
- "addr: 0x%08x - wr:0x%08x != "
- "rd:0x%08x\n",
- addr, wrData, rdData);
+ ath_err(common,
+ "address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
+ addr, wrData, rdData);
return false;
}
}
@@ -345,11 +331,9 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
REG_WRITE(ah, addr, wrData);
rdData = REG_READ(ah, addr);
if (wrData != rdData) {
- ath_print(common, ATH_DBG_FATAL,
- "address test failed "
- "addr: 0x%08x - wr:0x%08x != "
- "rd:0x%08x\n",
- addr, wrData, rdData);
+ ath_err(common,
+ "address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
+ addr, wrData, rdData);
return false;
}
}
@@ -385,6 +369,9 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
else
ah->config.ht_enable = 0;
+ /* PAPRD needs some more work to be enabled */
+ ah->config.paprd_disable = 1;
+
ah->config.rx_intr_mitigation = true;
ah->config.pcieSerDesWrite = true;
@@ -419,17 +406,12 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->hw_version.magic = AR5416_MAGIC;
ah->hw_version.subvendorid = 0;
- ah->ah_flags = 0;
- if (!AR_SREV_9100(ah))
- ah->ah_flags = AH_USE_EEPROM;
-
ah->atim_window = 0;
ah->sta_id1_defaults =
AR_STA_ID1_CRPT_MIC_ENABLE |
AR_STA_ID1_MCAST_KSRCH;
- ah->beacon_interval = 100;
ah->enable_32kHz_clock = DONT_USE_32KHZ;
- ah->slottime = (u32) -1;
+ ah->slottime = 20;
ah->globaltxtimeout = (u32) -1;
ah->power_mode = ATH9K_PM_UNDEFINED;
}
@@ -440,7 +422,7 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
u32 sum;
int i;
u16 eeval;
- u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
+ static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
sum = 0;
for (i = 0; i < 3; i++) {
@@ -457,9 +439,10 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
static int ath9k_hw_post_init(struct ath_hw *ah)
{
+ struct ath_common *common = ath9k_hw_common(ah);
int ecode;
- if (!AR_SREV_9271(ah)) {
+ if (common->bus_ops->ath_bus_type != ATH_USB) {
if (!ath9k_hw_chip_test(ah))
return -ENODEV;
}
@@ -474,16 +457,15 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
if (ecode != 0)
return ecode;
- ath_print(ath9k_hw_common(ah), ATH_DBG_CONFIG,
- "Eeprom VER: %d, REV: %d\n",
- ah->eep_ops->get_eeprom_ver(ah),
- ah->eep_ops->get_eeprom_rev(ah));
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_CONFIG,
+ "Eeprom VER: %d, REV: %d\n",
+ ah->eep_ops->get_eeprom_ver(ah),
+ ah->eep_ops->get_eeprom_rev(ah));
ecode = ath9k_hw_rf_alloc_ext_banks(ah);
if (ecode) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Failed allocating banks for "
- "external radio\n");
+ ath_err(ath9k_hw_common(ah),
+ "Failed allocating banks for external radio\n");
ath9k_hw_rf_free_ext_banks(ah);
return ecode;
}
@@ -513,9 +495,19 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (ah->hw_version.devid == AR5416_AR9100_DEVID)
ah->hw_version.macVersion = AR_SREV_VERSION_9100;
+ ath9k_hw_read_revisions(ah);
+
+ /*
+ * Read back AR_WA into a permanent copy and set bits 14 and 17.
+ * We need to do this to avoid RMW of this register. We cannot
+ * read the reg when chip is asleep.
+ */
+ ah->WARegVal = REG_READ(ah, AR_WA);
+ ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
+ AR_WA_ASPM_TIMER_BASED_DISABLE);
+
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
- ath_print(common, ATH_DBG_FATAL,
- "Couldn't reset chip\n");
+ ath_err(common, "Couldn't reset chip\n");
return -EIO;
}
@@ -525,7 +517,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_attach_ops(ah);
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
- ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n");
+ ath_err(common, "Couldn't wakeup chip\n");
return -EIO;
}
@@ -541,7 +533,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
}
}
- ath_print(common, ATH_DBG_RESET, "serialize_regmode is %d\n",
+ ath_dbg(common, ATH_DBG_RESET, "serialize_regmode is %d\n",
ah->config.serialize_regmode);
if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
@@ -549,11 +541,22 @@ static int __ath9k_hw_init(struct ath_hw *ah)
else
ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
- if (!ath9k_hw_macversion_supported(ah)) {
- ath_print(common, ATH_DBG_FATAL,
- "Mac Chip Rev 0x%02x.%x is not supported by "
- "this driver\n", ah->hw_version.macVersion,
- ah->hw_version.macRev);
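+ /* Only MAC revisions listed below are supported by this driver. */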
+ switch (ah->hw_version.macVersion) {
+ case AR_SREV_VERSION_5416_PCI:
+ case AR_SREV_VERSION_5416_PCIE:
+ case AR_SREV_VERSION_9160:
+ case AR_SREV_VERSION_9100:
+ case AR_SREV_VERSION_9280:
+ case AR_SREV_VERSION_9285:
+ case AR_SREV_VERSION_9287:
+ case AR_SREV_VERSION_9271:
+ case AR_SREV_VERSION_9300:
+ case AR_SREV_VERSION_9485:
+ break;
+ default:
+ ath_err(common,
+ "Mac Chip Rev 0x%02x.%x is not supported by this driver\n",
+ ah->hw_version.macVersion, ah->hw_version.macRev);
return -EOPNOTSUPP;
}
@@ -571,14 +574,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_init_mode_regs(ah);
- /*
- * Read back AR_WA into a permanent copy and set bits 14 and 17.
- * We need to do this to avoid RMW of this register. We cannot
- * read the reg when chip is asleep.
- */
- ah->WARegVal = REG_READ(ah, AR_WA);
- ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
- AR_WA_ASPM_TIMER_BASED_DISABLE);
if (ah->is_pciexpress)
ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -599,8 +594,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
r = ath9k_hw_init_macaddr(ah);
if (r) {
- ath_print(common, ATH_DBG_FATAL,
- "Failed to initialize MAC address\n");
+ ath_err(common, "Failed to initialize MAC address\n");
return r;
}
@@ -634,21 +628,21 @@ int ath9k_hw_init(struct ath_hw *ah)
case AR9287_DEVID_PCIE:
case AR2427_DEVID_PCIE:
case AR9300_DEVID_PCIE:
+ case AR9300_DEVID_AR9485_PCIE:
break;
default:
if (common->bus_ops->ath_bus_type == ATH_USB)
break;
- ath_print(common, ATH_DBG_FATAL,
- "Hardware device ID 0x%04x not supported\n",
- ah->hw_version.devid);
+ ath_err(common, "Hardware device ID 0x%04x not supported\n",
+ ah->hw_version.devid);
return -EOPNOTSUPP;
}
ret = __ath9k_hw_init(ah);
if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to initialize hardware; "
- "initialization status: %d\n", ret);
+ ath_err(common,
+ "Unable to initialize hardware; initialization status: %d\n",
+ ret);
return ret;
}
@@ -677,10 +671,52 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
REGWRITE_BUFFER_FLUSH(ah);
}
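+/*
+ * Trigger a PLL square-sum (DVC) measurement, poll until it
+ * completes and return the measured value.
+ */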
+unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
+{
+ REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) & ~(PLL3_DO_MEAS_MASK)));
+ udelay(100);
+ REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) | PLL3_DO_MEAS_MASK));
+
+ while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
+ udelay(100);
+
+ return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
+}
+EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
+
+#define DPLL2_KD_VAL 0x3D
+#define DPLL2_KI_VAL 0x06
+#define DPLL3_PHASE_SHIFT_VAL 0x1
+
static void ath9k_hw_init_pll(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- u32 pll = ath9k_hw_compute_pll_control(ah, chan);
+ u32 pll;
+
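+ /*
+ * For AR9485, program the DPLL2/DPLL3 parameters before the
+ * main PLL control value is written below.
+ */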
+ if (AR_SREV_9485(ah)) {
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
+ REG_WRITE(ah, AR_CH0_DDR_DPLL2, 0x19e82f01);
+
+ REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
+ AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
+
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
+ udelay(1000);
+
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
+
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+ AR_CH0_DPLL2_KD, DPLL2_KD_VAL);
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+ AR_CH0_DPLL2_KI, DPLL2_KI_VAL);
+
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
+ AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x142c);
+ udelay(1000);
+ }
+
+ pll = ath9k_hw_compute_pll_control(ah, chan);
REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
@@ -772,8 +808,8 @@ static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
{
if (tu > 0xFFFF) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
- "bad global tx timeout %u\n", tu);
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ "bad global tx timeout %u\n", tu);
ah->globaltxtimeout = (u32) -1;
return false;
} else {
@@ -790,8 +826,8 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
int slottime;
int sifstime;
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
- ah->misc_mode);
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
+ ah->misc_mode);
if (ah->misc_mode != 0)
REG_WRITE(ah, AR_PCU_MISC,
@@ -816,7 +852,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ)
acktimeout += 64 - sifstime - ah->slottime;
- ath9k_hw_setslottime(ah, slottime);
+ ath9k_hw_setslottime(ah, ah->slottime);
ath9k_hw_set_ack_timeout(ah, acktimeout);
ath9k_hw_set_cts_timeout(ah, acktimeout);
if (ah->globaltxtimeout != (u32) -1)
@@ -1034,8 +1070,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
REG_WRITE(ah, AR_RTC_RC, 0);
if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "RTC stuck in MAC reset\n");
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
+ "RTC stuck in MAC reset\n");
return false;
}
@@ -1064,7 +1100,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
REG_WRITE(ah, AR_RC, AR_RC_AHB);
REG_WRITE(ah, AR_RTC_RESET, 0);
- udelay(2);
REGWRITE_BUFFER_FLUSH(ah);
@@ -1081,13 +1116,11 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
AR_RTC_STATUS_M,
AR_RTC_STATUS_ON,
AH_WAIT_TIMEOUT)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "RTC not waking up\n");
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
+ "RTC not waking up\n");
return false;
}
- ath9k_hw_read_revisions(ah);
-
return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
}
@@ -1142,16 +1175,14 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
if (ath9k_hw_numtxpending(ah, qnum)) {
- ath_print(common, ATH_DBG_QUEUE,
- "Transmit frames pending on "
- "queue %d\n", qnum);
+ ath_dbg(common, ATH_DBG_QUEUE,
+ "Transmit frames pending on queue %d\n", qnum);
return false;
}
}
if (!ath9k_hw_rfbus_req(ah)) {
- ath_print(common, ATH_DBG_FATAL,
- "Could not kill baseband RX\n");
+ ath_err(common, "Could not kill baseband RX\n");
return false;
}
@@ -1159,8 +1190,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
r = ath9k_hw_rf_set_freq(ah, chan);
if (r) {
- ath_print(common, ATH_DBG_FATAL,
- "Failed to set channel\n");
+ ath_err(common, "Failed to set channel\n");
return false;
}
ath9k_hw_set_clockrate(ah);
@@ -1170,7 +1200,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
channel->max_antenna_gain * 2,
channel->max_power * 2,
min((u32) MAX_RATE_POWER,
- (u32) regulatory->power_limit));
+ (u32) regulatory->power_limit), false);
ath9k_hw_rfbus_done(ah);
@@ -1224,10 +1254,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ah->txchainmask = common->tx_chainmask;
ah->rxchainmask = common->rx_chainmask;
- if (!ah->chip_fullsleep) {
+ if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
ath9k_hw_abortpcurecv(ah);
if (!ath9k_hw_stopdmarecv(ah)) {
- ath_print(common, ATH_DBG_XMIT,
+ ath_dbg(common, ATH_DBG_XMIT,
"Failed to stop receive dma\n");
bChannelChange = false;
}
@@ -1283,6 +1313,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_mark_phy_inactive(ah);
+ ah->paprd_table_write_done = false;
+
/* Only required on the first reset */
if (AR_SREV_9271(ah) && ah->htc_reset_init) {
REG_WRITE(ah,
@@ -1292,7 +1324,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
}
if (!ath9k_hw_chip_reset(ah, chan)) {
- ath_print(common, ATH_DBG_FATAL, "Chip reset failed\n");
+ ath_err(common, "Chip reset failed\n");
return -EINVAL;
}
@@ -1353,8 +1385,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_spur_mitigate_freq(ah, chan);
ah->eep_ops->set_board_values(ah, chan);
- ath9k_hw_set_operating_mode(ah, ah->opmode);
-
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
@@ -1372,6 +1402,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REGWRITE_BUFFER_FLUSH(ah);
+ ath9k_hw_set_operating_mode(ah, ah->opmode);
+
r = ath9k_hw_rf_set_freq(ah, chan);
if (r)
return r;
@@ -1394,7 +1426,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_qos(ah);
if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- ath9k_enable_rfkill(ah);
+ ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
ath9k_hw_init_global_settings(ah);
@@ -1439,13 +1471,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 mask;
mask = REG_READ(ah, AR_CFG);
if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
- ath_print(common, ATH_DBG_RESET,
+ ath_dbg(common, ATH_DBG_RESET,
"CFG Byte Swap Set 0x%x\n", mask);
} else {
mask =
INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
REG_WRITE(ah, AR_CFG, mask);
- ath_print(common, ATH_DBG_RESET,
+ ath_dbg(common, ATH_DBG_RESET,
"Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
}
} else {
@@ -1573,9 +1605,9 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
AR_RTC_FORCE_WAKE_EN);
}
if (i == 0) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Failed to wakeup in %uus\n",
- POWER_UP_TIME / 20);
+ ath_err(ath9k_hw_common(ah),
+ "Failed to wakeup in %uus\n",
+ POWER_UP_TIME / 20);
return false;
}
}
@@ -1599,8 +1631,8 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
if (ah->power_mode == mode)
return status;
- ath_print(common, ATH_DBG_RESET, "%s -> %s\n",
- modes[ah->power_mode], modes[mode]);
+ ath_dbg(common, ATH_DBG_RESET, "%s -> %s\n",
+ modes[ah->power_mode], modes[mode]);
switch (mode) {
case ATH9K_PM_AWAKE:
@@ -1614,12 +1646,20 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
ath9k_set_power_network_sleep(ah, setChip);
break;
default:
- ath_print(common, ATH_DBG_FATAL,
- "Unknown power mode %u\n", mode);
+ ath_err(common, "Unknown power mode %u\n", mode);
return false;
}
ah->power_mode = mode;
+ /*
+ * XXX: If this warning never comes up after a while then
+ * simply keep the ATH_DBG_WARN_ON_ONCE() but make
+ * ath9k_hw_setpower() return type void.
+ */
+
+ if (!(ah->ah_flags & AH_UNPLUGGED))
+ ATH_DBG_WARN_ON_ONCE(!status);
+
return status;
}
EXPORT_SYMBOL(ath9k_hw_setpower);
@@ -1632,17 +1672,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
{
int flags = 0;
- ah->beacon_interval = beacon_period;
-
ENABLE_REGWRITE_BUFFER(ah);
switch (ah->opmode) {
- case NL80211_IFTYPE_STATION:
- REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
- REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
- REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
- flags |= AR_TBTT_TIMER_EN;
- break;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
REG_SET_BIT(ah, AR_TXCFG,
@@ -1666,17 +1698,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
break;
default:
- if (ah->is_monitoring) {
- REG_WRITE(ah, AR_NEXT_TBTT_TIMER,
- TU_TO_USEC(next_beacon));
- REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
- REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
- flags |= AR_TBTT_TIMER_EN;
- break;
- }
- ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON,
- "%s: unsupported opmode: %d\n",
- __func__, ah->opmode);
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_BEACON,
+ "%s: unsupported opmode: %d\n",
+ __func__, ah->opmode);
return;
break;
}
@@ -1732,10 +1756,10 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
else
nextTbtt = bs->bs_nexttbtt;
- ath_print(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
- ath_print(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
- ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
- ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
+ ath_dbg(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
+ ath_dbg(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
+ ath_dbg(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
+ ath_dbg(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
ENABLE_REGWRITE_BUFFER(ah);
@@ -1781,7 +1805,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
u16 capField = 0, eeval;
- u8 ant_div_ctl1;
+ u8 ant_div_ctl1, tx_chainmask, rx_chainmask;
eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
regulatory->current_rd = eeval;
@@ -1800,14 +1824,14 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
regulatory->current_rd += 5;
else if (regulatory->current_rd == 0x41)
regulatory->current_rd = 0x43;
- ath_print(common, ATH_DBG_REGULATORY,
- "regdomain mapped to 0x%x\n", regulatory->current_rd);
+ ath_dbg(common, ATH_DBG_REGULATORY,
+ "regdomain mapped to 0x%x\n", regulatory->current_rd);
}
eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) {
- ath_print(common, ATH_DBG_FATAL,
- "no band has been marked as supported in EEPROM.\n");
+ ath_err(common,
+ "no band has been marked as supported in EEPROM\n");
return -EINVAL;
}
@@ -1833,6 +1857,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;
+ /* enable key search for every frame in an aggregate */
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH;
+
pCap->low_2ghz_chan = 2312;
pCap->high_2ghz_chan = 2732;
@@ -1921,13 +1949,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
AR_SREV_5416(ah))
pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
- pCap->num_antcfg_5ghz =
- ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_5GHZ);
- pCap->num_antcfg_2ghz =
- ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ);
-
- if (AR_SREV_9280_20_OR_LATER(ah) &&
- ath9k_hw_btcoex_supported(ah)) {
+ if (AR_SREV_9280_20_OR_LATER(ah) && common->btcoex_enabled) {
btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO;
btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO;
@@ -1942,14 +1964,17 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
}
if (AR_SREV_9300_20_OR_LATER(ah)) {
- pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_LDPC |
- ATH9K_HW_CAP_FASTCLOCK;
+ pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
+ if (!AR_SREV_9485(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
+
pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
pCap->rx_status_len = sizeof(struct ar9003_rxs);
pCap->tx_desc_len = sizeof(struct ar9003_txc);
pCap->txs_len = sizeof(struct ar9003_txs);
- if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
+ if (!ah->config.paprd_disable &&
+ ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
} else {
pCap->tx_desc_len = sizeof(struct ath_desc);
@@ -1963,6 +1988,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah))
pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ah->ent_mode = REG_READ(ah, AR_ENT_OTP);
+
if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
@@ -1973,6 +2001,29 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1))
pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
}
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE))
+ pCap->hw_caps |= ATH9K_HW_CAP_APM;
+ }
+
+ if (AR_SREV_9485_10(ah)) {
+ pCap->pcie_lcr_extsync_en = true;
+ pCap->pcie_lcr_offset = 0x80;
+ }
+
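+ /* Count the populated TX and RX chains from the chainmask bits. */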
+ tx_chainmask = pCap->tx_chainmask;
+ rx_chainmask = pCap->rx_chainmask;
+ while (tx_chainmask || rx_chainmask) {
+ if (tx_chainmask & BIT(0))
+ pCap->max_txchains++;
+ if (rx_chainmask & BIT(0))
+ pCap->max_rxchains++;
+
+ tx_chainmask >>= 1;
+ rx_chainmask >>= 1;
+ }
return 0;
}
@@ -2044,7 +2095,8 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
val = REG_READ(ah, AR7010_GPIO_IN);
return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
} else if (AR_SREV_9300_20_OR_LATER(ah))
- return MS_REG_READ(AR9300, gpio) != 0;
+ return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
+ AR_GPIO_BIT(gpio)) != 0;
else if (AR_SREV_9271(ah))
return MS_REG_READ(AR9271, gpio) != 0;
else if (AR_SREV_9287_11_OR_LATER(ah))
@@ -2176,7 +2228,7 @@ bool ath9k_hw_disable(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_disable);
-void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
+void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
{
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath9k_channel *chan = ah->curchan;
@@ -2189,7 +2241,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
channel->max_antenna_gain * 2,
channel->max_power * 2,
min((u32) MAX_RATE_POWER,
- (u32) regulatory->power_limit));
+ (u32) regulatory->power_limit), test);
}
EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
@@ -2249,8 +2301,8 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah)
{
if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0,
AH_TSF_WRITE_TIMEOUT))
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
+ "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
}
@@ -2323,11 +2375,10 @@ static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask)
return timer_table->gen_timer_index[b];
}
-u32 ath9k_hw_gettsf32(struct ath_hw *ah)
+static u32 ath9k_hw_gettsf32(struct ath_hw *ah)
{
return REG_READ(ah, AR_TSF_L32);
}
-EXPORT_SYMBOL(ath9k_hw_gettsf32);
struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
void (*trigger)(void *),
@@ -2341,9 +2392,9 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
if (timer == NULL) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "Failed to allocate memory"
- "for hw timer[%d]\n", timer_index);
+ ath_err(ath9k_hw_common(ah),
+ "Failed to allocate memory for hw timer[%d]\n",
+ timer_index);
return NULL;
}
@@ -2372,9 +2423,9 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
tsf = ath9k_hw_gettsf32(ah);
- ath_print(ath9k_hw_common(ah), ATH_DBG_HWTIMER,
- "curent tsf %x period %x"
- "timer_next %x\n", tsf, timer_period, timer_next);
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_HWTIMER,
+ "current tsf %x period %x timer_next %x\n",
+ tsf, timer_period, timer_next);
/*
* Pull timer_next forward if the current TSF already passed it
@@ -2454,8 +2505,8 @@ void ath_gen_timer_isr(struct ath_hw *ah)
index = rightmost_index(timer_table, &thresh_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
- ath_print(common, ATH_DBG_HWTIMER,
- "TSF overflow for Gen timer %d\n", index);
+ ath_dbg(common, ATH_DBG_HWTIMER,
+ "TSF overflow for Gen timer %d\n", index);
timer->overflow(timer->arg);
}
@@ -2463,8 +2514,8 @@ void ath_gen_timer_isr(struct ath_hw *ah)
index = rightmost_index(timer_table, &trigger_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
- ath_print(common, ATH_DBG_HWTIMER,
- "Gen timer[%d] trigger\n", index);
+ ath_dbg(common, ATH_DBG_HWTIMER,
+ "Gen timer[%d] trigger\n", index);
timer->trigger(timer->arg);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index d47d1b4b6002..6650fd48415c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -30,7 +30,6 @@
#include "btcoex.h"
#include "../regd.h"
-#include "../debug.h"
#define ATHEROS_VENDOR_ID 0x168c
@@ -44,6 +43,7 @@
#define AR9287_DEVID_PCI 0x002d
#define AR9287_DEVID_PCIE 0x002e
#define AR9300_DEVID_PCIE 0x0030
+#define AR9300_DEVID_AR9485_PCIE 0x0032
#define AR5416_AR9100_DEVID 0x000b
@@ -70,6 +70,9 @@
#define REG_READ(_ah, _reg) \
ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
+#define REG_READ_MULTI(_ah, _addr, _val, _cnt) \
+ ath9k_hw_common(_ah)->ops->multi_read((_ah), (_addr), (_val), (_cnt))
+
#define ENABLE_REGWRITE_BUFFER(_ah) \
do { \
if (ath9k_hw_common(_ah)->ops->enable_write_buffer) \
@@ -92,9 +95,9 @@
#define REG_READ_FIELD(_a, _r, _f) \
(((REG_READ(_a, _r) & _f) >> _f##_S))
#define REG_SET_BIT(_a, _r, _f) \
- REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
+ REG_WRITE(_a, _r, REG_READ(_a, _r) | (_f))
#define REG_CLR_BIT(_a, _r, _f) \
- REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f)
+ REG_WRITE(_a, _r, REG_READ(_a, _r) & ~(_f))
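
The added parentheses protect callers that pass a compound flag expression; REG_CLR_BIT is the riskier of the two because '~' binds tighter than '|' inside the expanded macro. A hedged illustration of the hazard using placeholder macros (not ath9k symbols):

#include <stdio.h>

#define FLAG_A 0x1
#define FLAG_B 0x2

/* Unparenthesized parameter: '~' applies only to FLAG_A after expansion. */
#define CLR_BAD(reg, flags)   ((reg) = (reg) & ~flags)
/* Parenthesized parameter: the whole flag expression is complemented. */
#define CLR_GOOD(reg, flags)  ((reg) = (reg) & ~(flags))

int main(void)
{
        unsigned int r1 = 0xff, r2 = 0xff;

        CLR_BAD(r1, FLAG_A | FLAG_B);   /* becomes r1 & ~0x1 | 0x2 */
        CLR_GOOD(r2, FLAG_A | FLAG_B);  /* becomes r2 & ~(0x1 | 0x2) */
        printf("bad=0x%x good=0x%x\n", r1, r2); /* prints bad=0xfe good=0xfc */
        return 0;
}
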
#define DO_DELAY(x) do { \
if ((++(x) % 64) == 0) \
@@ -157,6 +160,13 @@
#define PAPRD_GAIN_TABLE_ENTRIES 32
#define PAPRD_TABLE_SZ 24
+enum ath_hw_txq_subtype {
+ ATH_TXQ_AC_BE = 0,
+ ATH_TXQ_AC_BK = 1,
+ ATH_TXQ_AC_VI = 2,
+ ATH_TXQ_AC_VO = 3,
+};
+
enum ath_ini_subsys {
ATH_INI_PRE = 0,
ATH_INI_CORE,
@@ -180,6 +190,7 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_ANT_DIV_COMB = BIT(12),
ATH9K_HW_CAP_2GHZ = BIT(13),
ATH9K_HW_CAP_5GHZ = BIT(14),
+ ATH9K_HW_CAP_APM = BIT(15),
};
struct ath9k_hw_capabilities {
@@ -191,16 +202,18 @@ struct ath9k_hw_capabilities {
u16 rts_aggr_limit;
u8 tx_chainmask;
u8 rx_chainmask;
+ u8 max_txchains;
+ u8 max_rxchains;
u16 tx_triglevel_max;
u16 reg_cap;
u8 num_gpio_pins;
- u8 num_antcfg_2ghz;
- u8 num_antcfg_5ghz;
u8 rx_hp_qdepth;
u8 rx_lp_qdepth;
u8 rx_status_len;
u8 tx_desc_len;
u8 txs_len;
+ u16 pcie_lcr_offset;
+ bool pcie_lcr_extsync_en;
};
struct ath9k_ops_config {
@@ -215,6 +228,7 @@ struct ath9k_ops_config {
u32 pcie_waen;
u8 analog_shiftreg;
u8 ht_enable;
+ u8 paprd_disable;
u32 ofdm_trig_low;
u32 ofdm_trig_high;
u32 cck_trig_high;
@@ -226,7 +240,6 @@ struct ath9k_ops_config {
#define SPUR_DISABLE 0
#define SPUR_ENABLE_IOCTL 1
#define SPUR_ENABLE_EEPROM 2
-#define AR_EEPROM_MODAL_SPURS 5
#define AR_SPUR_5413_1 1640
#define AR_SPUR_5413_2 1200
#define AR_NO_SPUR 0x8000
@@ -434,6 +447,7 @@ struct ath9k_hw_version {
u16 analog5GhzRev;
u16 analog2GhzRev;
u16 subsysid;
+ enum ath_usb_dev usbdev;
};
/* Generic TSF timer definitions */
@@ -478,6 +492,40 @@ struct ath_hw_antcomb_conf {
};
/**
+ * struct ath_hw_radar_conf - radar detection initialization parameters
+ *
+ * @pulse_inband: threshold for checking the ratio of in-band power
+ * to total power for short radar pulses (half dB steps)
+ * @pulse_inband_step: threshold for checking an in-band power to total
+ * power ratio increase for short radar pulses (half dB steps)
+ * @pulse_height: threshold for detecting the beginning of a short
+ * radar pulse (dB step)
+ * @pulse_rssi: threshold for detecting if a short radar pulse is
+ * gone (dB step)
+ * @pulse_maxlen: maximum pulse length (0.8 us steps)
+ *
+ * @radar_rssi: RSSI threshold for starting long radar detection (dB steps)
+ * @radar_inband: threshold for checking the ratio of in-band power
+ * to total power for long radar pulses (half dB steps)
+ * @fir_power: threshold for detecting the end of a long radar pulse (dB)
+ *
+ * @ext_channel: enable extension channel radar detection
+ */
+struct ath_hw_radar_conf {
+ unsigned int pulse_inband;
+ unsigned int pulse_inband_step;
+ unsigned int pulse_height;
+ unsigned int pulse_rssi;
+ unsigned int pulse_maxlen;
+
+ unsigned int radar_rssi;
+ unsigned int radar_inband;
+ int fir_power;
+
+ bool ext_channel;
+};
+
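
struct ath_hw_radar_conf centralises the radar pulse thresholds that a chip backend later programs through its set_radar_params() hook. A hedged sketch of filling it, with placeholder numbers chosen only to match the documented units (they are not taken from any ath9k initvals):

/*
 * Hedged sketch only: populating the new per-chip radar thresholds.
 * The numeric values below are placeholders in the documented units;
 * a chip backend would consume the filled structure through its
 * set_radar_params() hook.
 */
static void example_fill_radar_conf(struct ath_hw *ah)
{
        struct ath_hw_radar_conf *conf = &ah->radar_conf;

        conf->pulse_inband = 8;         /* half-dB steps */
        conf->pulse_inband_step = 4;    /* half-dB steps */
        conf->pulse_height = 10;        /* dB steps */
        conf->pulse_rssi = 24;          /* dB steps */
        conf->pulse_maxlen = 20;        /* 0.8 us steps */

        conf->radar_rssi = 25;          /* dB steps */
        conf->radar_inband = 6;         /* half-dB steps */
        conf->fir_power = -33;          /* dB */

        conf->ext_channel = true;       /* also watch the HT40 extension channel */
}
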
+/**
* struct ath_hw_private_ops - callbacks used internally by hardware code
*
* This structure contains private callbacks designed to only be used internally
@@ -488,7 +536,6 @@ struct ath_hw_antcomb_conf {
*
* @init_mode_regs: Initializes mode registers
* @init_mode_gain_regs: Initialize TX/RX gain registers
- * @macversion_supported: If this specific mac revision is supported
*
* @rf_set_freq: change frequency
* @spur_mitigate_freq: spur mitigation
@@ -510,7 +557,6 @@ struct ath_hw_private_ops {
void (*init_mode_regs)(struct ath_hw *ah);
void (*init_mode_gain_regs)(struct ath_hw *ah);
- bool (*macversion_supported)(u32 macversion);
void (*setup_calibration)(struct ath_hw *ah,
struct ath9k_cal_list *currCal);
@@ -534,7 +580,6 @@ struct ath_hw_private_ops {
void (*set_delta_slope)(struct ath_hw *ah, struct ath9k_channel *chan);
bool (*rfbus_req)(struct ath_hw *ah);
void (*rfbus_done)(struct ath_hw *ah);
- void (*enable_rfkill)(struct ath_hw *ah);
void (*restore_chainmask)(struct ath_hw *ah);
void (*set_diversity)(struct ath_hw *ah, bool value);
u32 (*compute_pll_control)(struct ath_hw *ah,
@@ -542,6 +587,8 @@ struct ath_hw_private_ops {
bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd,
int param);
void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
+ void (*set_radar_params)(struct ath_hw *ah,
+ struct ath_hw_radar_conf *conf);
/* ANI */
void (*ani_cache_ini_regs)(struct ath_hw *ah);
@@ -603,6 +650,10 @@ struct ath_nf_limits {
s16 nominal;
};
+/* ah_flags */
+#define AH_USE_EEPROM 0x1
+#define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
+
struct ath_hw {
struct ieee80211_hw *hw;
struct ath_common common;
@@ -718,9 +769,7 @@ struct ath_hw {
u32 *bank6Temp;
u8 txpower_limit;
- int16_t txpower_indexoffset;
int coverage_class;
- u32 beacon_interval;
u32 slottime;
u32 globaltxtimeout;
@@ -740,6 +789,8 @@ struct ath_hw {
u8 txchainmask;
u8 rxchainmask;
+ struct ath_hw_radar_conf radar_conf;
+
u32 originalGain[22];
int initPDADC;
int PDADCdelta;
@@ -789,6 +840,11 @@ struct ath_hw {
u32 bb_watchdog_last_status;
u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
+ unsigned int paprd_target_power;
+ unsigned int paprd_training_power;
+ unsigned int paprd_ratemask;
+ unsigned int paprd_ratemask_ht40;
+ bool paprd_table_write_done;
u32 paprd_gain_table_entries[PAPRD_GAIN_TABLE_ENTRIES];
u8 paprd_gain_table_index[PAPRD_GAIN_TABLE_ENTRIES];
/*
@@ -797,6 +853,9 @@ struct ath_hw {
* this register when in sleep states.
*/
u32 WARegVal;
+
+ /* Enterprise mode cap */
+ u32 ent_mode;
};
static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -819,10 +878,9 @@ static inline struct ath_hw_ops *ath9k_hw_ops(struct ath_hw *ah)
return &ah->ops;
}
-static inline int sign_extend(int val, const int nbits)
+static inline u8 get_streams(int mask)
{
- int order = BIT(nbits-1);
- return (val ^ order) - order;
+ return !!(mask & BIT(0)) + !!(mask & BIT(1)) + !!(mask & BIT(2));
}
/* Initialization, Detach, Reset */
@@ -861,7 +919,7 @@ u32 ath9k_hw_getrxfilter(struct ath_hw *ah);
void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits);
bool ath9k_hw_phy_disable(struct ath_hw *ah);
bool ath9k_hw_disable(struct ath_hw *ah);
-void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit);
+void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test);
void ath9k_hw_setopmode(struct ath_hw *ah);
void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
void ath9k_hw_setbssidmask(struct ath_hw *ah);
@@ -871,6 +929,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
+unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
@@ -893,7 +952,6 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer);
void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer);
void ath_gen_timer_isr(struct ath_hw *hw);
-u32 ath9k_hw_gettsf32(struct ath_hw *ah);
void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 92bc5c5f4876..79aec983279f 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -15,7 +15,6 @@
*/
#include <linux/slab.h>
-#include <linux/pm_qos_params.h>
#include "ath9k.h"
@@ -30,17 +29,23 @@ static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");
-int modparam_nohwcrypt;
-module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
+int ath9k_modparam_nohwcrypt;
+module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");
+static int ath9k_btcoex_enable;
+module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
+MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+
+bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
#define CHAN2G(_freq, _idx) { \
+ .band = IEEE80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 20, \
@@ -135,6 +140,21 @@ static struct ieee80211_rate ath9k_legacy_rates[] = {
RATE(540, 0x0c, 0),
};
+#ifdef CONFIG_MAC80211_LEDS
+static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
+ { .throughput = 0 * 1024, .blink_time = 334 },
+ { .throughput = 1 * 1024, .blink_time = 260 },
+ { .throughput = 5 * 1024, .blink_time = 220 },
+ { .throughput = 10 * 1024, .blink_time = 190 },
+ { .throughput = 20 * 1024, .blink_time = 170 },
+ { .throughput = 50 * 1024, .blink_time = 150 },
+ { .throughput = 70 * 1024, .blink_time = 130 },
+ { .throughput = 100 * 1024, .blink_time = 110 },
+ { .throughput = 200 * 1024, .blink_time = 80 },
+ { .throughput = 300 * 1024, .blink_time = 50 },
+};
+#endif
+
static void ath9k_deinit_softc(struct ath_softc *sc);
/*
@@ -180,8 +200,6 @@ static const struct ath_ops ath9k_common_ops = {
.write = ath9k_iowrite32,
};
-struct pm_qos_request_list ath9k_pm_qos_req;
-
/**************************/
/* Initialization */
/**************************/
@@ -209,7 +227,9 @@ static void setup_ht_cap(struct ath_softc *sc,
ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
- if (AR_SREV_9300_20_OR_LATER(ah))
+ if (AR_SREV_9485(ah))
+ max_streams = 1;
+ else if (AR_SREV_9300_20_OR_LATER(ah))
max_streams = 3;
else
max_streams = 2;
@@ -225,9 +245,9 @@ static void setup_ht_cap(struct ath_softc *sc,
tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);
- ath_print(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
if (tx_streams != rx_streams) {
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
@@ -245,8 +265,7 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
return ath_reg_notifier_apply(wiphy, request, reg);
@@ -270,8 +289,8 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct ath_buf *bf;
int i, bsize, error, desc_len;
- ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
- name, nbuf, ndesc);
+ ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
+ name, nbuf, ndesc);
INIT_LIST_HEAD(head);
@@ -282,8 +301,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
/* ath_desc must be a multiple of DWORDs */
if ((desc_len % 4) != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "ath_desc not DWORD aligned\n");
+ ath_err(common, "ath_desc not DWORD aligned\n");
BUG_ON((desc_len % 4) != 0);
error = -ENOMEM;
goto fail;
@@ -317,9 +335,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
goto fail;
}
ds = (u8 *) dd->dd_desc;
- ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
- name, ds, (u32) dd->dd_desc_len,
- ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
+ ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
+ name, ds, (u32) dd->dd_desc_len,
+ ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
/* allocate buffers */
bsize = sizeof(struct ath_buf) * nbuf;
@@ -365,7 +383,7 @@ fail:
#undef DS2PHYS
}
-static void ath9k_init_crypto(struct ath_softc *sc)
+void ath9k_init_crypto(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int i = 0;
@@ -373,9 +391,9 @@ static void ath9k_init_crypto(struct ath_softc *sc)
/* Get the hardware key cache size. */
common->keymax = sc->sc_ah->caps.keycache_size;
if (common->keymax > ATH_KEYMAX) {
- ath_print(common, ATH_DBG_ANY,
- "Warning, using only %u entries in %u key cache\n",
- ATH_KEYMAX, common->keymax);
+ ath_dbg(common, ATH_DBG_ANY,
+ "Warning, using only %u entries in %u key cache\n",
+ ATH_KEYMAX, common->keymax);
common->keymax = ATH_KEYMAX;
}
@@ -398,7 +416,8 @@ static void ath9k_init_crypto(struct ath_softc *sc)
static int ath9k_init_btcoex(struct ath_softc *sc)
{
- int r, qnum;
+ struct ath_txq *txq;
+ int r;
switch (sc->sc_ah->btcoex_hw.scheme) {
case ATH_BTCOEX_CFG_NONE:
@@ -411,8 +430,8 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
r = ath_init_btcoex_timer(sc);
if (r)
return -1;
- qnum = sc->tx.hwq_map[WME_AC_BE];
- ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
+ txq = sc->tx.txq_map[WME_AC_BE];
+ ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
break;
default:
@@ -425,59 +444,19 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
static int ath9k_init_queues(struct ath_softc *sc)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int i = 0;
- for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
- sc->tx.hwq_map[i] = -1;
-
sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
- if (sc->beacon.beaconq == -1) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup a beacon xmit queue\n");
- goto err;
- }
-
sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
- if (sc->beacon.cabq == NULL) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup CAB xmit queue\n");
- goto err;
- }
sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
- if (!ath_tx_setup(sc, WME_AC_BK)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BK traffic\n");
- goto err;
- }
-
- if (!ath_tx_setup(sc, WME_AC_BE)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BE traffic\n");
- goto err;
- }
- if (!ath_tx_setup(sc, WME_AC_VI)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VI traffic\n");
- goto err;
- }
- if (!ath_tx_setup(sc, WME_AC_VO)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VO traffic\n");
- goto err;
+ for (i = 0; i < WME_NUM_AC; i++) {
+ sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
+ sc->tx.txq_map[i]->mac80211_qnum = i;
}
-
return 0;
-
-err:
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
- return -EIO;
}
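
With this rewrite every WME access class gets its own hardware queue up front, so later code can index sc->tx.txq_map[] directly, as the PAPRD hunk further down does with txctl.txq = sc->tx.txq_map[WME_AC_BE]. A minimal sketch of that lookup (the helper name is illustrative):

/* Illustrative only: pick the hardware TX queue for a WME access class. */
static struct ath_txq *example_select_txq(struct ath_softc *sc, int wme_ac)
{
        /* txq_map[] is filled once per AC in ath9k_init_queues() above. */
        return sc->tx.txq_map[wme_ac];
}
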
static int ath9k_init_channels_rates(struct ath_softc *sc)
@@ -548,10 +527,8 @@ static void ath9k_init_misc(struct ath_softc *sc)
sc->beacon.slottime = ATH9K_SLOT_TIME_9;
- for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
+ for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
sc->beacon.bslot[i] = NULL;
- sc->beacon.bslot_aphy[i] = NULL;
- }
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
@@ -569,10 +546,14 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
if (!ah)
return -ENOMEM;
+ ah->hw = sc->hw;
ah->hw_version.devid = devid;
ah->hw_version.subsysid = subsysid;
sc->sc_ah = ah;
+ if (!sc->dev->platform_data)
+ ah->ah_flags |= AH_USE_EEPROM;
+
common = ath9k_hw_common(ah);
common->ops = &ath9k_common_ops;
common->bus_ops = bus_ops;
@@ -580,13 +561,16 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
common->hw = sc->hw;
common->priv = sc;
common->debug_mask = ath9k_debug;
+ common->btcoex_enabled = ath9k_btcoex_enable == 1;
spin_lock_init(&common->cc_lock);
- spin_lock_init(&sc->wiphy_lock);
- spin_lock_init(&sc->sc_resetlock);
spin_lock_init(&sc->sc_serial_rw);
spin_lock_init(&sc->sc_pm_lock);
mutex_init(&sc->mutex);
+#ifdef CONFIG_ATH9K_DEBUGFS
+ spin_lock_init(&sc->nodes_lock);
+ INIT_LIST_HEAD(&sc->nodes);
+#endif
tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
(unsigned long)sc);
@@ -603,13 +587,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
if (ret)
goto err_hw;
- ret = ath9k_init_debug(ah);
- if (ret) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to create debugfs files\n");
- goto err_debug;
- }
-
ret = ath9k_init_queues(sc);
if (ret)
goto err_queues;
@@ -632,12 +609,8 @@ err_btcoex:
if (ATH_TXQ_SETUP(sc, i))
ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
- ath9k_exit_debug(ah);
-err_debug:
ath9k_hw_deinit(ah);
err_hw:
- tasklet_kill(&sc->intr_tq);
- tasklet_kill(&sc->bcon_tasklet);
kfree(ah);
sc->sc_ah = NULL;
@@ -645,6 +618,37 @@ err_hw:
return ret;
}
+static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+ int i;
+
+ sband = &sc->sbands[band];
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+ ah->curchan = &ah->channels[chan->hw_value];
+ ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
+ ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
+ chan->max_power = reg->max_power_level / 2;
+ }
+}
+
+static void ath9k_init_txpower_limits(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_channel *curchan = ah->curchan;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
+ ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
+ ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
+
+ ah->curchan = curchan;
+}
+
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -660,10 +664,12 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
- if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
+ if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_WDS) |
BIT(NL80211_IFTYPE_STATION) |
@@ -738,11 +744,26 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
if (error != 0)
goto error_rx;
+ ath9k_init_txpower_limits(sc);
+
+#ifdef CONFIG_MAC80211_LEDS
+ /* must be initialized before ieee80211_register_hw */
+ sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
+ ARRAY_SIZE(ath9k_tpt_blink));
+#endif
+
/* Register with mac80211 */
error = ieee80211_register_hw(hw);
if (error)
goto error_register;
+ error = ath9k_init_debug(ah);
+ if (error) {
+ ath_err(common, "Unable to create debugfs files\n");
+ goto error_world;
+ }
+
/* Handle world regulatory */
if (!ath_is_world_regd(reg)) {
error = regulatory_hint(hw->wiphy, reg->alpha2);
@@ -752,16 +773,11 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
INIT_WORK(&sc->hw_check_work, ath_hw_check);
INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
- INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
- INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
- sc->wiphy_scheduler_int = msecs_to_jiffies(500);
+ sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
ath_init_leds(sc);
ath_start_rfkill_poll(sc);
- pm_qos_add_request(&ath9k_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-
return 0;
error_world:
@@ -800,12 +816,8 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
if (ATH_TXQ_SETUP(sc, i))
ath_tx_cleanupq(sc, &sc->tx.txq[i]);
- ath9k_exit_debug(sc->sc_ah);
ath9k_hw_deinit(sc->sc_ah);
- tasklet_kill(&sc->intr_tq);
- tasklet_kill(&sc->bcon_tasklet);
-
kfree(sc->sc_ah);
sc->sc_ah = NULL;
}
@@ -813,28 +825,18 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
void ath9k_deinit_device(struct ath_softc *sc)
{
struct ieee80211_hw *hw = sc->hw;
- int i = 0;
ath9k_ps_wakeup(sc);
wiphy_rfkill_stop_polling(sc->hw->wiphy);
ath_deinit_leds(sc);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy == NULL)
- continue;
- sc->sec_wiphy[i] = NULL;
- ieee80211_unregister_hw(aphy->hw);
- ieee80211_free_hw(aphy->hw);
- }
+ ath9k_ps_restore(sc);
ieee80211_unregister_hw(hw);
- pm_qos_remove_request(&ath9k_pm_qos_req);
ath_rx_cleanup(sc);
ath_tx_cleanup(sc);
ath9k_deinit_softc(sc);
- kfree(sc->sec_wiphy);
}
void ath_descdma_cleanup(struct ath_softc *sc,
@@ -867,20 +869,12 @@ static int __init ath9k_init(void)
goto err_out;
}
- error = ath9k_debug_create_root();
- if (error) {
- printk(KERN_ERR
- "ath9k: Unable to create debugfs root: %d\n",
- error);
- goto err_rate_unregister;
- }
-
error = ath_pci_init();
if (error < 0) {
printk(KERN_ERR
"ath9k: No PCI devices found, driver not installed.\n");
error = -ENODEV;
- goto err_remove_root;
+ goto err_rate_unregister;
}
error = ath_ahb_init();
@@ -894,8 +888,6 @@ static int __init ath9k_init(void)
err_pci_exit:
ath_pci_exit();
- err_remove_root:
- ath9k_debug_remove_root();
err_rate_unregister:
ath_rate_control_unregister();
err_out:
@@ -905,9 +897,9 @@ module_init(ath9k_init);
static void __exit ath9k_exit(void)
{
+ is_ath9k_unloaded = true;
ath_ahb_exit();
ath_pci_exit();
- ath9k_debug_remove_root();
ath_rate_control_unregister();
printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 8c13479b17cd..562257ac52cf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -20,11 +20,11 @@
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
struct ath9k_tx_queue_info *qi)
{
- ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
- "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
- ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
- ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
- ah->txurn_interrupt_mask);
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
+ "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
+ ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
+ ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
+ ah->txurn_interrupt_mask);
ENABLE_REGWRITE_BUFFER(ah);
@@ -56,8 +56,8 @@ EXPORT_SYMBOL(ath9k_hw_puttxbuf);
void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
- ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE,
- "Enable TXE on queue: %u\n", q);
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
+ "Enable TXE on queue: %u\n", q);
REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);
@@ -117,12 +117,11 @@ EXPORT_SYMBOL(ath9k_hw_numtxpending);
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
u32 txcfg, curLevel, newLevel;
- enum ath9k_int omask;
if (ah->tx_trig_level >= ah->config.max_txtrig_level)
return false;
- omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);
+ ath9k_hw_disable_interrupts(ah);
txcfg = REG_READ(ah, AR_TXCFG);
curLevel = MS(txcfg, AR_FTRIG);
@@ -136,7 +135,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
REG_WRITE(ah, AR_TXCFG,
(txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));
- ath9k_hw_set_interrupts(ah, omask);
+ ath9k_hw_enable_interrupts(ah);
ah->tx_trig_level = newLevel;
@@ -144,85 +143,59 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
-bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
+void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
-#define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */
-#define ATH9K_TIME_QUANTUM 100 /* usec */
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_hw_capabilities *pCap = &ah->caps;
- struct ath9k_tx_queue_info *qi;
- u32 tsfLow, j, wait;
- u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
+ int i, q;
- if (q >= pCap->total_queues) {
- ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
- "invalid queue: %u\n", q);
- return false;
- }
+ REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);
- qi = &ah->txq[q];
- if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
- "inactive queue: %u\n", q);
- return false;
- }
+ REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
+ REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+ REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
- REG_WRITE(ah, AR_Q_TXD, 1 << q);
+ for (q = 0; q < AR_NUM_QCU; q++) {
+ for (i = 0; i < 1000; i++) {
+ if (i)
+ udelay(5);
- for (wait = wait_time; wait != 0; wait--) {
- if (ath9k_hw_numtxpending(ah, q) == 0)
- break;
- udelay(ATH9K_TIME_QUANTUM);
+ if (!ath9k_hw_numtxpending(ah, q))
+ break;
+ }
}
- if (ath9k_hw_numtxpending(ah, q)) {
- ath_print(common, ATH_DBG_QUEUE,
- "%s: Num of pending TX Frames %d on Q %d\n",
- __func__, ath9k_hw_numtxpending(ah, q), q);
-
- for (j = 0; j < 2; j++) {
- tsfLow = REG_READ(ah, AR_TSF_L32);
- REG_WRITE(ah, AR_QUIET2,
- SM(10, AR_QUIET2_QUIET_DUR));
- REG_WRITE(ah, AR_QUIET_PERIOD, 100);
- REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
- REG_SET_BIT(ah, AR_TIMER_MODE,
- AR_QUIET_TIMER_EN);
-
- if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
- break;
+ REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
+ REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+ REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
- ath_print(common, ATH_DBG_QUEUE,
- "TSF has moved while trying to set "
- "quiet time TSF: 0x%08x\n", tsfLow);
- }
+ REG_WRITE(ah, AR_Q_TXD, 0);
+}
+EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);
- REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
+{
+#define ATH9K_TX_STOP_DMA_TIMEOUT 1000 /* usec */
+#define ATH9K_TIME_QUANTUM 100 /* usec */
+ int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
+ int wait;
- udelay(200);
- REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
+ REG_WRITE(ah, AR_Q_TXD, 1 << q);
- wait = wait_time;
- while (ath9k_hw_numtxpending(ah, q)) {
- if ((--wait) == 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Failed to stop TX DMA in 100 "
- "msec after killing last frame\n");
- break;
- }
+ for (wait = wait_time; wait != 0; wait--) {
+ if (wait != wait_time)
udelay(ATH9K_TIME_QUANTUM);
- }
- REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
+ if (ath9k_hw_numtxpending(ah, q) == 0)
+ break;
}
REG_WRITE(ah, AR_Q_TXD, 0);
+
return wait != 0;
#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
-EXPORT_SYMBOL(ath9k_hw_stoptxdma);
+EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);
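
The old ath9k_hw_stoptxdma() is split into two tools: ath9k_hw_abort_tx_dma() force-quiets the MAC and drains every QCU, while ath9k_hw_stop_dma_queue() only pokes AR_Q_TXD for one queue and polls for roughly a millisecond. A hedged sketch of how a drain path might combine them (the wrapper name is illustrative):

/* Illustrative wrapper: stop one data queue, escalate to a full abort. */
static void example_drain_txq(struct ath_hw *ah, u32 qnum)
{
        if (ath9k_hw_stop_dma_queue(ah, qnum))
                return;         /* the queue went idle within the timeout */

        /* Still busy: force-quiet the MAC and flush every hardware queue. */
        ath9k_hw_abort_tx_dma(ah);
}
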
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
@@ -240,19 +213,19 @@ bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
struct ath9k_tx_queue_info *qi;
if (q >= pCap->total_queues) {
- ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
- "invalid queue: %u\n", q);
+ ath_dbg(common, ATH_DBG_QUEUE,
+ "Set TXQ properties, invalid queue: %u\n", q);
return false;
}
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
- "inactive queue: %u\n", q);
+ ath_dbg(common, ATH_DBG_QUEUE,
+ "Set TXQ properties, inactive queue: %u\n", q);
return false;
}
- ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);
+ ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);
qi->tqi_ver = qinfo->tqi_ver;
qi->tqi_subtype = qinfo->tqi_subtype;
@@ -311,15 +284,15 @@ bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
struct ath9k_tx_queue_info *qi;
if (q >= pCap->total_queues) {
- ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
- "invalid queue: %u\n", q);
+ ath_dbg(common, ATH_DBG_QUEUE,
+ "Get TXQ properties, invalid queue: %u\n", q);
return false;
}
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
- "inactive queue: %u\n", q);
+ ath_dbg(common, ATH_DBG_QUEUE,
+ "Get TXQ properties, inactive queue: %u\n", q);
return false;
}
@@ -369,23 +342,20 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
ATH9K_TX_QUEUE_INACTIVE)
break;
if (q == pCap->total_queues) {
- ath_print(common, ATH_DBG_FATAL,
- "No available TX queue\n");
+ ath_err(common, "No available TX queue\n");
return -1;
}
break;
default:
- ath_print(common, ATH_DBG_FATAL,
- "Invalid TX queue type: %u\n", type);
+ ath_err(common, "Invalid TX queue type: %u\n", type);
return -1;
}
- ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);
+ ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);
qi = &ah->txq[q];
if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
- ath_print(common, ATH_DBG_FATAL,
- "TX queue: %u already active\n", q);
+ ath_err(common, "TX queue: %u already active\n", q);
return -1;
}
memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -417,18 +387,18 @@ bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
struct ath9k_tx_queue_info *qi;
if (q >= pCap->total_queues) {
- ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
- "invalid queue: %u\n", q);
+ ath_dbg(common, ATH_DBG_QUEUE,
+ "Release TXQ, invalid queue: %u\n", q);
return false;
}
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
- "inactive queue: %u\n", q);
+ ath_dbg(common, ATH_DBG_QUEUE,
+ "Release TXQ, inactive queue: %u\n", q);
return false;
}
- ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
+ ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
ah->txok_interrupt_mask &= ~(1 << q);
@@ -451,19 +421,19 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
u32 cwMin, chanCwMin, value;
if (q >= pCap->total_queues) {
- ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
- "invalid queue: %u\n", q);
+ ath_dbg(common, ATH_DBG_QUEUE,
+ "Reset TXQ, invalid queue: %u\n", q);
return false;
}
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
- "inactive queue: %u\n", q);
+ ath_dbg(common, ATH_DBG_QUEUE,
+ "Reset TXQ, inactive queue: %u\n", q);
return true;
}
- ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);
+ ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);
if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
if (chan && IS_CHAN_B(chan))
@@ -695,6 +665,12 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
+ /*
+ * Treat these errors as mutually exclusive to avoid spurious
+ * extra error reports from the hardware. If a CRC error is
+ * reported, then decryption and MIC errors are irrelevant,
+ * the frame is going to be dropped either way
+ */
if (ads.ds_rxstatus8 & AR_CRCErr)
rs->rs_status |= ATH9K_RXERR_CRC;
else if (ads.ds_rxstatus8 & AR_PHYErr) {
@@ -703,10 +679,10 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_phyerr = phyerr;
} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
- else if ((ads.ds_rxstatus8 & AR_MichaelErr) &&
- rs->rs_keyix != ATH9K_RXKEYIX_INVALID)
+ else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
- else if (ads.ds_rxstatus8 & AR_KeyMiss)
+
+ if (ads.ds_rxstatus8 & AR_KeyMiss)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
}
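
Taking the hunk as a whole, CRC, PHY, decrypt-CRC and Michael errors remain mutually exclusive, but a key-cache miss is now reported independently of that chain instead of being its last else-if branch. A condensed sketch of the resulting mapping (simplified; the real code also records rs_phyerr):

/* Illustrative only: RX error flags after this change. */
static u32 example_rx_error_flags(u32 rxstatus8)
{
        u32 status = 0;

        if (rxstatus8 & AR_CRCErr)
                status |= ATH9K_RXERR_CRC;
        else if (rxstatus8 & AR_PHYErr)
                status |= ATH9K_RXERR_PHY;
        else if (rxstatus8 & AR_DecryptCRCErr)
                status |= ATH9K_RXERR_DECRYPT;
        else if (rxstatus8 & AR_MichaelErr)
                status |= ATH9K_RXERR_MIC;

        /* Key-cache miss is flagged regardless of the chain above. */
        if (rxstatus8 & AR_KeyMiss)
                status |= ATH9K_RXERR_DECRYPT;

        return status;
}
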
@@ -736,9 +712,9 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
AR_DIAG_RX_ABORT));
reg = REG_READ(ah, AR_OBS_BUS_1);
- ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
- "RX failed to go idle in 10 ms RXSM=0x%x\n",
- reg);
+ ath_err(ath9k_hw_common(ah),
+ "RX failed to go idle in 10 ms RXSM=0x%x\n",
+ reg);
return false;
}
@@ -767,14 +743,6 @@ void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);
-void ath9k_hw_stoppcurecv(struct ath_hw *ah)
-{
- REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
-
- ath9k_hw_disable_mib_counters(ah);
-}
-EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
-
void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);
@@ -800,12 +768,11 @@ bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
}
if (i == 0) {
- ath_print(common, ATH_DBG_FATAL,
- "DMA failed to stop in %d ms "
- "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
- AH_RX_STOP_DMA_TIMEOUT / 1000,
- REG_READ(ah, AR_CR),
- REG_READ(ah, AR_DIAG_SW));
+ ath_err(common,
+ "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+ AH_RX_STOP_DMA_TIMEOUT / 1000,
+ REG_READ(ah, AR_CR),
+ REG_READ(ah, AR_DIAG_SW));
return false;
} else {
return true;
@@ -849,28 +816,59 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_intrpend);
-enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
- enum ath9k_int ints)
+void ath9k_hw_disable_interrupts(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
+ REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
+ (void) REG_READ(ah, AR_IER);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
+ (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
+ }
+}
+EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
+
+void ath9k_hw_enable_interrupts(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ah->imask & ATH9K_INT_GLOBAL))
+ return;
+
+ ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
+ REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
+ if (!AR_SREV_9100(ah)) {
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
+ AR_INTR_MAC_IRQ);
+ REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
+
+
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
+ AR_INTR_SYNC_DEFAULT);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK,
+ AR_INTR_SYNC_DEFAULT);
+ }
+ ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
+ REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
+}
+EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
+
+void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
{
enum ath9k_int omask = ah->imask;
u32 mask, mask2;
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
- ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
-
- if (omask & ATH9K_INT_GLOBAL) {
- ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
- REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
- (void) REG_READ(ah, AR_IER);
- if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
+ if (!(ints & ATH9K_INT_GLOBAL))
+ ath9k_hw_disable_interrupts(ah);
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
- }
- }
+ ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
/* TODO: global int Ref count */
mask = ints & ATH9K_INT_COMMON;
@@ -931,7 +929,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
mask2 |= AR_IMR_S2_CST;
}
- ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
+ ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
REG_WRITE(ah, AR_IMR, mask);
ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
@@ -946,24 +944,9 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
}
- if (ints & ATH9K_INT_GLOBAL) {
- ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
- REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
- if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
- AR_INTR_MAC_IRQ);
- REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
-
-
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
- AR_INTR_SYNC_DEFAULT);
- REG_WRITE(ah, AR_INTR_SYNC_MASK,
- AR_INTR_SYNC_DEFAULT);
- }
- ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
- REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
- }
+ if (ints & ATH9K_INT_GLOBAL)
+ ath9k_hw_enable_interrupts(ah);
- return omask;
+ return;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);
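
Because ath9k_hw_set_interrupts() no longer returns the previous mask, the save/restore idiom is gone; critical sections simply bracket themselves with the new disable/enable helpers, exactly as the ath9k_hw_updatetxtriglevel() hunk above now does. A minimal sketch of the pattern:

/* Illustrative only: guard a hardware update with the new helpers. */
static void example_touch_hw(struct ath_hw *ah)
{
        ath9k_hw_disable_interrupts(ah);        /* masks IER plus async/sync sources */

        /* ... reprogram registers that must not race the ISR ... */

        ath9k_hw_enable_interrupts(ah);         /* no-op unless ah->imask has ATH9K_INT_GLOBAL */
}
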
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 7c1a34d64f6d..b2b2ff852c32 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -104,13 +104,11 @@ struct ath_tx_status {
u32 ts_tstamp;
u16 ts_seqnum;
u8 ts_status;
- u8 ts_ratecode;
u8 ts_rateindex;
int8_t ts_rssi;
u8 ts_shortretry;
u8 ts_longretry;
u8 ts_virtcol;
- u8 ts_antenna;
u8 ts_flags;
int8_t ts_rssi_ctl0;
int8_t ts_rssi_ctl1;
@@ -121,7 +119,6 @@ struct ath_tx_status {
u8 qid;
u16 desc_id;
u8 tid;
- u8 pad[2];
u32 ba_low;
u32 ba_high;
u32 evm0;
@@ -240,7 +237,7 @@ struct ath_desc {
u32 ds_ctl1;
u32 ds_hw[20];
void *ds_vdata;
-} __packed;
+} __packed __aligned(4);
#define ATH9K_TXDESC_CLRDMASK 0x0001
#define ATH9K_TXDESC_NOACK 0x0002
@@ -310,7 +307,7 @@ struct ar5416_desc {
u32 status8;
} rx;
} u;
-} __packed;
+} __packed __aligned(4);
#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds))
#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds))
@@ -642,6 +639,8 @@ enum ath9k_rx_filter {
ATH9K_RX_FILTER_PHYERR = 0x00000100,
ATH9K_RX_FILTER_MYBEACON = 0x00000200,
ATH9K_RX_FILTER_COMP_BAR = 0x00000400,
+ ATH9K_RX_FILTER_COMP_BA = 0x00000800,
+ ATH9K_RX_FILTER_UNCOMP_BA_BAR = 0x00001000,
ATH9K_RX_FILTER_PSPOLL = 0x00004000,
ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,
@@ -669,6 +668,7 @@ enum ath9k_key_type {
struct ath_hw;
struct ath9k_channel;
+enum ath9k_int;
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
@@ -676,7 +676,8 @@ void ath9k_hw_txstart(struct ath_hw *ah, u32 q);
void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds);
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
-bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q);
+bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q);
+void ath9k_hw_abort_tx_dma(struct ath_hw *ah);
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs);
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
const struct ath9k_tx_queue_info *qinfo);
@@ -693,15 +694,15 @@ void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
-void ath9k_hw_stoppcurecv(struct ath_hw *ah);
void ath9k_hw_abortpcurecv(struct ath_hw *ah);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
int ath9k_hw_beaconq_setup(struct ath_hw *ah);
/* Interrupt Handling */
bool ath9k_hw_intrpend(struct ath_hw *ah);
-enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
- enum ath9k_int ints);
+void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints);
+void ath9k_hw_enable_interrupts(struct ath_hw *ah);
+void ath9k_hw_disable_interrupts(struct ath_hw *ah);
void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 25d3ef4c338e..115f162c617a 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -15,21 +15,10 @@
*/
#include <linux/nl80211.h>
-#include <linux/pm_qos_params.h>
+#include <linux/delay.h>
#include "ath9k.h"
#include "btcoex.h"
-static void ath_update_txpow(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- if (sc->curtxpow != sc->config.txpowlimit) {
- ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
- /* read back in case value is clamped */
- sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
- }
-}
-
static u8 parse_mpdudensity(u8 mpdudensity)
{
/*
@@ -65,17 +54,19 @@ static u8 parse_mpdudensity(u8 mpdudensity)
}
}
-static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
- struct ieee80211_hw *hw)
+static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
{
- struct ieee80211_channel *curchan = hw->conf.channel;
- struct ath9k_channel *channel;
- u8 chan_idx;
+ bool pending = false;
- chan_idx = curchan->hw_value;
- channel = &sc->sc_ah->channels[chan_idx];
- ath9k_update_ichannel(sc, hw, channel);
- return channel;
+ spin_lock_bh(&txq->axq_lock);
+
+ if (txq->axq_depth || !list_empty(&txq->axq_acq))
+ pending = true;
+ else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+ pending = !list_empty(&txq->txq_fifo_pending);
+
+ spin_unlock_bh(&txq->axq_lock);
+ return pending;
}
bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
@@ -178,7 +169,12 @@ static void ath_update_survey_nf(struct ath_softc *sc, int channel)
}
}
-static void ath_update_survey_stats(struct ath_softc *sc)
+/*
+ * Updates the survey statistics and returns the busy time since last
+ * update in %, if the measurement duration was long enough for the
+ * result to be useful, -1 otherwise.
+ */
+static int ath_update_survey_stats(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -186,9 +182,10 @@ static void ath_update_survey_stats(struct ath_softc *sc)
struct survey_info *survey = &sc->survey[pos];
struct ath_cycle_counters *cc = &common->cc_survey;
unsigned int div = common->clockrate * 1000;
+ int ret = 0;
if (!ah->curchan)
- return;
+ return -1;
if (ah->power_mode == ATH9K_PM_AWAKE)
ath_hw_cycle_counters_update(common);
@@ -203,9 +200,18 @@ static void ath_update_survey_stats(struct ath_softc *sc)
survey->channel_time_rx += cc->rx_frame / div;
survey->channel_time_tx += cc->tx_frame / div;
}
+
+ if (cc->cycles < div)
+ return -1;
+
+ if (cc->cycles > 0)
+ ret = cc->rx_busy * 100 / cc->cycles;
+
memset(cc, 0, sizeof(*cc));
ath_update_survey_nf(sc, pos);
+
+ return ret;
}
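
The percentage is just rx_busy scaled against the total cycle count, with -1 signalling that fewer cycles than one millisecond's worth (cycles < clockrate * 1000) were sampled and the figure would be noise. A standalone sketch of the arithmetic (names are illustrative):

#include <stdint.h>

/* Illustrative only: channel-busy percentage from cycle counters. */
static int example_busy_percent(uint32_t cycles, uint32_t rx_busy,
                                uint32_t min_cycles)
{
        if (!cycles || cycles < min_cycles)
                return -1;      /* sampling window too short to be useful */

        return (int)((uint64_t)rx_busy * 100 / cycles);
}
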
/*
@@ -216,7 +222,6 @@ static void ath_update_survey_stats(struct ath_softc *sc)
int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *hchan)
{
- struct ath_wiphy *aphy = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &common->hw->conf;
@@ -228,13 +233,18 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
if (sc->sc_flags & SC_OP_INVALID)
return -EIO;
+ sc->hw_busy_count = 0;
+
del_timer_sync(&common->ani.timer);
cancel_work_sync(&sc->paprd_work);
cancel_work_sync(&sc->hw_check_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
+ cancel_delayed_work_sync(&sc->hw_pll_work);
ath9k_ps_wakeup(sc);
+ spin_lock_bh(&sc->sc_pcu_lock);
+
/*
* This is only performed if the channel settings have
* actually changed.
@@ -244,12 +254,14 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
* hardware at the new frequency, and then re-enable
* the relevant bits of the h/w.
*/
- ath9k_hw_set_interrupts(ah, 0);
- ath_drain_all_txq(sc, false);
+ ath9k_hw_disable_interrupts(ah);
+ stopped = ath_drain_all_txq(sc, false);
- spin_lock_bh(&sc->rx.pcu_lock);
+ if (!ath_stoprecv(sc))
+ stopped = false;
- stopped = ath_stoprecv(sc);
+ if (!ath9k_hw_check_alive(ah))
+ stopped = false;
/* XXX: do not flush receive queue here. We don't want
* to flush data frames already in queue because of
@@ -259,48 +271,45 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
fastcc = false;
if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
- caldata = &aphy->caldata;
-
- ath_print(common, ATH_DBG_CONFIG,
- "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
- sc->sc_ah->curchan->channel,
- channel->center_freq, conf_is_ht40(conf),
- fastcc);
+ caldata = &sc->caldata;
- spin_lock_bh(&sc->sc_resetlock);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
+ sc->sc_ah->curchan->channel,
+ channel->center_freq, conf_is_ht40(conf),
+ fastcc);
r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
if (r) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to reset channel (%u MHz), "
- "reset status %d\n",
- channel->center_freq, r);
- spin_unlock_bh(&sc->sc_resetlock);
- spin_unlock_bh(&sc->rx.pcu_lock);
+ ath_err(common,
+ "Unable to reset channel (%u MHz), reset status %d\n",
+ channel->center_freq, r);
goto ps_restore;
}
- spin_unlock_bh(&sc->sc_resetlock);
if (ath_startrecv(sc) != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to restart recv logic\n");
+ ath_err(common, "Unable to restart recv logic\n");
r = -EIO;
- spin_unlock_bh(&sc->rx.pcu_lock);
goto ps_restore;
}
- spin_unlock_bh(&sc->rx.pcu_lock);
-
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
ath9k_hw_set_interrupts(ah, ah->imask);
if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
- ath_beacon_config(sc, NULL);
+ if (sc->sc_flags & SC_OP_BEACONS)
+ ath_beacon_config(sc, NULL);
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
ath_start_ani(common);
}
ps_restore:
+ ieee80211_wake_queues(hw);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
ath9k_ps_restore(sc);
return r;
}
@@ -328,6 +337,46 @@ static void ath_paprd_activate(struct ath_softc *sc)
ath9k_ps_restore(sc);
}
+static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_tx_control txctl;
+ int time_left;
+
+ memset(&txctl, 0, sizeof(txctl));
+ txctl.txq = sc->tx.txq_map[WME_AC_BE];
+
+ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->band = hw->conf.channel->band;
+ tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ tx_info->control.rates[0].idx = 0;
+ tx_info->control.rates[0].count = 1;
+ tx_info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+ tx_info->control.rates[1].idx = -1;
+
+ init_completion(&sc->paprd_complete);
+ txctl.paprd = BIT(chain);
+
+ if (ath_tx_start(hw, skb, &txctl) != 0) {
+ ath_dbg(common, ATH_DBG_XMIT, "PAPRD TX failed\n");
+ dev_kfree_skb_any(skb);
+ return false;
+ }
+
+ time_left = wait_for_completion_timeout(&sc->paprd_complete,
+ msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
+
+ if (!time_left)
+ ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE,
+ "Timeout waiting for paprd training on TX chain %d\n",
+ chain);
+
+ return !!time_left;
+}
+
void ath_paprd_calibrate(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
@@ -335,28 +384,23 @@ void ath_paprd_calibrate(struct work_struct *work)
struct ath_hw *ah = sc->sc_ah;
struct ieee80211_hdr *hdr;
struct sk_buff *skb = NULL;
- struct ieee80211_tx_info *tx_info;
- int band = hw->conf.channel->band;
- struct ieee80211_supported_band *sband = &sc->sbands[band];
- struct ath_tx_control txctl;
struct ath9k_hw_cal_data *caldata = ah->caldata;
struct ath_common *common = ath9k_hw_common(ah);
- int qnum, ftype;
+ int ftype;
int chain_ok = 0;
int chain;
int len = 1800;
- int time_left;
- int i;
if (!caldata)
return;
+ if (ar9003_paprd_init_table(ah) < 0)
+ return;
+
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return;
- tx_info = IEEE80211_SKB_CB(skb);
-
skb_put(skb, len);
memset(skb->data, 0, len);
hdr = (struct ieee80211_hdr *)skb->data;
@@ -367,40 +411,25 @@ void ath_paprd_calibrate(struct work_struct *work)
memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
- memset(&txctl, 0, sizeof(txctl));
- qnum = sc->tx.hwq_map[WME_AC_BE];
- txctl.txq = &sc->tx.txq[qnum];
-
ath9k_ps_wakeup(sc);
- ar9003_paprd_init_table(ah);
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
if (!(common->tx_chainmask & BIT(chain)))
continue;
chain_ok = 0;
- memset(tx_info, 0, sizeof(*tx_info));
- tx_info->band = band;
- for (i = 0; i < 4; i++) {
- tx_info->control.rates[i].idx = sband->n_bitrates - 1;
- tx_info->control.rates[i].count = 6;
- }
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Sending PAPRD frame for thermal measurement "
+ "on chain %d\n", chain);
+ if (!ath_paprd_send_frame(sc, skb, chain))
+ goto fail_paprd;
- init_completion(&sc->paprd_complete);
ar9003_paprd_setup_gain_table(ah, chain);
- txctl.paprd = BIT(chain);
- if (ath_tx_start(hw, skb, &txctl) != 0)
- break;
- time_left = wait_for_completion_timeout(&sc->paprd_complete,
- msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
- if (!time_left) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "Timeout waiting for paprd training on "
- "TX chain %d\n",
- chain);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Sending PAPRD training frame on chain %d\n", chain);
+ if (!ath_paprd_send_frame(sc, skb, chain))
goto fail_paprd;
- }
if (!ar9003_paprd_is_done(ah))
break;
@@ -457,7 +486,7 @@ void ath_ani_calibrate(unsigned long data)
/* Long calibration runs independently of short calibration. */
if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
longcal = true;
- ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
+ ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
common->ani.longcal_timer = timestamp;
}
@@ -465,8 +494,8 @@ void ath_ani_calibrate(unsigned long data)
if (!common->ani.caldone) {
if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
shortcal = true;
- ath_print(common, ATH_DBG_ANI,
- "shortcal @%lu\n", jiffies);
+ ath_dbg(common, ATH_DBG_ANI,
+ "shortcal @%lu\n", jiffies);
common->ani.shortcal_timer = timestamp;
common->ani.resetcal_timer = timestamp;
}
@@ -525,49 +554,31 @@ set_timer:
if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
if (!ah->caldata->paprd_done)
ieee80211_queue_work(sc->hw, &sc->paprd_work);
- else
+ else if (!ah->paprd_table_write_done)
ath_paprd_activate(sc);
}
}
-/*
- * Update tx/rx chainmask. For legacy association,
- * hard code chainmask to 1x1, for 11n association, use
- * the chainmask configuration, for bt coexistence, use
- * the chainmask configuration even in legacy mode.
- */
-void ath_update_chainmask(struct ath_softc *sc, int is_ht)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
- if ((sc->sc_flags & SC_OP_OFFCHANNEL) || is_ht ||
- (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE)) {
- common->tx_chainmask = ah->caps.tx_chainmask;
- common->rx_chainmask = ah->caps.rx_chainmask;
- } else {
- common->tx_chainmask = 1;
- common->rx_chainmask = 1;
- }
-
- ath_print(common, ATH_DBG_CONFIG,
- "tx chmask: %d, rx chmask: %d\n",
- common->tx_chainmask,
- common->rx_chainmask);
-}
-
static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
struct ath_node *an;
-
+ struct ath_hw *ah = sc->sc_ah;
an = (struct ath_node *)sta->drv_priv;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ spin_lock(&sc->nodes_lock);
+ list_add(&an->list, &sc->nodes);
+ spin_unlock(&sc->nodes_lock);
+ an->sta = sta;
+#endif
+ if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM)
+ sc->sc_flags |= SC_OP_ENABLE_APM;
+
if (sc->sc_flags & SC_OP_TXAGGR) {
ath_tx_node_init(sc, an);
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
sta->ht_cap.ampdu_factor);
an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
- an->last_rssi = ATH_RSSI_DUMMY_MARKER;
}
}
@@ -575,6 +586,13 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ spin_lock(&sc->nodes_lock);
+ list_del(&an->list);
+ spin_unlock(&sc->nodes_lock);
+ an->sta = NULL;
+#endif
+
if (sc->sc_flags & SC_OP_TXAGGR)
ath_tx_node_cleanup(sc, an);
}
@@ -582,17 +600,25 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
void ath_hw_check(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
- int i;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long flags;
+ int busy;
ath9k_ps_wakeup(sc);
+ if (ath9k_hw_check_alive(sc->sc_ah))
+ goto out;
- for (i = 0; i < 3; i++) {
- if (ath9k_hw_check_alive(sc->sc_ah))
- goto out;
+ spin_lock_irqsave(&common->cc_lock, flags);
+ busy = ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
- msleep(1);
- }
- ath_reset(sc, true);
+ ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
+ "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
+ if (busy >= 99) {
+ if (++sc->hw_busy_count >= 3)
+ ath_reset(sc, true);
+ } else if (busy >= 0)
+ sc->hw_busy_count = 0;
out:
ath9k_ps_restore(sc);
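
The heuristic: when the MAC has already failed the alive check, a sample showing the medium 99% or more RX-busy counts as a strike, three consecutive strikes trigger a chip reset, and any healthy sample clears the counter (an unusable sample, busy < 0, leaves it untouched). A compact sketch of that decision (names are illustrative):

#include <stdbool.h>

/* Illustrative only: the strike counter behind the baseband-hang check. */
static bool example_should_reset(int busy_percent, int *strikes)
{
        if (busy_percent >= 99) {
                if (++(*strikes) >= 3)
                        return true;    /* three suspicious samples in a row */
        } else if (busy_percent >= 0) {
                *strikes = 0;           /* healthy sample, start counting again */
        }
        /* busy_percent < 0: the sample was unusable, keep the current count. */
        return false;
}
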
@@ -607,15 +633,23 @@ void ath9k_tasklet(unsigned long data)
u32 status = sc->intrstatus;
u32 rxmask;
- ath9k_ps_wakeup(sc);
-
if (status & ATH9K_INT_FATAL) {
ath_reset(sc, true);
- ath9k_ps_restore(sc);
return;
}
- if (!ath9k_hw_check_alive(ah))
+ ath9k_ps_wakeup(sc);
+ spin_lock(&sc->sc_pcu_lock);
+
+ /*
+ * Only run the baseband hang check if beacons stop working in AP or
+ * IBSS mode, because it has a high false positive rate. For station
+ * mode it should not be necessary, since the upper layers will detect
+ * this through a beacon miss automatically and the following channel
+ * change will trigger a hardware reset anyway
+ */
+ if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
+ !ath9k_hw_check_alive(ah))
ieee80211_queue_work(sc->hw, &sc->hw_check_work);
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
@@ -625,15 +659,12 @@ void ath9k_tasklet(unsigned long data)
rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
if (status & rxmask) {
- spin_lock_bh(&sc->rx.pcu_lock);
-
/* Check for high priority Rx first */
if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
(status & ATH9K_INT_RXHP))
ath_rx_tasklet(sc, 0, true);
ath_rx_tasklet(sc, 0, false);
- spin_unlock_bh(&sc->rx.pcu_lock);
}
if (status & ATH9K_INT_TX) {
@@ -648,8 +679,8 @@ void ath9k_tasklet(unsigned long data)
* TSF sync does not look correct; remain awake to sync with
* the next Beacon.
*/
- ath_print(common, ATH_DBG_PS,
- "TSFOOR - Sync with next Beacon\n");
+ ath_dbg(common, ATH_DBG_PS,
+ "TSFOOR - Sync with next Beacon\n");
sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
}
@@ -658,7 +689,9 @@ void ath9k_tasklet(unsigned long data)
ath_gen_timer_isr(sc->sc_ah);
/* re-enable hardware interrupt */
- ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_hw_enable_interrupts(ah);
+
+ spin_unlock(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
}
@@ -757,7 +790,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* interrupt; otherwise it will continue to
* fire.
*/
- ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_disable_interrupts(ah);
/*
* Let the hal handle the event. We assume
* it will clear whatever condition caused
@@ -766,11 +799,13 @@ irqreturn_t ath_isr(int irq, void *dev)
spin_lock(&common->cc_lock);
ath9k_hw_proc_mib_event(ah);
spin_unlock(&common->cc_lock);
- ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_hw_enable_interrupts(ah);
}
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
if (status & ATH9K_INT_TIM_TIMER) {
+ if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle))
+ goto chip_reset;
/* Clear RxAbort bit so that we can
* receive frames */
ath9k_setpower(sc, ATH9K_PM_AWAKE);
@@ -783,8 +818,8 @@ chip_reset:
ath_debug_stat_interrupt(sc, status);
if (sched) {
- /* turn off every interrupt except SWBA */
- ath9k_hw_set_interrupts(ah, (ah->imask & ATH9K_INT_SWBA));
+ /* turn off every interrupt */
+ ath9k_hw_disable_interrupts(ah);
tasklet_schedule(&sc->intr_tq);
}
@@ -793,49 +828,8 @@ chip_reset:
#undef SCHED_INTR
}
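
The surrounding ISR changes follow the usual top-half/bottom-half split: the hard interrupt handler now masks all device interrupts (ath9k_hw_disable_interrupts) and defers to the tasklet, which re-enables them only after processing, under sc_pcu_lock. A generic sketch of that shape is below; the example_* helpers are placeholders rather than real ath9k calls, and irq registration and tasklet_init are omitted.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_dev {
	spinlock_t lock;
	struct tasklet_struct bh;
};

static void example_mask_irqs(struct example_dev *d) { }
static void example_unmask_irqs(struct example_dev *d) { }
static void example_process_events(struct example_dev *d) { }

static irqreturn_t example_isr(int irq, void *data)
{
	struct example_dev *d = data;

	example_mask_irqs(d);		/* keep the line quiet until the bottom half runs */
	tasklet_schedule(&d->bh);
	return IRQ_HANDLED;
}

static void example_tasklet(unsigned long data)
{
	struct example_dev *d = (struct example_dev *)data;

	spin_lock(&d->lock);
	example_process_events(d);
	example_unmask_irqs(d);		/* re-arm only once the deferred work is done */
	spin_unlock(&d->lock);
}
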
-static u32 ath_get_extchanmode(struct ath_softc *sc,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
-{
- u32 chanmode = 0;
-
- switch (chan->band) {
- case IEEE80211_BAND_2GHZ:
- switch(channel_type) {
- case NL80211_CHAN_NO_HT:
- case NL80211_CHAN_HT20:
- chanmode = CHANNEL_G_HT20;
- break;
- case NL80211_CHAN_HT40PLUS:
- chanmode = CHANNEL_G_HT40PLUS;
- break;
- case NL80211_CHAN_HT40MINUS:
- chanmode = CHANNEL_G_HT40MINUS;
- break;
- }
- break;
- case IEEE80211_BAND_5GHZ:
- switch(channel_type) {
- case NL80211_CHAN_NO_HT:
- case NL80211_CHAN_HT20:
- chanmode = CHANNEL_A_HT20;
- break;
- case NL80211_CHAN_HT40PLUS:
- chanmode = CHANNEL_A_HT40PLUS;
- break;
- case NL80211_CHAN_HT40MINUS:
- chanmode = CHANNEL_A_HT40MINUS;
- break;
- }
- break;
- default:
- break;
- }
-
- return chanmode;
-}
-
static void ath9k_bss_assoc_info(struct ath_softc *sc,
+ struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
{
@@ -843,9 +837,9 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
struct ath_common *common = ath9k_hw_common(ah);
if (bss_conf->assoc) {
- ath_print(common, ATH_DBG_CONFIG,
- "Bss Info ASSOC %d, bssid: %pM\n",
- bss_conf->aid, common->curbssid);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Bss Info ASSOC %d, bssid: %pM\n",
+ bss_conf->aid, common->curbssid);
/* New association, store aid */
common->curaid = bss_conf->aid;
@@ -862,12 +856,13 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
ath_beacon_config(sc, vif);
/* Reset rssi stats */
+ sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_flags |= SC_OP_ANI_RUN;
ath_start_ani(common);
} else {
- ath_print(common, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
+ ath_dbg(common, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
common->curaid = 0;
/* Stop ANI */
sc->sc_flags &= ~SC_OP_ANI_RUN;
@@ -883,31 +878,26 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
int r;
ath9k_ps_wakeup(sc);
+ spin_lock_bh(&sc->sc_pcu_lock);
+
ath9k_hw_configpcipowersave(ah, 0, 0);
if (!ah->curchan)
- ah->curchan = ath_get_curchannel(sc, sc->hw);
+ ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
- spin_lock_bh(&sc->rx.pcu_lock);
- spin_lock_bh(&sc->sc_resetlock);
r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
if (r) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to reset channel (%u MHz), "
- "reset status %d\n",
- channel->center_freq, r);
+ ath_err(common,
+ "Unable to reset channel (%u MHz), reset status %d\n",
+ channel->center_freq, r);
}
- spin_unlock_bh(&sc->sc_resetlock);
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
if (ath_startrecv(sc) != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to restart recv logic\n");
- spin_unlock_bh(&sc->rx.pcu_lock);
- return;
+ ath_err(common, "Unable to restart recv logic\n");
+ goto out;
}
- spin_unlock_bh(&sc->rx.pcu_lock);
-
if (sc->sc_flags & SC_OP_BEACONS)
ath_beacon_config(sc, NULL); /* restart beacons */
@@ -920,6 +910,11 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_hw_set_gpio(ah, ah->led_pin, 0);
ieee80211_wake_queues(hw);
+ ieee80211_queue_delayed_work(hw, &sc->hw_pll_work, HZ/2);
+
+out:
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
ath9k_ps_restore(sc);
}
@@ -930,6 +925,10 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
int r;
ath9k_ps_wakeup(sc);
+ cancel_delayed_work_sync(&sc->hw_pll_work);
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
ieee80211_stop_queues(hw);
/*
@@ -942,35 +941,29 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
}
/* Disable interrupts */
- ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_disable_interrupts(ah);
ath_drain_all_txq(sc, false); /* clear pending tx frames */
- spin_lock_bh(&sc->rx.pcu_lock);
-
ath_stoprecv(sc); /* turn off frame recv */
ath_flushrecv(sc); /* flush recv queue */
if (!ah->curchan)
- ah->curchan = ath_get_curchannel(sc, hw);
+ ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
- spin_lock_bh(&sc->sc_resetlock);
r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
if (r) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Unable to reset channel (%u MHz), "
- "reset status %d\n",
- channel->center_freq, r);
+ ath_err(ath9k_hw_common(sc->sc_ah),
+ "Unable to reset channel (%u MHz), reset status %d\n",
+ channel->center_freq, r);
}
- spin_unlock_bh(&sc->sc_resetlock);
ath9k_hw_phy_disable(ah);
- spin_unlock_bh(&sc->rx.pcu_lock);
-
ath9k_hw_configpcipowersave(ah, 1, 1);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
- ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
}
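
ath_radio_enable/ath_radio_disable above collapse the old rx.pcu_lock/sc_resetlock pair into a single sc_pcu_lock and bracket all hardware work between ath9k_ps_wakeup and ath9k_ps_restore, exiting through one unlock label on both success and failure. A minimal sketch of that bracket, assuming placeholder example_* helpers:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_pcu_lock);

static void example_ps_wakeup(void) { }
static void example_ps_restore(void) { }
static int example_hw_reset(void) { return 0; }
static int example_start_recv(void) { return 0; }
static void example_start_beacons(void) { }

static void example_radio_enable(void)
{
	example_ps_wakeup();
	spin_lock_bh(&example_pcu_lock);

	if (example_hw_reset())
		goto out;
	if (example_start_recv())
		goto out;

	example_start_beacons();
out:
	spin_unlock_bh(&example_pcu_lock);	/* single exit: always unlock, then restore */
	example_ps_restore();
}
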
int ath_reset(struct ath_softc *sc, bool retry_tx)
@@ -980,38 +973,37 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
struct ieee80211_hw *hw = sc->hw;
int r;
+ sc->hw_busy_count = 0;
+
/* Stop ANI */
del_timer_sync(&common->ani.timer);
+ ath9k_ps_wakeup(sc);
+ spin_lock_bh(&sc->sc_pcu_lock);
+
ieee80211_stop_queues(hw);
- ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_disable_interrupts(ah);
ath_drain_all_txq(sc, retry_tx);
- spin_lock_bh(&sc->rx.pcu_lock);
-
ath_stoprecv(sc);
ath_flushrecv(sc);
- spin_lock_bh(&sc->sc_resetlock);
r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
if (r)
- ath_print(common, ATH_DBG_FATAL,
- "Unable to reset hardware; reset status %d\n", r);
- spin_unlock_bh(&sc->sc_resetlock);
+ ath_err(common,
+ "Unable to reset hardware; reset status %d\n", r);
if (ath_startrecv(sc) != 0)
- ath_print(common, ATH_DBG_FATAL,
- "Unable to start recv logic\n");
-
- spin_unlock_bh(&sc->rx.pcu_lock);
+ ath_err(common, "Unable to start recv logic\n");
/*
* We may be doing a reset in response to a request
* that changes the channel so update any state that
* might change as a result.
*/
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
ath_beacon_config(sc, NULL); /* restart beacons */
@@ -1030,133 +1022,38 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
}
ieee80211_wake_queues(hw);
+ spin_unlock_bh(&sc->sc_pcu_lock);
/* Start ANI */
ath_start_ani(common);
+ ath9k_ps_restore(sc);
return r;
}
-static int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
-{
- int qnum;
-
- switch (queue) {
- case 0:
- qnum = sc->tx.hwq_map[WME_AC_VO];
- break;
- case 1:
- qnum = sc->tx.hwq_map[WME_AC_VI];
- break;
- case 2:
- qnum = sc->tx.hwq_map[WME_AC_BE];
- break;
- case 3:
- qnum = sc->tx.hwq_map[WME_AC_BK];
- break;
- default:
- qnum = sc->tx.hwq_map[WME_AC_BE];
- break;
- }
-
- return qnum;
-}
-
-int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
-{
- int qnum;
-
- switch (queue) {
- case WME_AC_VO:
- qnum = 0;
- break;
- case WME_AC_VI:
- qnum = 1;
- break;
- case WME_AC_BE:
- qnum = 2;
- break;
- case WME_AC_BK:
- qnum = 3;
- break;
- default:
- qnum = -1;
- break;
- }
-
- return qnum;
-}
-
-/* XXX: Remove me once we don't depend on ath9k_channel for all
- * this redundant data */
-void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
- struct ath9k_channel *ichan)
-{
- struct ieee80211_channel *chan = hw->conf.channel;
- struct ieee80211_conf *conf = &hw->conf;
-
- ichan->channel = chan->center_freq;
- ichan->chan = chan;
-
- if (chan->band == IEEE80211_BAND_2GHZ) {
- ichan->chanmode = CHANNEL_G;
- ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
- } else {
- ichan->chanmode = CHANNEL_A;
- ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
- }
-
- if (conf_is_ht(conf))
- ichan->chanmode = ath_get_extchanmode(sc, chan,
- conf->channel_type);
-}
-
/**********************/
/* mac80211 callbacks */
/**********************/
static int ath9k_start(struct ieee80211_hw *hw)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_channel *curchan = hw->conf.channel;
struct ath9k_channel *init_channel;
int r;
- ath_print(common, ATH_DBG_CONFIG,
- "Starting driver with initial channel: %d MHz\n",
- curchan->center_freq);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Starting driver with initial channel: %d MHz\n",
+ curchan->center_freq);
mutex_lock(&sc->mutex);
- if (ath9k_wiphy_started(sc)) {
- if (sc->chan_idx == curchan->hw_value) {
- /*
- * Already on the operational channel, the new wiphy
- * can be marked active.
- */
- aphy->state = ATH_WIPHY_ACTIVE;
- ieee80211_wake_queues(hw);
- } else {
- /*
- * Another wiphy is on another channel, start the new
- * wiphy in paused state.
- */
- aphy->state = ATH_WIPHY_PAUSED;
- ieee80211_stop_queues(hw);
- }
- mutex_unlock(&sc->mutex);
- return 0;
- }
- aphy->state = ATH_WIPHY_ACTIVE;
-
/* setup initial channel */
-
sc->chan_idx = curchan->hw_value;
- init_channel = ath_get_curchannel(sc, hw);
+ init_channel = ath9k_cmn_get_curchannel(hw, ah);
/* Reset SERDES registers */
ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -1168,25 +1065,22 @@ static int ath9k_start(struct ieee80211_hw *hw)
* be followed by initialization of the appropriate bits
* and then setup of the interrupt mask.
*/
- spin_lock_bh(&sc->rx.pcu_lock);
- spin_lock_bh(&sc->sc_resetlock);
+ spin_lock_bh(&sc->sc_pcu_lock);
r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (r) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to reset hardware; reset status %d "
- "(freq %u MHz)\n", r,
- curchan->center_freq);
- spin_unlock_bh(&sc->sc_resetlock);
- spin_unlock_bh(&sc->rx.pcu_lock);
+ ath_err(common,
+ "Unable to reset hardware; reset status %d (freq %u MHz)\n",
+ r, curchan->center_freq);
+ spin_unlock_bh(&sc->sc_pcu_lock);
goto mutex_unlock;
}
- spin_unlock_bh(&sc->sc_resetlock);
/*
* This is needed only to setup initial state
* but it's best done after a reset.
*/
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
/*
* Setup the hardware after reset:
@@ -1196,13 +1090,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
* here except setup the interrupt mask.
*/
if (ath_startrecv(sc) != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to start recv logic\n");
+ ath_err(common, "Unable to start recv logic\n");
r = -EIO;
- spin_unlock_bh(&sc->rx.pcu_lock);
+ spin_unlock_bh(&sc->sc_pcu_lock);
goto mutex_unlock;
}
- spin_unlock_bh(&sc->rx.pcu_lock);
+ spin_unlock_bh(&sc->sc_pcu_lock);
/* Setup our intr mask. */
ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
@@ -1244,7 +1137,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath9k_btcoex_timer_resume(sc);
}
- pm_qos_update_request(&ath9k_pm_qos_req, 55);
+ if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
+ common->bus_ops->extn_synch_en(common);
mutex_unlock:
mutex_unlock(&sc->mutex);
@@ -1252,24 +1146,12 @@ mutex_unlock:
return r;
}
-static int ath9k_tx(struct ieee80211_hw *hw,
- struct sk_buff *skb)
+static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_tx_control txctl;
- int padpos, padsize;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- int qnum;
-
- if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
- ath_print(common, ATH_DBG_XMIT,
- "ath9k: %s: TX in unexpected wiphy state "
- "%d\n", wiphy_name(hw->wiphy), aphy->state);
- goto exit;
- }
if (sc->ps_enabled) {
/*
@@ -1279,8 +1161,8 @@ static int ath9k_tx(struct ieee80211_hw *hw,
if (ieee80211_is_data(hdr->frame_control) &&
!ieee80211_is_nullfunc(hdr->frame_control) &&
!ieee80211_has_pm(hdr->frame_control)) {
- ath_print(common, ATH_DBG_PS, "Add PM=1 for a TX frame "
- "while in PS mode\n");
+ ath_dbg(common, ATH_DBG_PS,
+ "Add PM=1 for a TX frame while in PS mode\n");
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
}
}
@@ -1295,12 +1177,12 @@ static int ath9k_tx(struct ieee80211_hw *hw,
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
ath9k_hw_setrxabort(sc->sc_ah, 0);
if (ieee80211_is_pspoll(hdr->frame_control)) {
- ath_print(common, ATH_DBG_PS,
- "Sending PS-Poll to pick a buffered frame\n");
+ ath_dbg(common, ATH_DBG_PS,
+ "Sending PS-Poll to pick a buffered frame\n");
sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
} else {
- ath_print(common, ATH_DBG_PS,
- "Wake up to complete TX\n");
+ ath_dbg(common, ATH_DBG_PS,
+ "Wake up to complete TX\n");
sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
}
/*
@@ -1312,85 +1194,39 @@ static int ath9k_tx(struct ieee80211_hw *hw,
}
memset(&txctl, 0, sizeof(struct ath_tx_control));
+ txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
- /*
- * As a temporary workaround, assign seq# here; this will likely need
- * to be cleaned up to work better with Beacon transmission and virtual
- * BSSes.
- */
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
- if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
- sc->tx.seq_no += 0x10;
- hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
- }
-
- /* Add the padding after the header if this is not already done */
- padpos = ath9k_cmn_padpos(hdr->frame_control);
- padsize = padpos & 3;
- if (padsize && skb->len>padpos) {
- if (skb_headroom(skb) < padsize)
- return -1;
- skb_push(skb, padsize);
- memmove(skb->data, skb->data + padsize, padpos);
- }
-
- qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
- txctl.txq = &sc->tx.txq[qnum];
-
- ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
+ ath_dbg(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_print(common, ATH_DBG_XMIT, "TX failed\n");
+ ath_dbg(common, ATH_DBG_XMIT, "TX failed\n");
goto exit;
}
- return 0;
+ return;
exit:
dev_kfree_skb_any(skb);
- return 0;
}
static void ath9k_stop(struct ieee80211_hw *hw)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- int i;
mutex_lock(&sc->mutex);
- aphy->state = ATH_WIPHY_INACTIVE;
-
- if (led_blink)
- cancel_delayed_work_sync(&sc->ath_led_blink_work);
-
cancel_delayed_work_sync(&sc->tx_complete_work);
+ cancel_delayed_work_sync(&sc->hw_pll_work);
cancel_work_sync(&sc->paprd_work);
cancel_work_sync(&sc->hw_check_work);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i])
- break;
- }
-
- if (i == sc->num_sec_wiphy) {
- cancel_delayed_work_sync(&sc->wiphy_work);
- cancel_work_sync(&sc->chan_work);
- }
-
if (sc->sc_flags & SC_OP_INVALID) {
- ath_print(common, ATH_DBG_ANY, "Device not present\n");
+ ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
}
- if (ath9k_wiphy_started(sc)) {
- mutex_unlock(&sc->mutex);
- return; /* another wiphy still in use */
- }
-
/* Ensure HW is awake when we try to shut it down. */
ath9k_ps_wakeup(sc);
@@ -1400,114 +1236,311 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_btcoex_timer_pause(sc);
}
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+	/* prevent tasklets from re-enabling interrupts once we disable them */
+ ah->imask &= ~ATH9K_INT_GLOBAL;
+
/* make sure h/w will not generate any interrupt
* before setting the invalid flag. */
- ath9k_hw_set_interrupts(ah, 0);
+ ath9k_hw_disable_interrupts(ah);
- spin_lock_bh(&sc->rx.pcu_lock);
if (!(sc->sc_flags & SC_OP_INVALID)) {
ath_drain_all_txq(sc, false);
ath_stoprecv(sc);
ath9k_hw_phy_disable(ah);
} else
sc->rx.rxlink = NULL;
- spin_unlock_bh(&sc->rx.pcu_lock);
+
+ if (sc->rx.frag) {
+ dev_kfree_skb_any(sc->rx.frag);
+ sc->rx.frag = NULL;
+ }
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(ah);
ath9k_hw_configpcipowersave(ah, 1, 1);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+	/* we can now sync irq and kill any running tasklets, since we already
+	 * disabled interrupts and are not holding a spin lock */
+ synchronize_irq(sc->irq);
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+
ath9k_ps_restore(sc);
- /* Finally, put the chip in FULL SLEEP mode */
- ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
+ sc->ps_idle = true;
+ ath_radio_disable(sc, hw);
sc->sc_flags |= SC_OP_INVALID;
- pm_qos_update_request(&ath9k_pm_qos_req, PM_QOS_DEFAULT_VALUE);
-
mutex_unlock(&sc->mutex);
- ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
+ ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
}
+bool ath9k_uses_beacons(int type)
+{
+ switch (type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void ath9k_reclaim_beacon(struct ath_softc *sc,
+ struct ieee80211_vif *vif)
+{
+ struct ath_vif *avp = (void *)vif->drv_priv;
+
+ ath9k_set_beaconing_status(sc, false);
+ ath_beacon_return(sc, avp);
+ ath9k_set_beaconing_status(sc, true);
+ sc->sc_flags &= ~SC_OP_BEACONS;
+}
+
+static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath9k_vif_iter_data *iter_data = data;
+ int i;
+
+ if (iter_data->hw_macaddr)
+ for (i = 0; i < ETH_ALEN; i++)
+ iter_data->mask[i] &=
+ ~(iter_data->hw_macaddr[i] ^ mac[i]);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ iter_data->naps++;
+ break;
+ case NL80211_IFTYPE_STATION:
+ iter_data->nstations++;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ iter_data->nadhocs++;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ iter_data->nmeshes++;
+ break;
+ case NL80211_IFTYPE_WDS:
+ iter_data->nwds++;
+ break;
+ default:
+ iter_data->nothers++;
+ break;
+ }
+}
+
+/* Called with sc->mutex held. */
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ath9k_vif_iter_data *iter_data)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /*
+ * Use the hardware MAC address as reference, the hardware uses it
+ * together with the BSSID mask when matching addresses.
+ */
+ memset(iter_data, 0, sizeof(*iter_data));
+ iter_data->hw_macaddr = common->macaddr;
+ memset(&iter_data->mask, 0xff, ETH_ALEN);
+
+ if (vif)
+ ath9k_vif_iter(iter_data, vif->addr, vif);
+
+ /* Get list of all active MAC addresses */
+ ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
+ iter_data);
+}
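
ath9k_vif_iter clears every BSSID-mask bit in which an interface address differs from the hardware MAC, so only bit positions where all active addresses agree stay set. A small self-contained illustration of that computation; the MAC addresses below are made up.

#include <stdio.h>

#define ETH_ALEN 6

static void mask_update(unsigned char *mask, const unsigned char *hw,
			const unsigned char *addr)
{
	for (int i = 0; i < ETH_ALEN; i++)
		mask[i] &= ~(hw[i] ^ addr[i]);	/* clear bits where the addresses differ */
}

int main(void)
{
	const unsigned char hw[ETH_ALEN]   = { 0x00, 0x03, 0x7f, 0x10, 0x20, 0x30 };
	const unsigned char vif1[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x10, 0x20, 0x31 };
	const unsigned char vif2[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x10, 0x20, 0x32 };
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	mask_update(mask, hw, vif1);
	mask_update(mask, hw, vif2);

	/* Prints ff:ff:ff:ff:ff:fc - the low bits differ across the vifs. */
	for (int i = 0; i < ETH_ALEN; i++)
		printf("%02x%s", mask[i], i == ETH_ALEN - 1 ? "\n" : ":");
	return 0;
}
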
+
+/* Called with sc->mutex held. */
+static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_vif_iter_data iter_data;
+
+ ath9k_calculate_iter_data(hw, vif, &iter_data);
+
+ ath9k_ps_wakeup(sc);
+ /* Set BSSID mask. */
+ memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+ ath_hw_setbssidmask(common);
+
+ /* Set op-mode & TSF */
+ if (iter_data.naps > 0) {
+ ath9k_hw_set_tsfadjust(ah, 1);
+ sc->sc_flags |= SC_OP_TSF_RESET;
+ ah->opmode = NL80211_IFTYPE_AP;
+ } else {
+ ath9k_hw_set_tsfadjust(ah, 0);
+ sc->sc_flags &= ~SC_OP_TSF_RESET;
+
+ if (iter_data.nwds + iter_data.nmeshes)
+ ah->opmode = NL80211_IFTYPE_AP;
+ else if (iter_data.nadhocs)
+ ah->opmode = NL80211_IFTYPE_ADHOC;
+ else
+ ah->opmode = NL80211_IFTYPE_STATION;
+ }
+
+ /*
+ * Enable MIB interrupts when there are hardware phy counters.
+ */
+ if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) {
+ if (ah->config.enable_ani)
+ ah->imask |= ATH9K_INT_MIB;
+ ah->imask |= ATH9K_INT_TSFOOR;
+ } else {
+ ah->imask &= ~ATH9K_INT_MIB;
+ ah->imask &= ~ATH9K_INT_TSFOOR;
+ }
+
+ ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_ps_restore(sc);
+
+ /* Set up ANI */
+ if ((iter_data.naps + iter_data.nadhocs) > 0) {
+ sc->sc_flags |= SC_OP_ANI_RUN;
+ ath_start_ani(common);
+ } else {
+ sc->sc_flags &= ~SC_OP_ANI_RUN;
+ del_timer_sync(&common->ani.timer);
+ }
+}
+
+/* Called with sc->mutex held, vif counts set up properly. */
+static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+
+ ath9k_calculate_summary_state(hw, vif);
+
+ if (ath9k_uses_beacons(vif->type)) {
+ int error;
+ /* This may fail because upper levels do not have beacons
+ * properly configured yet. That's OK, we assume it
+ * will be properly configured and then we will be notified
+ * in the info_changed method and set up beacons properly
+ * there.
+ */
+ ath9k_set_beaconing_status(sc, false);
+ error = ath_beacon_alloc(sc, vif);
+ if (!error)
+ ath_beacon_config(sc, vif);
+ ath9k_set_beaconing_status(sc, true);
+ }
+}
+
+
static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
- enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
int ret = 0;
mutex_lock(&sc->mutex);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- ic_opmode = NL80211_IFTYPE_STATION;
- break;
case NL80211_IFTYPE_WDS:
- ic_opmode = NL80211_IFTYPE_WDS;
- break;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
+ break;
+ default:
+ ath_err(common, "Interface type %d not yet supported\n",
+ vif->type);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (ath9k_uses_beacons(vif->type)) {
if (sc->nbcnvifs >= ATH_BCBUF) {
+ ath_err(common, "Not enough beacon buffers when adding"
+ " new interface of type: %i\n",
+ vif->type);
ret = -ENOBUFS;
goto out;
}
- ic_opmode = vif->type;
- break;
- default:
- ath_print(common, ATH_DBG_FATAL,
- "Interface type %d not yet supported\n", vif->type);
- ret = -EOPNOTSUPP;
+ }
+
+ if ((vif->type == NL80211_IFTYPE_ADHOC) &&
+ sc->nvifs > 0) {
+ ath_err(common, "Cannot create ADHOC interface when other"
+ " interfaces already exist.\n");
+ ret = -EINVAL;
goto out;
}
- ath_print(common, ATH_DBG_CONFIG,
- "Attach a VIF of type: %d\n", ic_opmode);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Attach a VIF of type: %d\n", vif->type);
/* Set the VIF opmode */
- avp->av_opmode = ic_opmode;
+ avp->av_opmode = vif->type;
avp->av_bslot = -1;
sc->nvifs++;
- ath9k_set_bssid_mask(hw, vif);
+ ath9k_do_vif_add_setup(hw, vif);
+out:
+ mutex_unlock(&sc->mutex);
+ return ret;
+}
- if (sc->nvifs > 1)
- goto out; /* skip global settings for secondary vif */
+static int ath9k_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype new_type,
+ bool p2p)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int ret = 0;
- if (ic_opmode == NL80211_IFTYPE_AP) {
- ath9k_hw_set_tsfadjust(ah, 1);
- sc->sc_flags |= SC_OP_TSF_RESET;
- }
+ ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
+ mutex_lock(&sc->mutex);
- /* Set the device opmode */
- ah->opmode = ic_opmode;
+ /* See if new interface type is valid. */
+ if ((new_type == NL80211_IFTYPE_ADHOC) &&
+ (sc->nvifs > 1)) {
+ ath_err(common, "When using ADHOC, it must be the only"
+ " interface.\n");
+ ret = -EINVAL;
+ goto out;
+ }
- /*
- * Enable MIB interrupts when there are hardware phy counters.
- * Note we only do this (at the moment) for station mode.
- */
- if ((vif->type == NL80211_IFTYPE_STATION) ||
- (vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) {
- if (ah->config.enable_ani)
- ah->imask |= ATH9K_INT_MIB;
- ah->imask |= ATH9K_INT_TSFOOR;
+ if (ath9k_uses_beacons(new_type) &&
+ !ath9k_uses_beacons(vif->type)) {
+ if (sc->nbcnvifs >= ATH_BCBUF) {
+ ath_err(common, "No beacon slot available\n");
+ ret = -ENOBUFS;
+ goto out;
+ }
}
- ath9k_hw_set_interrupts(ah, ah->imask);
+ /* Clean up old vif stuff */
+ if (ath9k_uses_beacons(vif->type))
+ ath9k_reclaim_beacon(sc, vif);
- if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC) {
- sc->sc_flags |= SC_OP_ANI_RUN;
- ath_start_ani(common);
- }
+ /* Add new settings */
+ vif->type = new_type;
+ vif->p2p = p2p;
+ ath9k_do_vif_add_setup(hw, vif);
out:
mutex_unlock(&sc->mutex);
return ret;
@@ -1516,42 +1549,20 @@ out:
static void ath9k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_vif *avp = (void *)vif->drv_priv;
- int i;
- ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
+ ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
mutex_lock(&sc->mutex);
- /* Stop ANI */
- sc->sc_flags &= ~SC_OP_ANI_RUN;
- del_timer_sync(&common->ani.timer);
+ sc->nvifs--;
/* Reclaim beacon resources */
- if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
- (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
- (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
- ath9k_ps_wakeup(sc);
- ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
- ath9k_ps_restore(sc);
- }
+ if (ath9k_uses_beacons(vif->type))
+ ath9k_reclaim_beacon(sc, vif);
- ath_beacon_return(sc, avp);
- sc->sc_flags &= ~SC_OP_BEACONS;
-
- for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
- if (sc->beacon.bslot[i] == vif) {
- printk(KERN_DEBUG "%s: vif had allocated beacon "
- "slot\n", __func__);
- sc->beacon.bslot[i] = NULL;
- sc->beacon.bslot_aphy[i] = NULL;
- }
- }
-
- sc->nvifs--;
+ ath9k_calculate_summary_state(hw, NULL);
mutex_unlock(&sc->mutex);
}
@@ -1592,12 +1603,11 @@ static void ath9k_disable_ps(struct ath_softc *sc)
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &hw->conf;
- bool disable_radio;
+ bool disable_radio = false;
mutex_lock(&sc->mutex);
@@ -1608,29 +1618,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
* the end.
*/
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- bool enable_radio;
- bool all_wiphys_idle;
- bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
-
- spin_lock_bh(&sc->wiphy_lock);
- all_wiphys_idle = ath9k_all_wiphys_idle(sc);
- ath9k_set_wiphy_idle(aphy, idle);
-
- enable_radio = (!idle && all_wiphys_idle);
-
- /*
- * After we unlock here its possible another wiphy
- * can be re-renabled so to account for that we will
- * only disable the radio toward the end of this routine
- * if by then all wiphys are still idle.
- */
- spin_unlock_bh(&sc->wiphy_lock);
-
- if (enable_radio) {
- sc->ps_idle = false;
+ sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+ if (!sc->ps_idle) {
ath_radio_enable(sc, hw);
- ath_print(common, ATH_DBG_CONFIG,
- "not-idle: enabling radio\n");
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "not-idle: enabling radio\n");
+ } else {
+ disable_radio = true;
}
}
@@ -1652,12 +1646,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (conf->flags & IEEE80211_CONF_MONITOR) {
- ath_print(common, ATH_DBG_CONFIG,
- "Monitor mode is enabled\n");
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Monitor mode is enabled\n");
sc->sc_ah->is_monitoring = true;
} else {
- ath_print(common, ATH_DBG_CONFIG,
- "Monitor mode is disabled\n");
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Monitor mode is disabled\n");
sc->sc_ah->is_monitoring = false;
}
}
@@ -1671,31 +1665,17 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (ah->curchan)
old_pos = ah->curchan - &ah->channels[0];
- aphy->chan_idx = pos;
- aphy->chan_is_ht = conf_is_ht(conf);
if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
sc->sc_flags |= SC_OP_OFFCHANNEL;
else
sc->sc_flags &= ~SC_OP_OFFCHANNEL;
- if (aphy->state == ATH_WIPHY_SCAN ||
- aphy->state == ATH_WIPHY_ACTIVE)
- ath9k_wiphy_pause_all_forced(sc, aphy);
- else {
- /*
- * Do not change operational channel based on a paused
- * wiphy changes.
- */
- goto skip_chan_change;
- }
-
- ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
- curchan->center_freq);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Set channel: %d MHz type: %d\n",
+ curchan->center_freq, conf->channel_type);
- /* XXX: remove me eventualy */
- ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
-
- ath_update_chainmask(sc, conf_is_ht(conf));
+ ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
+ curchan, conf->channel_type);
/* update survey stats for the old channel before switching */
spin_lock_irqsave(&common->cc_lock, flags);
@@ -1723,8 +1703,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to set channel\n");
+ ath_err(common, "Unable to set channel\n");
mutex_unlock(&sc->mutex);
return -EINVAL;
}
@@ -1738,19 +1717,18 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath_update_survey_nf(sc, old_pos);
}
-skip_chan_change:
if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Set power: %d\n", conf->power_level);
sc->config.txpowlimit = 2 * conf->power_level;
- ath_update_txpow(sc);
+ ath9k_ps_wakeup(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
+ ath9k_ps_restore(sc);
}
- spin_lock_bh(&sc->wiphy_lock);
- disable_radio = ath9k_all_wiphys_idle(sc);
- spin_unlock_bh(&sc->wiphy_lock);
-
if (disable_radio) {
- ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
- sc->ps_idle = true;
+ ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
ath_radio_disable(sc, hw);
}
@@ -1775,8 +1753,7 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags,
u64 multicast)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
u32 rfilt;
changed_flags &= SUPPORTED_FILTERS;
@@ -1788,16 +1765,15 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
ath9k_ps_restore(sc);
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
- "Set HW RX filter: 0x%x\n", rfilt);
+ ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
+ "Set HW RX filter: 0x%x\n", rfilt);
}
static int ath9k_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
ath_node_attach(sc, sta);
@@ -1808,8 +1784,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
ath_node_detach(sc, sta);
@@ -1819,15 +1794,17 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_txq *txq;
struct ath9k_tx_queue_info qi;
- int ret = 0, qnum;
+ int ret = 0;
if (queue >= WME_NUM_AC)
return 0;
+ txq = sc->tx.txq_map[queue];
+
mutex_lock(&sc->mutex);
memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -1836,20 +1813,18 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
qi.tqi_cwmin = params->cw_min;
qi.tqi_cwmax = params->cw_max;
qi.tqi_burstTime = params->txop;
- qnum = ath_get_hal_qnum(queue, sc);
- ath_print(common, ATH_DBG_CONFIG,
- "Configure tx [queue/halq] [%d/%d], "
- "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
- queue, qnum, params->aifs, params->cw_min,
- params->cw_max, params->txop);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+ queue, txq->axq_qnum, params->aifs, params->cw_min,
+ params->cw_max, params->txop);
- ret = ath_txq_update(sc, qnum, &qi);
+ ret = ath_txq_update(sc, txq->axq_qnum, &qi);
if (ret)
- ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
+ ath_err(common, "TXQ Update failed\n");
if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
- if ((qnum == sc->tx.hwq_map[WME_AC_BE]) && !ret)
+ if (queue == WME_AC_BE && !ret)
ath_beaconq_config(sc);
mutex_unlock(&sc->mutex);
@@ -1863,17 +1838,16 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int ret = 0;
- if (modparam_nohwcrypt)
+ if (ath9k_modparam_nohwcrypt)
return -ENOSPC;
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n");
+ ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
switch (cmd) {
case SET_KEY:
@@ -1908,8 +1882,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
@@ -1928,13 +1902,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
/* Set aggregation protection mode parameters */
sc->config.ath_aggr_prot = 0;
- /* Only legacy IBSS for now */
- if (vif->type == NL80211_IFTYPE_ADHOC)
- ath_update_chainmask(sc, 0);
-
- ath_print(common, ATH_DBG_CONFIG,
- "BSSID: %pM aid: 0x%x\n",
- common->curbssid, common->curaid);
+ ath_dbg(common, ATH_DBG_CONFIG, "BSSID: %pM aid: 0x%x\n",
+ common->curbssid, common->curaid);
/* need to reconfigure the beacon */
sc->sc_flags &= ~SC_OP_BEACONS ;
@@ -1943,10 +1912,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
/* Enable transmission of beacons (AP, IBSS, MESH) */
if ((changed & BSS_CHANGED_BEACON) ||
((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
- ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
- error = ath_beacon_alloc(aphy, vif);
+ ath9k_set_beaconing_status(sc, false);
+ error = ath_beacon_alloc(sc, vif);
if (!error)
ath_beacon_config(sc, vif);
+ ath9k_set_beaconing_status(sc, true);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1969,29 +1939,34 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
/* Disable transmission of beacons */
- if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
- ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
+ !bss_conf->enable_beacon) {
+ ath9k_set_beaconing_status(sc, false);
+ avp->is_bslot_active = false;
+ ath9k_set_beaconing_status(sc, true);
+ }
if (changed & BSS_CHANGED_BEACON_INT) {
- sc->beacon_interval = bss_conf->beacon_int;
+ cur_conf->beacon_interval = bss_conf->beacon_int;
/*
* In case of AP mode, the HW TSF has to be reset
* when the beacon interval changes.
*/
if (vif->type == NL80211_IFTYPE_AP) {
sc->sc_flags |= SC_OP_TSF_RESET;
- ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
- error = ath_beacon_alloc(aphy, vif);
+ ath9k_set_beaconing_status(sc, false);
+ error = ath_beacon_alloc(sc, vif);
if (!error)
ath_beacon_config(sc, vif);
+ ath9k_set_beaconing_status(sc, true);
} else {
ath_beacon_config(sc, vif);
}
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
- bss_conf->use_short_preamble);
+ ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
+ bss_conf->use_short_preamble);
if (bss_conf->use_short_preamble)
sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
else
@@ -1999,8 +1974,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
- bss_conf->use_cts_prot);
+ ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
+ bss_conf->use_cts_prot);
if (bss_conf->use_cts_prot &&
hw->conf.channel->band != IEEE80211_BAND_5GHZ)
sc->sc_flags |= SC_OP_PROTECT_ENABLE;
@@ -2009,9 +1984,9 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ASSOC) {
- ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+ ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
bss_conf->assoc);
- ath9k_bss_assoc_info(sc, vif, bss_conf);
+ ath9k_bss_assoc_info(sc, hw, vif, bss_conf);
}
mutex_unlock(&sc->mutex);
@@ -2019,12 +1994,13 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
{
+ struct ath_softc *sc = hw->priv;
u64 tsf;
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
mutex_lock(&sc->mutex);
+ ath9k_ps_wakeup(sc);
tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
return tsf;
@@ -2032,18 +2008,18 @@ static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
mutex_lock(&sc->mutex);
+ ath9k_ps_wakeup(sc);
ath9k_hw_settsf64(sc->sc_ah, tsf);
+ ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
}
static void ath9k_reset_tsf(struct ieee80211_hw *hw)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
mutex_lock(&sc->mutex);
@@ -2058,10 +2034,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta,
- u16 tid, u16 *ssn)
+ u16 tid, u16 *ssn, u8 buf_size)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
int ret = 0;
local_bh_disable();
@@ -2074,6 +2049,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
break;
case IEEE80211_AMPDU_TX_START:
+ if (!(sc->sc_flags & SC_OP_TXAGGR))
+ return -EOPNOTSUPP;
+
ath9k_ps_wakeup(sc);
ret = ath_tx_aggr_start(sc, sta, tid, ssn);
if (!ret)
@@ -2092,8 +2070,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
ath9k_ps_restore(sc);
break;
default:
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Unknown AMPDU action\n");
+ ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
}
local_bh_enable();
@@ -2104,8 +2081,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
@@ -2139,53 +2115,55 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
-static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
+static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
mutex_lock(&sc->mutex);
- if (ath9k_wiphy_scanning(sc)) {
- /*
- * There is a race here in mac80211 but fixing it requires
- * we revisit how we handle the scan complete callback.
- * After mac80211 fixes we will not have configured hardware
- * to the home channel nor would we have configured the RX
- * filter yet.
- */
- mutex_unlock(&sc->mutex);
- return;
- }
-
- aphy->state = ATH_WIPHY_SCAN;
- ath9k_wiphy_pause_all_forced(sc, aphy);
+ ah->coverage_class = coverage_class;
+ ath9k_hw_init_global_settings(ah);
mutex_unlock(&sc->mutex);
}
-/*
- * XXX: this requires a revisit after the driver
- * scan_complete gets moved to another place/removed in mac80211.
- */
-static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
+static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
+ int timeout = 200; /* ms */
+ int i, j;
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
- aphy->state = ATH_WIPHY_ACTIVE;
- mutex_unlock(&sc->mutex);
-}
-static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_hw *ah = sc->sc_ah;
+ cancel_delayed_work_sync(&sc->tx_complete_work);
- mutex_lock(&sc->mutex);
- ah->coverage_class = coverage_class;
- ath9k_hw_init_global_settings(ah);
+ if (drop)
+ timeout = 1;
+
+ for (j = 0; j < timeout; j++) {
+ int npend = 0;
+
+ if (j)
+ usleep_range(1000, 2000);
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ npend += ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
+ }
+
+ if (!npend)
+ goto out;
+ }
+
+ if (!ath_drain_all_txq(sc, false))
+ ath_reset(sc, false);
+
+out:
+ ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
}
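
The new ath9k_flush callback polls the configured TX queues for up to roughly 200 ms (a single pass when drop is requested), sleeping 1-2 ms between passes, and only falls back to draining and resetting if frames remain queued. A compact userspace sketch of that poll-until-empty-or-timeout loop, with stand-in queue_pending()/hard_reset() helpers:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int frames_left = 3;

static int queue_pending(void) { return frames_left > 0 ? frames_left-- : 0; }
static void hard_reset(void) { puts("timed out, resetting"); }

static void flush(bool drop)
{
	int timeout = drop ? 1 : 200;		/* passes, roughly 1 ms apart */

	for (int j = 0; j < timeout; j++) {
		if (j)
			usleep(1000);		/* the driver uses usleep_range(1000, 2000) */
		if (!queue_pending())
			return;			/* queues drained, nothing more to do */
	}
	hard_reset();				/* still busy after the timeout */
}

int main(void)
{
	flush(false);
	return 0;
}
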
struct ieee80211_ops ath9k_ops = {
@@ -2193,6 +2171,7 @@ struct ieee80211_ops ath9k_ops = {
.start = ath9k_start,
.stop = ath9k_stop,
.add_interface = ath9k_add_interface,
+ .change_interface = ath9k_change_interface,
.remove_interface = ath9k_remove_interface,
.config = ath9k_config,
.configure_filter = ath9k_configure_filter,
@@ -2206,8 +2185,7 @@ struct ieee80211_ops ath9k_ops = {
.reset_tsf = ath9k_reset_tsf,
.ampdu_action = ath9k_ampdu_action,
.get_survey = ath9k_get_survey,
- .sw_scan_start = ath9k_sw_scan_start,
- .sw_scan_complete = ath9k_sw_scan_complete,
.rfkill_poll = ath9k_rfkill_poll_state,
.set_coverage_class = ath9k_set_coverage_class,
+ .flush = ath9k_flush,
};
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index b5b651413e77..e83128c50f7b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -16,6 +16,7 @@
#include <linux/nl80211.h>
#include <linux/pci.h>
+#include <linux/ath9k_platform.h>
#include "ath9k.h"
static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
@@ -29,6 +30,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
+ { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
{ 0 }
};
@@ -53,21 +55,35 @@ static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
- struct ath_hw *ah = (struct ath_hw *) common->ah;
-
- common->ops->read(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
-
- if (!ath9k_hw_wait(ah,
- AR_EEPROM_STATUS_DATA,
- AR_EEPROM_STATUS_DATA_BUSY |
- AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
- AH_WAIT_TIMEOUT)) {
- return false;
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath9k_platform_data *pdata = sc->dev->platform_data;
+
+ if (pdata) {
+ if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
+ ath_err(common,
+ "%s: eeprom read failed, offset %08x is out of range\n",
+				__func__, off);
+			return false;
+		}
+
+ *data = pdata->eeprom_data[off];
+ } else {
+ struct ath_hw *ah = (struct ath_hw *) common->ah;
+
+ common->ops->read(ah, AR5416_EEPROM_OFFSET +
+ (off << AR5416_EEPROM_S));
+
+ if (!ath9k_hw_wait(ah,
+ AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA_BUSY |
+ AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
+ AH_WAIT_TIMEOUT)) {
+ return false;
+ }
+
+ *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
+ AR_EEPROM_STATUS_DATA_VAL);
}
- *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
- AR_EEPROM_STATUS_DATA_VAL);
-
return true;
}
@@ -80,7 +96,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
struct pci_dev *pdev = to_pci_dev(sc->dev);
u8 aspm;
- if (!pdev->is_pcie)
+ if (!pci_is_pcie(pdev))
return;
pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm);
@@ -88,17 +104,28 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
}
+static void ath_pci_extn_synch_enable(struct ath_common *common)
+{
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct pci_dev *pdev = to_pci_dev(sc->dev);
+ u8 lnkctl;
+
+ pci_read_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, &lnkctl);
+ lnkctl |= PCI_EXP_LNKCTL_ES;
+ pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl);
+}
+
static const struct ath_bus_ops ath_pci_bus_ops = {
.ath_bus_type = ATH_PCI,
.read_cachesize = ath_pci_read_cachesize,
.eeprom_read = ath_pci_eeprom_read,
.bt_coex_prep = ath_pci_bt_coex_prep,
+ .extn_synch_en = ath_pci_extn_synch_enable,
};
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
void __iomem *mem;
- struct ath_wiphy *aphy;
struct ath_softc *sc;
struct ieee80211_hw *hw;
u8 csz;
@@ -170,8 +197,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_iomap;
}
- hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
- sizeof(struct ath_softc), &ath9k_ops);
+ hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
if (!hw) {
dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
ret = -ENOMEM;
@@ -181,11 +207,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
SET_IEEE80211_DEV(hw, &pdev->dev);
pci_set_drvdata(pdev, hw);
- aphy = hw->priv;
- sc = (struct ath_softc *) (aphy + 1);
- aphy->sc = sc;
- aphy->hw = hw;
- sc->pri_wiphy = aphy;
+ sc = hw->priv;
sc->hw = hw;
sc->dev = &pdev->dev;
sc->mem = mem;
@@ -232,10 +254,11 @@ err_dma:
static void ath_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
void __iomem *mem = sc->mem;
+ if (!is_ath9k_unloaded)
+ sc->sc_ah->ah_flags |= AH_UNPLUGGED;
ath9k_deinit_device(sc);
free_irq(sc->irq, sc);
ieee80211_free_hw(sc->hw);
@@ -247,34 +270,23 @@ static void ath_pci_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM
-static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int ath_pci_suspend(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
return 0;
}
-static int ath_pci_resume(struct pci_dev *pdev)
+static int ath_pci_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
u32 val;
- int err;
-
- pci_restore_state(pdev);
-
- err = pci_enable_device(pdev);
- if (err)
- return err;
/*
* Suspend/Resume resets the PCI configuration space, so we have to
@@ -290,10 +302,37 @@ static int ath_pci_resume(struct pci_dev *pdev)
AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+ /*
+ * Reset key cache to sane defaults (all entries cleared) instead of
+ * semi-random values after suspend/resume.
+ */
+ ath9k_ps_wakeup(sc);
+ ath9k_init_crypto(sc);
+ ath9k_ps_restore(sc);
+
+ sc->ps_idle = true;
+ ath_radio_disable(sc, hw);
+
return 0;
}
-#endif /* CONFIG_PM */
+static const struct dev_pm_ops ath9k_pm_ops = {
+ .suspend = ath_pci_suspend,
+ .resume = ath_pci_resume,
+ .freeze = ath_pci_suspend,
+ .thaw = ath_pci_resume,
+ .poweroff = ath_pci_suspend,
+ .restore = ath_pci_resume,
+};
+
+#define ATH9K_PM_OPS (&ath9k_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define ATH9K_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
MODULE_DEVICE_TABLE(pci, ath_pci_id_table);
@@ -302,10 +341,7 @@ static struct pci_driver ath_pci_driver = {
.id_table = ath_pci_id_table,
.probe = ath_pci_probe,
.remove = ath_pci_remove,
-#ifdef CONFIG_PM
- .suspend = ath_pci_suspend,
- .resume = ath_pci_resume,
-#endif /* CONFIG_PM */
+ .driver.pm = ATH9K_PM_OPS,
};
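
The PM conversion above replaces the legacy pci_driver .suspend/.resume callbacks with a dev_pm_ops table hung off .driver.pm, letting the PCI core handle the config-space save/restore and D-state transitions the old hooks did by hand. For the common case where suspend/resume, freeze/thaw and poweroff/restore all map to the same pair of functions, the kernel also provides SIMPLE_DEV_PM_OPS; a sketch of that equivalent shape with illustrative example_* names:

#include <linux/pm.h>
#include <linux/pci.h>

static int example_suspend(struct device *dev)
{
	/* device-specific quiesce work goes here */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* device-specific re-initialisation goes here */
	return 0;
}

/* Expands to a dev_pm_ops with suspend/resume, freeze/thaw, poweroff/restore. */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.driver.pm	= &example_pm_ops,
	/* .id_table, .probe and .remove filled in as usual */
};
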
int ath_pci_init(void)
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 17969af842f6..5e3d7496986e 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -19,6 +19,7 @@
#define CHANSEL_DIV 15
#define CHANSEL_2G(_freq) (((_freq) * 0x10000) / CHANSEL_DIV)
+#define CHANSEL_2G_9485(_freq) ((((_freq) * 0x10000) - 215) / CHANSEL_DIV)
#define CHANSEL_5G(_freq) (((_freq) * 0x8000) / CHANSEL_DIV)
#define AR_PHY_BASE 0x9800
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 89978d71617f..960d717ca7c2 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -381,25 +381,6 @@ static const struct ath_rate_table ar5416_11g_ratetable = {
static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
struct ieee80211_tx_rate *rate);
-static inline int8_t median(int8_t a, int8_t b, int8_t c)
-{
- if (a >= b) {
- if (b >= c)
- return b;
- else if (a > c)
- return c;
- else
- return a;
- } else {
- if (a >= c)
- return a;
- else if (b >= c)
- return c;
- else
- return b;
- }
-}
-
static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
struct ath_rate_priv *ath_rc_priv)
{
@@ -419,7 +400,7 @@ static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
}
}
-static void ath_rc_init_valid_txmask(struct ath_rate_priv *ath_rc_priv)
+static void ath_rc_init_valid_rate_idx(struct ath_rate_priv *ath_rc_priv)
{
u8 i;
@@ -427,7 +408,7 @@ static void ath_rc_init_valid_txmask(struct ath_rate_priv *ath_rc_priv)
ath_rc_priv->valid_rate_index[i] = 0;
}
-static inline void ath_rc_set_valid_txmask(struct ath_rate_priv *ath_rc_priv,
+static inline void ath_rc_set_valid_rate_idx(struct ath_rate_priv *ath_rc_priv,
u8 index, int valid_tx_rate)
{
BUG_ON(index > ath_rc_priv->rate_table_size);
@@ -508,7 +489,7 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_set_valid_txmask(ath_rc_priv, i, 1);
+ ath_rc_set_valid_rate_idx(ath_rc_priv, i, 1);
hi = i;
}
}
@@ -551,7 +532,7 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
ath_rc_priv->valid_phy_rateidx[phy]
[valid_rate_count] = j;
ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_set_valid_txmask(ath_rc_priv, j, 1);
+ ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
hi = A_MAX(hi, j);
}
}
@@ -587,7 +568,7 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
ath_rc_priv->valid_phy_rateidx[phy]
[ath_rc_priv->valid_phy_ratecnt[phy]] = j;
ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_set_valid_txmask(ath_rc_priv, j, 1);
+ ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
hi = A_MAX(hi, j);
}
}
@@ -883,7 +864,7 @@ static bool ath_rc_update_per(struct ath_softc *sc,
bool state_change = false;
int count, n_bad_frames;
u8 last_per;
- static u32 nretry_to_per_lookup[10] = {
+ static const u32 nretry_to_per_lookup[10] = {
100 * 0 / 1,
100 * 1 / 4,
100 * 1 / 2,
@@ -1106,13 +1087,13 @@ static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
struct ieee80211_tx_rate *rate)
{
int rix = 0, i = 0;
- int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };
+ static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };
if (!(rate->flags & IEEE80211_TX_RC_MCS))
return rate->idx;
while (rate->idx > mcs_rix_off[i] &&
- i < sizeof(mcs_rix_off)/sizeof(int)) {
+ i < ARRAY_SIZE(mcs_rix_off)) {
rix++; i++;
}
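
Two small hardening changes in this hunk: the lookup table becomes static const so it is not rebuilt on every call, and the open-coded sizeof(mcs_rix_off)/sizeof(int) bound becomes ARRAY_SIZE(), which stays correct if the element type ever changes. A tiny standalone illustration, using a simplified ARRAY_SIZE macro rather than the kernel's checked version:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	static const short tbl[] = { 7, 15, 20, 21, 22, 23 };

	/* Correct count (6), whatever the element type. */
	printf("ARRAY_SIZE:         %zu\n", ARRAY_SIZE(tbl));

	/* Silently wrong once the type is not int: 12 bytes / 4 = 3. */
	printf("sizeof/sizeof(int): %zu\n", sizeof(tbl) / sizeof(int));
	return 0;
}
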
@@ -1203,7 +1184,7 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
return &ar5416_11na_ratetable;
return &ar5416_11a_ratetable;
default:
- ath_print(common, ATH_DBG_CONFIG, "Invalid band\n");
+ ath_dbg(common, ATH_DBG_CONFIG, "Invalid band\n");
return NULL;
}
}
@@ -1229,7 +1210,7 @@ static void ath_rc_init(struct ath_softc *sc,
}
/* Determine the valid rates */
- ath_rc_init_valid_txmask(ath_rc_priv);
+ ath_rc_init_valid_rate_idx(ath_rc_priv);
for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
for (j = 0; j < MAX_TX_RATE_PHY; j++)
@@ -1278,9 +1259,9 @@ static void ath_rc_init(struct ath_softc *sc,
ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
ath_rc_priv->rate_table = rate_table;
- ath_print(common, ATH_DBG_CONFIG,
- "RC Initialized with capabilities: 0x%x\n",
- ath_rc_priv->ht_cap);
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "RC Initialized with capabilities: 0x%x\n",
+ ath_rc_priv->ht_cap);
}
static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -1340,7 +1321,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
struct ath_rate_priv *ath_rc_priv = priv_sta;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr;
- int final_ts_idx = 0, tx_status = 0, is_underrun = 0;
+ int final_ts_idx = 0, tx_status = 0;
int long_retry = 0;
__le16 fc;
int i;
@@ -1373,32 +1354,17 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
tx_info->status.ampdu_len = 1;
}
- /*
- * If an underrun error is seen assume it as an excessive retry only
- * if max frame trigger level has been reached (2 KB for singel stream,
- * and 4 KB for dual stream). Adjust the long retry as if the frame was
- * tried hw->max_rate_tries times to affect how ratectrl updates PER for
- * the failed rate. In case of congestion on the bus penalizing these
- * type of underruns should help hardware actually transmit new frames
- * successfully by eventually preferring slower rates. This itself
- * should also alleviate congestion on the bus.
- */
- if ((tx_info->pad[0] & ATH_TX_INFO_UNDERRUN) &&
- (sc->sc_ah->tx_trig_level >= ath_rc_priv->tx_triglevel_max)) {
- tx_status = 1;
- is_underrun = 1;
- }
-
- if (tx_info->pad[0] & ATH_TX_INFO_XRETRY)
+ if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
tx_status = 1;
ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status,
- (is_underrun) ? sc->hw->max_rate_tries : long_retry);
+ long_retry);
/* Check if aggregation has to be enabled for this tid */
if (conf_is_ht(&sc->hw->conf) &&
!(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
- if (ieee80211_is_data_qos(fc)) {
+ if (ieee80211_is_data_qos(fc) &&
+ skb_get_queue_mapping(skb) != IEEE80211_AC_VO) {
u8 *qc, tid;
struct ath_node *an;
@@ -1407,7 +1373,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
an = (struct ath_node *)sta->drv_priv;
if(ath_tx_aggr_check(sc, an, tid))
- ieee80211_start_tx_ba_session(sta, tid);
+ ieee80211_start_tx_ba_session(sta, tid, 0);
}
}
@@ -1444,12 +1410,12 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
ath_rc_priv->neg_ht_rates.rs_nrates = j;
}
- is_cw40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ is_cw40 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
if (is_cw40)
- is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40;
+ is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
- is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
+ is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
/* Choose rate table first */
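
The added double negation, !!(cap & FLAG), collapses a masked capability word to exactly 0 or 1 before assignment, so the result cannot be truncated away if the destination is ever a narrow integer rather than a bool. A one-file demonstration with made-up flag values:

#include <stdio.h>

int main(void)
{
	unsigned int cap = 0x0402;			/* pretend capability word */
	unsigned char truncated  = (cap & 0x0400);	/* 0x400 does not fit: becomes 0 */
	unsigned char normalized = !!(cap & 0x0400);	/* always exactly 0 or 1 */

	printf("without !!: %u, with !!: %u\n", truncated, normalized);
	return 0;
}
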
@@ -1468,10 +1434,8 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
struct ath_rate_priv *ath_rc_priv = priv_sta;
const struct ath_rate_table *rate_table = NULL;
bool oper_cw40 = false, oper_sgi;
- bool local_cw40 = (ath_rc_priv->ht_cap & WLAN_RC_40_FLAG) ?
- true : false;
- bool local_sgi = (ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG) ?
- true : false;
+ bool local_cw40 = !!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG);
+ bool local_sgi = !!(ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG);
/* FIXME: Handle AP mode later when we support CWM */
@@ -1499,9 +1463,9 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
oper_cw40, oper_sgi);
ath_rc_init(sc, priv_sta, sband, sta, rate_table);
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
- "Operating HT Bandwidth changed to: %d\n",
- sc->hw->conf.channel_type);
+ ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
+ "Operating HT Bandwidth changed to: %d\n",
+ sc->hw->conf.channel_type);
}
}
}
@@ -1596,8 +1560,7 @@ static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
- struct ath_wiphy *aphy = hw->priv;
- return aphy->sc;
+ return hw->priv;
}
static void ath_rate_free(void *priv)
@@ -1612,13 +1575,11 @@ static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp
rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp);
if (!rate_priv) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Unable to allocate private rc structure\n");
+ ath_err(ath9k_hw_common(sc->sc_ah),
+ "Unable to allocate private rc structure\n");
return NULL;
}
- rate_priv->tx_triglevel_max = sc->sc_ah->caps.tx_triglevel_max;
-
return rate_priv;
}
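
As an aside to the rc.c hunks above: the !!() conversions in ath_rate_init()/ath_rate_update() collapse a masked capability bit into a strict 0/1 before it is stored in a boolean. A small stand-alone C sketch of that idiom follows; the flag values and names are invented for illustration and are not ath9k code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical capability bits -- for illustration only. */
#define CAP_SUP_WIDTH_20_40  0x0002
#define CAP_SGI_40           0x0040

int main(void)
{
	unsigned int cap = CAP_SUP_WIDTH_20_40 | CAP_SGI_40;

	/* Without !!, the raw masked value (e.g. 0x40) would be stored;
	 * with !! the result is exactly 0 or 1, which is what a bool
	 * flag expects. */
	bool is_cw40 = !!(cap & CAP_SUP_WIDTH_20_40);
	bool is_sgi  = is_cw40 ? !!(cap & CAP_SGI_40) : false;

	printf("cw40=%d sgi=%d\n", is_cw40, is_sgi);
	return 0;
}
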
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 2f46a2266ba1..5d984b8acdb1 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -195,7 +195,6 @@ struct ath_rc_stats {
* @rate_max_phy: phy index for the max rate
* @per: PER for every valid rate in %
* @probe_interval: interval for ratectrl to probe for other rates
- * @prev_data_rix: rate idx of last data frame
* @ht_cap: HT capabilities
* @neg_rates: Negotiated rates
* @neg_ht_rates: Negotiated HT rates
@@ -214,22 +213,14 @@ struct ath_rate_priv {
u32 probe_time;
u32 per_down_time;
u32 probe_interval;
- u32 prev_data_rix;
- u32 tx_triglevel_max;
struct ath_rateset neg_rates;
struct ath_rateset neg_ht_rates;
- struct ath_rate_softc *asc;
const struct ath_rate_table *rate_table;
struct dentry *debugfs_rcstats;
struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
};
-#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0)
-#define ATH_TX_INFO_FRAME_TYPE_PAUSE (1 << 1)
-#define ATH_TX_INFO_XRETRY (1 << 3)
-#define ATH_TX_INFO_UNDERRUN (1 << 4)
-
enum ath9k_internal_frame_type {
ATH9K_IFT_NOT_INTERNAL,
ATH9K_IFT_PAUSE,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 1a62e351ec77..a9c3f4672aa0 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -34,27 +34,6 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}
-static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
- struct ieee80211_hdr *hdr)
-{
- struct ieee80211_hw *hw = sc->pri_wiphy->hw;
- int i;
-
- spin_lock_bh(&sc->wiphy_lock);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy == NULL)
- continue;
- if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
- == 0) {
- hw = aphy->hw;
- break;
- }
- }
- spin_unlock_bh(&sc->wiphy_lock);
- return hw;
-}
-
/*
* Setup and link descriptors.
*
@@ -165,7 +144,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
u32 nbuf = 0;
if (list_empty(&sc->rx.rxbuf)) {
- ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
+ ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
return;
}
@@ -230,11 +209,6 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
int error = 0, i;
u32 size;
-
- common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
- ah->caps.rx_status_len,
- min(common->cachelsz, (u16)64));
-
ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
ah->caps.rx_status_len);
@@ -269,7 +243,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
dev_kfree_skb_any(skb);
bf->bf_mpdu = NULL;
bf->bf_buf_addr = 0;
- ath_print(common, ATH_DBG_FATAL,
+ ath_err(common,
"dma_mapping_error() on RX init\n");
error = -ENOMEM;
goto rx_init_fail;
@@ -317,27 +291,27 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
struct ath_buf *bf;
int error = 0;
- spin_lock_init(&sc->rx.pcu_lock);
+ spin_lock_init(&sc->sc_pcu_lock);
sc->sc_flags &= ~SC_OP_RXFLUSH;
spin_lock_init(&sc->rx.rxbuflock);
+ common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
+ sc->sc_ah->caps.rx_status_len;
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
return ath_rx_edma_init(sc, nbufs);
} else {
- common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
- min(common->cachelsz, (u16)64));
-
- ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
- common->cachelsz, common->rx_bufsize);
+ ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+ common->cachelsz, common->rx_bufsize);
/* Initialize rx descriptors */
error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
"rx", nbufs, 1, 0);
if (error != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "failed to allocate rx descriptors: %d\n",
- error);
+ ath_err(common,
+ "failed to allocate rx descriptors: %d\n",
+ error);
goto err;
}
@@ -358,8 +332,8 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
dev_kfree_skb_any(skb);
bf->bf_mpdu = NULL;
bf->bf_buf_addr = 0;
- ath_print(common, ATH_DBG_FATAL,
- "dma_mapping_error() on RX init\n");
+ ath_err(common,
+ "dma_mapping_error() on RX init\n");
error = -ENOMEM;
goto err;
}
@@ -439,9 +413,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
* mode interface or when in monitor mode. AP mode does not need this
* since it receives all in-BSS frames anyway.
*/
- if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
- (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
- (sc->sc_ah->is_monitoring))
+ if (sc->sc_ah->is_monitoring)
rfilt |= ATH9K_RX_FILTER_PROM;
if (sc->rx.rxfilter & FIF_CONTROL)
@@ -463,8 +435,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
if (conf_is_ht(&sc->hw->conf))
rfilt |= ATH9K_RX_FILTER_COMP_BAR;
- if (sc->sec_wiphy || (sc->nvifs > 1) ||
- (sc->rx.rxfilter & FIF_OTHER_BSS)) {
+ if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
/* The following may also be needed for other older chips */
if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
rfilt |= ATH9K_RX_FILTER_PROM;
@@ -528,6 +499,13 @@ bool ath_stoprecv(struct ath_softc *sc)
sc->rx.rxlink = NULL;
spin_unlock_bh(&sc->rx.rxbuflock);
+ if (!(ah->ah_flags & AH_UNPLUGGED) &&
+ unlikely(!stopped)) {
+ ath_err(ath9k_hw_common(sc->sc_ah),
+ "Could not stop RX, we could be "
+ "confusing the DMA engine when we start RX up\n");
+ ATH_DBG_WARN_ON_ONCE(!stopped);
+ }
return stopped;
}
@@ -581,16 +559,21 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
return;
mgmt = (struct ieee80211_mgmt *)skb->data;
- if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
+ if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
+ /* TODO: This doesn't work well if you have stations
+ * associated to two different APs because curbssid
+ * is just the last AP that any of the stations associated
+ * with.
+ */
return; /* not from our current AP */
+ }
sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
if (sc->ps_flags & PS_BEACON_SYNC) {
sc->ps_flags &= ~PS_BEACON_SYNC;
- ath_print(common, ATH_DBG_PS,
- "Reconfigure Beacon timers based on "
- "timestamp from the AP\n");
+ ath_dbg(common, ATH_DBG_PS,
+ "Reconfigure Beacon timers based on timestamp from the AP\n");
ath_beacon_config(sc, NULL);
}
@@ -602,8 +585,8 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
* a backup trigger for returning into NETWORK SLEEP state,
* so we are waiting for it as well.
*/
- ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
- "buffered broadcast/multicast frame(s)\n");
+ ath_dbg(common, ATH_DBG_PS,
+ "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
return;
}
@@ -615,8 +598,8 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
* been delivered.
*/
sc->ps_flags &= ~PS_WAIT_FOR_CAB;
- ath_print(common, ATH_DBG_PS,
- "PS wait for CAB frames timed out\n");
+ ath_dbg(common, ATH_DBG_PS,
+ "PS wait for CAB frames timed out\n");
}
}
@@ -641,15 +624,14 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
* point.
*/
sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
- ath_print(common, ATH_DBG_PS,
- "All PS CAB frames received, back to sleep\n");
+ ath_dbg(common, ATH_DBG_PS,
+ "All PS CAB frames received, back to sleep\n");
} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
!is_multicast_ether_addr(hdr->addr1) &&
!ieee80211_has_morefrags(hdr->frame_control)) {
sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
- ath_print(common, ATH_DBG_PS,
- "Going back to sleep after having received "
- "PS-Poll data (0x%lx)\n",
+ ath_dbg(common, ATH_DBG_PS,
+ "Going back to sleep after having received PS-Poll data (0x%lx)\n",
sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA |
@@ -657,38 +639,6 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
}
}
-static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
- struct ath_softc *sc, struct sk_buff *skb,
- struct ieee80211_rx_status *rxs)
-{
- struct ieee80211_hdr *hdr;
-
- hdr = (struct ieee80211_hdr *)skb->data;
-
- /* Send the frame to mac80211 */
- if (is_multicast_ether_addr(hdr->addr1)) {
- int i;
- /*
- * Deliver broadcast/multicast frames to all suitable
- * virtual wiphys.
- */
- /* TODO: filter based on channel configuration */
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- struct sk_buff *nskb;
- if (aphy == NULL)
- continue;
- nskb = skb_copy(skb, GFP_ATOMIC);
- if (!nskb)
- continue;
- ieee80211_rx(aphy->hw, nskb);
- }
- ieee80211_rx(sc->hw, skb);
- } else
- /* Deliver unicast frames based on receiver address */
- ieee80211_rx(hw, skb);
-}
-
static bool ath_edma_get_buffers(struct ath_softc *sc,
enum ath9k_rx_qtype qtype)
{
@@ -838,6 +788,10 @@ static bool ath9k_rx_accept(struct ath_common *common,
struct ath_rx_status *rx_stats,
bool *decrypt_error)
{
+#define is_mc_or_valid_tkip_keyix ((is_mc || \
+ (rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \
+ test_bit(rx_stats->rs_keyix, common->tkip_keymap))))
+
struct ath_hw *ah = common->ah;
__le16 fc;
u8 rx_status_len = ah->caps.rx_status_len;
@@ -854,15 +808,9 @@ static bool ath9k_rx_accept(struct ath_common *common,
if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
return false;
- /*
- * rs_more indicates chained descriptors which can be used
- * to link buffers together for a sort of scatter-gather
- * operation.
- * reject the frame, we don't support scatter-gather yet and
- * the frame is probably corrupt anyway
- */
+ /* Only use error bits from the last fragment */
if (rx_stats->rs_more)
- return false;
+ return true;
/*
* The rx_stats->rs_status will not be set until the end of the
@@ -879,15 +827,18 @@ static bool ath9k_rx_accept(struct ath_common *common,
if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
*decrypt_error = true;
} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
+ bool is_mc;
/*
* The MIC error bit is only valid if the frame
* is not a control frame or fragment, and it was
* decrypted using a valid TKIP key.
*/
+ is_mc = !!is_multicast_ether_addr(hdr->addr1);
+
if (!ieee80211_is_ctl(fc) &&
!ieee80211_has_morefrags(fc) &&
!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
- test_bit(rx_stats->rs_keyix, common->tkip_keymap))
+ is_mc_or_valid_tkip_keyix)
rxs->flag |= RX_FLAG_MMIC_ERROR;
else
rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
@@ -951,8 +902,9 @@ static int ath9k_process_rate(struct ath_common *common,
* No valid hardware bitrate found -- we should not get here
* because hardware has already validated this frame as OK.
*/
- ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
- "0x%02x using 1 Mbit\n", rx_stats->rs_rate);
+ ath_dbg(common, ATH_DBG_XMIT,
+ "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
+ rx_stats->rs_rate);
return -EINVAL;
}
@@ -962,36 +914,29 @@ static void ath9k_process_rssi(struct ath_common *common,
struct ieee80211_hdr *hdr,
struct ath_rx_status *rx_stats)
{
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = common->ah;
- struct ieee80211_sta *sta;
- struct ath_node *an;
- int last_rssi = ATH_RSSI_DUMMY_MARKER;
+ int last_rssi;
__le16 fc;
- fc = hdr->frame_control;
+ if (ah->opmode != NL80211_IFTYPE_STATION)
+ return;
- rcu_read_lock();
- /*
- * XXX: use ieee80211_find_sta! This requires quite a bit of work
- * under the current ath9k virtual wiphy implementation as we have
- * no way of tying a vif to wiphy. Typically vifs are attached to
- * at least one sdata of a wiphy on mac80211 but with ath9k virtual
- * wiphy you'd have to iterate over every wiphy and each sdata.
- */
- if (is_multicast_ether_addr(hdr->addr1))
- sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
- else
- sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, hdr->addr1);
-
- if (sta) {
- an = (struct ath_node *) sta->drv_priv;
- if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
- !rx_stats->rs_moreaggr)
- ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
- last_rssi = an->last_rssi;
+ fc = hdr->frame_control;
+ if (!ieee80211_is_beacon(fc) ||
+ compare_ether_addr(hdr->addr3, common->curbssid)) {
+ /* TODO: This doesn't work well if you have stations
+ * associated to two different APs because curbssid
+ * is just the last AP that any of the stations associated
+ * with.
+ */
+ return;
}
- rcu_read_unlock();
+ if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
+ ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
+
+ last_rssi = sc->last_rssi;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
ATH_RSSI_EP_MULTIPLIER);
@@ -999,8 +944,7 @@ static void ath9k_process_rssi(struct ath_common *common,
rx_stats->rs_rssi = 0;
/* Update Beacon RSSI, this is used by ANI. */
- if (ieee80211_is_beacon(fc))
- ah->stats.avgbrssi = rx_stats->rs_rssi;
+ ah->stats.avgbrssi = rx_stats->rs_rssi;
}
/*
@@ -1024,6 +968,10 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
return -EINVAL;
+ /* Only use status info from the last fragment */
+ if (rx_stats->rs_more)
+ return 0;
+
ath9k_process_rssi(common, hw, hdr, rx_stats);
if (ath9k_process_rate(common, hw, rx_stats, rx_status))
@@ -1033,7 +981,7 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
rx_status->freq = hw->conf.channel->center_freq;
rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
rx_status->antenna = rx_stats->rs_antenna;
- rx_status->flag |= RX_FLAG_TSFT;
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
return 0;
}
@@ -1625,16 +1573,16 @@ div_comb_done:
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
struct ath_buf *bf;
- struct sk_buff *skb = NULL, *requeue_skb;
+ struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
/*
- * The hw can techncically differ from common->hw when using ath9k
+ * The hw can technically differ from common->hw when using ath9k
* virtual wiphy so to account for that we iterate over the active
* wiphys and find the appropriate wiphy and therefore hw.
*/
- struct ieee80211_hw *hw = NULL;
+ struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
int retval;
bool decrypt_error = false;
@@ -1676,10 +1624,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (!skb)
continue;
- hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
- rxs = IEEE80211_SKB_RXCB(skb);
+ /*
+ * Take frame header from the first fragment and RX status from
+ * the last one.
+ */
+ if (sc->rx.frag)
+ hdr_skb = sc->rx.frag;
+ else
+ hdr_skb = skb;
- hw = ath_get_virt_hw(sc, hdr);
+ hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
+ rxs = IEEE80211_SKB_RXCB(hdr_skb);
ath_debug_stat_rx(sc, &rs);
@@ -1688,12 +1643,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
* chain it back at the queue without processing it.
*/
if (flush)
- goto requeue;
+ goto requeue_drop_frag;
retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
rxs, &decrypt_error);
if (retval)
- goto requeue;
+ goto requeue_drop_frag;
rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
if (rs.rs_tstamp > tsf_lower &&
@@ -1713,7 +1668,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
* skb and put it at the tail of the sc->rx.rxbuf list for
* processing. */
if (!requeue_skb)
- goto requeue;
+ goto requeue_drop_frag;
/* Unmap the frame */
dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -1724,8 +1679,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (ah->caps.rx_status_len)
skb_pull(skb, ah->caps.rx_status_len);
- ath9k_rx_skb_postprocess(common, skb, &rs,
- rxs, decrypt_error);
+ if (!rs.rs_more)
+ ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
+ rxs, decrypt_error);
/* We will now give hardware our shiny new allocated skb */
bf->bf_mpdu = requeue_skb;
@@ -1737,12 +1693,43 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
dev_kfree_skb_any(requeue_skb);
bf->bf_mpdu = NULL;
bf->bf_buf_addr = 0;
- ath_print(common, ATH_DBG_FATAL,
- "dma_mapping_error() on RX\n");
- ath_rx_send_to_mac80211(hw, sc, skb, rxs);
+ ath_err(common, "dma_mapping_error() on RX\n");
+ ieee80211_rx(hw, skb);
break;
}
+ if (rs.rs_more) {
+ /*
+ * rs_more indicates chained descriptors which can be
+ * used to link buffers together for a sort of
+ * scatter-gather operation.
+ */
+ if (sc->rx.frag) {
+ /* too many fragments - cannot handle frame */
+ dev_kfree_skb_any(sc->rx.frag);
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
+ sc->rx.frag = skb;
+ goto requeue;
+ }
+
+ if (sc->rx.frag) {
+ int space = skb->len - skb_tailroom(hdr_skb);
+
+ sc->rx.frag = NULL;
+
+ if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
+ dev_kfree_skb(skb);
+ goto requeue_drop_frag;
+ }
+
+ skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
+ skb->len);
+ dev_kfree_skb_any(skb);
+ skb = hdr_skb;
+ }
+
/*
* change the default rx antenna if rx diversity chooses the
* other antenna 3 times in a row.
@@ -1755,18 +1742,24 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
}
spin_lock_irqsave(&sc->sc_pm_lock, flags);
- if (unlikely(ath9k_check_auto_sleep(sc) ||
- (sc->ps_flags & (PS_WAIT_FOR_BEACON |
+
+ if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
- PS_WAIT_FOR_PSPOLL_DATA))))
+ PS_WAIT_FOR_PSPOLL_DATA)) ||
+ unlikely(ath9k_check_auto_sleep(sc)))
ath_rx_ps(sc, skb);
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
ath_ant_comb_scan(sc, &rs);
- ath_rx_send_to_mac80211(hw, sc, skb, rxs);
+ ieee80211_rx(hw, skb);
+requeue_drop_frag:
+ if (sc->rx.frag) {
+ dev_kfree_skb_any(sc->rx.frag);
+ sc->rx.frag = NULL;
+ }
requeue:
if (edma) {
list_add_tail(&bf->list, &sc->rx.rxbuf);
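
The recv.c changes above reassemble chained RX descriptors (rs_more) by keeping the 802.11 header from the first fragment, trusting error/status bits only from the last, and appending each later buffer to the first one's tail. A rough user-space analogue of that append step is sketched below; it uses plain heap buffers rather than struct sk_buff, so none of the names are kernel APIs.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct frag_buf {
	unsigned char *data;
	size_t len;
	size_t cap;
};

/* Grow 'head' if needed and append 'tail' -- the moral equivalent of the
 * pskb_expand_head() + skb_copy_from_linear_data() sequence in the patch. */
static int frag_append(struct frag_buf *head, const struct frag_buf *tail)
{
	if (head->len + tail->len > head->cap) {
		unsigned char *p = realloc(head->data, head->len + tail->len);
		if (!p)
			return -1;	/* caller drops both fragments */
		head->data = p;
		head->cap = head->len + tail->len;
	}
	memcpy(head->data + head->len, tail->data, tail->len);
	head->len += tail->len;
	return 0;
}

int main(void)
{
	struct frag_buf head = { malloc(8), 4, 8 };	/* first fragment, keeps header */
	struct frag_buf tail = { malloc(6), 6, 6 };	/* last fragment, keeps status  */

	memcpy(head.data, "HDR1", 4);
	memcpy(tail.data, "PAYLOA", 6);

	if (frag_append(&head, &tail) == 0)
		printf("reassembled %zu bytes\n", head.len);

	free(head.data);
	free(tail.data);
	return 0;
}

In the driver, the same grow-then-copy is done on the skb that holds the header, and both fragments are dropped (requeue_drop_frag) if the expansion fails.
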
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index dddf579aacf1..8fa8acfde62e 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -787,6 +787,9 @@
#define AR_SREV_REVISION_9271_11 1
#define AR_SREV_VERSION_9300 0x1c0
#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
+#define AR_SREV_VERSION_9485 0x240
+#define AR_SREV_REVISION_9485_10 0
+#define AR_SREV_REVISION_9485_11 1
#define AR_SREV_5416(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -859,20 +862,28 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9300_20)))
+#define AR_SREV_9485(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
+#define AR_SREV_9485_10(_ah) \
+ (AR_SREV_9485(_ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_10))
+#define AR_SREV_9485_11(_ah) \
+ (AR_SREV_9485(_ah) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11))
+
#define AR_SREV_9285E_20(_ah) \
(AR_SREV_9285_12_OR_LATER(_ah) && \
((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
-#define AR_DEVID_7010(_ah) \
- (((_ah)->hw_version.devid == 0x7010) || \
- ((_ah)->hw_version.devid == 0x7015) || \
- ((_ah)->hw_version.devid == 0x9018) || \
- ((_ah)->hw_version.devid == 0xA704) || \
- ((_ah)->hw_version.devid == 0x1200))
+enum ath_usb_dev {
+ AR9280_USB = 1, /* AR7010 + AR9280, UB94 */
+ AR9287_USB = 2, /* AR7010 + AR9287, UB95 */
+ STORAGE_DEVICE = 3,
+};
-#define AR9287_HTC_DEVID(_ah) \
- (((_ah)->hw_version.devid == 0x7015) || \
- ((_ah)->hw_version.devid == 0x1200))
+#define AR_DEVID_7010(_ah) \
+ (((_ah)->hw_version.usbdev == AR9280_USB) || \
+ ((_ah)->hw_version.usbdev == AR9287_USB))
#define AR_RADIO_SREV_MAJOR 0xf0
#define AR_RAD5133_SREV_MAJOR 0xc0
@@ -984,11 +995,13 @@ enum {
#define AR9287_GPIO_IN_VAL_S 11
#define AR9271_GPIO_IN_VAL 0xFFFF0000
#define AR9271_GPIO_IN_VAL_S 16
-#define AR9300_GPIO_IN_VAL 0x0001FFFF
-#define AR9300_GPIO_IN_VAL_S 0
#define AR7010_GPIO_IN_VAL 0x0000FFFF
#define AR7010_GPIO_IN_VAL_S 0
+#define AR_GPIO_IN 0x404c
+#define AR9300_GPIO_IN_VAL 0x0001FFFF
+#define AR9300_GPIO_IN_VAL_S 0
+
#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
#define AR_GPIO_OE_OUT_DRV 0x3
#define AR_GPIO_OE_OUT_DRV_NO 0x0
@@ -1072,6 +1085,20 @@ enum {
#define AR_INTR_PRIO_ASYNC_MASK 0x40c8
#define AR_INTR_PRIO_SYNC_MASK 0x40cc
#define AR_INTR_PRIO_ASYNC_ENABLE 0x40d4
+#define AR_ENT_OTP 0x40d8
+#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000
+#define AR_ENT_OTP_MPSD 0x00800000
+#define AR_CH0_BB_DPLL2 0x16184
+#define AR_CH0_BB_DPLL3 0x16188
+#define AR_CH0_DDR_DPLL2 0x16244
+#define AR_CH0_DDR_DPLL3 0x16248
+#define AR_CH0_DPLL2_KD 0x03F80000
+#define AR_CH0_DPLL2_KD_S 19
+#define AR_CH0_DPLL2_KI 0x3C000000
+#define AR_CH0_DPLL2_KI_S 26
+#define AR_CH0_DPLL3_PHASE_SHIFT 0x3F800000
+#define AR_CH0_DPLL3_PHASE_SHIFT_S 23
+#define AR_PHY_CCA_NOM_VAL_2GHZ -118
#define AR_RTC_9300_PLL_DIV 0x000003ff
#define AR_RTC_9300_PLL_DIV_S 0
@@ -1109,6 +1136,8 @@ enum {
#define AR_RTC_PLL_CONTROL \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014)
+#define AR_RTC_PLL_CONTROL2 0x703c
+
#define AR_RTC_PLL_DIV 0x0000001f
#define AR_RTC_PLL_DIV_S 0
#define AR_RTC_PLL_DIV2 0x00000020
@@ -1116,6 +1145,12 @@ enum {
#define AR_RTC_PLL_CLKSEL 0x00000300
#define AR_RTC_PLL_CLKSEL_S 8
+#define PLL3 0x16188
+#define PLL3_DO_MEAS_MASK 0x40000000
+#define PLL4 0x1618c
+#define PLL4_MEAS_DONE 0x8
+#define SQSUM_DVC_MASK 0x007ffff8
+
#define AR_RTC_RESET \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
#define AR_RTC_RESET_EN (0x00000001)
@@ -1572,6 +1607,7 @@ enum {
#define AR_PCU_TBTT_PROTECT 0x00200000
#define AR_PCU_CLEAR_VMF 0x01000000
#define AR_PCU_CLEAR_BA_VALID 0x04000000
+#define AR_PCU_ALWAYS_PERFORM_KEYSEARCH 0x10000000
#define AR_PCU_BT_ANT_PREVENT_RX 0x00100000
#define AR_PCU_BT_ANT_PREVENT_RX_S 20
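
Besides the AR9485 revision macros, the reg.h hunk above replaces the raw 0x7010/0x7015/... device-ID comparisons with a small enum of USB device types, so AR_DEVID_7010() now tests a semantic field instead of magic numbers. A stand-alone sketch of the same pattern, with invented IDs and names (not ath9k code):

#include <stdio.h>

enum usb_dev_type { DEV_UNKNOWN = 0, DEV_A = 1, DEV_B = 2 };

struct hw_version {
	enum usb_dev_type usbdev;
};

/* Compare against named types, mirroring the reworked AR_DEVID_7010() check. */
static int is_family_x(const struct hw_version *v)
{
	return v->usbdev == DEV_A || v->usbdev == DEV_B;
}

int main(void)
{
	struct hw_version v = { .usbdev = DEV_B };
	printf("family-x: %d\n", is_family_x(&v));
	return 0;
}
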
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
deleted file mode 100644
index ec7cf5ee56bc..000000000000
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ /dev/null
@@ -1,719 +0,0 @@
-/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/slab.h>
-
-#include "ath9k.h"
-
-struct ath9k_vif_iter_data {
- const u8 *hw_macaddr;
- u8 mask[ETH_ALEN];
-};
-
-static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath9k_vif_iter_data *iter_data = data;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++)
- iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
-}
-
-void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath9k_vif_iter_data iter_data;
- int i;
-
- /*
- * Use the hardware MAC address as reference, the hardware uses it
- * together with the BSSID mask when matching addresses.
- */
- iter_data.hw_macaddr = common->macaddr;
- memset(&iter_data.mask, 0xff, ETH_ALEN);
-
- if (vif)
- ath9k_vif_iter(&iter_data, vif->addr, vif);
-
- /* Get list of all active MAC addresses */
- spin_lock_bh(&sc->wiphy_lock);
- ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
- &iter_data);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] == NULL)
- continue;
- ieee80211_iterate_active_interfaces_atomic(
- sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
- }
- spin_unlock_bh(&sc->wiphy_lock);
-
- memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
- ath_hw_setbssidmask(common);
-}
-
-int ath9k_wiphy_add(struct ath_softc *sc)
-{
- int i, error;
- struct ath_wiphy *aphy;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ieee80211_hw *hw;
- u8 addr[ETH_ALEN];
-
- hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
- if (hw == NULL)
- return -ENOMEM;
-
- spin_lock_bh(&sc->wiphy_lock);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] == NULL)
- break;
- }
-
- if (i == sc->num_sec_wiphy) {
- /* No empty slot available; increase array length */
- struct ath_wiphy **n;
- n = krealloc(sc->sec_wiphy,
- (sc->num_sec_wiphy + 1) *
- sizeof(struct ath_wiphy *),
- GFP_ATOMIC);
- if (n == NULL) {
- spin_unlock_bh(&sc->wiphy_lock);
- ieee80211_free_hw(hw);
- return -ENOMEM;
- }
- n[i] = NULL;
- sc->sec_wiphy = n;
- sc->num_sec_wiphy++;
- }
-
- SET_IEEE80211_DEV(hw, sc->dev);
-
- aphy = hw->priv;
- aphy->sc = sc;
- aphy->hw = hw;
- sc->sec_wiphy[i] = aphy;
- spin_unlock_bh(&sc->wiphy_lock);
-
- memcpy(addr, common->macaddr, ETH_ALEN);
- addr[0] |= 0x02; /* Locally managed address */
- /*
- * XOR virtual wiphy index into the least significant bits to generate
- * a different MAC address for each virtual wiphy.
- */
- addr[5] ^= i & 0xff;
- addr[4] ^= (i & 0xff00) >> 8;
- addr[3] ^= (i & 0xff0000) >> 16;
-
- SET_IEEE80211_PERM_ADDR(hw, addr);
-
- ath9k_set_hw_capab(sc, hw);
-
- error = ieee80211_register_hw(hw);
-
- if (error == 0) {
- /* Make sure wiphy scheduler is started (if enabled) */
- ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
- }
-
- return error;
-}
-
-int ath9k_wiphy_del(struct ath_wiphy *aphy)
-{
- struct ath_softc *sc = aphy->sc;
- int i;
-
- spin_lock_bh(&sc->wiphy_lock);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (aphy == sc->sec_wiphy[i]) {
- sc->sec_wiphy[i] = NULL;
- spin_unlock_bh(&sc->wiphy_lock);
- ieee80211_unregister_hw(aphy->hw);
- ieee80211_free_hw(aphy->hw);
- return 0;
- }
- }
- spin_unlock_bh(&sc->wiphy_lock);
- return -ENOENT;
-}
-
-static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
- struct ieee80211_vif *vif, const u8 *bssid,
- int ps)
-{
- struct ath_softc *sc = aphy->sc;
- struct ath_tx_control txctl;
- struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
- __le16 fc;
- struct ieee80211_tx_info *info;
-
- skb = dev_alloc_skb(24);
- if (skb == NULL)
- return -ENOMEM;
- hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
- memset(hdr, 0, 24);
- fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
- IEEE80211_FCTL_TODS);
- if (ps)
- fc |= cpu_to_le16(IEEE80211_FCTL_PM);
- hdr->frame_control = fc;
- memcpy(hdr->addr1, bssid, ETH_ALEN);
- memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
- memcpy(hdr->addr3, bssid, ETH_ALEN);
-
- info = IEEE80211_SKB_CB(skb);
- memset(info, 0, sizeof(*info));
- info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
- info->control.vif = vif;
- info->control.rates[0].idx = 0;
- info->control.rates[0].count = 4;
- info->control.rates[1].idx = -1;
-
- memset(&txctl, 0, sizeof(struct ath_tx_control));
- txctl.txq = &sc->tx.txq[sc->tx.hwq_map[WME_AC_VO]];
- txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
-
- if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
- goto exit;
-
- return 0;
-exit:
- dev_kfree_skb_any(skb);
- return -1;
-}
-
-static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
-{
- int i;
- if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
- return true;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
- return true;
- }
- return false;
-}
-
-static bool ath9k_wiphy_pausing(struct ath_softc *sc)
-{
- bool ret;
- spin_lock_bh(&sc->wiphy_lock);
- ret = __ath9k_wiphy_pausing(sc);
- spin_unlock_bh(&sc->wiphy_lock);
- return ret;
-}
-
-static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
-{
- int i;
- if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
- return true;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
- return true;
- }
- return false;
-}
-
-bool ath9k_wiphy_scanning(struct ath_softc *sc)
-{
- bool ret;
- spin_lock_bh(&sc->wiphy_lock);
- ret = __ath9k_wiphy_scanning(sc);
- spin_unlock_bh(&sc->wiphy_lock);
- return ret;
-}
-
-static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);
-
-/* caller must hold wiphy_lock */
-static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
-{
- if (aphy == NULL)
- return;
- if (aphy->chan_idx != aphy->sc->chan_idx)
- return; /* wiphy not on the selected channel */
- __ath9k_wiphy_unpause(aphy);
-}
-
-static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
-{
- int i;
- spin_lock_bh(&sc->wiphy_lock);
- __ath9k_wiphy_unpause_ch(sc->pri_wiphy);
- for (i = 0; i < sc->num_sec_wiphy; i++)
- __ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
- spin_unlock_bh(&sc->wiphy_lock);
-}
-
-void ath9k_wiphy_chan_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_wiphy *aphy = sc->next_wiphy;
-
- if (aphy == NULL)
- return;
-
- /*
- * All pending interfaces paused; ready to change
- * channels.
- */
-
- /* Change channels */
- mutex_lock(&sc->mutex);
- /* XXX: remove me eventually */
- ath9k_update_ichannel(sc, aphy->hw,
- &sc->sc_ah->channels[sc->chan_idx]);
-
- /* sync hw configuration for hw code */
- common->hw = aphy->hw;
-
- ath_update_chainmask(sc, sc->chan_is_ht);
- if (ath_set_channel(sc, aphy->hw,
- &sc->sc_ah->channels[sc->chan_idx]) < 0) {
- printk(KERN_DEBUG "ath9k: Failed to set channel for new "
- "virtual wiphy\n");
- mutex_unlock(&sc->mutex);
- return;
- }
- mutex_unlock(&sc->mutex);
-
- ath9k_wiphy_unpause_channel(sc);
-}
-
-/*
- * ath9k version of ieee80211_tx_status() for TX frames that are generated
- * internally in the driver.
- */
-void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-
- if ((tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_PAUSE) &&
- aphy->state == ATH_WIPHY_PAUSING) {
- if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
- printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
- "frame\n", wiphy_name(hw->wiphy));
- /*
- * The AP did not reply; ignore this to allow us to
- * continue.
- */
- }
- aphy->state = ATH_WIPHY_PAUSED;
- if (!ath9k_wiphy_pausing(aphy->sc)) {
- /*
- * Drop from tasklet to work to allow mutex for channel
- * change.
- */
- ieee80211_queue_work(aphy->sc->hw,
- &aphy->sc->chan_work);
- }
- }
-
- dev_kfree_skb(skb);
-}
-
-static void ath9k_mark_paused(struct ath_wiphy *aphy)
-{
- struct ath_softc *sc = aphy->sc;
- aphy->state = ATH_WIPHY_PAUSED;
- if (!__ath9k_wiphy_pausing(sc))
- ieee80211_queue_work(sc->hw, &sc->chan_work);
-}
-
-static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath_wiphy *aphy = data;
- struct ath_vif *avp = (void *) vif->drv_priv;
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- if (!vif->bss_conf.assoc) {
- ath9k_mark_paused(aphy);
- break;
- }
- /* TODO: could avoid this if already in PS mode */
- if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
- printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
- __func__);
- ath9k_mark_paused(aphy);
- }
- break;
- case NL80211_IFTYPE_AP:
- /* Beacon transmission is paused by aphy->state change */
- ath9k_mark_paused(aphy);
- break;
- default:
- break;
- }
-}
-
-/* caller must hold wiphy_lock */
-static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
-{
- ieee80211_stop_queues(aphy->hw);
- aphy->state = ATH_WIPHY_PAUSING;
- /*
- * TODO: handle PAUSING->PAUSED for the case where there are multiple
- * active vifs (now we do it on the first vif getting ready; should be
- * on the last)
- */
- ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
- aphy);
- return 0;
-}
-
-int ath9k_wiphy_pause(struct ath_wiphy *aphy)
-{
- int ret;
- spin_lock_bh(&aphy->sc->wiphy_lock);
- ret = __ath9k_wiphy_pause(aphy);
- spin_unlock_bh(&aphy->sc->wiphy_lock);
- return ret;
-}
-
-static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath_wiphy *aphy = data;
- struct ath_vif *avp = (void *) vif->drv_priv;
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- if (!vif->bss_conf.assoc)
- break;
- ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
- break;
- case NL80211_IFTYPE_AP:
- /* Beacon transmission is re-enabled by aphy->state change */
- break;
- default:
- break;
- }
-}
-
-/* caller must hold wiphy_lock */
-static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
-{
- ieee80211_iterate_active_interfaces_atomic(aphy->hw,
- ath9k_unpause_iter, aphy);
- aphy->state = ATH_WIPHY_ACTIVE;
- ieee80211_wake_queues(aphy->hw);
- return 0;
-}
-
-int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
-{
- int ret;
- spin_lock_bh(&aphy->sc->wiphy_lock);
- ret = __ath9k_wiphy_unpause(aphy);
- spin_unlock_bh(&aphy->sc->wiphy_lock);
- return ret;
-}
-
-static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
-{
- int i;
- if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
- sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
- sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
- }
-}
-
-/* caller must hold wiphy_lock */
-static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
-{
- int i;
- if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
- __ath9k_wiphy_pause(sc->pri_wiphy);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
- __ath9k_wiphy_pause(sc->sec_wiphy[i]);
- }
-}
-
-int ath9k_wiphy_select(struct ath_wiphy *aphy)
-{
- struct ath_softc *sc = aphy->sc;
- bool now;
-
- spin_lock_bh(&sc->wiphy_lock);
- if (__ath9k_wiphy_scanning(sc)) {
- /*
- * For now, we are using mac80211 sw scan and it expects to
- * have full control over channel changes, so avoid wiphy
- * scheduling during a scan. This could be optimized if the
- * scanning control were moved into the driver.
- */
- spin_unlock_bh(&sc->wiphy_lock);
- return -EBUSY;
- }
- if (__ath9k_wiphy_pausing(sc)) {
- if (sc->wiphy_select_failures == 0)
- sc->wiphy_select_first_fail = jiffies;
- sc->wiphy_select_failures++;
- if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
- {
- printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
- "out; disable/enable hw to recover\n");
- __ath9k_wiphy_mark_all_paused(sc);
- /*
- * TODO: this workaround to fix hardware is unlikely to
- * be specific to virtual wiphy changes. It can happen
- * on normal channel change, too, and as such, this
- * should really be made more generic. For example,
- * trigger radio disable/enable on GTT interrupt burst
- * (say, 10 GTT interrupts received without any TX
- * frame being completed)
- */
- spin_unlock_bh(&sc->wiphy_lock);
- ath_radio_disable(sc, aphy->hw);
- ath_radio_enable(sc, aphy->hw);
- /* Only the primary wiphy hw is used for queuing work */
- ieee80211_queue_work(aphy->sc->hw,
- &aphy->sc->chan_work);
- return -EBUSY; /* previous select still in progress */
- }
- spin_unlock_bh(&sc->wiphy_lock);
- return -EBUSY; /* previous select still in progress */
- }
- sc->wiphy_select_failures = 0;
-
- /* Store the new channel */
- sc->chan_idx = aphy->chan_idx;
- sc->chan_is_ht = aphy->chan_is_ht;
- sc->next_wiphy = aphy;
-
- __ath9k_wiphy_pause_all(sc);
- now = !__ath9k_wiphy_pausing(aphy->sc);
- spin_unlock_bh(&sc->wiphy_lock);
-
- if (now) {
- /* Ready to request channel change immediately */
- ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
- }
-
- /*
- * wiphys will be unpaused in ath9k_tx_status() once channel has been
- * changed if any wiphy needs time to become paused.
- */
-
- return 0;
-}
-
-bool ath9k_wiphy_started(struct ath_softc *sc)
-{
- int i;
- spin_lock_bh(&sc->wiphy_lock);
- if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
- spin_unlock_bh(&sc->wiphy_lock);
- return true;
- }
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
- spin_unlock_bh(&sc->wiphy_lock);
- return true;
- }
- }
- spin_unlock_bh(&sc->wiphy_lock);
- return false;
-}
-
-static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
- struct ath_wiphy *selected)
-{
- if (selected->state == ATH_WIPHY_SCAN) {
- if (aphy == selected)
- return;
- /*
- * Pause all other wiphys for the duration of the scan even if
- * they are on the current channel now.
- */
- } else if (aphy->chan_idx == selected->chan_idx)
- return;
- aphy->state = ATH_WIPHY_PAUSED;
- ieee80211_stop_queues(aphy->hw);
-}
-
-void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
- struct ath_wiphy *selected)
-{
- int i;
- spin_lock_bh(&sc->wiphy_lock);
- if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
- ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
- ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
- }
- spin_unlock_bh(&sc->wiphy_lock);
-}
-
-void ath9k_wiphy_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- wiphy_work.work);
- struct ath_wiphy *aphy = NULL;
- bool first = true;
-
- spin_lock_bh(&sc->wiphy_lock);
-
- if (sc->wiphy_scheduler_int == 0) {
- /* wiphy scheduler is disabled */
- spin_unlock_bh(&sc->wiphy_lock);
- return;
- }
-
-try_again:
- sc->wiphy_scheduler_index++;
- while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
- aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
- if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
- break;
-
- sc->wiphy_scheduler_index++;
- aphy = NULL;
- }
- if (aphy == NULL) {
- sc->wiphy_scheduler_index = 0;
- if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
- if (first) {
- first = false;
- goto try_again;
- }
- /* No wiphy is ready to be scheduled */
- } else
- aphy = sc->pri_wiphy;
- }
-
- spin_unlock_bh(&sc->wiphy_lock);
-
- if (aphy &&
- aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
- ath9k_wiphy_select(aphy)) {
- printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
- "change\n");
- }
-
- ieee80211_queue_delayed_work(sc->hw,
- &sc->wiphy_work,
- sc->wiphy_scheduler_int);
-}
-
-void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
-{
- cancel_delayed_work_sync(&sc->wiphy_work);
- sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
- if (sc->wiphy_scheduler_int)
- ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
- sc->wiphy_scheduler_int);
-}
-
-/* caller must hold wiphy_lock */
-bool ath9k_all_wiphys_idle(struct ath_softc *sc)
-{
- unsigned int i;
- if (!sc->pri_wiphy->idle)
- return false;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (!aphy)
- continue;
- if (!aphy->idle)
- return false;
- }
- return true;
-}
-
-/* caller must hold wiphy_lock */
-void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
-{
- struct ath_softc *sc = aphy->sc;
-
- aphy->idle = idle;
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
- "Marking %s as %s\n",
- wiphy_name(aphy->hw->wiphy),
- idle ? "idle" : "not-idle");
-}
-/* Only bother starting a queue on an active virtual wiphy */
-bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
-{
- struct ieee80211_hw *hw = sc->pri_wiphy->hw;
- unsigned int i;
- bool txq_started = false;
-
- spin_lock_bh(&sc->wiphy_lock);
-
- /* Start the primary wiphy */
- if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
- ieee80211_wake_queue(hw, skb_queue);
- txq_started = true;
- goto unlock;
- }
-
- /* Now start the secondary wiphy queues */
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (!aphy)
- continue;
- if (aphy->state != ATH_WIPHY_ACTIVE)
- continue;
-
- hw = aphy->hw;
- ieee80211_wake_queue(hw, skb_queue);
- txq_started = true;
- break;
- }
-
-unlock:
- spin_unlock_bh(&sc->wiphy_lock);
- return txq_started;
-}
-
-/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
-void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
-{
- struct ieee80211_hw *hw = sc->pri_wiphy->hw;
- unsigned int i;
-
- spin_lock_bh(&sc->wiphy_lock);
-
- /* Stop the primary wiphy */
- ieee80211_stop_queue(hw, skb_queue);
-
- /* Now stop the secondary wiphy queues */
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (!aphy)
- continue;
- hw = aphy->hw;
- ieee80211_stop_queue(hw, skb_queue);
- }
- spin_unlock_bh(&sc->wiphy_lock);
-}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 93a8bda09c25..d3d24904f62f 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -120,15 +120,21 @@ void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
kfree(priv->wmi);
}
-void ath9k_wmi_tasklet(unsigned long data)
+void ath9k_swba_tasklet(unsigned long data)
{
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
- struct ath_common *common = ath9k_hw_common(priv->ah);
-
- ath_print(common, ATH_DBG_WMI, "SWBA Event received\n");
ath9k_htc_swba(priv, priv->wmi->beacon_pending);
+}
+void ath9k_fatal_work(struct work_struct *work)
+{
+ struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
+ fatal_work);
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+
+ ath_dbg(common, ATH_DBG_FATAL, "FATAL Event received, resetting device\n");
+ ath9k_htc_reset(priv);
}
static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
@@ -163,7 +169,11 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
switch (cmd_id) {
case WMI_SWBA_EVENTID:
wmi->beacon_pending = *(u8 *)wmi_event;
- tasklet_schedule(&wmi->drv_priv->wmi_tasklet);
+ tasklet_schedule(&wmi->drv_priv->swba_tasklet);
+ break;
+ case WMI_FATAL_EVENTID:
+ ieee80211_queue_work(wmi->drv_priv->hw,
+ &wmi->drv_priv->fatal_work);
break;
case WMI_TXRATE_EVENTID:
#ifdef CONFIG_ATH9K_HTC_DEBUGFS
@@ -250,7 +260,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
int time_left, ret = 0;
unsigned long flags;
- if (wmi->drv_priv->op_flags & OP_UNPLUGGED)
+ if (ah->ah_flags & AH_UNPLUGGED)
return 0;
skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
@@ -286,9 +296,9 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
if (!time_left) {
- ath_print(common, ATH_DBG_WMI,
- "Timeout waiting for WMI command: %s\n",
- wmi_cmd_to_name(cmd_id));
+ ath_dbg(common, ATH_DBG_WMI,
+ "Timeout waiting for WMI command: %s\n",
+ wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
return -ETIMEDOUT;
}
@@ -298,8 +308,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
return 0;
out:
- ath_print(common, ATH_DBG_WMI,
- "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
+ ath_dbg(common, ATH_DBG_WMI,
+ "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
kfree_skb(skb);
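
The wmi.c change above keeps the SWBA path in a tasklet but moves FATAL-event handling into a work item queued with ieee80211_queue_work(), presumably because the reset path sleeps, which a tasklet must not do. A rough user-space analogue of "queue from the fast path, reset from a sleepable worker" is sketched below using pthreads; the names are invented and this is not driver code.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int fatal_pending;

/* Fast path: may not block, only records the event and wakes the worker. */
static void fatal_event_from_fast_path(void)
{
	pthread_mutex_lock(&lock);
	fatal_pending = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

/* Slow path: allowed to sleep, does the heavy "reset". */
static void *fatal_worker(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!fatal_pending)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	sleep(1);			/* stands in for the device reset */
	printf("device reset done\n");
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, fatal_worker, NULL);
	fatal_event_from_fast_path();
	pthread_join(t, NULL);
	return 0;
}

Build with cc -pthread; the sleep(1) merely stands in for the slow reset that the driver performs from process context.
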
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index ac61074af8ac..42084277522d 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -117,7 +117,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
u8 *cmd_buf, u32 cmd_len,
u8 *rsp_buf, u32 rsp_len,
u32 timeout);
-void ath9k_wmi_tasklet(unsigned long data);
+void ath9k_swba_tasklet(unsigned long data);
+void ath9k_fatal_work(struct work_struct *work);
#define WMI_CMD(_wmi_cmd) \
do { \
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index f2ade2402ce2..ef22096d40c9 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -19,7 +19,6 @@
#define BITS_PER_BYTE 8
#define OFDM_PLCP_BITS 22
-#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define L_STF 8
#define L_LTF 8
@@ -32,7 +31,6 @@
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
-#define OFDM_SIFS_TIME 16
static u16 bits_per_symbol[][2] = {
/* 20MHz 40MHz */
@@ -48,19 +46,18 @@ static u16 bits_per_symbol[][2] = {
#define IS_HT_RATE(_rate) ((_rate) & 0x80)
-static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid,
- struct list_head *bf_head);
+static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+ struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head);
-static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
-static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_tx_status *ts, int txok);
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
- int nbad, int txok, bool update_rc);
+static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
+static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, int nframes, int nbad,
+ int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
int seqno);
@@ -124,7 +121,7 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
- struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
+ struct ath_txq *txq = tid->ac->txq;
WARN_ON(!tid->paused);
@@ -140,12 +137,21 @@ unlock:
spin_unlock_bh(&txq->axq_lock);
}
+static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ BUILD_BUG_ON(sizeof(struct ath_frame_info) >
+ sizeof(tx_info->rate_driver_data));
+ return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
+}
+
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
- struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
+ struct ath_txq *txq = tid->ac->txq;
struct ath_buf *bf;
struct list_head bf_head;
struct ath_tx_status ts;
+ struct ath_frame_info *fi;
INIT_LIST_HEAD(&bf_head);
@@ -156,12 +162,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
list_move_tail(&bf->list, &bf_head);
- if (bf_isretried(bf)) {
- ath_tx_update_baw(sc, tid, bf->bf_seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
+ spin_unlock_bh(&txq->axq_lock);
+ fi = get_frame_info(bf->bf_mpdu);
+ if (fi->retries) {
+ ath_tx_update_baw(sc, tid, fi->seqno);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
} else {
- ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
+ ath_tx_send_normal(sc, txq, NULL, &bf_head);
}
+ spin_lock_bh(&txq->axq_lock);
}
spin_unlock_bh(&txq->axq_lock);
@@ -184,14 +193,11 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
}
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- struct ath_buf *bf)
+ u16 seqno)
{
int index, cindex;
- if (bf_isretried(bf))
- return;
-
- index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
+ index = ATH_BA_INDEX(tid->seq_start, seqno);
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
__set_bit(cindex, tid->tx_buf);
@@ -215,6 +221,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf;
struct list_head bf_head;
struct ath_tx_status ts;
+ struct ath_frame_info *fi;
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
@@ -226,8 +233,9 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
list_move_tail(&bf->list, &bf_head);
- if (bf_isretried(bf))
- ath_tx_update_baw(sc, tid, bf->bf_seqno);
+ fi = get_frame_info(bf->bf_mpdu);
+ if (fi->retries)
+ ath_tx_update_baw(sc, tid, fi->seqno);
spin_unlock(&txq->axq_lock);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
@@ -239,16 +247,15 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_buf *bf)
+ struct sk_buff *skb)
{
- struct sk_buff *skb;
+ struct ath_frame_info *fi = get_frame_info(skb);
struct ieee80211_hdr *hdr;
- bf->bf_state.bf_type |= BUF_RETRY;
- bf->bf_retries++;
TX_STAT_INC(txq->axq_qnum, a_retries);
+ if (fi->retries++ > 0)
+ return;
- skb = bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}
@@ -289,7 +296,6 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
ATH_TXBUF_RESET(tbf);
- tbf->aphy = bf->aphy;
tbf->bf_mpdu = bf->bf_mpdu;
tbf->bf_buf_addr = bf->bf_buf_addr;
memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
@@ -298,14 +304,46 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
return tbf;
}
+static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, int txok,
+ int *nframes, int *nbad)
+{
+ struct ath_frame_info *fi;
+ u16 seq_st = 0;
+ u32 ba[WME_BA_BMP_SIZE >> 5];
+ int ba_index;
+ int isaggr = 0;
+
+ *nbad = 0;
+ *nframes = 0;
+
+ isaggr = bf_isaggr(bf);
+ if (isaggr) {
+ seq_st = ts->ts_seqnum;
+ memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
+ }
+
+ while (bf) {
+ fi = get_frame_info(bf->bf_mpdu);
+ ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
+
+ (*nframes)++;
+ if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
+ (*nbad)++;
+
+ bf = bf->bf_next;
+ }
+}
+
+
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct list_head *bf_q,
- struct ath_tx_status *ts, int txok)
+ struct ath_tx_status *ts, int txok, bool retry)
{
struct ath_node *an = NULL;
struct sk_buff *skb;
struct ieee80211_sta *sta;
- struct ieee80211_hw *hw;
+ struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
struct ath_atx_tid *tid = NULL;
@@ -316,16 +354,16 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
bool rc_update = true;
struct ieee80211_tx_rate rates[4];
+ struct ath_frame_info *fi;
int nframes;
+ u8 tidno;
skb = bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
tx_info = IEEE80211_SKB_CB(skb);
- hw = bf->aphy->hw;
memcpy(rates, tx_info->control.rates, sizeof(rates));
- nframes = bf->bf_nframes;
rcu_read_lock();
@@ -342,7 +380,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
!bf->bf_stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
- ath_tx_rc_status(bf, ts, 1, 0, false);
+ ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
0, 0);
@@ -352,14 +390,15 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
an = (struct ath_node *)sta->drv_priv;
- tid = ATH_AN_2_TID(an, bf->bf_tidno);
+ tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+ tid = ATH_AN_2_TID(an, tidno);
/*
* The hardware occasionally sends a tx status for the wrong TID.
* In this case, the BA status cannot be considered valid and all
* subframes need to be retransmitted
*/
- if (bf->bf_tidno != ts->tid)
+ if (tidno != ts->tid)
txok = false;
isaggr = bf_isaggr(bf);
@@ -385,15 +424,16 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_pending);
INIT_LIST_HEAD(&bf_head);
- nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
+ ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
while (bf) {
- txfail = txpending = 0;
+ txfail = txpending = sendbar = 0;
bf_next = bf->bf_next;
skb = bf->bf_mpdu;
tx_info = IEEE80211_SKB_CB(skb);
+ fi = get_frame_info(skb);
- if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
+ if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
/* transmit completion, subframe is
* acked by block ack */
acked_cnt++;
@@ -401,10 +441,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
/* transmit completion */
acked_cnt++;
} else {
- if (!(tid->state & AGGR_CLEANUP) &&
- !bf_last->bf_tx_aborted) {
- if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
- ath_tx_set_retry(sc, txq, bf);
+ if (!(tid->state & AGGR_CLEANUP) && retry) {
+ if (fi->retries < ATH_MAX_SW_RETRIES) {
+ ath_tx_set_retry(sc, txq, bf->bf_mpdu);
txpending = 1;
} else {
bf->bf_state.bf_type |= BUF_XRETRY;
@@ -442,16 +481,15 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* block-ack window
*/
spin_lock_bh(&txq->axq_lock);
- ath_tx_update_baw(sc, tid, bf->bf_seqno);
+ ath_tx_update_baw(sc, tid, fi->seqno);
spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
- bf->bf_nframes = nframes;
- ath_tx_rc_status(bf, ts, nbad, txok, true);
+ ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
rc_update = false;
} else {
- ath_tx_rc_status(bf, ts, nbad, txok, false);
+ ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
}
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
@@ -470,14 +508,13 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
*/
if (!tbf) {
spin_lock_bh(&txq->axq_lock);
- ath_tx_update_baw(sc, tid,
- bf->bf_seqno);
+ ath_tx_update_baw(sc, tid, fi->seqno);
spin_unlock_bh(&txq->axq_lock);
bf->bf_state.bf_type |=
BUF_XRETRY;
- ath_tx_rc_status(bf, ts, nbad,
- 0, false);
+ ath_tx_rc_status(sc, bf, ts, nframes,
+ nbad, 0, false);
ath_tx_complete_buf(sc, bf, txq,
&bf_head,
ts, 0, 0);
@@ -526,8 +563,11 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
rcu_read_unlock();
- if (needreset)
+ if (needreset) {
+ spin_unlock_bh(&sc->sc_pcu_lock);
ath_reset(sc, false);
+ spin_lock_bh(&sc->sc_pcu_lock);
+ }
}
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
@@ -611,6 +651,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
u16 minlen;
u8 flags, rix;
int width, streams, half_gi, ndelim, mindelim;
+ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
/* Select standard number of delimiters based on frame length alone */
ndelim = ATH_AGGR_GET_NDELIM(frmlen);
@@ -621,7 +662,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
* TODO - this could be improved to be dependent on the rate.
* The hardware can keep up at lower rates, but not higher rates
*/
- if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
+ if (fi->keyix != ATH9K_TXKEYIX_INVALID)
ndelim += ATH_AGGR_ENCRYPTDELIM;
/*
@@ -665,7 +706,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
- struct list_head *bf_q)
+ struct list_head *bf_q,
+ int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
struct ath_buf *bf, *bf_first, *bf_prev = NULL;
@@ -674,14 +716,16 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
al_delta, h_baw = tid->baw_size / 2;
enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
struct ieee80211_tx_info *tx_info;
+ struct ath_frame_info *fi;
bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
do {
bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
+ fi = get_frame_info(bf->bf_mpdu);
/* do not step over block-ack window */
- if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
+ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
status = ATH_AGGR_BAW_CLOSED;
break;
}
@@ -692,7 +736,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
}
/* do not exceed aggregation limit */
- al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
+ al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
if (nframes &&
(aggr_limit < (al + bpad + al_delta + prev_al))) {
@@ -719,14 +763,15 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
* Get the delimiters needed to meet the MPDU
* density for this node.
*/
- ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
+ ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
bpad = PADBYTES(al_delta) + (ndelim << 2);
bf->bf_next = NULL;
ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
/* link buffers of this frame to the aggregate */
- ath_tx_addto_baw(sc, tid, bf);
+ if (!fi->retries)
+ ath_tx_addto_baw(sc, tid, fi->seqno);
ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
list_move_tail(&bf->list, bf_q);
if (bf_prev) {
@@ -738,8 +783,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
} while (!list_empty(&tid->buf_q));
- bf_first->bf_al = al;
- bf_first->bf_nframes = nframes;
+ *aggr_len = al;
return status;
#undef PADBYTES
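/*
 * Illustrative sketch only: how the aggregate length bookkeeping in
 * ath_tx_form_aggr() adds up now that the total is returned through
 * *aggr_len.  Each MPDU contributes its frame length plus a fixed delimiter,
 * and the previous sub-frame is padded to a 4-byte boundary plus "ndelim"
 * extra 4-byte delimiters.  The constants and frame lengths are example
 * values, not the driver's.
 */
#include <stdio.h>

#define DELIM_SZ	4			/* bytes per MPDU delimiter */
#define PADBYTES(len)	((4 - ((len) % 4)) % 4)

int main(void)
{
	unsigned int frames[] = { 1534, 221, 1534 };	/* MPDU lengths */
	unsigned int al = 0, bpad = 0, ndelim = 2, i;

	for (i = 0; i < 3; i++) {
		unsigned int al_delta = DELIM_SZ + frames[i];

		/* pad the previous sub-frame, then append this one */
		al += bpad + al_delta;
		bpad = PADBYTES(al_delta) + (ndelim << 2);
	}
	printf("aggregate length: %u bytes\n", al);
	return 0;
}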
@@ -750,7 +794,9 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
{
struct ath_buf *bf;
enum ATH_AGGR_STATUS status;
+ struct ath_frame_info *fi;
struct list_head bf_q;
+ int aggr_len;
do {
if (list_empty(&tid->buf_q))
@@ -758,7 +804,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_q);
- status = ath_tx_form_aggr(sc, txq, tid, &bf_q);
+ status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
/*
* no frames picked up to be aggregated;
@@ -771,18 +817,20 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
/* if only one frame, send as non-aggregate */
- if (bf->bf_nframes == 1) {
+ if (bf == bf->bf_lastbf) {
+ fi = get_frame_info(bf->bf_mpdu);
+
bf->bf_state.bf_type &= ~BUF_AGGR;
ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
- ath_buf_set_rate(sc, bf);
+ ath_buf_set_rate(sc, bf, fi->framelen);
ath_tx_txqaddbuf(sc, txq, &bf_q);
continue;
}
/* setup first desc of aggregate */
bf->bf_state.bf_type |= BUF_AGGR;
- ath_buf_set_rate(sc, bf);
- ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
+ ath_buf_set_rate(sc, bf, aggr_len);
+ ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);
/* anchor last desc of aggregate */
ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
@@ -790,7 +838,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_txqaddbuf(sc, txq, &bf_q);
TX_STAT_INC(txq->axq_qnum, a_aggr);
- } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
+ } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
status != ATH_AGGR_BAW_CLOSED);
}
@@ -808,7 +856,10 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
txtid->state |= AGGR_ADDBA_PROGRESS;
txtid->paused = true;
- *ssn = txtid->seq_start;
+ *ssn = txtid->seq_start = txtid->seq_next;
+
+ memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
+ txtid->baw_head = txtid->baw_tail = 0;
return 0;
}
@@ -817,7 +868,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
- struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
+ struct ath_txq *txq = txtid->ac->txq;
if (txtid->state & AGGR_CLEANUP)
return;
@@ -888,10 +939,16 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_tx_queue_info qi;
- int qnum, i;
+ static const int subtype_txq_to_hwq[] = {
+ [WME_AC_BE] = ATH_TXQ_AC_BE,
+ [WME_AC_BK] = ATH_TXQ_AC_BK,
+ [WME_AC_VI] = ATH_TXQ_AC_VI,
+ [WME_AC_VO] = ATH_TXQ_AC_VO,
+ };
+ int axq_qnum, i;
memset(&qi, 0, sizeof(qi));
- qi.tqi_subtype = subtype;
+ qi.tqi_subtype = subtype_txq_to_hwq[subtype];
qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
@@ -922,40 +979,40 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
TXQ_FLAG_TXDESCINT_ENABLE;
}
- qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
- if (qnum == -1) {
+ axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
+ if (axq_qnum == -1) {
/*
* NB: don't print a message, this happens
* normally on parts with too few tx queues
*/
return NULL;
}
- if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
- ath_print(common, ATH_DBG_FATAL,
- "qnum %u out of range, max %u!\n",
- qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
- ath9k_hw_releasetxqueue(ah, qnum);
+ if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
+ ath_err(common, "qnum %u out of range, max %zu!\n",
+ axq_qnum, ARRAY_SIZE(sc->tx.txq));
+ ath9k_hw_releasetxqueue(ah, axq_qnum);
return NULL;
}
- if (!ATH_TXQ_SETUP(sc, qnum)) {
- struct ath_txq *txq = &sc->tx.txq[qnum];
+ if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
+ struct ath_txq *txq = &sc->tx.txq[axq_qnum];
- txq->axq_class = subtype;
- txq->axq_qnum = qnum;
+ txq->axq_qnum = axq_qnum;
+ txq->mac80211_qnum = -1;
txq->axq_link = NULL;
INIT_LIST_HEAD(&txq->axq_q);
INIT_LIST_HEAD(&txq->axq_acq);
spin_lock_init(&txq->axq_lock);
txq->axq_depth = 0;
+ txq->axq_ampdu_depth = 0;
txq->axq_tx_inprogress = false;
- sc->tx.txqsetup |= 1<<qnum;
+ sc->tx.txqsetup |= 1<<axq_qnum;
txq->txq_headidx = txq->txq_tailidx = 0;
for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
INIT_LIST_HEAD(&txq->txq_fifo[i]);
INIT_LIST_HEAD(&txq->txq_fifo_pending);
}
- return &sc->tx.txq[qnum];
+ return &sc->tx.txq[axq_qnum];
}
int ath_txq_update(struct ath_softc *sc, int qnum,
@@ -985,8 +1042,8 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
qi.tqi_readyTime = qinfo->tqi_readyTime;
if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Unable to update hardware queue %u!\n", qnum);
+ ath_err(ath9k_hw_common(sc->sc_ah),
+ "Unable to update hardware queue %u!\n", qnum);
error = -EIO;
} else {
ath9k_hw_resettxqueue(ah, qnum);
@@ -998,6 +1055,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
int ath_cabq_update(struct ath_softc *sc)
{
struct ath9k_tx_queue_info qi;
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
int qnum = sc->beacon.cabq->axq_qnum;
ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
@@ -1009,13 +1067,19 @@ int ath_cabq_update(struct ath_softc *sc)
else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
- qi.tqi_readyTime = (sc->beacon_interval *
+ qi.tqi_readyTime = (cur_conf->beacon_interval *
sc->config.cabqReadytime) / 100;
ath_txq_update(sc, qnum, &qi);
return 0;
}
+static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+}
+
/*
* Drain a given TX queue (could be Beacon or Data)
*
@@ -1062,8 +1126,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
}
lastbf = bf->bf_lastbf;
- if (!retry_tx)
- lastbf->bf_tx_aborted = true;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
list_cut_position(&bf_head,
@@ -1076,11 +1138,13 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
}
txq->axq_depth--;
-
+ if (bf_is_ampdu_not_probing(bf))
+ txq->axq_ampdu_depth--;
spin_unlock_bh(&txq->axq_lock);
if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
+ retry_tx);
else
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
}
@@ -1101,7 +1165,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head,
- &ts, 0);
+ &ts, 0, retry_tx);
else
ath_tx_complete_buf(sc, bf, txq, &bf_head,
&ts, 0, 0);
@@ -1120,7 +1184,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
}
}
-void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
+bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -1128,39 +1192,36 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
int i, npend = 0;
if (sc->sc_flags & SC_OP_INVALID)
- return;
+ return true;
- /* Stop beacon queue */
- ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+ ath9k_hw_abort_tx_dma(ah);
- /* Stop data queues */
+ /* Check if any queue remains active */
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (ATH_TXQ_SETUP(sc, i)) {
- txq = &sc->tx.txq[i];
- ath9k_hw_stoptxdma(ah, txq->axq_qnum);
- npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
- }
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
}
- if (npend) {
- int r;
+ if (npend)
+ ath_err(common, "Failed to stop TX DMA!\n");
- ath_print(common, ATH_DBG_FATAL,
- "Failed to stop TX DMA. Resetting hardware!\n");
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
- spin_lock_bh(&sc->sc_resetlock);
- r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
- if (r)
- ath_print(common, ATH_DBG_FATAL,
- "Unable to reset hardware; reset status %d\n",
- r);
- spin_unlock_bh(&sc->sc_resetlock);
+ /*
+ * The caller will resume queues with ieee80211_wake_queues.
+ * Mark the queue as not stopped to prevent ath_tx_complete
+ * from waking the queue too early.
+ */
+ txq = &sc->tx.txq[i];
+ txq->stopped = false;
+ ath_draintxq(sc, txq, retry_tx);
}
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (ATH_TXQ_SETUP(sc, i))
- ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
- }
+ return !npend;
}
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
@@ -1169,65 +1230,60 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
+/* For each axq_acq entry, for each tid, try to schedule packets
+ * for transmit until ampdu_depth has reached min Q depth.
+ */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
- struct ath_atx_ac *ac;
- struct ath_atx_tid *tid;
+ struct ath_atx_ac *ac, *ac_tmp, *last_ac;
+ struct ath_atx_tid *tid, *last_tid;
- if (list_empty(&txq->axq_acq))
+ if (list_empty(&txq->axq_acq) ||
+ txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
return;
ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
- list_del(&ac->list);
- ac->sched = false;
-
- do {
- if (list_empty(&ac->tid_q))
- return;
+ last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
- tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
- list_del(&tid->list);
- tid->sched = false;
+ list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
+ last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
+ list_del(&ac->list);
+ ac->sched = false;
- if (tid->paused)
- continue;
+ while (!list_empty(&ac->tid_q)) {
+ tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
+ list);
+ list_del(&tid->list);
+ tid->sched = false;
- ath_tx_sched_aggr(sc, txq, tid);
+ if (tid->paused)
+ continue;
- /*
- * add tid to round-robin queue if more frames
- * are pending for the tid
- */
- if (!list_empty(&tid->buf_q))
- ath_tx_queue_tid(txq, tid);
+ ath_tx_sched_aggr(sc, txq, tid);
- break;
- } while (!list_empty(&ac->tid_q));
+ /*
+ * add tid to round-robin queue if more frames
+ * are pending for the tid
+ */
+ if (!list_empty(&tid->buf_q))
+ ath_tx_queue_tid(txq, tid);
- if (!list_empty(&ac->tid_q)) {
- if (!ac->sched) {
- ac->sched = true;
- list_add_tail(&ac->list, &txq->axq_acq);
+ if (tid == last_tid ||
+ txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+ break;
}
- }
-}
-int ath_tx_setup(struct ath_softc *sc, int haltype)
-{
- struct ath_txq *txq;
+ if (!list_empty(&ac->tid_q)) {
+ if (!ac->sched) {
+ ac->sched = true;
+ list_add_tail(&ac->list, &txq->axq_acq);
+ }
+ }
- if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "HAL AC %u out of range, max %zu!\n",
- haltype, ARRAY_SIZE(sc->tx.hwq_map));
- return 0;
+ if (ac == last_ac ||
+ txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+ return;
}
- txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
- if (txq != NULL) {
- sc->tx.hwq_map[haltype] = txq->axq_qnum;
- return 1;
- } else
- return 0;
}
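/*
 * Illustrative sketch only: the shape of the round-robin walk the reworked
 * ath_txq_schedule() performs.  The real code iterates linked lists of ACs
 * and TIDs under axq_lock; plain arrays stand in for them here, and
 * MIN_QDEPTH is an assumed stand-in for ATH_AGGR_MIN_QDEPTH.  The point is
 * the early exit: scheduling stops as soon as enough aggregates are queued
 * to hardware, instead of draining one AC completely.
 */
#include <stdio.h>

#define NUM_AC		4
#define TIDS_PER_AC	2
#define MIN_QDEPTH	2

struct tid { int pending; };			/* frames waiting in software */

static int hw_depth;				/* aggregates handed to hardware */

static void sched_aggr(struct tid *t)
{
	if (t->pending) {			/* form one aggregate */
		t->pending--;
		hw_depth++;
	}
}

int main(void)
{
	struct tid tids[NUM_AC][TIDS_PER_AC] = {
		{ {3}, {0} }, { {1}, {2} }, { {0}, {0} }, { {4}, {0} },
	};
	int ac, i;

	for (ac = 0; ac < NUM_AC && hw_depth < MIN_QDEPTH; ac++)
		for (i = 0; i < TIDS_PER_AC && hw_depth < MIN_QDEPTH; i++)
			sched_aggr(&tids[ac][i]);

	printf("hardware queue depth: %d\n", hw_depth);	/* stops at 2 */
	return 0;
}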
/***********/
@@ -1255,8 +1311,8 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
bf = list_first_entry(head, struct ath_buf, list);
- ath_print(common, ATH_DBG_QUEUE,
- "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
+ ath_dbg(common, ATH_DBG_QUEUE,
+ "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
@@ -1264,49 +1320,49 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
return;
}
if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
- ath_print(common, ATH_DBG_XMIT,
- "Initializing tx fifo %d which "
- "is non-empty\n",
- txq->txq_headidx);
+ ath_dbg(common, ATH_DBG_XMIT,
+ "Initializing tx fifo %d which is non-empty\n",
+ txq->txq_headidx);
INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
+ TX_STAT_INC(txq->axq_qnum, puttxbuf);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
- ath_print(common, ATH_DBG_XMIT,
- "TXDP[%u] = %llx (%p)\n",
- txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
+ ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
+ txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
} else {
list_splice_tail_init(head, &txq->axq_q);
if (txq->axq_link == NULL) {
+ TX_STAT_INC(txq->axq_qnum, puttxbuf);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
- ath_print(common, ATH_DBG_XMIT,
- "TXDP[%u] = %llx (%p)\n",
- txq->axq_qnum, ito64(bf->bf_daddr),
- bf->bf_desc);
+ ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
+ txq->axq_qnum, ito64(bf->bf_daddr),
+ bf->bf_desc);
} else {
*txq->axq_link = bf->bf_daddr;
- ath_print(common, ATH_DBG_XMIT,
- "link[%u] (%p)=%llx (%p)\n",
- txq->axq_qnum, txq->axq_link,
- ito64(bf->bf_daddr), bf->bf_desc);
+ ath_dbg(common, ATH_DBG_XMIT,
+ "link[%u] (%p)=%llx (%p)\n",
+ txq->axq_qnum, txq->axq_link,
+ ito64(bf->bf_daddr), bf->bf_desc);
}
ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
&txq->axq_link);
+ TX_STAT_INC(txq->axq_qnum, txstart);
ath9k_hw_txstart(ah, txq->axq_qnum);
}
txq->axq_depth++;
+ if (bf_is_ampdu_not_probing(bf))
+ txq->axq_ampdu_depth++;
}
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
- struct list_head *bf_head,
- struct ath_tx_control *txctl)
+ struct ath_buf *bf, struct ath_tx_control *txctl)
{
- struct ath_buf *bf;
+ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+ struct list_head bf_head;
- bf = list_first_entry(bf_head, struct ath_buf, list);
bf->bf_state.bf_type |= BUF_AMPDU;
- TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
/*
* Do not queue to h/w when any of the following conditions is true:
@@ -1316,56 +1372,49 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
* - h/w queue depth exceeds low water mark
*/
if (!list_empty(&tid->buf_q) || tid->paused ||
- !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
- txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
+ !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
+ txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
/*
* Add this frame to software queue for scheduling later
* for aggregation.
*/
- list_move_tail(&bf->list, &tid->buf_q);
+ TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
+ list_add_tail(&bf->list, &tid->buf_q);
ath_tx_queue_tid(txctl->txq, tid);
return;
}
+ INIT_LIST_HEAD(&bf_head);
+ list_add(&bf->list, &bf_head);
+
/* Add sub-frame to BAW */
- ath_tx_addto_baw(sc, tid, bf);
+ if (!fi->retries)
+ ath_tx_addto_baw(sc, tid, fi->seqno);
/* Queue to h/w without aggregation */
- bf->bf_nframes = 1;
+ TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
bf->bf_lastbf = bf;
- ath_buf_set_rate(sc, bf);
- ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
+ ath_buf_set_rate(sc, bf, fi->framelen);
+ ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
}
-static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid,
- struct list_head *bf_head)
+static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+ struct list_head *bf_head)
{
+ struct ath_frame_info *fi;
struct ath_buf *bf;
bf = list_first_entry(bf_head, struct ath_buf, list);
bf->bf_state.bf_type &= ~BUF_AMPDU;
/* update starting sequence number for subsequent ADDBA request */
- INCR(tid->seq_start, IEEE80211_SEQ_MAX);
-
- bf->bf_nframes = 1;
- bf->bf_lastbf = bf;
- ath_buf_set_rate(sc, bf);
- ath_tx_txqaddbuf(sc, txq, bf_head);
- TX_STAT_INC(txq->axq_qnum, queued);
-}
-
-static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
- struct list_head *bf_head)
-{
- struct ath_buf *bf;
-
- bf = list_first_entry(bf_head, struct ath_buf, list);
+ if (tid)
+ INCR(tid->seq_start, IEEE80211_SEQ_MAX);
bf->bf_lastbf = bf;
- bf->bf_nframes = 1;
- ath_buf_set_rate(sc, bf);
+ fi = get_frame_info(bf->bf_mpdu);
+ ath_buf_set_rate(sc, bf, fi->framelen);
ath_tx_txqaddbuf(sc, txq, bf_head);
TX_STAT_INC(txq->axq_qnum, queued);
}
@@ -1393,40 +1442,51 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
return htype;
}
-static void assign_aggr_tid_seqno(struct sk_buff *skb,
- struct ath_buf *bf)
+static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
+ int framelen)
{
+ struct ath_softc *sc = hw->priv;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = tx_info->control.sta;
+ struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
struct ieee80211_hdr *hdr;
+ struct ath_frame_info *fi = get_frame_info(skb);
struct ath_node *an;
struct ath_atx_tid *tid;
- __le16 fc;
- u8 *qc;
+ enum ath9k_key_type keytype;
+ u16 seqno = 0;
+ u8 tidno;
- if (!tx_info->control.sta)
- return;
+ keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
- an = (struct ath_node *)tx_info->control.sta->drv_priv;
hdr = (struct ieee80211_hdr *)skb->data;
- fc = hdr->frame_control;
+ if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
+ conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
- if (ieee80211_is_data_qos(fc)) {
- qc = ieee80211_get_qos_ctl(hdr);
- bf->bf_tidno = qc[0] & 0xf;
+ an = (struct ath_node *) sta->drv_priv;
+ tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+
+ /*
+ * Override seqno set by upper layer with the one
+ * in tx aggregation state.
+ */
+ tid = ATH_AN_2_TID(an, tidno);
+ seqno = tid->seq_next;
+ hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
+ INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}
- /*
- * For HT capable stations, we save tidno for later use.
- * We also override seqno set by upper layer with the one
- * in tx aggregation state.
- */
- tid = ATH_AN_2_TID(an, bf->bf_tidno);
- hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
- bf->bf_seqno = tid->seq_next;
- INCR(tid->seq_next, IEEE80211_SEQ_MAX);
+ memset(fi, 0, sizeof(*fi));
+ if (hw_key)
+ fi->keyix = hw_key->hw_key_idx;
+ else
+ fi->keyix = ATH9K_TXKEYIX_INVALID;
+ fi->keytype = keytype;
+ fi->framelen = framelen;
+ fi->seqno = seqno;
}
-static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
+static int setup_tx_flags(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
int flags = 0;
@@ -1437,7 +1497,7 @@ static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
flags |= ATH9K_TXDESC_NOACK;
- if (use_ldpc)
+ if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
flags |= ATH9K_TXDESC_LDPC;
return flags;
@@ -1449,13 +1509,11 @@ static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
* width - 0 for 20 MHz, 1 for 40 MHz
* half_gi - to use 4us v/s 3.6 us for symbol time
*/
-static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
+static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
int width, int half_gi, bool shortPreamble)
{
u32 nbits, nsymbits, duration, nsymbols;
- int streams, pktlen;
-
- pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
+ int streams;
/* find number of symbols: PLCP + data */
streams = HT_RC_2_STREAMS(rix);
@@ -1474,7 +1532,19 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
return duration;
}
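/*
 * Illustrative sketch only: the arithmetic behind ath_pkt_duration() now that
 * the caller passes the length explicitly (the aggregate length for an
 * A-MPDU, the single frame length otherwise).  PLCP_BITS, the rounding and
 * the bits-per-symbol figure used in the example are assumptions, not the
 * driver's rate tables.
 */
#include <stdio.h>

#define PLCP_BITS	22

static unsigned int pkt_duration_us(unsigned int pktlen, unsigned int nsymbits,
				    int half_gi)
{
	unsigned int nbits = (pktlen << 3) + PLCP_BITS;
	unsigned int nsymbols = (nbits + nsymbits - 1) / nsymbits;  /* round up */

	/* 4 us per OFDM symbol with a full guard interval, 3.6 us with SGI */
	return half_gi ? (nsymbols * 36 + 9) / 10 : nsymbols * 4;
}

int main(void)
{
	/* 1500-byte frame, one stream, an assumed 260 data bits per symbol */
	printf("%u us\n", pkt_duration_us(1500, 260, 0));
	return 0;
}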
-static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
+u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_channel *curchan = ah->curchan;
+ if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
+ (curchan->channelFlags & CHANNEL_5GHZ) &&
+ (chainmask == 0x7) && (rate < 0x90))
+ return 0x3;
+ else
+ return chainmask;
+}
+
+static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath9k_11n_rate_series series[4];
@@ -1514,7 +1584,6 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
rix = rates[i].idx;
series[i].Tries = rates[i].count;
- series[i].ChSel = common->tx_chainmask;
if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
@@ -1537,14 +1606,16 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
if (rates[i].flags & IEEE80211_TX_RC_MCS) {
/* MCS rates */
series[i].Rate = rix | 0x80;
- series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
+ series[i].ChSel = ath_txchainmask_reduction(sc,
+ common->tx_chainmask, series[i].Rate);
+ series[i].PktDuration = ath_pkt_duration(sc, rix, len,
is_40, is_sgi, is_sp);
if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
series[i].RateFlags |= ATH9K_RATESERIES_STBC;
continue;
}
- /* legcay rates */
+ /* legacy rates */
if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
!(rate->flags & IEEE80211_RATE_ERP_G))
phy = WLAN_RC_PHY_CCK;
@@ -1560,12 +1631,18 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
is_sp = false;
}
+ if (bf->bf_state.bfs_paprd)
+ series[i].ChSel = common->tx_chainmask;
+ else
+ series[i].ChSel = ath_txchainmask_reduction(sc,
+ common->tx_chainmask, series[i].Rate);
+
series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
- phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
+ phy, rate->bitrate * 100, len, rix, is_sp);
}
/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
- if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
+ if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
flags &= ~ATH9K_TXDESC_RTSENA;
/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
@@ -1582,67 +1659,27 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}
-static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
- struct sk_buff *skb,
- struct ath_tx_control *txctl)
+static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
+ struct ath_txq *txq,
+ struct sk_buff *skb)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int hdrlen;
- __le16 fc;
- int padpos, padsize;
- bool use_ldpc = false;
-
- tx_info->pad[0] = 0;
- switch (txctl->frame_type) {
- case ATH9K_IFT_NOT_INTERNAL:
- break;
- case ATH9K_IFT_PAUSE:
- tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
- /* fall through */
- case ATH9K_IFT_UNPAUSE:
- tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
- break;
- }
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- fc = hdr->frame_control;
-
- ATH_TXBUF_RESET(bf);
-
- bf->aphy = aphy;
- bf->bf_frmlen = skb->len + FCS_LEN;
- /* Remove the padding size from bf_frmlen, if any */
- padpos = ath9k_cmn_padpos(hdr->frame_control);
- padsize = padpos & 3;
- if (padsize && skb->len>padpos+padsize) {
- bf->bf_frmlen -= padsize;
- }
-
- if (!txctl->paprd && conf_is_ht(&hw->conf)) {
- bf->bf_state.bf_type |= BUF_HT;
- if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
- use_ldpc = true;
- }
-
- bf->bf_state.bfs_paprd = txctl->paprd;
- if (txctl->paprd)
- bf->bf_state.bfs_paprd_timestamp = jiffies;
- bf->bf_flags = setup_tx_flags(skb, use_ldpc);
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_frame_info *fi = get_frame_info(skb);
+ struct ath_buf *bf;
+ struct ath_desc *ds;
+ int frm_type;
- bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
- if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
- bf->bf_frmlen += tx_info->control.hw_key->icv_len;
- bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
- } else {
- bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
+ bf = ath_tx_get_buffer(sc);
+ if (!bf) {
+ ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
+ return NULL;
}
- if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
- (sc->sc_flags & SC_OP_TXAGGR))
- assign_aggr_tid_seqno(skb, bf);
+ ATH_TXBUF_RESET(bf);
+ bf->bf_flags = setup_tx_flags(skb);
bf->bf_mpdu = skb;
bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
@@ -1650,42 +1687,19 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
bf->bf_mpdu = NULL;
bf->bf_buf_addr = 0;
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "dma_mapping_error() on TX\n");
- return -ENOMEM;
+ ath_err(ath9k_hw_common(sc->sc_ah),
+ "dma_mapping_error() on TX\n");
+ ath_tx_return_buffer(sc, bf);
+ return NULL;
}
- bf->bf_tx_aborted = false;
-
- return 0;
-}
-
-/* FIXME: tx power */
-static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_tx_control *txctl)
-{
- struct sk_buff *skb = bf->bf_mpdu;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ath_node *an = NULL;
- struct list_head bf_head;
- struct ath_desc *ds;
- struct ath_atx_tid *tid;
- struct ath_hw *ah = sc->sc_ah;
- int frm_type;
- __le16 fc;
-
frm_type = get_hw_packet_type(skb);
- fc = hdr->frame_control;
-
- INIT_LIST_HEAD(&bf_head);
- list_add_tail(&bf->list, &bf_head);
ds = bf->bf_desc;
ath9k_hw_set_desc_link(ah, ds, 0);
- ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
- bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
+ ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
+ fi->keyix, fi->keytype, bf->bf_flags);
ath9k_hw_filltxdesc(ah, ds,
skb->len, /* segment length */
@@ -1693,109 +1707,79 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
true, /* last segment */
ds, /* first descriptor */
bf->bf_buf_addr,
- txctl->txq->axq_qnum);
-
- if (bf->bf_state.bfs_paprd)
- ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
-
- spin_lock_bh(&txctl->txq->axq_lock);
-
- if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
- tx_info->control.sta) {
- an = (struct ath_node *)tx_info->control.sta->drv_priv;
- tid = ATH_AN_2_TID(an, bf->bf_tidno);
+ txq->axq_qnum);
- if (!ieee80211_is_data_qos(fc)) {
- ath_tx_send_normal(sc, txctl->txq, &bf_head);
- goto tx_done;
- }
- if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
- /*
- * Try aggregation if it's a unicast data frame
- * and the destination is HT capable.
- */
- ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
- } else {
- /*
- * Send this frame as regular when ADDBA
- * exchange is neither complete nor pending.
- */
- ath_tx_send_ht_normal(sc, txctl->txq,
- tid, &bf_head);
- }
- } else {
- ath_tx_send_normal(sc, txctl->txq, &bf_head);
- }
-
-tx_done:
- spin_unlock_bh(&txctl->txq->axq_lock);
+ return bf;
}
-/* Upon failure caller should free skb */
-int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct ath_tx_control *txctl)
+/* FIXME: tx power */
+static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_control *txctl)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_txq *txq = txctl->txq;
- struct ath_buf *bf;
- int q, r;
+ struct sk_buff *skb = bf->bf_mpdu;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct list_head bf_head;
+ struct ath_atx_tid *tid = NULL;
+ u8 tidno;
- bf = ath_tx_get_buffer(sc);
- if (!bf) {
- ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
- return -1;
- }
+ spin_lock_bh(&txctl->txq->axq_lock);
- r = ath_tx_setup_buffer(hw, bf, skb, txctl);
- if (unlikely(r)) {
- ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
+ if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
+ tidno = ieee80211_get_qos_ctl(hdr)[0] &
+ IEEE80211_QOS_CTL_TID_MASK;
+ tid = ATH_AN_2_TID(txctl->an, tidno);
- /* upon ath_tx_processq() this TX queue will be resumed, we
- * guarantee this will happen by knowing beforehand that
- * we will at least have to run TX completion on one buffer
- * on the queue */
- spin_lock_bh(&txq->axq_lock);
- if (!txq->stopped && txq->axq_depth > 1) {
- ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
- txq->stopped = 1;
- }
- spin_unlock_bh(&txq->axq_lock);
+ WARN_ON(tid->ac->txq != txctl->txq);
+ }
- ath_tx_return_buffer(sc, bf);
+ if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
+ /*
+ * Try aggregation if it's a unicast data frame
+ * and the destination is HT capable.
+ */
+ ath_tx_send_ampdu(sc, tid, bf, txctl);
+ } else {
+ INIT_LIST_HEAD(&bf_head);
+ list_add_tail(&bf->list, &bf_head);
- return r;
- }
+ bf->bf_state.bfs_ftype = txctl->frame_type;
+ bf->bf_state.bfs_paprd = txctl->paprd;
- q = skb_get_queue_mapping(skb);
- if (q >= 4)
- q = 0;
+ if (bf->bf_state.bfs_paprd)
+ ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
+ bf->bf_state.bfs_paprd);
- spin_lock_bh(&txq->axq_lock);
- if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
- ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
- txq->stopped = 1;
- }
- spin_unlock_bh(&txq->axq_lock);
+ if (txctl->paprd)
+ bf->bf_state.bfs_paprd_timestamp = jiffies;
- ath_tx_start_dma(sc, bf, txctl);
+ ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
+ }
- return 0;
+ spin_unlock_bh(&txctl->txq->axq_lock);
}
-void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
+/* Upon failure caller should free skb */
+int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ath_tx_control *txctl)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- int padpos, padsize;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ath_tx_control txctl;
+ struct ieee80211_sta *sta = info->control.sta;
+ struct ath_softc *sc = hw->priv;
+ struct ath_txq *txq = txctl->txq;
+ struct ath_buf *bf;
+ int padpos, padsize;
+ int frmlen = skb->len + FCS_LEN;
+ int q;
+
+ /* NOTE: sta can be NULL according to net/mac80211.h */
+ if (sta)
+ txctl->an = (struct ath_node *)sta->drv_priv;
- memset(&txctl, 0, sizeof(struct ath_tx_control));
+ if (info->control.hw_key)
+ frmlen += info->control.hw_key->icv_len;
/*
* As a temporary workaround, assign seq# here; this will likely need
@@ -1812,30 +1796,37 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
/* Add the padding after the header if this is not already done */
padpos = ath9k_cmn_padpos(hdr->frame_control);
padsize = padpos & 3;
- if (padsize && skb->len>padpos) {
- if (skb_headroom(skb) < padsize) {
- ath_print(common, ATH_DBG_XMIT,
- "TX CABQ padding failed\n");
- dev_kfree_skb_any(skb);
- return;
- }
+ if (padsize && skb->len > padpos) {
+ if (skb_headroom(skb) < padsize)
+ return -ENOMEM;
+
skb_push(skb, padsize);
memmove(skb->data, skb->data + padsize, padpos);
}
- txctl.txq = sc->beacon.cabq;
+ setup_frame_info(hw, skb, frmlen);
- ath_print(common, ATH_DBG_XMIT,
- "transmitting CABQ packet, skb: %p\n", skb);
+ /*
+ * At this point, the vif, hw_key and sta pointers in the tx control
+ * info are no longer valid (overwritten by the ath_frame_info data).
+ */
- if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
- goto exit;
+ bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
+ if (unlikely(!bf))
+ return -ENOMEM;
+
+ q = skb_get_queue_mapping(skb);
+ spin_lock_bh(&txq->axq_lock);
+ if (txq == sc->tx.txq_map[q] &&
+ ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
+ ieee80211_stop_queue(sc->hw, q);
+ txq->stopped = 1;
}
+ spin_unlock_bh(&txq->axq_lock);
- return;
-exit:
- dev_kfree_skb_any(skb);
+ ath_tx_start_dma(sc, bf, txctl);
+
+ return 0;
}
/*****************/
@@ -1843,7 +1834,7 @@ exit:
/*****************/
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
- struct ath_wiphy *aphy, int tx_flags)
+ int tx_flags, int ftype, struct ath_txq *txq)
{
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1851,10 +1842,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
int q, padpos, padsize;
- ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
-
- if (aphy)
- hw = aphy->hw;
+ ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
if (tx_flags & ATH_TX_BAR)
tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
@@ -1877,27 +1865,28 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
- ath_print(common, ATH_DBG_PS,
- "Going back to sleep after having "
- "received TX status (0x%lx)\n",
+ ath_dbg(common, ATH_DBG_PS,
+ "Going back to sleep after having received TX status (0x%lx)\n",
sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA |
PS_WAIT_FOR_TX_ACK));
}
- if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
- ath9k_tx_status(hw, skb);
- else {
- q = skb_get_queue_mapping(skb);
- if (q >= 4)
- q = 0;
-
- if (--sc->tx.pending_frames[q] < 0)
- sc->tx.pending_frames[q] = 0;
+ q = skb_get_queue_mapping(skb);
+ if (txq == sc->tx.txq_map[q]) {
+ spin_lock_bh(&txq->axq_lock);
+ if (WARN_ON(--txq->pending_frames < 0))
+ txq->pending_frames = 0;
- ieee80211_tx_status(hw, skb);
+ if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
+ ieee80211_wake_queue(sc->hw, q);
+ txq->stopped = 0;
+ }
+ spin_unlock_bh(&txq->axq_lock);
}
+
+ ieee80211_tx_status(hw, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1923,14 +1912,15 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
if (bf->bf_state.bfs_paprd) {
if (time_after(jiffies,
- bf->bf_state.bfs_paprd_timestamp +
- msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
+ bf->bf_state.bfs_paprd_timestamp +
+ msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
dev_kfree_skb_any(skb);
else
complete(&sc->paprd_complete);
} else {
- ath_debug_stat_tx(sc, txq, bf, ts);
- ath_tx_complete(sc, skb, bf->aphy, tx_flags);
+ ath_debug_stat_tx(sc, bf, ts, txq);
+ ath_tx_complete(sc, skb, tx_flags,
+ bf->bf_state.bfs_ftype, txq);
}
/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
* accidentally reference it later.
@@ -1945,42 +1935,15 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
-static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_tx_status *ts, int txok)
-{
- u16 seq_st = 0;
- u32 ba[WME_BA_BMP_SIZE >> 5];
- int ba_index;
- int nbad = 0;
- int isaggr = 0;
-
- if (bf->bf_lastbf->bf_tx_aborted)
- return 0;
-
- isaggr = bf_isaggr(bf);
- if (isaggr) {
- seq_st = ts->ts_seqnum;
- memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
- }
-
- while (bf) {
- ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
- if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
- nbad++;
-
- bf = bf->bf_next;
- }
-
- return nbad;
-}
-
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
- int nbad, int txok, bool update_rc)
+static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, int nframes, int nbad,
+ int txok, bool update_rc)
{
struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hw *hw = bf->aphy->hw;
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
u8 i, tx_rateindex;
if (txok)
@@ -1994,22 +1957,32 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
- BUG_ON(nbad > bf->bf_nframes);
+ BUG_ON(nbad > nframes);
- tx_info->status.ampdu_len = bf->bf_nframes;
- tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
+ tx_info->status.ampdu_len = nframes;
+ tx_info->status.ampdu_ack_len = nframes - nbad;
}
if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
- if (ieee80211_is_data(hdr->frame_control)) {
- if (ts->ts_flags &
- (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
- tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
- if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
- (ts->ts_status & ATH9K_TXERR_FIFO))
- tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
- }
+ /*
+ * If an underrun error is seen assume it as an excessive
+ * retry only if max frame trigger level has been reached
+ * (2 KB for single stream, and 4 KB for dual stream).
+ * Adjust the long retry as if the frame was tried
+ * hw->max_rate_tries times to affect how rate control updates
+ * PER for the failed rate.
+ * In case of congestion on the bus penalizing this type of
+ * underruns should help hardware actually transmit new frames
+ * successfully by eventually preferring slower rates.
+ * This itself should also alleviate congestion on the bus.
+ */
+ if (ieee80211_is_data(hdr->frame_control) &&
+ (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
+ ATH9K_TX_DELIM_UNDERRUN)) &&
+ ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
+ tx_info->status.rates[tx_rateindex].count =
+ hw->max_rate_tries;
}
for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
@@ -2020,22 +1993,6 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
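/*
 * Illustrative sketch only: the figures ath_tx_rc_status() now reports to
 * mac80211 come from the caller (nframes/nbad) rather than ath_buf fields,
 * and an underrun at the maximum trigger level is charged as a full set of
 * retries.  All numbers below are made up for the example.
 */
#include <stdio.h>

int main(void)
{
	int nframes = 32, nbad = 5;	/* sub-frames sent / not block-acked */
	int max_rate_tries = 10;	/* assumed hw->max_rate_tries        */
	int underrun_at_max_trig = 1;	/* underrun seen at max trigger lvl  */
	int count = 4;			/* long retry count from tx status   */

	printf("ampdu_len=%d ampdu_ack_len=%d\n", nframes, nframes - nbad);

	/* penalize the underrun as if the frame used up all of its retries */
	if (underrun_at_max_trig)
		count = max_rate_tries;
	printf("reported retry count: %d\n", count);
	return 0;
}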
-static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
-{
- int qnum;
-
- qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
- if (qnum == -1)
- return;
-
- spin_lock_bh(&txq->axq_lock);
- if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
- if (ath_mac80211_start_queue(sc, qnum))
- txq->stopped = 0;
- }
- spin_unlock_bh(&txq->axq_lock);
-}
-
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_hw *ah = sc->sc_ah;
@@ -2047,14 +2004,16 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
int txok;
int status;
- ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
- txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
- txq->axq_link);
+ ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
+ txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
+ txq->axq_link);
for (;;) {
spin_lock_bh(&txq->axq_lock);
if (list_empty(&txq->axq_q)) {
txq->axq_link = NULL;
+ if (sc->sc_flags & SC_OP_TXAGGR)
+ ath_txq_schedule(sc, txq);
spin_unlock_bh(&txq->axq_lock);
break;
}
@@ -2089,6 +2048,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
spin_unlock_bh(&txq->axq_lock);
break;
}
+ TX_STAT_INC(txq->axq_qnum, txprocdesc);
/*
* Remove ath_buf's of the same transmit unit from txq,
@@ -2106,6 +2066,10 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
txq->axq_tx_inprogress = false;
if (bf_held)
list_del(&bf_held->list);
+
+ if (bf_is_ampdu_not_probing(bf))
+ txq->axq_ampdu_depth--;
+
spin_unlock_bh(&txq->axq_lock);
if (bf_held)
@@ -2118,23 +2082,45 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
*/
if (ts.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
+ ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
}
if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
+ true);
else
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
- ath_wake_mac80211_queue(sc, txq);
-
spin_lock_bh(&txq->axq_lock);
+
if (sc->sc_flags & SC_OP_TXAGGR)
ath_txq_schedule(sc, txq);
spin_unlock_bh(&txq->axq_lock);
}
}
+static void ath_hw_pll_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ hw_pll_work.work);
+ static int count;
+
+ if (AR_SREV_9485(sc->sc_ah)) {
+ if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
+ count++;
+
+ if (count == 3) {
+ /* Rx is hung for more than 500ms. Reset it */
+ ath_reset(sc, true);
+ count = 0;
+ }
+ } else
+ count = 0;
+
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
+ }
+}
+
static void ath_tx_complete_poll_work(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc,
@@ -2142,6 +2128,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
struct ath_txq *txq;
int i;
bool needreset = false;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ sc->tx_complete_poll_work_seen++;
+#endif
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i)) {
@@ -2155,16 +2144,41 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
} else {
txq->axq_tx_inprogress = true;
}
+ } else {
+ /* If the queue has pending buffers, then it
+ * should be doing tx work (and have axq_depth).
+ * Shouldn't get to this state, I think, but
+ * we do.
+ */
+ if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
+ (txq->pending_frames > 0 ||
+ !list_empty(&txq->axq_acq) ||
+ txq->stopped)) {
+ ath_err(ath9k_hw_common(sc->sc_ah),
+ "txq: %p axq_qnum: %u,"
+ " mac80211_qnum: %i"
+ " axq_link: %p"
+ " pending frames: %i"
+ " axq_acq empty: %i"
+ " stopped: %i"
+ " axq_depth: 0 Attempting to"
+ " restart tx logic.\n",
+ txq, txq->axq_qnum,
+ txq->mac80211_qnum,
+ txq->axq_link,
+ txq->pending_frames,
+ list_empty(&txq->axq_acq),
+ txq->stopped);
+ ath_txq_schedule(sc, txq);
+ }
}
spin_unlock_bh(&txq->axq_lock);
}
if (needreset) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
- "tx hung, resetting the chip\n");
- ath9k_ps_wakeup(sc);
+ ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
+ "tx hung, resetting the chip\n");
ath_reset(sc, true);
- ath9k_ps_restore(sc);
}
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
@@ -2202,8 +2216,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
if (status == -EINPROGRESS)
break;
if (status == -EIO) {
- ath_print(common, ATH_DBG_XMIT,
- "Error processing tx status\n");
+ ath_dbg(common, ATH_DBG_XMIT,
+ "Error processing tx status\n");
break;
}
@@ -2229,6 +2243,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
txq->axq_depth--;
txq->axq_tx_inprogress = false;
+ if (bf_is_ampdu_not_probing(bf))
+ txq->axq_ampdu_depth--;
spin_unlock_bh(&txq->axq_lock);
txok = !(txs.ts_status & ATH9K_TXERR_MASK);
@@ -2236,27 +2252,29 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
if (!bf_isampdu(bf)) {
if (txs.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
+ ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
}
if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
+ ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
+ txok, true);
else
ath_tx_complete_buf(sc, bf, txq, &bf_head,
&txs, txok, 0);
- ath_wake_mac80211_queue(sc, txq);
-
spin_lock_bh(&txq->axq_lock);
+
if (!list_empty(&txq->txq_fifo_pending)) {
INIT_LIST_HEAD(&bf_head);
bf = list_first_entry(&txq->txq_fifo_pending,
- struct ath_buf, list);
- list_cut_position(&bf_head, &txq->txq_fifo_pending,
- &bf->bf_lastbf->list);
+ struct ath_buf, list);
+ list_cut_position(&bf_head,
+ &txq->txq_fifo_pending,
+ &bf->bf_lastbf->list);
ath_tx_txqaddbuf(sc, txq, &bf_head);
} else if (sc->sc_flags & SC_OP_TXAGGR)
ath_txq_schedule(sc, txq);
+
spin_unlock_bh(&txq->axq_lock);
}
}
@@ -2310,20 +2328,21 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
"tx", nbufs, 1, 1);
if (error != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Failed to allocate tx descriptors: %d\n", error);
+ ath_err(common,
+ "Failed to allocate tx descriptors: %d\n", error);
goto err;
}
error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
"beacon", ATH_BCBUF, 1, 1);
if (error != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Failed to allocate beacon descriptors: %d\n", error);
+ ath_err(common,
+ "Failed to allocate beacon descriptors: %d\n", error);
goto err;
}
INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
+ INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
error = ath_tx_edma_init(sc);
@@ -2377,7 +2396,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
for (acno = 0, ac = &an->ac[acno];
acno < WME_NUM_AC; acno++, ac++) {
ac->sched = false;
- ac->qnum = sc->tx.hwq_map[acno];
+ ac->txq = sc->tx.txq_map[acno];
INIT_LIST_HEAD(&ac->tid_q);
}
}
@@ -2387,17 +2406,13 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
struct ath_atx_ac *ac;
struct ath_atx_tid *tid;
struct ath_txq *txq;
- int i, tidno;
+ int tidno;
for (tidno = 0, tid = &an->tid[tidno];
tidno < WME_NUM_TID; tidno++, tid++) {
- i = tid->ac->qnum;
- if (!ATH_TXQ_SETUP(sc, i))
- continue;
-
- txq = &sc->tx.txq[i];
ac = tid->ac;
+ txq = ac->txq;
spin_lock_bh(&txq->axq_lock);
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 6cf0c9ef47aa..c6a5fae634a0 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -48,7 +48,7 @@
#include <linux/usb.h>
#ifdef CONFIG_CARL9170_LEDS
#include <linux/leds.h>
-#endif /* CONFIG_CARL170_LEDS */
+#endif /* CONFIG_CARL9170_LEDS */
#ifdef CONFIG_CARL9170_WPC
#include <linux/input.h>
#endif /* CONFIG_CARL9170_WPC */
@@ -215,7 +215,7 @@ enum carl9170_restart_reasons {
CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS,
CARL9170_RR_WATCHDOG,
CARL9170_RR_STUCK_TX,
- CARL9170_RR_SLOW_SYSTEM,
+ CARL9170_RR_UNRESPONSIVE_DEVICE,
CARL9170_RR_COMMAND_TIMEOUT,
CARL9170_RR_TOO_MANY_PHY_ERRORS,
CARL9170_RR_LOST_RSP,
@@ -283,10 +283,12 @@ struct ar9170 {
unsigned int mem_blocks;
unsigned int mem_block_size;
unsigned int rx_size;
+ unsigned int tx_seq_table;
} fw;
/* reset / stuck frames/queue detection */
struct work_struct restart_work;
+ struct work_struct ping_work;
unsigned int restart_counter;
unsigned long queue_stop_timeout[__AR9170_NUM_TXQ];
unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ];
@@ -532,7 +534,7 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
/* TX */
-int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
void carl9170_tx_janitor(struct work_struct *work);
void carl9170_tx_process_status(struct ar9170 *ar,
const struct carl9170_rsp *cmd);
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
index c21f3364bfec..cdfc94c371b4 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.c
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -41,7 +41,7 @@
int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
{
- __le32 buf[2] = {
+ const __le32 buf[2] = {
cpu_to_le32(reg),
cpu_to_le32(val),
};
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index ae6c006bbc56..9517ede9e2df 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -150,6 +150,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
const struct carl9170fw_otus_desc *otus_desc;
const struct carl9170fw_chk_desc *chk_desc;
const struct carl9170fw_last_desc *last_desc;
+ const struct carl9170fw_txsq_desc *txsq_desc;
last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
@@ -264,6 +265,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
FIF_PROMISC_IN_BSS;
}
+ if (SUPP(CARL9170FW_WOL))
+ device_set_wakeup_enable(&ar->udev->dev, true);
+
ar->fw.vif_num = otus_desc->vif_num;
ar->fw.cmd_bufs = otus_desc->cmd_bufs;
ar->fw.address = le32_to_cpu(otus_desc->fw_address);
@@ -291,10 +295,22 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
if (SUPP(CARL9170FW_WLANTX_CAB)) {
ar->hw->wiphy->interface_modes |=
- BIT(NL80211_IFTYPE_AP);
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO);
}
}
+ txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC,
+ sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER);
+
+ if (txsq_desc) {
+ ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
+ if (!valid_cpu_addr(ar->fw.tx_seq_table))
+ return -EINVAL;
+ } else {
+ ar->fw.tx_seq_table = 0;
+ }
+
#undef SUPPORTED
return 0;
}
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index d552166db505..30449d21b762 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -97,13 +97,13 @@ struct carl9170_set_key_cmd {
__le16 type;
u8 macAddr[6];
u32 key[4];
-} __packed;
+} __packed __aligned(4);
#define CARL9170_SET_KEY_CMD_SIZE 28
struct carl9170_disable_key_cmd {
__le16 user;
__le16 padding;
-} __packed;
+} __packed __aligned(4);
#define CARL9170_DISABLE_KEY_CMD_SIZE 4
struct carl9170_u32_list {
@@ -167,6 +167,7 @@ struct carl9170_rx_filter_cmd {
#define CARL9170_RX_FILTER_CTL_BACKR 0x20
#define CARL9170_RX_FILTER_MGMT 0x40
#define CARL9170_RX_FILTER_DATA 0x80
+#define CARL9170_RX_FILTER_EVERYTHING (~0)
struct carl9170_bcn_ctrl_cmd {
__le32 vif_id;
@@ -206,7 +207,7 @@ struct carl9170_cmd {
struct carl9170_rx_filter_cmd rx_filter;
u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
} __packed;
-} __packed;
+} __packed __aligned(4);
#define CARL9170_TX_STATUS_QUEUE 3
#define CARL9170_TX_STATUS_QUEUE_S 0
@@ -216,6 +217,7 @@ struct carl9170_cmd {
#define CARL9170_TX_STATUS_TRIES (7 << CARL9170_TX_STATUS_TRIES_S)
#define CARL9170_TX_STATUS_SUCCESS 0x80
+#ifdef __CARL9170FW__
/*
* NOTE:
* Both structs [carl9170_tx_status and _carl9170_tx_status]
@@ -232,6 +234,8 @@ struct carl9170_tx_status {
u8 tries:3;
u8 success:1;
} __packed;
+#endif /* __CARL9170FW__ */
+
struct _carl9170_tx_status {
/*
* This version should be immune to all alignment bugs.
@@ -272,13 +276,15 @@ struct carl9170_rsp {
struct carl9170_rf_init_result rf_init_res;
struct carl9170_u32_list rreg_res;
struct carl9170_u32_list echo;
+#ifdef __CARL9170FW__
struct carl9170_tx_status tx_status[0];
+#endif /* __CARL9170FW__ */
struct _carl9170_tx_status _tx_status[0];
struct carl9170_gpio gpio;
struct carl9170_tsf_rsp tsf;
struct carl9170_psm psm;
u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
} __packed;
-} __packed;
+} __packed __aligned(4);
#endif /* __CARL9170_SHARED_FWCMD_H */
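/*
 * Illustrative sketch only, with made-up struct names: what the added
 * "__packed __aligned(4)" annotations do in general.  Packing removes the
 * compiler's internal padding so the in-memory layout matches the wire
 * format exactly, while aligned(4) keeps the whole object (and its size) on
 * a 4-byte boundary so 32-bit accesses stay naturally aligned.  GCC-style
 * attributes are spelled out here in place of the kernel's macros.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <stdalign.h>

struct hdr_plain {
	uint8_t  id;
	uint32_t addr;		/* compiler inserts 3 pad bytes before this */
};

struct hdr_wire {
	uint8_t  id;
	uint32_t addr;		/* sits at offset 1: exact wire layout */
} __attribute__((packed, aligned(4)));

int main(void)
{
	printf("plain: size %zu, align %zu, addr at %zu\n",
	       sizeof(struct hdr_plain), alignof(struct hdr_plain),
	       offsetof(struct hdr_plain, addr));
	printf("wire:  size %zu, align %zu, addr at %zu\n",
	       sizeof(struct hdr_wire), alignof(struct hdr_wire),
	       offsetof(struct hdr_wire, addr));
	return 0;
}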
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
index 71f3821f6058..921066822dd5 100644
--- a/drivers/net/wireless/ath/carl9170/fwdesc.h
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -69,6 +69,9 @@ enum carl9170fw_feature_list {
/* Firmware RX filter | CARL9170_CMD_RX_FILTER */
CARL9170FW_RX_FILTER,
+ /* Wake up on WLAN */
+ CARL9170FW_WOL,
+
/* KEEP LAST */
__CARL9170FW_FEATURE_NUM
};
@@ -78,6 +81,7 @@ enum carl9170fw_feature_list {
#define FIX_MAGIC "FIX\0"
#define DBG_MAGIC "DBG\0"
#define CHK_MAGIC "CHK\0"
+#define TXSQ_MAGIC "TXSQ"
#define LAST_MAGIC "LAST"
#define CARL9170FW_SET_DAY(d) (((d) - 1) % 31)
@@ -88,8 +92,10 @@ enum carl9170fw_feature_list {
#define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1)
#define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10)
+#define CARL9170FW_MAGIC_SIZE 4
+
struct carl9170fw_desc_head {
- u8 magic[4];
+ u8 magic[CARL9170FW_MAGIC_SIZE];
__le16 length;
u8 min_ver;
u8 cur_ver;
@@ -170,6 +176,16 @@ struct carl9170fw_chk_desc {
#define CARL9170FW_CHK_DESC_SIZE \
(sizeof(struct carl9170fw_chk_desc))
+#define CARL9170FW_TXSQ_DESC_MIN_VER 1
+#define CARL9170FW_TXSQ_DESC_CUR_VER 1
+struct carl9170fw_txsq_desc {
+ struct carl9170fw_desc_head head;
+
+ __le32 seq_table_addr;
+} __packed;
+#define CARL9170FW_TXSQ_DESC_SIZE \
+ (sizeof(struct carl9170fw_txsq_desc))
+
#define CARL9170FW_LAST_DESC_MIN_VER 1
#define CARL9170FW_LAST_DESC_CUR_VER 2
struct carl9170fw_last_desc {
@@ -189,8 +205,8 @@ struct carl9170fw_last_desc {
}
static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
- u8 magic[4], __le16 length,
- u8 min_ver, u8 cur_ver)
+ u8 magic[CARL9170FW_MAGIC_SIZE],
+ __le16 length, u8 min_ver, u8 cur_ver)
{
head->magic[0] = magic[0];
head->magic[1] = magic[1];
@@ -204,7 +220,7 @@ static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
#define carl9170fw_for_each_hdr(desc, fw_desc) \
for (desc = fw_desc; \
- memcmp(desc->magic, LAST_MAGIC, 4) && \
+ memcmp(desc->magic, LAST_MAGIC, CARL9170FW_MAGIC_SIZE) && \
le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE && \
le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH; \
desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length)))
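/*
 * Illustrative sketch only: the walk that carl9170fw_for_each_hdr() performs
 * over the firmware descriptor chain: a fixed 4-byte magic, a 16-bit
 * little-endian length, then advance by that length until the "LAST"
 * terminator.  The blob below is invented for the demonstration, and the
 * sketch assumes a little-endian host where the driver uses le16_to_cpu().
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct desc_head {
	uint8_t  magic[4];
	uint16_t length;	/* total descriptor length, little-endian */
	uint8_t  min_ver;
	uint8_t  cur_ver;
} __attribute__((packed));

int main(void)
{
	/* OTUS (12 bytes) -> TXSQ (12 bytes) -> LAST (8 bytes) */
	uint8_t blob[] = {
		'O','T','U','S', 12, 0, 1, 1,  0xaa, 0xbb, 0xcc, 0xdd,
		'T','X','S','Q', 12, 0, 1, 1,  0x00, 0x11, 0x22, 0x33,
		'L','A','S','T',  8, 0, 1, 2,
	};
	const struct desc_head *desc = (const void *)blob;

	while (memcmp(desc->magic, "LAST", 4) &&
	       desc->length >= sizeof(*desc)) {
		printf("%.4s descriptor, %u bytes\n",
		       (const char *)desc->magic, (unsigned int)desc->length);
		desc = (const void *)((const uint8_t *)desc + desc->length);
	}
	return 0;
}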
@@ -218,8 +234,8 @@ static inline bool carl9170fw_supports(__le32 list, u8 feature)
}
static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head,
- const u8 descid[4], u16 min_len,
- u8 compatible_revision)
+ const u8 descid[CARL9170FW_MAGIC_SIZE],
+ u16 min_len, u8 compatible_revision)
{
if (descid[0] == head->magic[0] && descid[1] == head->magic[1] &&
descid[2] == head->magic[2] && descid[3] == head->magic[3] &&
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
index 2f471b3f05af..4e30762dd903 100644
--- a/drivers/net/wireless/ath/carl9170/hw.h
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -463,6 +463,8 @@
#define AR9170_PWR_REG_CHIP_REVISION (AR9170_PWR_REG_BASE + 0x010)
#define AR9170_PWR_REG_PLL_ADDAC (AR9170_PWR_REG_BASE + 0x014)
+#define AR9170_PWR_PLL_ADDAC_DIV_S 2
+#define AR9170_PWR_PLL_ADDAC_DIV 0xffc
#define AR9170_PWR_REG_WATCH_DOG_MAGIC (AR9170_PWR_REG_BASE + 0x020)
/* Faraday USB Controller */
@@ -471,6 +473,9 @@
#define AR9170_USB_REG_MAIN_CTRL (AR9170_USB_REG_BASE + 0x000)
#define AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP BIT(0)
#define AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT BIT(2)
+#define AR9170_USB_MAIN_CTRL_GO_TO_SUSPEND BIT(3)
+#define AR9170_USB_MAIN_CTRL_RESET BIT(4)
+#define AR9170_USB_MAIN_CTRL_CHIP_ENABLE BIT(5)
#define AR9170_USB_MAIN_CTRL_HIGHSPEED BIT(6)
#define AR9170_USB_REG_DEVICE_ADDRESS (AR9170_USB_REG_BASE + 0x001)
@@ -499,6 +504,13 @@
#define AR9170_USB_REG_INTR_GROUP (AR9170_USB_REG_BASE + 0x020)
#define AR9170_USB_REG_INTR_SOURCE_0 (AR9170_USB_REG_BASE + 0x021)
+#define AR9170_USB_INTR_SRC0_SETUP BIT(0)
+#define AR9170_USB_INTR_SRC0_IN BIT(1)
+#define AR9170_USB_INTR_SRC0_OUT BIT(2)
+#define AR9170_USB_INTR_SRC0_FAIL BIT(3) /* ??? */
+#define AR9170_USB_INTR_SRC0_END BIT(4) /* ??? */
+#define AR9170_USB_INTR_SRC0_ABORT BIT(7)
+
#define AR9170_USB_REG_INTR_SOURCE_1 (AR9170_USB_REG_BASE + 0x022)
#define AR9170_USB_REG_INTR_SOURCE_2 (AR9170_USB_REG_BASE + 0x023)
#define AR9170_USB_REG_INTR_SOURCE_3 (AR9170_USB_REG_BASE + 0x024)
@@ -506,6 +518,15 @@
#define AR9170_USB_REG_INTR_SOURCE_5 (AR9170_USB_REG_BASE + 0x026)
#define AR9170_USB_REG_INTR_SOURCE_6 (AR9170_USB_REG_BASE + 0x027)
#define AR9170_USB_REG_INTR_SOURCE_7 (AR9170_USB_REG_BASE + 0x028)
+#define AR9170_USB_INTR_SRC7_USB_RESET BIT(1)
+#define AR9170_USB_INTR_SRC7_USB_SUSPEND BIT(2)
+#define AR9170_USB_INTR_SRC7_USB_RESUME BIT(3)
+#define AR9170_USB_INTR_SRC7_ISO_SEQ_ERR BIT(4)
+#define AR9170_USB_INTR_SRC7_ISO_SEQ_ABORT BIT(5)
+#define AR9170_USB_INTR_SRC7_TX0BYTE BIT(6)
+#define AR9170_USB_INTR_SRC7_RX0BYTE BIT(7)
+
+#define AR9170_USB_REG_IDLE_COUNT (AR9170_USB_REG_BASE + 0x02f)
#define AR9170_USB_REG_EP_MAP (AR9170_USB_REG_BASE + 0x030)
#define AR9170_USB_REG_EP1_MAP (AR9170_USB_REG_BASE + 0x030)
@@ -581,6 +602,10 @@
#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110)
#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114)
+
+#define AR9170_USB_REG_WAKE_UP (AR9170_USB_REG_BASE + 0x120)
+#define AR9170_USB_WAKE_UP_WAKE BIT(0)
+
#define AR9170_USB_REG_CBUS_CTRL (AR9170_USB_REG_BASE + 0x1f0)
#define AR9170_USB_CBUS_CTRL_BUFFER_END (BIT(1))
@@ -712,7 +737,8 @@ struct ar9170_stream {
__le16 tag;
u8 payload[0];
-};
+} __packed __aligned(4);
+#define AR9170_STREAM_LEN 4
#define AR9170_MAX_ACKTABLE_ENTRIES 8
#define AR9170_MAX_VIRTUAL_MAC 7
@@ -736,4 +762,8 @@ struct ar9170_stream {
#define MOD_VAL(reg, value, newvalue) \
(((value) & ~reg) | (((newvalue) << reg##_S) & reg))
+
+#define GET_VAL(reg, value) \
+ (((value) & reg) >> reg##_S)
+
#endif /* __CARL9170_SHARED_HW_H */
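
The GET_VAL() macro added above relies on the existing naming convention: every register field FOO comes with a mask FOO and a shift FOO_S, which MOD_VAL() and GET_VAL() use to pack and unpack values. As a worked example of the convention (field values taken from this patch's phy.h/phy.c changes, the helper name is made up here), extracting the 9-bit CCA minimum-power field and sign-extending it into a noise reading looks like this:

static inline int carl9170_cca_min_pwr_dbm(u32 phy_cca)
{
	/* 9-bit two's-complement field at bits 27..19 of the PHY CCA register */
	return sign_extend32(GET_VAL(AR9170_PHY_CCA_MIN_PWR, phy_cca), 8);
}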
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index 2305bc27151c..385cf508479b 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -205,8 +205,8 @@ int carl9170_init_mac(struct ar9170 *ar)
carl9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);
/* Aggregation MAX number and timeout */
- carl9170_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, 0xa);
- carl9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, 0x140a00);
+ carl9170_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, 0x8000a);
+ carl9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, 0x140a07);
carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
AR9170_MAC_FTF_DEFAULTS);
@@ -457,8 +457,9 @@ int carl9170_set_beacon_timers(struct ar9170 *ar)
int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct carl9170_vif_info *cvif;
+ struct ieee80211_tx_info *txinfo;
__le32 *data, *old = NULL;
u32 word, off, addr, len;
int i = 0, err = 0;
@@ -487,7 +488,13 @@ found:
if (!skb) {
err = -ENOMEM;
- goto out_unlock;
+ goto err_free;
+ }
+
+ txinfo = IEEE80211_SKB_CB(skb);
+ if (txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS) {
+ err = -EINVAL;
+ goto err_free;
}
spin_lock_bh(&ar->beacon_lock);
@@ -504,11 +511,8 @@ found:
wiphy_err(ar->hw->wiphy, "beacon does not "
"fit into device memory!\n");
}
-
- spin_unlock_bh(&ar->beacon_lock);
- dev_kfree_skb_any(skb);
err = -EINVAL;
- goto out_unlock;
+ goto err_unlock;
}
if (len > AR9170_MAC_BCN_LENGTH_MAX) {
@@ -518,22 +522,22 @@ found:
AR9170_MAC_BCN_LENGTH_MAX, len);
}
- spin_unlock_bh(&ar->beacon_lock);
- dev_kfree_skb_any(skb);
err = -EMSGSIZE;
- goto out_unlock;
+ goto err_unlock;
}
- carl9170_async_regwrite_begin(ar);
+ i = txinfo->control.rates[0].idx;
+ if (txinfo->band != IEEE80211_BAND_2GHZ)
+ i += 4;
- /* XXX: use skb->cb info */
- if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
- carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP,
- ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400);
- } else {
- carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP,
- ((skb->len + FCS_LEN) << 16) + 0x001b);
- }
+ word = __carl9170_ratetable[i].hw_value & 0xf;
+ if (i < 4)
+ word |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
+ else
+ word |= ((skb->len + FCS_LEN) << 16) + 0x0010;
+
+ carl9170_async_regwrite_begin(ar);
+ carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, word);
for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
/*
@@ -557,7 +561,7 @@ found:
cvif->beacon = skb;
spin_unlock_bh(&ar->beacon_lock);
if (err)
- goto out_unlock;
+ goto err_free;
if (submit) {
err = carl9170_bcn_ctrl(ar, cvif->id,
@@ -565,10 +569,18 @@ found:
addr, skb->len + FCS_LEN);
if (err)
- goto out_unlock;
+ goto err_free;
}
out_unlock:
rcu_read_unlock();
+ return 0;
+
+err_unlock:
+ spin_unlock_bh(&ar->beacon_lock);
+
+err_free:
+ rcu_read_unlock();
+ dev_kfree_skb_any(skb);
return err;
}
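
The rewritten beacon path above derives the PLCP word from the rate-control index instead of hard-coding one rate per band. A condensed sketch of just that calculation, as an assumed reading of the two branches in this hunk (the helper name is hypothetical):

static u32 carl9170_beacon_plcp(unsigned int rate_idx, unsigned int frame_len)
{
	/* low nibble carries the hardware rate code */
	u32 plcp = __carl9170_ratetable[rate_idx].hw_value & 0xf;

	if (rate_idx < 4)	/* CCK rates (index 0..3) */
		plcp |= ((frame_len + FCS_LEN) << (3 + 16)) + 0x0400;
	else			/* OFDM rates: length in the upper half-word */
		plcp |= ((frame_len + FCS_LEN) << 16) + 0x0010;

	return plcp;
}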
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index a314c2c2bfbe..ede3d7e5a048 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -428,6 +428,7 @@ static void carl9170_cancel_worker(struct ar9170 *ar)
cancel_delayed_work_sync(&ar->led_work);
#endif /* CONFIG_CARL9170_LEDS */
cancel_work_sync(&ar->ps_work);
+ cancel_work_sync(&ar->ping_work);
cancel_work_sync(&ar->ampdu_work);
}
@@ -533,6 +534,21 @@ void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
*/
}
+static void carl9170_ping_work(struct work_struct *work)
+{
+ struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
+ int err;
+
+ if (!IS_STARTED(ar))
+ return;
+
+ mutex_lock(&ar->mutex);
+ err = carl9170_echo_test(ar, 0xdeadbeef);
+ if (err)
+ carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
+ mutex_unlock(&ar->mutex);
+}
+
static int carl9170_init_interface(struct ar9170 *ar,
struct ieee80211_vif *vif)
{
@@ -646,6 +662,13 @@ init:
goto unlock;
}
+ if (ar->fw.tx_seq_table) {
+ err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
+ 0);
+ if (err)
+ goto unlock;
+ }
+
unlock:
if (err && (vif_id >= 0)) {
vif_priv->active = false;
@@ -1263,7 +1286,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta,
- u16 tid, u16 *ssn)
+ u16 tid, u16 *ssn, u8 buf_size)
{
struct ar9170 *ar = hw->priv;
struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
@@ -1614,6 +1637,7 @@ void *carl9170_alloc(size_t priv_size)
skb_queue_head_init(&ar->tx_pending[i]);
}
INIT_WORK(&ar->ps_work, carl9170_ps_work);
+ INIT_WORK(&ar->ping_work, carl9170_ping_work);
INIT_WORK(&ar->restart_work, carl9170_restart_work);
INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
@@ -1631,7 +1655,8 @@ void *carl9170_alloc(size_t priv_size)
* supports these modes. The code which will add the
* additional interface_modes is in fw.c.
*/
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT);
hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
@@ -1828,7 +1853,7 @@ int carl9170_register(struct ar9170 *ar)
err = carl9170_led_register(ar);
if (err)
goto err_unreg;
-#endif /* CONFIG_CAR9L170_LEDS */
+#endif /* CONFIG_CARL9170_LEDS */
#ifdef CONFIG_CARL9170_WPC
err = carl9170_register_wps_button(ar);
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index 89deca37a988..b6b0de600506 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -1029,8 +1029,6 @@ static int carl9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
if (err)
return err;
- msleep(20);
-
return 0;
}
@@ -1554,15 +1552,6 @@ static int carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
return carl9170_regwrite_result();
}
-/* TODO: replace this with sign_extend32(noise, 8) */
-static int carl9170_calc_noise_dbm(u32 raw_noise)
-{
- if (raw_noise & 0x100)
- return ~0x1ff | raw_noise;
- else
- return raw_noise;
-}
-
int carl9170_get_noisefloor(struct ar9170 *ar)
{
static const u32 phy_regs[] = {
@@ -1578,11 +1567,11 @@ int carl9170_get_noisefloor(struct ar9170 *ar)
return err;
for (i = 0; i < 2; i++) {
- ar->noise[i] = carl9170_calc_noise_dbm(
- (phy_res[i] >> 19) & 0x1ff);
+ ar->noise[i] = sign_extend32(GET_VAL(
+ AR9170_PHY_CCA_MIN_PWR, phy_res[i]), 8);
- ar->noise[i + 2] = carl9170_calc_noise_dbm(
- (phy_res[i + 2] >> 23) & 0x1ff);
+ ar->noise[i + 2] = sign_extend32(GET_VAL(
+ AR9170_PHY_EXT_CCA_MIN_PWR, phy_res[i + 2]), 8);
}
return 0;
@@ -1669,12 +1658,6 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
return err;
cmd = CARL9170_CMD_RF_INIT;
-
- msleep(100);
-
- err = carl9170_echo_test(ar, 0xaabbccdd);
- if (err)
- return err;
} else {
cmd = CARL9170_CMD_FREQUENCY;
}
@@ -1685,6 +1668,8 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE,
0x200);
+ if (err)
+ return err;
err = carl9170_init_rf_bank4_pwr(ar,
channel->band == IEEE80211_BAND_5GHZ,
diff --git a/drivers/net/wireless/ath/carl9170/phy.h b/drivers/net/wireless/ath/carl9170/phy.h
index 02c34eb4ebde..024fb42bc787 100644
--- a/drivers/net/wireless/ath/carl9170/phy.h
+++ b/drivers/net/wireless/ath/carl9170/phy.h
@@ -139,8 +139,8 @@
#define AR9170_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
#define AR9170_PHY_REG_CCA (AR9170_PHY_REG_BASE + 0x0064)
-#define AR9170_PHY_CCA_MINCCA_PWR 0x0ff80000
-#define AR9170_PHY_CCA_MINCCA_PWR_S 19
+#define AR9170_PHY_CCA_MIN_PWR 0x0ff80000
+#define AR9170_PHY_CCA_MIN_PWR_S 19
#define AR9170_PHY_CCA_THRESH62 0x0007f000
#define AR9170_PHY_CCA_THRESH62_S 12
@@ -338,8 +338,8 @@
#define AR9170_PHY_EXT_CCA_CYCPWR_THR1_S 9
#define AR9170_PHY_EXT_CCA_THRESH62 0x007f0000
#define AR9170_PHY_EXT_CCA_THRESH62_S 16
-#define AR9170_PHY_EXT_MINCCA_PWR 0xff800000
-#define AR9170_PHY_EXT_MINCCA_PWR_S 23
+#define AR9170_PHY_EXT_CCA_MIN_PWR 0xff800000
+#define AR9170_PHY_EXT_CCA_MIN_PWR_S 23
#define AR9170_PHY_REG_SFCORR_EXT (AR9170_PHY_REG_BASE + 0x01c0)
#define AR9170_PHY_SFCORR_EXT_M1_THRESH 0x0000007f
@@ -546,19 +546,19 @@
#define AR9170_PHY_FORCE_XPA_CFG_S 0
#define AR9170_PHY_REG_CH1_CCA (AR9170_PHY_REG_BASE + 0x1064)
-#define AR9170_PHY_CH1_MINCCA_PWR 0x0ff80000
-#define AR9170_PHY_CH1_MINCCA_PWR_S 19
+#define AR9170_PHY_CH1_CCA_MIN_PWR 0x0ff80000
+#define AR9170_PHY_CH1_CCA_MIN_PWR_S 19
#define AR9170_PHY_REG_CH2_CCA (AR9170_PHY_REG_BASE + 0x2064)
-#define AR9170_PHY_CH2_MINCCA_PWR 0x0ff80000
-#define AR9170_PHY_CH2_MINCCA_PWR_S 19
+#define AR9170_PHY_CH2_CCA_MIN_PWR 0x0ff80000
+#define AR9170_PHY_CH2_CCA_MIN_PWR_S 19
#define AR9170_PHY_REG_CH1_EXT_CCA (AR9170_PHY_REG_BASE + 0x11bc)
-#define AR9170_PHY_CH1_EXT_MINCCA_PWR 0xff800000
-#define AR9170_PHY_CH1_EXT_MINCCA_PWR_S 23
+#define AR9170_PHY_CH1_EXT_CCA_MIN_PWR 0xff800000
+#define AR9170_PHY_CH1_EXT_CCA_MIN_PWR_S 23
#define AR9170_PHY_REG_CH2_EXT_CCA (AR9170_PHY_REG_BASE + 0x21bc)
-#define AR9170_PHY_CH2_EXT_MINCCA_PWR 0xff800000
-#define AR9170_PHY_CH2_EXT_MINCCA_PWR_S 23
+#define AR9170_PHY_CH2_EXT_CCA_MIN_PWR 0xff800000
+#define AR9170_PHY_CH2_EXT_CCA_MIN_PWR_S 23
#endif /* __CARL9170_SHARED_PHY_H */
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 939a0e96ed1f..84866a4b8350 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -564,7 +564,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
/* 2. Maybe the AP wants to send multicast/broadcast data? */
- cam = !!(tim_ie->bitmap_ctrl & 0x01);
+ cam |= !!(tim_ie->bitmap_ctrl & 0x01);
if (!cam) {
/* back to low-power land. */
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index b575c865142d..0ef70b6fc512 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -242,9 +242,11 @@ static void carl9170_tx_release(struct kref *ref)
ar->tx_ampdu_schedule = true;
if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
- txinfo->status.ampdu_len = txinfo->pad[0];
- txinfo->status.ampdu_ack_len = txinfo->pad[1];
- txinfo->pad[0] = txinfo->pad[1] = 0;
+ struct _carl9170_tx_superframe *super;
+
+ super = (void *)skb->data;
+ txinfo->status.ampdu_len = super->s.rix;
+ txinfo->status.ampdu_ack_len = super->s.cnt;
} else if (txinfo->flags & IEEE80211_TX_STAT_ACK) {
/*
* drop redundant tx_status reports:
@@ -337,7 +339,8 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
u8 tid;
if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
- txinfo->flags & IEEE80211_TX_CTL_INJECTED)
+ txinfo->flags & IEEE80211_TX_CTL_INJECTED ||
+ (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))
return;
tx_info = IEEE80211_SKB_CB(skb);
@@ -389,8 +392,8 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
sta_info->stats[tid].ampdu_ack_len++;
if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
- txinfo->pad[0] = sta_info->stats[tid].ampdu_len;
- txinfo->pad[1] = sta_info->stats[tid].ampdu_ack_len;
+ super->s.rix = sta_info->stats[tid].ampdu_len;
+ super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
sta_info->stats[tid].clear = true;
}
@@ -524,6 +527,59 @@ next:
}
}
+static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
+{
+ struct carl9170_sta_tid *iter;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *txinfo;
+ struct carl9170_tx_info *arinfo;
+ struct _carl9170_tx_superframe *super;
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+ struct ieee80211_hdr *hdr;
+ unsigned int vif_id;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
+ if (iter->state < CARL9170_TID_STATE_IDLE)
+ continue;
+
+ spin_lock_bh(&iter->lock);
+ skb = skb_peek(&iter->queue);
+ if (!skb)
+ goto unlock;
+
+ txinfo = IEEE80211_SKB_CB(skb);
+ arinfo = (void *)txinfo->rate_driver_data;
+ if (time_is_after_jiffies(arinfo->timeout +
+ msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
+ goto unlock;
+
+ super = (void *) skb->data;
+ hdr = (void *) super->frame_data;
+
+ vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
+ CARL9170_TX_SUPER_MISC_VIF_ID_S;
+
+ if (WARN_ON(vif_id >= AR9170_MAX_VIRTUAL_MAC))
+ goto unlock;
+
+ vif = rcu_dereference(ar->vif_priv[vif_id].vif);
+ if (WARN_ON(!vif))
+ goto unlock;
+
+ sta = ieee80211_find_sta(vif, hdr->addr1);
+ if (WARN_ON(!sta))
+ goto unlock;
+
+ ieee80211_stop_tx_ba_session(sta, iter->tid);
+unlock:
+ spin_unlock_bh(&iter->lock);
+
+ }
+ rcu_read_unlock();
+}
+
void carl9170_tx_janitor(struct work_struct *work)
{
struct ar9170 *ar = container_of(work, struct ar9170,
@@ -534,6 +590,7 @@ void carl9170_tx_janitor(struct work_struct *work)
ar->tx_janitor_last_run = jiffies;
carl9170_check_queue_stop_timeout(ar);
+ carl9170_tx_ampdu_timeout(ar);
if (!atomic_read(&ar->tx_total_queued))
return;
@@ -805,12 +862,15 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;
+ if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
+ txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;
+
if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;
mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
AR9170_TX_MAC_BACKOFF);
- mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &&
+ mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
AR9170_TX_MAC_QOS);
no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
@@ -842,10 +902,8 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
if (unlikely(!sta || !cvif))
goto err_out;
- factor = min_t(unsigned int, 1u,
- info->control.sta->ht_cap.ampdu_factor);
-
- density = info->control.sta->ht_cap.ampdu_density;
+ factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor);
+ density = sta->ht_cap.ampdu_density;
if (density) {
/*
@@ -1206,6 +1264,7 @@ static void carl9170_tx(struct ar9170 *ar)
static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
struct ieee80211_sta *sta, struct sk_buff *skb)
{
+ struct _carl9170_tx_superframe *super = (void *) skb->data;
struct carl9170_sta_info *sta_info;
struct carl9170_sta_tid *agg;
struct sk_buff *iter;
@@ -1274,12 +1333,13 @@ err_unlock:
err_unlock_rcu:
rcu_read_unlock();
+ super->f.mac_control &= ~cpu_to_le16(AR9170_TX_MAC_AGGR);
carl9170_tx_status(ar, skb, false);
ar->tx_dropped++;
return false;
}
-int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ar9170 *ar = hw->priv;
struct ieee80211_tx_info *info;
@@ -1302,9 +1362,6 @@ int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
*/
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- if (WARN_ON_ONCE(!sta))
- goto err_free;
-
run = carl9170_tx_ampdu_queue(ar, sta, skb);
if (run)
carl9170_tx_ampdu(ar);
@@ -1316,12 +1373,11 @@ int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
}
carl9170_tx(ar);
- return NETDEV_TX_OK;
+ return;
err_free:
ar->tx_dropped++;
dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
}
void carl9170_tx_scheduler(struct ar9170 *ar)
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 7504ed14c725..f82c400be288 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
{ USB_DEVICE(0x057c, 0x8402) },
/* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
{ USB_DEVICE(0x1668, 0x1200) },
+ /* Airlive X.USB a/b/g/n */
+ { USB_DEVICE(0x1b75, 0x9170) },
/* terminate */
{}
@@ -160,8 +162,7 @@ err_acc:
static void carl9170_usb_tx_data_complete(struct urb *urb)
{
- struct ar9170 *ar = (struct ar9170 *)
- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
+ struct ar9170 *ar = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
if (WARN_ON_ONCE(!ar)) {
dev_kfree_skb_irq(urb->context);
@@ -433,7 +434,7 @@ static void carl9170_usb_rx_complete(struct urb *urb)
* device.
*/
- carl9170_restart(ar, CARL9170_RR_SLOW_SYSTEM);
+ ieee80211_queue_work(ar->hw, &ar->ping_work);
}
} else {
/*
@@ -835,7 +836,7 @@ static int carl9170_usb_load_firmware(struct ar9170 *ar)
if (err)
goto err_out;
- /* firmware restarts cmd counter */
+ /* now, start the command response counter */
ar->cmd_seq = -1;
return 0;
@@ -852,7 +853,12 @@ int carl9170_usb_restart(struct ar9170 *ar)
if (ar->intf->condition != USB_INTERFACE_BOUND)
return 0;
- /* Disable command response sequence counter. */
+ /*
+ * Disable the command response sequence counter check.
+ * We already know that the device/firmware is in a bad state.
+ * So, no extra points are awarded to anyone who reminds the
+ * driver about that.
+ */
ar->cmd_seq = -2;
err = carl9170_reboot(ar);
@@ -904,6 +910,15 @@ static int carl9170_usb_init_device(struct ar9170 *ar)
{
int err;
+ /*
+ * The carl9170 firmware lets the driver know when it's
+ * ready for action. But we have to be prepared to gracefully
+ * handle all spurious [flushed] messages after each (re-)boot.
+ * Thus the command response counter remains disabled until it
+ * can be safely synchronized.
+ */
+ ar->cmd_seq = -2;
+
err = carl9170_usb_send_rx_irq_urb(ar);
if (err)
goto err_out;
@@ -912,14 +927,21 @@ static int carl9170_usb_init_device(struct ar9170 *ar)
if (err)
goto err_unrx;
+ err = carl9170_usb_open(ar);
+ if (err)
+ goto err_unrx;
+
mutex_lock(&ar->mutex);
err = carl9170_usb_load_firmware(ar);
mutex_unlock(&ar->mutex);
if (err)
- goto err_unrx;
+ goto err_stop;
return 0;
+err_stop:
+ carl9170_usb_stop(ar);
+
err_unrx:
carl9170_usb_cancel_urbs(ar);
@@ -965,10 +987,6 @@ static void carl9170_usb_firmware_finish(struct ar9170 *ar)
if (err)
goto err_freefw;
- err = carl9170_usb_open(ar);
- if (err)
- goto err_unrx;
-
err = carl9170_register(ar);
carl9170_usb_stop(ar);
@@ -1044,7 +1062,6 @@ static int carl9170_usb_probe(struct usb_interface *intf,
atomic_set(&ar->rx_work_urbs, 0);
atomic_set(&ar->rx_anch_urbs, 0);
atomic_set(&ar->rx_pool_urbs, 0);
- ar->cmd_seq = -2;
usb_get_dev(ar->udev);
@@ -1091,10 +1108,6 @@ static int carl9170_usb_suspend(struct usb_interface *intf,
carl9170_usb_cancel_urbs(ar);
- /*
- * firmware automatically reboots for usb suspend.
- */
-
return 0;
}
@@ -1107,12 +1120,20 @@ static int carl9170_usb_resume(struct usb_interface *intf)
return -ENODEV;
usb_unpoison_anchored_urbs(&ar->rx_anch);
+ carl9170_set_state(ar, CARL9170_STOPPED);
- err = carl9170_usb_init_device(ar);
- if (err)
- goto err_unrx;
+ /*
+ * The USB documentation demands that [for suspend] all traffic
+ * to and from the device has to stop. This would be fine, but
+ * there's a catch: the device [USB PHY] does not come back.
+ *
+ * Upon resume the firmware will "kill" itself and the
+ * boot-code sorts out the magic voodoo.
+ * Not very nice, but there's not much that could go wrong.
+ */
+ msleep(1100);
- err = carl9170_usb_open(ar);
+ err = carl9170_usb_init_device(ar);
if (err)
goto err_unrx;
@@ -1134,6 +1155,7 @@ static struct usb_driver carl9170_driver = {
#ifdef CONFIG_PM
.suspend = carl9170_usb_suspend,
.resume = carl9170_usb_resume,
+ .reset_resume = carl9170_usb_resume,
#endif /* CONFIG_PM */
};
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index ff53f078a0b5..15095c035169 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
#ifndef __CARL9170_SHARED_VERSION_H
#define __CARL9170_SHARED_VERSION_H
-#define CARL9170FW_VERSION_YEAR 10
-#define CARL9170FW_VERSION_MONTH 9
-#define CARL9170FW_VERSION_DAY 28
-#define CARL9170FW_VERSION_GIT "1.8.8.3"
+#define CARL9170FW_VERSION_YEAR 11
+#define CARL9170FW_VERSION_MONTH 1
+#define CARL9170FW_VERSION_DAY 22
+#define CARL9170FW_VERSION_GIT "1.9.2"
#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
index 24d63b583b6b..9e1324b67e08 100644
--- a/drivers/net/wireless/ath/carl9170/wlan.h
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -251,7 +251,7 @@ struct carl9170_tx_superdesc {
u8 ampdu_commit_factor:1;
u8 ampdu_unused_bit:1;
u8 queue:2;
- u8 reserved:1;
+ u8 assign_seq:1;
u8 vif_id:3;
u8 fill_in_tsf:1;
u8 cab:1;
@@ -299,6 +299,7 @@ struct _ar9170_tx_hwdesc {
#define CARL9170_TX_SUPER_MISC_QUEUE 0x3
#define CARL9170_TX_SUPER_MISC_QUEUE_S 0
+#define CARL9170_TX_SUPER_MISC_ASSIGN_SEQ 0x4
#define CARL9170_TX_SUPER_MISC_VIF_ID 0x38
#define CARL9170_TX_SUPER_MISC_VIF_ID_S 3
#define CARL9170_TX_SUPER_MISC_FILL_IN_TSF 0x40
@@ -413,6 +414,23 @@ enum ar9170_txq {
__AR9170_NUM_TXQ,
};
+/*
+ * This is a workaround for several undocumented bugs.
+ * Don't mess with the QoS/AC <-> HW Queue map if you don't
+ * know what you are doing.
+ *
+ * Known problems [hardware]:
+ * * The MAC does not aggregate frames on anything other
+ * than the first HW queue.
+ * * when an AMPDU is placed [in the first hw queue] and
+ * additional frames are already queued on a different
+ * hw queue, the MAC will ALWAYS freeze.
+ *
+ * In a nutshell: The hardware can either do QoS or
+ * Aggregation but not both at the same time. As a
+ * result, this makes the device pretty much useless
+ * for any serious 802.11n setup.
+ */
static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 };
#define AR9170_TXQ_DEPTH 32
diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c
index dacfb234f491..5367b1086e09 100644
--- a/drivers/net/wireless/ath/debug.c
+++ b/drivers/net/wireless/ath/debug.c
@@ -15,21 +15,6 @@
*/
#include "ath.h"
-#include "debug.h"
-
-void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
-{
- va_list args;
-
- if (likely(!(common->debug_mask & dbg_mask)))
- return;
-
- va_start(args, fmt);
- printk(KERN_DEBUG "ath: ");
- vprintk(fmt, args);
- va_end(args);
-}
-EXPORT_SYMBOL(ath_print);
const char *ath_opmode_to_string(enum nl80211_iftype opmode)
{
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
deleted file mode 100644
index 64e4af2c2887..000000000000
--- a/drivers/net/wireless/ath/debug.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef ATH_DEBUG_H
-#define ATH_DEBUG_H
-
-#include "ath.h"
-
-/**
- * enum ath_debug_level - atheros wireless debug level
- *
- * @ATH_DBG_RESET: reset processing
- * @ATH_DBG_QUEUE: hardware queue management
- * @ATH_DBG_EEPROM: eeprom processing
- * @ATH_DBG_CALIBRATE: periodic calibration
- * @ATH_DBG_INTERRUPT: interrupt processing
- * @ATH_DBG_REGULATORY: regulatory processing
- * @ATH_DBG_ANI: adaptive noise immunitive processing
- * @ATH_DBG_XMIT: basic xmit operation
- * @ATH_DBG_BEACON: beacon handling
- * @ATH_DBG_CONFIG: configuration of the hardware
- * @ATH_DBG_FATAL: fatal errors, this is the default, DBG_DEFAULT
- * @ATH_DBG_PS: power save processing
- * @ATH_DBG_HWTIMER: hardware timer handling
- * @ATH_DBG_BTCOEX: bluetooth coexistance
- * @ATH_DBG_BSTUCK: stuck beacons
- * @ATH_DBG_ANY: enable all debugging
- *
- * The debug level is used to control the amount and type of debugging output
- * we want to see. Each driver has its own method for enabling debugging and
- * modifying debug level states -- but this is typically done through a
- * module parameter 'debug' along with a respective 'debug' debugfs file
- * entry.
- */
-enum ATH_DEBUG {
- ATH_DBG_RESET = 0x00000001,
- ATH_DBG_QUEUE = 0x00000002,
- ATH_DBG_EEPROM = 0x00000004,
- ATH_DBG_CALIBRATE = 0x00000008,
- ATH_DBG_INTERRUPT = 0x00000010,
- ATH_DBG_REGULATORY = 0x00000020,
- ATH_DBG_ANI = 0x00000040,
- ATH_DBG_XMIT = 0x00000080,
- ATH_DBG_BEACON = 0x00000100,
- ATH_DBG_CONFIG = 0x00000200,
- ATH_DBG_FATAL = 0x00000400,
- ATH_DBG_PS = 0x00000800,
- ATH_DBG_HWTIMER = 0x00001000,
- ATH_DBG_BTCOEX = 0x00002000,
- ATH_DBG_WMI = 0x00004000,
- ATH_DBG_BSTUCK = 0x00008000,
- ATH_DBG_ANY = 0xffffffff
-};
-
-#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
-
-#ifdef CONFIG_ATH_DEBUG
-void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
- __attribute__ ((format (printf, 3, 4)));
-#else
-static inline void __attribute__ ((format (printf, 3, 4)))
-ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
-{
-}
-#endif /* CONFIG_ATH_DEBUG */
-
-/** Returns string describing opmode, or NULL if unknown mode. */
-#ifdef CONFIG_ATH_DEBUG
-const char *ath_opmode_to_string(enum nl80211_iftype opmode);
-#else
-static inline const char *ath_opmode_to_string(enum nl80211_iftype opmode)
-{
- return "UNKNOWN";
-}
-#endif
-
-#endif /* ATH_DEBUG_H */
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index bd21a4d82085..37b8e115375a 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -20,7 +20,6 @@
#include "ath.h"
#include "reg.h"
-#include "debug.h"
#define REG_READ (common->ops->read)
#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
@@ -37,8 +36,7 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
void *ah = common->ah;
if (entry >= common->keymax) {
- ath_print(common, ATH_DBG_FATAL,
- "keychache entry %u out of range\n", entry);
+ ath_err(common, "keycache entry %u out of range\n", entry);
return false;
}
@@ -60,6 +58,11 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
+ if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
+ REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
+ AR_KEYTABLE_TYPE_CLR);
+ }
}
@@ -67,15 +70,15 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
}
EXPORT_SYMBOL(ath_hw_keyreset);
-bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
+static bool ath_hw_keysetmac(struct ath_common *common,
+ u16 entry, const u8 *mac)
{
u32 macHi, macLo;
u32 unicast_flag = AR_KEYTABLE_VALID;
void *ah = common->ah;
if (entry >= common->keymax) {
- ath_print(common, ATH_DBG_FATAL,
- "keychache entry %u out of range\n", entry);
+ ath_err(common, "keycache entry %u out of range\n", entry);
return false;
}
@@ -107,17 +110,16 @@ bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
return true;
}
-bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
- const struct ath_keyval *k,
- const u8 *mac)
+static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
+ const struct ath_keyval *k,
+ const u8 *mac)
{
void *ah = common->ah;
u32 key0, key1, key2, key3, key4;
u32 keyType;
if (entry >= common->keymax) {
- ath_print(common, ATH_DBG_FATAL,
- "keycache entry %u out of range\n", entry);
+ ath_err(common, "keycache entry %u out of range\n", entry);
return false;
}
@@ -127,8 +129,8 @@ bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
break;
case ATH_CIPHER_AES_CCM:
if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) {
- ath_print(common, ATH_DBG_ANY,
- "AES-CCM not supported by this mac rev\n");
+ ath_dbg(common, ATH_DBG_ANY,
+ "AES-CCM not supported by this mac rev\n");
return false;
}
keyType = AR_KEYTABLE_TYPE_CCM;
@@ -136,15 +138,15 @@ bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
case ATH_CIPHER_TKIP:
keyType = AR_KEYTABLE_TYPE_TKIP;
if (entry + 64 >= common->keymax) {
- ath_print(common, ATH_DBG_ANY,
- "entry %u inappropriate for TKIP\n", entry);
+ ath_dbg(common, ATH_DBG_ANY,
+ "entry %u inappropriate for TKIP\n", entry);
return false;
}
break;
case ATH_CIPHER_WEP:
if (k->kv_len < WLAN_KEY_LEN_WEP40) {
- ath_print(common, ATH_DBG_ANY,
- "WEP key length %u too small\n", k->kv_len);
+ ath_dbg(common, ATH_DBG_ANY,
+ "WEP key length %u too small\n", k->kv_len);
return false;
}
if (k->kv_len <= WLAN_KEY_LEN_WEP40)
@@ -158,8 +160,7 @@ bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
keyType = AR_KEYTABLE_TYPE_CLR;
break;
default:
- ath_print(common, ATH_DBG_FATAL,
- "cipher %u not supported\n", k->kv_type);
+ ath_err(common, "cipher %u not supported\n", k->kv_type);
return false;
}
@@ -340,8 +341,7 @@ static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
if (!ath_hw_set_keycache_entry(common, keyix, hk, NULL)) {
/* TX MIC entry failed. No need to proceed further */
- ath_print(common, ATH_DBG_FATAL,
- "Setting TX MIC Key Failed\n");
+ ath_err(common, "Setting TX MIC Key Failed\n");
return 0;
}
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index 487193f1de1a..c325202fdc5f 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -56,3 +56,23 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
return skb;
}
EXPORT_SYMBOL(ath_rxbuf_alloc);
+
+int ath_printk(const char *level, struct ath_common *common,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int rtn;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ rtn = printk("%sath: %pV", level, &vaf);
+
+ va_end(args);
+
+ return rtn;
+}
+EXPORT_SYMBOL(ath_printk);
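
ath_printk() replaces the old ath_print() with the kernel's %pV / struct va_format idiom, so the format string and arguments are expanded in a single printk() call. The ath_err() wrapper used by the key.c changes above is assumed to be a thin macro on top of it, roughly:

#define ath_err(common, fmt, ...)				\
	ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)

/* e.g. ath_err(common, "keycache entry %u out of range\n", entry); */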
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 3f4244f56ce5..f828f294ba89 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -158,6 +158,13 @@ ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
}
}
+bool ath_is_49ghz_allowed(u16 regdomain)
+{
+ /* possibly more */
+ return regdomain == MKK9_MKKC;
+}
+EXPORT_SYMBOL(ath_is_49ghz_allowed);
+
/* Frequency is one where radar detection is required */
static bool ath_is_radar_freq(u16 center_freq)
{
@@ -342,6 +349,14 @@ int ath_reg_notifier_apply(struct wiphy *wiphy,
/* We always apply this */
ath_reg_apply_radar_flags(wiphy);
+ /*
+ * This would happen when we have sent a custom regulatory request
+ * for a world regulatory domain and the scheduler hasn't yet processed
+ * any pending requests in the queue.
+ */
+ if (!request)
+ return 0;
+
switch (request->initiator) {
case NL80211_REGDOM_SET_BY_DRIVER:
case NL80211_REGDOM_SET_BY_CORE:
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 345dd9721b41..172f63f671cf 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -250,6 +250,7 @@ enum CountryCode {
};
bool ath_is_world_regd(struct ath_regulatory *reg);
+bool ath_is_49ghz_allowed(u16 regdomain);
int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
int (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request));
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index c8f7090b27d3..46e382ed46aa 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1161,7 +1161,7 @@ static irqreturn_t service_interrupt(int irq, void *dev_id)
struct atmel_private *priv = netdev_priv(dev);
u8 isr;
int i = -1;
- static u8 irq_order[] = {
+ static const u8 irq_order[] = {
ISR_OUT_OF_RANGE,
ISR_RxCOMPLETE,
ISR_TxCOMPLETE,
@@ -3771,7 +3771,9 @@ static int probe_atmel_card(struct net_device *dev)
if (rc) {
if (dev->dev_addr[0] == 0xFF) {
- u8 default_mac[] = {0x00, 0x04, 0x25, 0x00, 0x00, 0x00};
+ static const u8 default_mac[] = {
+ 0x00, 0x04, 0x25, 0x00, 0x00, 0x00
+ };
printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
memcpy(dev->dev_addr, default_mac, 6);
}
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 0a00d42642cd..480595f04411 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -86,15 +86,16 @@ config B43_PIO
select SSB_BLOCKIO
default y
-config B43_NPHY
- bool "Pre IEEE 802.11n support (BROKEN)"
- depends on B43 && EXPERIMENTAL && BROKEN
+config B43_PHY_N
+ bool "Support for 802.11n (N-PHY) devices (EXPERIMENTAL)"
+ depends on B43 && EXPERIMENTAL
---help---
- Support for the IEEE 802.11n draft.
+ Support for the N-PHY.
- THIS IS BROKEN AND DOES NOT WORK YET.
+ This enables support for devices with N-PHY.
- SAY N.
+ Say N if you expect high stability and performance. Saying Y will not
+ affect support for other devices and may provide basic functionality.
config B43_PHY_LP
bool "Support for low-power (LP-PHY) devices (EXPERIMENTAL)"
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 69d4af09a6cb..cef334a8c669 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -1,12 +1,12 @@
b43-y += main.o
b43-y += tables.o
-b43-$(CONFIG_B43_NPHY) += tables_nphy.o
-b43-$(CONFIG_B43_NPHY) += radio_2055.o
-b43-$(CONFIG_B43_NPHY) += radio_2056.o
+b43-$(CONFIG_B43_PHY_N) += tables_nphy.o
+b43-$(CONFIG_B43_PHY_N) += radio_2055.o
+b43-$(CONFIG_B43_PHY_N) += radio_2056.o
b43-y += phy_common.o
b43-y += phy_g.o
b43-y += phy_a.o
-b43-$(CONFIG_B43_NPHY) += phy_n.o
+b43-$(CONFIG_B43_PHY_N) += phy_n.o
b43-$(CONFIG_B43_PHY_LP) += phy_lp.o
b43-$(CONFIG_B43_PHY_LP) += tables_lpphy.o
b43-y += sysfs.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 72821c456b02..bd4cb75b6ca3 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -153,6 +153,19 @@
#define B43_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna
* with bluetooth */
+/* SPROM boardflags2_lo values */
+#define B43_BFL2_RXBB_INT_REG_DIS 0x0001 /* external RX BB regulator present */
+#define B43_BFL2_APLL_WAR 0x0002 /* alternative A-band PLL settings implemented */
+#define B43_BFL2_TXPWRCTRL_EN 0x0004 /* permits enabling TX Power Control */
+#define B43_BFL2_2X4_DIV 0x0008 /* 2x4 diversity switch */
+#define B43_BFL2_5G_PWRGAIN 0x0010 /* supports 5G band power gain */
+#define B43_BFL2_PCIEWAR_OVR 0x0020 /* overrides ASPM and Clkreq settings */
+#define B43_BFL2_CAESERS_BRD 0x0040 /* is Caesers board (unused) */
+#define B43_BFL2_BTC3WIRE 0x0080 /* uses 3-wire bluetooth coexistence */
+#define B43_BFL2_SKWRKFEM_BRD 0x0100 /* 4321mcm93 uses Skyworks FEM */
+#define B43_BFL2_SPUR_WAR 0x0200 /* has a workaround for clock-harmonic spurs */
+#define B43_BFL2_GPLL_WAR 0x0400 /* alternative G-band PLL settings implemented */
+
/* GPIO register offset, in both ChipCommon and PCI core. */
#define B43_GPIO_CONTROL 0x6c
@@ -403,10 +416,10 @@ enum {
/* 802.11 core specific TM State Low (SSB_TMSLOW) flags */
#define B43_TMSLOW_GMODE 0x20000000 /* G Mode Enable */
-#define B43_TMSLOW_PHYCLKSPEED 0x00C00000 /* PHY clock speed mask (N-PHY only) */
-#define B43_TMSLOW_PHYCLKSPEED_40MHZ 0x00000000 /* 40 MHz PHY */
-#define B43_TMSLOW_PHYCLKSPEED_80MHZ 0x00400000 /* 80 MHz PHY */
-#define B43_TMSLOW_PHYCLKSPEED_160MHZ 0x00800000 /* 160 MHz PHY */
+#define B43_TMSLOW_PHY_BANDWIDTH 0x00C00000 /* PHY band width and clock speed mask (N-PHY only) */
+#define B43_TMSLOW_PHY_BANDWIDTH_10MHZ 0x00000000 /* 10 MHz bandwidth, 40 MHz PHY */
+#define B43_TMSLOW_PHY_BANDWIDTH_20MHZ 0x00400000 /* 20 MHz bandwidth, 80 MHz PHY */
+#define B43_TMSLOW_PHY_BANDWIDTH_40MHZ 0x00800000 /* 40 MHz bandwidth, 160 MHz PHY */
#define B43_TMSLOW_PLLREFSEL 0x00200000 /* PLL Frequency Reference Select (rev >= 5) */
#define B43_TMSLOW_MACPHYCLKEN 0x00100000 /* MAC PHY Clock Control Enable (rev >= 5) */
#define B43_TMSLOW_PHYRESET 0x00080000 /* PHY Reset */
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 10d0aaf754c5..3d5566e7af0a 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -415,11 +415,6 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
static void free_ringmemory(struct b43_dmaring *ring)
{
- gfp_t flags = GFP_KERNEL;
-
- if (ring->type == B43_DMA_64BIT)
- flags |= GFP_DMA;
-
dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
ring->descbase, ring->dmabase);
}
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index a1186525c70d..57eb5b649730 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -322,59 +322,83 @@ static int b43_ratelimit(struct b43_wl *wl)
void b43info(struct b43_wl *wl, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
if (b43_modparam_verbose < B43_VERBOSITY_INFO)
return;
if (!b43_ratelimit(wl))
return;
+
va_start(args, fmt);
- printk(KERN_INFO "b43-%s: ",
- (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan");
- vprintk(fmt, args);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_INFO "b43-%s: %pV",
+ (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
+
va_end(args);
}
void b43err(struct b43_wl *wl, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
if (b43_modparam_verbose < B43_VERBOSITY_ERROR)
return;
if (!b43_ratelimit(wl))
return;
+
va_start(args, fmt);
- printk(KERN_ERR "b43-%s ERROR: ",
- (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan");
- vprintk(fmt, args);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_ERR "b43-%s ERROR: %pV",
+ (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
+
va_end(args);
}
void b43warn(struct b43_wl *wl, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
if (b43_modparam_verbose < B43_VERBOSITY_WARN)
return;
if (!b43_ratelimit(wl))
return;
+
va_start(args, fmt);
- printk(KERN_WARNING "b43-%s warning: ",
- (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan");
- vprintk(fmt, args);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_WARNING "b43-%s warning: %pV",
+ (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
+
va_end(args);
}
void b43dbg(struct b43_wl *wl, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
if (b43_modparam_verbose < B43_VERBOSITY_DEBUG)
return;
+
va_start(args, fmt);
- printk(KERN_DEBUG "b43-%s debug: ",
- (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan");
- vprintk(fmt, args);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_DEBUG "b43-%s debug: %pV",
+ (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
+
va_end(args);
}
@@ -1126,6 +1150,8 @@ void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags)
flags |= B43_TMSLOW_PHYCLKEN;
flags |= B43_TMSLOW_PHYRESET;
+ if (dev->phy.type == B43_PHYTYPE_N)
+ flags |= B43_TMSLOW_PHY_BANDWIDTH_20MHZ; /* Make 20 MHz def */
ssb_device_enable(dev->dev, flags);
msleep(2); /* Wait for the PLL to turn on. */
@@ -2095,8 +2121,10 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
filename = "ucode13";
else if (rev == 14)
filename = "ucode14";
- else if (rev >= 15)
+ else if (rev == 15)
filename = "ucode15";
+ else if ((rev >= 16) && (rev <= 20))
+ filename = "ucode16_mimo";
else
goto err_no_ucode;
err = b43_do_request_fw(ctx, filename, &fw->ucode);
@@ -2139,7 +2167,9 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
goto err_no_initvals;
break;
case B43_PHYTYPE_N:
- if ((rev >= 11) && (rev <= 12))
+ if (rev >= 16)
+ filename = "n0initvals16";
+ else if ((rev >= 11) && (rev <= 12))
filename = "n0initvals11";
else
goto err_no_initvals;
@@ -2183,7 +2213,9 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
goto err_no_initvals;
break;
case B43_PHYTYPE_N:
- if ((rev >= 11) && (rev <= 12))
+ if (rev >= 16)
+ filename = "n0bsinitvals16";
+ else if ((rev >= 11) && (rev <= 12))
filename = "n0bsinitvals11";
else
goto err_no_initvals;
@@ -3171,7 +3203,7 @@ static void b43_tx_work(struct work_struct *work)
mutex_unlock(&wl->mutex);
}
-static int b43_op_tx(struct ieee80211_hw *hw,
+static void b43_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
@@ -3179,14 +3211,12 @@ static int b43_op_tx(struct ieee80211_hw *hw,
if (unlikely(skb->len < 2 + 2 + 6)) {
/* Too short, this can't be a valid frame. */
dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ return;
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
skb_queue_tail(&wl->tx_queue, skb);
ieee80211_queue_work(wl->hw, &wl->tx_work);
-
- return NETDEV_TX_OK;
}
static void b43_qos_params_upload(struct b43_wldev *dev,
@@ -4022,9 +4052,9 @@ static int b43_phy_versioning(struct b43_wldev *dev)
if (phy_rev > 9)
unsupported = 1;
break;
-#ifdef CONFIG_B43_NPHY
+#ifdef CONFIG_B43_PHY_N
case B43_PHYTYPE_N:
- if (phy_rev > 4)
+ if (phy_rev > 9)
unsupported = 1;
break;
#endif
@@ -5067,7 +5097,7 @@ static void b43_print_driverinfo(void)
#ifdef CONFIG_B43_PCMCIA
feat_pcmcia = "M";
#endif
-#ifdef CONFIG_B43_NPHY
+#ifdef CONFIG_B43_PHY_N
feat_nphy = "N";
#endif
#ifdef CONFIG_B43_LEDS
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 7b2ea6781457..b5c5ce94d3fd 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -50,7 +50,7 @@ int b43_phy_allocate(struct b43_wldev *dev)
phy->ops = &b43_phyops_g;
break;
case B43_PHYTYPE_N:
-#ifdef CONFIG_B43_NPHY
+#ifdef CONFIG_B43_PHY_N
phy->ops = &b43_phyops_n;
#endif
break;
@@ -231,6 +231,7 @@ void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
u16 b43_phy_read(struct b43_wldev *dev, u16 reg)
{
assert_mac_suspended(dev);
+ dev->phy.writes_counter = 0;
return dev->phy.ops->phy_read(dev, reg);
}
@@ -238,6 +239,10 @@ void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value)
{
assert_mac_suspended(dev);
dev->phy.ops->phy_write(dev, reg, value);
+ if (++dev->phy.writes_counter == B43_MAX_WRITES_IN_ROW) {
+ b43_read16(dev, B43_MMIO_PHY_VER);
+ dev->phy.writes_counter = 0;
+ }
}
void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg)
@@ -424,12 +429,21 @@ void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on)
b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
}
+
+bool b43_channel_type_is_40mhz(enum nl80211_channel_type channel_type)
+{
+ return (channel_type == NL80211_CHAN_HT40MINUS ||
+ channel_type == NL80211_CHAN_HT40PLUS);
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */
struct b43_c32 b43_cordic(int theta)
{
- u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
- 58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
- 229, 115, 57, 29, };
+ static const u32 arctg[] = {
+ 2949120, 1740967, 919879, 466945, 234379, 117304,
+ 58666, 29335, 14668, 7334, 3667, 1833,
+ 917, 458, 229, 115, 57, 29,
+ };
u8 i;
s32 tmp;
s8 signx = 1;
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 0e6194228845..2401bee8b081 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -39,6 +39,9 @@ struct b43_c32 { s32 i, q; };
#define B43_PHYVER_TYPE_SHIFT 8
#define B43_PHYVER_VERSION 0x00FF
+/* PHY writes need to be flushed if we reach the limit */
+#define B43_MAX_WRITES_IN_ROW 24
+
/**
* enum b43_interference_mitigation - Interference Mitigation mode
*
@@ -232,6 +235,9 @@ struct b43_phy {
/* PHY revision number. */
u8 rev;
+ /* Count writes since last read */
+ u8 writes_counter;
+
/* Radio versioning */
u16 radio_manuf; /* Radio manufacturer */
u16 radio_ver; /* Radio version */
@@ -430,6 +436,8 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset);
*/
void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on);
+bool b43_channel_type_is_40mhz(enum nl80211_channel_type channel_type);
+
struct b43_c32 b43_cordic(int theta);
#endif /* LINUX_B43_PHY_COMMON_H_ */
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 0dc33b65e86b..be4828167012 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -1919,7 +1919,7 @@ static void b43_hardware_pctl_init_gphy(struct b43_wldev *dev)
b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL);
}
-/* Intialize B/G PHY power control */
+/* Initialize B/G PHY power control */
static void b43_phy_init_pctl(struct b43_wldev *dev)
{
struct ssb_bus *bus = dev->dev->bus;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index e0f2d122e124..8a00f9a95dbb 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -67,6 +67,18 @@ enum b43_nphy_rf_sequence {
B43_RFSEQ_UPDATE_GAINU,
};
+enum b43_nphy_rssi_type {
+ B43_NPHY_RSSI_X = 0,
+ B43_NPHY_RSSI_Y,
+ B43_NPHY_RSSI_Z,
+ B43_NPHY_RSSI_PWRDET,
+ B43_NPHY_RSSI_TSSI_I,
+ B43_NPHY_RSSI_TSSI_Q,
+ B43_NPHY_RSSI_TBD,
+};
+
+static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev,
+ bool enable);
static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
u8 *events, u8 *delays, u8 length);
static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
@@ -76,13 +88,6 @@ static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
u16 value, u8 core);
-static inline bool b43_channel_type_is_40mhz(
- enum nl80211_channel_type channel_type)
-{
- return (channel_type == NL80211_CHAN_HT40MINUS ||
- channel_type == NL80211_CHAN_HT40PLUS);
-}
-
void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
{//TODO
}
@@ -134,6 +139,99 @@ static void b43_chantab_radio_upload(struct b43_wldev *dev,
b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
}
+static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
+ const struct b43_nphy_channeltab_entry_rev3 *e)
+{
+ b43_radio_write(dev, B2056_SYN_PLL_VCOCAL1, e->radio_syn_pll_vcocal1);
+ b43_radio_write(dev, B2056_SYN_PLL_VCOCAL2, e->radio_syn_pll_vcocal2);
+ b43_radio_write(dev, B2056_SYN_PLL_REFDIV, e->radio_syn_pll_refdiv);
+ b43_radio_write(dev, B2056_SYN_PLL_MMD2, e->radio_syn_pll_mmd2);
+ b43_radio_write(dev, B2056_SYN_PLL_MMD1, e->radio_syn_pll_mmd1);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1,
+ e->radio_syn_pll_loopfilter1);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2,
+ e->radio_syn_pll_loopfilter2);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER3,
+ e->radio_syn_pll_loopfilter3);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4,
+ e->radio_syn_pll_loopfilter4);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER5,
+ e->radio_syn_pll_loopfilter5);
+ b43_radio_write(dev, B2056_SYN_RESERVED_ADDR27,
+ e->radio_syn_reserved_addr27);
+ b43_radio_write(dev, B2056_SYN_RESERVED_ADDR28,
+ e->radio_syn_reserved_addr28);
+ b43_radio_write(dev, B2056_SYN_RESERVED_ADDR29,
+ e->radio_syn_reserved_addr29);
+ b43_radio_write(dev, B2056_SYN_LOGEN_VCOBUF1,
+ e->radio_syn_logen_vcobuf1);
+ b43_radio_write(dev, B2056_SYN_LOGEN_MIXER2, e->radio_syn_logen_mixer2);
+ b43_radio_write(dev, B2056_SYN_LOGEN_BUF3, e->radio_syn_logen_buf3);
+ b43_radio_write(dev, B2056_SYN_LOGEN_BUF4, e->radio_syn_logen_buf4);
+
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA_TUNE,
+ e->radio_rx0_lnaa_tune);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG_TUNE,
+ e->radio_rx0_lnag_tune);
+
+ b43_radio_write(dev, B2056_TX0 | B2056_TX_INTPAA_BOOST_TUNE,
+ e->radio_tx0_intpaa_boost_tune);
+ b43_radio_write(dev, B2056_TX0 | B2056_TX_INTPAG_BOOST_TUNE,
+ e->radio_tx0_intpag_boost_tune);
+ b43_radio_write(dev, B2056_TX0 | B2056_TX_PADA_BOOST_TUNE,
+ e->radio_tx0_pada_boost_tune);
+ b43_radio_write(dev, B2056_TX0 | B2056_TX_PADG_BOOST_TUNE,
+ e->radio_tx0_padg_boost_tune);
+ b43_radio_write(dev, B2056_TX0 | B2056_TX_PGAA_BOOST_TUNE,
+ e->radio_tx0_pgaa_boost_tune);
+ b43_radio_write(dev, B2056_TX0 | B2056_TX_PGAG_BOOST_TUNE,
+ e->radio_tx0_pgag_boost_tune);
+ b43_radio_write(dev, B2056_TX0 | B2056_TX_MIXA_BOOST_TUNE,
+ e->radio_tx0_mixa_boost_tune);
+ b43_radio_write(dev, B2056_TX0 | B2056_TX_MIXG_BOOST_TUNE,
+ e->radio_tx0_mixg_boost_tune);
+
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA_TUNE,
+ e->radio_rx1_lnaa_tune);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG_TUNE,
+ e->radio_rx1_lnag_tune);
+
+ b43_radio_write(dev, B2056_TX1 | B2056_TX_INTPAA_BOOST_TUNE,
+ e->radio_tx1_intpaa_boost_tune);
+ b43_radio_write(dev, B2056_TX1 | B2056_TX_INTPAG_BOOST_TUNE,
+ e->radio_tx1_intpag_boost_tune);
+ b43_radio_write(dev, B2056_TX1 | B2056_TX_PADA_BOOST_TUNE,
+ e->radio_tx1_pada_boost_tune);
+ b43_radio_write(dev, B2056_TX1 | B2056_TX_PADG_BOOST_TUNE,
+ e->radio_tx1_padg_boost_tune);
+ b43_radio_write(dev, B2056_TX1 | B2056_TX_PGAA_BOOST_TUNE,
+ e->radio_tx1_pgaa_boost_tune);
+ b43_radio_write(dev, B2056_TX1 | B2056_TX_PGAG_BOOST_TUNE,
+ e->radio_tx1_pgag_boost_tune);
+ b43_radio_write(dev, B2056_TX1 | B2056_TX_MIXA_BOOST_TUNE,
+ e->radio_tx1_mixa_boost_tune);
+ b43_radio_write(dev, B2056_TX1 | B2056_TX_MIXG_BOOST_TUNE,
+ e->radio_tx1_mixg_boost_tune);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */
+static void b43_radio_2056_setup(struct b43_wldev *dev,
+ const struct b43_nphy_channeltab_entry_rev3 *e)
+{
+ B43_WARN_ON(dev->phy.rev < 3);
+
+ b43_chantab_radio_2056_upload(dev, e);
+ /* TODO */
+ udelay(50);
+ /* VCO calibration */
+ b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00);
+ b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x38);
+ b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x18);
+ b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x38);
+ b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x39);
+ udelay(300);
+}
+
static void b43_chantab_phy_upload(struct b43_wldev *dev,
const struct b43_phy_n_sfo_cfg *e)
{
@@ -145,9 +243,154 @@ static void b43_chantab_phy_upload(struct b43_wldev *dev,
b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
+static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u16 tmp;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ nphy->txpwrctrl = enable;
+ if (!enable) {
+ if (dev->phy.rev >= 3)
+ ; /* TODO */
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840);
+ for (i = 0; i < 84; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40);
+ for (i = 0; i < 84; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
+
+ tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+ if (dev->phy.rev >= 3)
+ tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+ b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp);
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
+ } else {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
+ }
+
+ if (dev->phy.rev == 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+ ~B43_NPHY_BPHY_CTL3_SCALE, 0x53);
+ else if (dev->phy.rev < 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+ ~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);
+
+ if (dev->phy.rev < 2 && 0)
+ ; /* TODO */
+ } else {
+ b43err(dev->wl, "enabling tx pwr ctrl not implemented yet\n");
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
{
- //TODO
+ struct b43_phy_n *nphy = dev->phy.n;
+ struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
+
+ u8 txpi[2], bbmult, i;
+ u16 tmp, radio_gain, dac_gain;
+ u16 freq = dev->phy.channel_freq;
+ u32 txgain;
+ /* u32 gaintbl; rev3+ */
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (dev->phy.rev >= 3) {
+ txpi[0] = 40;
+ txpi[1] = 40;
+ } else if (sprom->revision < 4) {
+ txpi[0] = 72;
+ txpi[1] = 72;
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ txpi[0] = sprom->txpid2g[0];
+ txpi[1] = sprom->txpid2g[1];
+ } else if (freq >= 4900 && freq < 5100) {
+ txpi[0] = sprom->txpid5gl[0];
+ txpi[1] = sprom->txpid5gl[1];
+ } else if (freq >= 5100 && freq < 5500) {
+ txpi[0] = sprom->txpid5g[0];
+ txpi[1] = sprom->txpid5g[1];
+ } else if (freq >= 5500) {
+ txpi[0] = sprom->txpid5gh[0];
+ txpi[1] = sprom->txpid5gh[1];
+ } else {
+ txpi[0] = 91;
+ txpi[1] = 91;
+ }
+ }
+
+ /*
+ for (i = 0; i < 2; i++) {
+ nphy->txpwrindex[i].index_internal = txpi[i];
+ nphy->txpwrindex[i].index_internal_save = txpi[i];
+ }
+ */
+
+ for (i = 0; i < 2; i++) {
+ if (dev->phy.rev >= 3) {
+ /* FIXME: support 5GHz */
+ txgain = b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+ radio_gain = (txgain >> 16) & 0x1FFFF;
+ } else {
+ txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
+ radio_gain = (txgain >> 16) & 0x1FFF;
+ }
+
+ dac_gain = (txgain >> 8) & 0x3F;
+ bbmult = txgain & 0xFF;
+
+ if (dev->phy.rev >= 3) {
+ if (i == 0)
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
+ else
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
+ } else {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
+ }
+
+ if (i == 0)
+ b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain);
+ else
+ b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D10 + i);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, radio_gain);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C57);
+ tmp = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+
+ if (i == 0)
+ tmp = (tmp & 0x00FF) | (bbmult << 8);
+ else
+ tmp = (tmp & 0xFF00) | bbmult;
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C57);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, tmp);
+
+ if (0)
+ ; /* TODO */
+ }
+
+ b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
}
@@ -187,11 +430,12 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
bool workaround = false;
if (sprom->revision < 4)
- workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM ||
- binfo->type != 0x46D ||
- binfo->rev < 0x41);
+ workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM &&
+ binfo->type == 0x46D &&
+ binfo->rev >= 0x41);
else
- workaround = ((sprom->boardflags_hi & B43_BFH_NOPA) == 0);
+ workaround =
+ !(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS);
b43_radio_mask(dev, B2055_MASTER1, 0xFFF3);
if (workaround) {
@@ -240,23 +484,55 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
static void b43_radio_init2055(struct b43_wldev *dev)
{
b43_radio_init2055_pre(dev);
- if (b43_status(dev) < B43_STAT_INITIALIZED)
- b2055_upload_inittab(dev, 0, 1);
- else
- b2055_upload_inittab(dev, 0/*FIXME on 5ghz band*/, 0);
+ if (b43_status(dev) < B43_STAT_INITIALIZED) {
+ /* Follow wl, not specs. Do not force uploading all regs */
+ b2055_upload_inittab(dev, 0, 0);
+ } else {
+ bool ghz5 = b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ;
+ b2055_upload_inittab(dev, ghz5, 0);
+ }
b43_radio_init2055_post(dev);
}
+static void b43_radio_init2056_pre(struct b43_wldev *dev)
+{
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+ /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_OEPORFORCE);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_CHIP0PU);
+}
+
+static void b43_radio_init2056_post(struct b43_wldev *dev)
+{
+ b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB);
+ b43_radio_set(dev, B2056_SYN_COM_PU, 0x2);
+ b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2);
+ msleep(1);
+ b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
+ b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
+ b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
+ /*
+ if (nphy->init_por)
+ Call Radio 2056 Recalibrate
+ */
+}
+
/*
* Initialize a Broadcom 2056 N-radio
* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
*/
static void b43_radio_init2056(struct b43_wldev *dev)
{
- /* TODO */
+ b43_radio_init2056_pre(dev);
+ b2056_upload_inittabs(dev, 0, 0);
+ b43_radio_init2056_post(dev);
}
-
/*
* Upload the N-PHY tables.
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
@@ -453,6 +729,8 @@ static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
}
}
+#if 0
+/* Ready but not used anywhere */
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
{
@@ -534,6 +812,7 @@ static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
}
+#endif
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
@@ -569,7 +848,6 @@ static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
ii = est.i1_pwr;
qq = est.q1_pwr;
} else {
- B43_WARN_ON(1);
continue;
}
@@ -651,7 +929,8 @@ static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
-static void b43_nphy_write_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
+ const u16 *clip_st)
{
b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
@@ -727,7 +1006,7 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
struct b43_phy_n *nphy = phy->n;
if (enable) {
- u16 clip[] = { 0xFFFF, 0xFFFF };
+ static const u16 clip[] = { 0xFFFF, 0xFFFF };
if (nphy->deaf_count++ == 0) {
nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
b43_nphy_classifier(dev, 0x7, 0);
@@ -839,7 +1118,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
u16 data[4];
s16 gain[2];
u16 minmax[2];
- u16 lna_gain[4] = { -2, 10, 19, 25 };
+ static const u16 lna_gain[4] = { -2, 10, 19, 25 };
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
@@ -871,7 +1150,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
data[2] = lna_gain[2] + gain[i];
data[3] = lna_gain[3] + gain[i];
}
- b43_ntab_write_bulk(dev, B43_NTAB16(10, 8), 4, data);
+ b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data);
minmax[i] = 23 + gain[i];
}
@@ -889,22 +1168,98 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
+ struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
+
+ /* PHY rev 0, 1, 2 */
u8 i, j;
u8 code;
+ u16 tmp;
+ u8 rfseq_events[3] = { 6, 8, 7 };
+ u8 rfseq_delays[3] = { 10, 30, 1 };
- /* TODO: for PHY >= 3
- s8 *lna1_gain, *lna2_gain;
- u8 *gain_db, *gain_bits;
- u16 *rfseq_init;
+ /* PHY rev >= 3 */
+ bool ghz5;
+ bool ext_lna;
+ u16 rssi_gain;
+ struct nphy_gain_ctl_workaround_entry *e;
u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
- */
-
- u8 rfseq_events[3] = { 6, 8, 7 };
- u8 rfseq_delays[3] = { 10, 30, 1 };
if (dev->phy.rev >= 3) {
- /* TODO */
+ /* Prepare values */
+ ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL)
+ & B43_NPHY_BANDCTL_5GHZ;
+ ext_lna = sprom->boardflags_lo & B43_BFL_EXTLNA;
+ e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
+ if (ghz5 && dev->phy.rev >= 5)
+ rssi_gain = 0x90;
+ else
+ rssi_gain = 0x50;
+
+ b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
+
+ /* Set Clip 2 detect */
+ b43_phy_set(dev, B43_NPHY_C1_CGAINI,
+ B43_NPHY_C1_CGAINI_CL2DETECT);
+ b43_phy_set(dev, B43_NPHY_C2_CGAINI,
+ B43_NPHY_C2_CGAINI_CL2DETECT);
+
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN,
+ rssi_gain);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN,
+ rssi_gain);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
+
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
+ b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
+ b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
+ b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
+
+ b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
+ b43_phy_write(dev, 0x2A7, e->init_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
+ e->rfseq_init);
+ b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
+
+ /* TODO: check defines. They do not match variable names */
+ b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
+ b43_phy_write(dev, 0x2A9, e->cliphi_gain);
+ b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
+ b43_phy_write(dev, 0x2AB, e->clipmd_gain);
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
+ b43_phy_write(dev, 0x2AD, e->cliplo_gain);
+
+ b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
+ b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
+ b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
+ b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
+ b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
+ b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+ ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
+ b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+ ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
+ b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
} else {
/* Set Clip 2 detect */
b43_phy_set(dev, B43_NPHY_C1_CGAINI,
@@ -913,15 +1268,15 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
B43_NPHY_C2_CGAINI_CL2DETECT);
/* Set narrowband clip threshold */
- b43_phy_set(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
- b43_phy_set(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
+ b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
+ b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
if (!dev->phy.is_40mhz) {
/* Set dwell lengths */
- b43_phy_set(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
- b43_phy_set(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
- b43_phy_set(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
- b43_phy_set(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
+ b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
+ b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
+ b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
+ b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
}
/* Set wideband clip 2 threshold */
@@ -943,7 +1298,7 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
}
- b43_phy_set(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
+ b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
if (nphy->gain_boost) {
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
@@ -964,10 +1319,10 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (code << 8 | 0x7C));
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (code << 8 | 0x7C));
+ /* specs mention 2 loops, but wl does 4 */
+ for (i = 0; i < 4; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x7C));
b43_nphy_adjust_lna_gain_table(dev);
@@ -985,31 +1340,33 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (code << 8 | 0x74));
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (code << 8 | 0x74));
+ /* specs mention 2 loops, but wl does 4 */
+ for (i = 0; i < 4; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x74));
}
if (dev->phy.rev == 2) {
for (i = 0; i < 4; i++) {
b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
(0x0400 * i) + 0x0020);
- for (j = 0; j < 21; j++)
+ for (j = 0; j < 21; j++) {
+ tmp = j * (i < 2 ? 3 : 1);
b43_phy_write(dev,
- B43_NPHY_TABLE_DATALO, 3 * j);
+ B43_NPHY_TABLE_DATALO, tmp);
+ }
}
+ }
- b43_nphy_set_rf_sequence(dev, 5,
- rfseq_events, rfseq_delays, 3);
- b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
- ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
- 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
+ b43_nphy_set_rf_sequence(dev, 5,
+ rfseq_events, rfseq_delays, 3);
+ b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
+ ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
+ 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- b43_phy_maskset(dev, B43_PHY_N(0xC5D),
- 0xFF80, 4);
- }
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_phy_maskset(dev, B43_PHY_N(0xC5D),
+ 0xFF80, 4);
}
}
@@ -1026,7 +1383,10 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ u16 tmp16;
+ u32 tmp32;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
b43_nphy_classifier(dev, 1, 0);
else
b43_nphy_classifier(dev, 1, 1);
@@ -1038,7 +1398,82 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
if (dev->phy.rev >= 3) {
+ tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
+ tmp32 &= 0xffffff;
+ b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
+
+ b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
+
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
+ b43_phy_write(dev, 0x2AE, 0x000C);
+
/* TODO */
+
+ tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
+ 0x2 : 0x9C40;
+ b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
+
+ b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
+
+ b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
+ b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+
+ b43_nphy_gain_ctrl_workarounds(dev);
+
+ b43_ntab_write(dev, B43_NTAB32(8, 0), 2);
+ b43_ntab_write(dev, B43_NTAB32(8, 16), 2);
+
+ /* TODO */
+
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+
+ /* N PHY WAR TX Chain Update with hw_phytxchain as argument */
+
+ if ((bus->sprom.boardflags2_lo & B43_BFL2_APLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+ (bus->sprom.boardflags2_lo & B43_BFL2_GPLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+ tmp32 = 0x00088888;
+ else
+ tmp32 = 0x88888888;
+ b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
+ b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
+ b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
+
+ if (dev->phy.rev == 4 &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
+ 0x70);
+ b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
+ 0x70);
+ }
+
+ b43_phy_write(dev, 0x224, 0x039C);
+ b43_phy_write(dev, 0x225, 0x0357);
+ b43_phy_write(dev, 0x226, 0x0317);
+ b43_phy_write(dev, 0x227, 0x02D7);
+ b43_phy_write(dev, 0x228, 0x039C);
+ b43_phy_write(dev, 0x229, 0x0357);
+ b43_phy_write(dev, 0x22A, 0x0317);
+ b43_phy_write(dev, 0x22B, 0x02D7);
+ b43_phy_write(dev, 0x22C, 0x039C);
+ b43_phy_write(dev, 0x22D, 0x0357);
+ b43_phy_write(dev, 0x22E, 0x0317);
+ b43_phy_write(dev, 0x22F, 0x02D7);
} else {
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
nphy->band5g_pwrgain) {
@@ -1049,29 +1484,18 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
}
- /* TODO: convert to b43_ntab_write? */
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2000);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2010);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2002);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2012);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
if (dev->phy.rev < 2) {
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2008);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2018);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2007);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2017);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2006);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2016);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
}
b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
@@ -1565,19 +1989,20 @@ static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
}
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
static void b43_nphy_bphy_init(struct b43_wldev *dev)
{
unsigned int i;
u16 val;
val = 0x1E1F;
- for (i = 0; i < 14; i++) {
+ for (i = 0; i < 16; i++) {
b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val);
val -= 0x202;
}
val = 0x3E3F;
for (i = 0; i < 16; i++) {
- b43_phy_write(dev, B43_PHY_N_BMODE(0x97 + i), val);
+ b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val);
val -= 0x202;
}
b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
@@ -1585,7 +2010,8 @@ static void b43_nphy_bphy_init(struct b43_wldev *dev)
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
- s8 offset, u8 core, u8 rail, u8 type)
+ s8 offset, u8 core, u8 rail,
+ enum b43_nphy_rssi_type type)
{
u16 tmp;
bool core1or5 = (core == 1) || (core == 5);
@@ -1594,53 +2020,59 @@ static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
offset = clamp_val(offset, -32, 31);
tmp = ((scale & 0x3F) << 8) | (offset & 0x3F);
- if (core1or5 && (rail == 0) && (type == 2))
+ if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z))
b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp);
- if (core1or5 && (rail == 1) && (type == 2))
+ if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z))
b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp);
- if (core2or5 && (rail == 0) && (type == 2))
+ if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z))
b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp);
- if (core2or5 && (rail == 1) && (type == 2))
+ if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z))
b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp);
- if (core1or5 && (rail == 0) && (type == 0))
+
+ if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_X))
b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp);
- if (core1or5 && (rail == 1) && (type == 0))
+ if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_X))
b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp);
- if (core2or5 && (rail == 0) && (type == 0))
+ if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_X))
b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp);
- if (core2or5 && (rail == 1) && (type == 0))
+ if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_X))
b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp);
- if (core1or5 && (rail == 0) && (type == 1))
+
+ if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y))
b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp);
- if (core1or5 && (rail == 1) && (type == 1))
+ if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y))
b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp);
- if (core2or5 && (rail == 0) && (type == 1))
+ if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y))
b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp);
- if (core2or5 && (rail == 1) && (type == 1))
+ if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y))
b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp);
- if (core1or5 && (rail == 0) && (type == 6))
+
+ if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD))
b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp);
- if (core1or5 && (rail == 1) && (type == 6))
+ if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_TBD))
b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp);
- if (core2or5 && (rail == 0) && (type == 6))
+ if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD))
b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp);
- if (core2or5 && (rail == 1) && (type == 6))
+ if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_TBD))
b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp);
- if (core1or5 && (rail == 0) && (type == 3))
+
+ if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET))
b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp);
- if (core1or5 && (rail == 1) && (type == 3))
+ if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET))
b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp);
- if (core2or5 && (rail == 0) && (type == 3))
+ if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET))
b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp);
- if (core2or5 && (rail == 1) && (type == 3))
+ if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET))
b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp);
- if (core1or5 && (type == 4))
+
+ if (core1or5 && (type == B43_NPHY_RSSI_TSSI_I))
b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp);
- if (core2or5 && (type == 4))
+ if (core2or5 && (type == B43_NPHY_RSSI_TSSI_I))
b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp);
- if (core1or5 && (type == 5))
+
+ if (core1or5 && (type == B43_NPHY_RSSI_TSSI_Q))
b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp);
- if (core2or5 && (type == 5))
+ if (core2or5 && (type == B43_NPHY_RSSI_TSSI_Q))
b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
}
@@ -1668,27 +2100,39 @@ static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
(type + 1) << 4);
}
- /* TODO use some definitions */
if (code == 0) {
- b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF, 0);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000);
if (type < 3) {
- b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFEC7, 0);
- b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xEFDC, 0);
- b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~(B43_NPHY_RFCTL_CMD_RXEN |
+ B43_NPHY_RFCTL_CMD_CORESEL));
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
+ ~(0x1 << 12 |
+ 0x1 << 5 |
+ 0x1 << 1 |
+ 0x1));
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_START);
udelay(20);
- b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
}
} else {
- b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF,
- 0x3000);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000);
if (type < 3) {
b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
- 0xFEC7, 0x0180);
- b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
- 0xEFDC, (code << 1 | 0x1021));
- b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0x1);
+ ~(B43_NPHY_RFCTL_CMD_RXEN |
+ B43_NPHY_RFCTL_CMD_CORESEL),
+ (B43_NPHY_RFCTL_CMD_RXEN |
+ code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT));
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER,
+ (0x1 << 12 |
+ 0x1 << 5 |
+ 0x1 << 1 |
+ 0x1));
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
udelay(20);
- b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
}
}
}
@@ -1837,6 +2281,14 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
+ } else {
+ save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_RFCTL_CMD);
+ save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
+ save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
+ save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
}
b43_nphy_rssi_select(dev, 5, type);
@@ -1880,6 +2332,14 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
+ } else {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_CMD, save_regs_phy[3]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, save_regs_phy[4]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, save_regs_phy[5]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, save_regs_phy[6]);
}
return out;
@@ -1894,7 +2354,10 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
u16 class, override;
u8 regs_save_radio[2];
u16 regs_save_phy[2];
+
s8 offset[4];
+ u8 core;
+ u8 rail;
u16 clip_state[2];
u16 clip_off[2] = { 0xFFFF, 0xFFFF };
@@ -1995,16 +2458,15 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
if (results_min[i] == 248)
offset[i] = code - 32;
- if (i % 2 == 0)
- b43_nphy_scale_offset_rssi(dev, 0, offset[i], 1, 0,
- type);
- else
- b43_nphy_scale_offset_rssi(dev, 0, offset[i], 2, 1,
- type);
+ core = (i / 2) ? 2 : 1;
+ rail = (i % 2) ? 1 : 0;
+
+ b43_nphy_scale_offset_rssi(dev, 0, offset[i], core, rail,
+ type);
}
b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]);
- b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[1]);
+ b43_radio_maskset(dev, B2055_C2_PD_RSSIMISC, 0xF8, state[1]);
switch (state[2]) {
case 1:
@@ -2042,6 +2504,9 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
b43_nphy_classifier(dev, 7, class);
b43_nphy_write_clip_detection(dev, clip_state);
+ /* Specs don't mention a reset here, but it makes wl and b43 dumps
+ identical; it really seems wl performs this */
+ b43_nphy_reset_cca(dev);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
@@ -2059,9 +2524,9 @@ static void b43_nphy_rssi_cal(struct b43_wldev *dev)
if (dev->phy.rev >= 3) {
b43_nphy_rev3_rssi_cal(dev);
} else {
- b43_nphy_rev2_rssi_cal(dev, 2);
- b43_nphy_rev2_rssi_cal(dev, 0);
- b43_nphy_rev2_rssi_cal(dev, 1);
+ b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Z);
+ b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_X);
+ b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Y);
}
}
@@ -2295,7 +2760,7 @@ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
{
int i, j;
/* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */
- u16 offset[] = { 0x186, 0x195, 0x2C5 };
+ static const u16 offset[] = { 0x186, 0x195, 0x2C5 };
for (i = 0; i < 3; i++)
for (j = 0; j < 15; j++)
@@ -2327,7 +2792,7 @@ static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
struct nphy_txgains target;
const u32 *table = NULL;
- if (nphy->txpwrctrl == 0) {
+ if (!nphy->txpwrctrl) {
int i;
if (nphy->hang_avoid)
@@ -2884,7 +3349,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
u8 rfctl[2];
u8 afectl_core;
u16 tmp[6];
- u16 cur_hpf1, cur_hpf2, cur_lna;
+ u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna;
u32 real, imag;
enum ieee80211_band band;
@@ -3077,9 +3542,9 @@ static void b43_nphy_mac_phy_clock_set(struct b43_wldev *dev, bool on)
{
u32 tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
if (on)
- tmslow |= SSB_TMSLOW_PHYCLK;
+ tmslow |= B43_TMSLOW_MACPHYCLKEN;
else
- tmslow &= ~SSB_TMSLOW_PHYCLK;
+ tmslow &= ~B43_TMSLOW_MACPHYCLKEN;
ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
}
@@ -3088,7 +3553,7 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = phy->n;
- u16 buf[16];
+ /* u16 buf[16]; only needed on rev3+ */
nphy->phyrxchain = mask;
@@ -3232,10 +3697,12 @@ int b43_phy_initn(struct b43_wldev *dev)
b43_nphy_classifier(dev, 0, 0);
b43_nphy_read_clip_detection(dev, clip);
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_nphy_bphy_init(dev);
+
tx_pwr_state = nphy->txpwrctrl;
- /* TODO N PHY TX power control with argument 0
- (turning off power control) */
- /* TODO Fix the TX Power Settings */
+ b43_nphy_tx_power_ctrl(dev, false);
+ b43_nphy_tx_power_fix(dev);
/* TODO N PHY TX Power Control Idle TSSI */
/* TODO N PHY TX Power Control Setup */
@@ -3292,21 +3759,18 @@ int b43_phy_initn(struct b43_wldev *dev)
/* TODO N PHY Pre Calibrate TX Gain */
target = b43_nphy_get_tx_gains(dev);
}
- }
+ if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false))
+ if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
+ b43_nphy_save_cal(dev);
+ } else if (nphy->mphase_cal_phase_id == 0)
+ ;/* N PHY Periodic Calibration with arg 3 */
+ } else {
+ b43_nphy_restore_cal(dev);
}
}
- if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false)) {
- if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
- b43_nphy_save_cal(dev);
- else if (nphy->mphase_cal_phase_id == 0)
- ;/* N PHY Periodic Calibration with argument 3 */
- } else {
- b43_nphy_restore_cal(dev);
- }
-
b43_nphy_tx_pwr_ctrl_coef_setup(dev);
- /* TODO N PHY TX Power Control Enable with argument tx_pwr_state */
+ b43_nphy_tx_power_ctrl(dev, tx_pwr_state);
b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
if (phy->rev >= 3 && phy->rev <= 6)
@@ -3315,7 +3779,6 @@ int b43_phy_initn(struct b43_wldev *dev)
if (phy->rev >= 3)
b43_nphy_spur_workaround(dev);
- b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n");
return 0;
}
@@ -3357,7 +3820,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840);
}
- if (nphy->txpwrctrl)
+ if (!nphy->txpwrctrl)
b43_nphy_tx_power_fix(dev);
if (dev->phy.rev < 3)
@@ -3381,7 +3844,6 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
enum nl80211_channel_type channel_type)
{
struct b43_phy *phy = &dev->phy;
- struct b43_phy_n *nphy = dev->phy.n;
const struct b43_nphy_channeltab_entry_rev2 *tabent_r2;
const struct b43_nphy_channeltab_entry_rev3 *tabent_r3;
@@ -3391,7 +3853,6 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
if (dev->phy.rev >= 3) {
tabent_r3 = b43_nphy_get_chantabent_rev3(dev,
channel->center_freq);
- tabent_r3 = NULL;
if (!tabent_r3)
return -ESRCH;
} else {
@@ -3420,7 +3881,7 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
if (dev->phy.rev >= 3) {
tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 4 : 0;
b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
- /* TODO: PHY Radio2056 Setup (dev, tabent_r3); */
+ b43_radio_2056_setup(dev, tabent_r3);
b43_nphy_channel_setup(dev, &(tabent_r3->phy_regs), channel);
} else {
tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 0x0020 : 0x0050;
@@ -3451,7 +3912,11 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
memset(nphy, 0, sizeof(*nphy));
- //TODO init struct b43_phy_n
+ nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
+ nphy->gain_boost = true; /* this way we follow wl; assume it is true */
+ nphy->txrx_chain = 2; /* something other than 0 and 1 for now */
+ nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
+ nphy->perical = 2; /* avoid additional rssi cal on init (like wl) */
}
static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -3500,6 +3965,15 @@ static void b43_nphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
b43_write16(dev, B43_MMIO_PHY_DATA, value);
}
+static void b43_nphy_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask,
+ u16 set)
+{
+ check_phyreg(dev, reg);
+ b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+ b43_write16(dev, B43_MMIO_PHY_DATA,
+ (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set);
+}
+
static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
{
/* Register 1 is a 32-bit register. */
@@ -3524,8 +3998,6 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
bool blocked)
{
- struct b43_phy_n *nphy = dev->phy.n;
-
if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
b43err(dev->wl, "MAC not suspended\n");
@@ -3559,10 +4031,14 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
}
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */
static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
{
- b43_phy_write(dev, B43_NPHY_AFECTL_OVER,
- on ? 0 : 0x7FFF);
+ u16 val = on ? 0 : 0x7FFF;
+
+ if (dev->phy.rev >= 3)
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, val);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, val);
}
static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
@@ -3596,6 +4072,7 @@ const struct b43_phy_operations b43_phyops_n = {
.init = b43_nphy_op_init,
.phy_read = b43_nphy_op_read,
.phy_write = b43_nphy_op_write,
+ .phy_maskset = b43_nphy_op_maskset,
.radio_read = b43_nphy_op_radio_read,
.radio_write = b43_nphy_op_radio_write,
.software_rfkill = b43_nphy_op_software_rfkill,
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index c144e59a708b..001e841f118c 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -782,7 +782,7 @@ struct b43_phy_n {
u16 mphase_txcal_numcmds;
u16 mphase_txcal_bestcoeffs[11];
- u8 txpwrctrl;
+ bool txpwrctrl;
u16 txcal_bbmult;
u16 txiqlocal_bestc[11];
bool txiqlocal_coeffsvalid;
diff --git a/drivers/net/wireless/b43/radio_2055.c b/drivers/net/wireless/b43/radio_2055.c
index 1b5316586cbf..44c6dea66882 100644
--- a/drivers/net/wireless/b43/radio_2055.c
+++ b/drivers/net/wireless/b43/radio_2055.c
@@ -244,7 +244,7 @@ static const struct b2055_inittab_entry b2055_inittab [] = {
[0xCB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xCC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C1_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
- [0xCE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [0xCE] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[0xCF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xD0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xD1] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
@@ -256,7 +256,7 @@ static const struct b2055_inittab_entry b2055_inittab [] = {
[0xD7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xD8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C2_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
- [0xDA] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [0xDA] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[0xDB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xDC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xDD] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
@@ -304,178 +304,178 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
{ .channel = 184,
.freq = 4920, /* MHz */
.unk2 = 3280,
- RADIOREGS(0x71, 0x01, 0xEC, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xEC, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xB407, 0xB007, 0xAC07, 0x1402, 0x1502, 0x1602),
+ PHYREGS(0x07B4, 0x07B0, 0x07AC, 0x0214, 0x0215, 0x0216),
},
{ .channel = 186,
.freq = 4930, /* MHz */
.unk2 = 3287,
- RADIOREGS(0x71, 0x01, 0xED, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xED, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xB807, 0xB407, 0xB007, 0x1302, 0x1402, 0x1502),
+ PHYREGS(0x07B8, 0x07B4, 0x07B0, 0x0213, 0x0214, 0x0215),
},
{ .channel = 188,
.freq = 4940, /* MHz */
.unk2 = 3293,
- RADIOREGS(0x71, 0x01, 0xEE, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xEE, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xBC07, 0xB807, 0xB407, 0x1202, 0x1302, 0x1402),
+ PHYREGS(0x07BC, 0x07B8, 0x07B4, 0x0212, 0x0213, 0x0214),
},
{ .channel = 190,
.freq = 4950, /* MHz */
.unk2 = 3300,
- RADIOREGS(0x71, 0x01, 0xEF, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xEF, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xC007, 0xBC07, 0xB807, 0x1102, 0x1202, 0x1302),
+ PHYREGS(0x07C0, 0x07BC, 0x07B8, 0x0211, 0x0212, 0x0213),
},
{ .channel = 192,
.freq = 4960, /* MHz */
.unk2 = 3307,
- RADIOREGS(0x71, 0x01, 0xF0, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xF0, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xC407, 0xC007, 0xBC07, 0x0F02, 0x1102, 0x1202),
+ PHYREGS(0x07C4, 0x07C0, 0x07BC, 0x020F, 0x0211, 0x0212),
},
{ .channel = 194,
.freq = 4970, /* MHz */
.unk2 = 3313,
- RADIOREGS(0x71, 0x01, 0xF1, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xF1, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xC807, 0xC407, 0xC007, 0x0E02, 0x0F02, 0x1102),
+ PHYREGS(0x07C8, 0x07C4, 0x07C0, 0x020E, 0x020F, 0x0211),
},
{ .channel = 196,
.freq = 4980, /* MHz */
.unk2 = 3320,
- RADIOREGS(0x71, 0x01, 0xF2, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xF2, 0x01, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xCC07, 0xC807, 0xC407, 0x0D02, 0x0E02, 0x0F02),
+ PHYREGS(0x07CC, 0x07C8, 0x07C4, 0x020D, 0x020E, 0x020F),
},
{ .channel = 198,
.freq = 4990, /* MHz */
.unk2 = 3327,
- RADIOREGS(0x71, 0x01, 0xF3, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xF3, 0x01, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xD007, 0xCC07, 0xC807, 0x0C02, 0x0D02, 0x0E02),
+ PHYREGS(0x07D0, 0x07CC, 0x07C8, 0x020C, 0x020D, 0x020E),
},
{ .channel = 200,
.freq = 5000, /* MHz */
.unk2 = 3333,
- RADIOREGS(0x71, 0x01, 0xF4, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xF4, 0x01, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xD407, 0xD007, 0xCC07, 0x0B02, 0x0C02, 0x0D02),
+ PHYREGS(0x07D4, 0x07D0, 0x07CC, 0x020B, 0x020C, 0x020D),
},
{ .channel = 202,
.freq = 5010, /* MHz */
.unk2 = 3340,
- RADIOREGS(0x71, 0x01, 0xF5, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xF5, 0x01, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xD807, 0xD407, 0xD007, 0x0A02, 0x0B02, 0x0C02),
+ PHYREGS(0x07D8, 0x07D4, 0x07D0, 0x020A, 0x020B, 0x020C),
},
{ .channel = 204,
.freq = 5020, /* MHz */
.unk2 = 3347,
- RADIOREGS(0x71, 0x01, 0xF6, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xF6, 0x01, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xDC07, 0xD807, 0xD407, 0x0902, 0x0A02, 0x0B02),
+ PHYREGS(0x07DC, 0x07D8, 0x07D4, 0x0209, 0x020A, 0x020B),
},
{ .channel = 206,
.freq = 5030, /* MHz */
.unk2 = 3353,
- RADIOREGS(0x71, 0x01, 0xF7, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xF7, 0x01, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xE007, 0xDC07, 0xD807, 0x0802, 0x0902, 0x0A02),
+ PHYREGS(0x07E0, 0x07DC, 0x07D8, 0x0208, 0x0209, 0x020A),
},
{ .channel = 208,
.freq = 5040, /* MHz */
.unk2 = 3360,
- RADIOREGS(0x71, 0x01, 0xF8, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xF8, 0x01, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xE407, 0xE007, 0xDC07, 0x0702, 0x0802, 0x0902),
+ PHYREGS(0x07E4, 0x07E0, 0x07DC, 0x0207, 0x0208, 0x0209),
},
{ .channel = 210,
.freq = 5050, /* MHz */
.unk2 = 3367,
- RADIOREGS(0x71, 0x01, 0xF9, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xF9, 0x01, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
- PHYREGS(0xE807, 0xE407, 0xE007, 0x0602, 0x0702, 0x0802),
+ PHYREGS(0x07E8, 0x07E4, 0x07E0, 0x0206, 0x0207, 0x0208),
},
{ .channel = 212,
.freq = 5060, /* MHz */
.unk2 = 3373,
- RADIOREGS(0x71, 0x01, 0xFA, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xFA, 0x01, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F,
0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E),
- PHYREGS(0xEC07, 0xE807, 0xE407, 0x0502, 0x0602, 0x0702),
+ PHYREGS(0x07EC, 0x07E8, 0x07E4, 0x0205, 0x0206, 0x0207),
},
{ .channel = 214,
.freq = 5070, /* MHz */
.unk2 = 3380,
- RADIOREGS(0x71, 0x01, 0xFB, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xFB, 0x01, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F,
0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E),
- PHYREGS(0xF007, 0xEC07, 0xE807, 0x0402, 0x0502, 0x0602),
+ PHYREGS(0x07F0, 0x07EC, 0x07E8, 0x0204, 0x0205, 0x0206),
},
{ .channel = 216,
.freq = 5080, /* MHz */
.unk2 = 3387,
- RADIOREGS(0x71, 0x01, 0xFC, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xFC, 0x01, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F,
0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D),
- PHYREGS(0xF407, 0xF007, 0xEC07, 0x0302, 0x0402, 0x0502),
+ PHYREGS(0x07F4, 0x07F0, 0x07EC, 0x0203, 0x0204, 0x0205),
},
{ .channel = 218,
.freq = 5090, /* MHz */
.unk2 = 3393,
- RADIOREGS(0x71, 0x01, 0xFD, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xFD, 0x01, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F,
0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D),
- PHYREGS(0xF807, 0xF407, 0xF007, 0x0202, 0x0302, 0x0402),
+ PHYREGS(0x07F8, 0x07F4, 0x07F0, 0x0202, 0x0203, 0x0204),
},
{ .channel = 220,
.freq = 5100, /* MHz */
.unk2 = 3400,
- RADIOREGS(0x71, 0x01, 0xFE, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xFE, 0x01, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F,
0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D),
- PHYREGS(0xFC07, 0xF807, 0xF407, 0x0102, 0x0202, 0x0302),
+ PHYREGS(0x07FC, 0x07F8, 0x07F4, 0x0201, 0x0202, 0x0203),
},
{ .channel = 222,
.freq = 5110, /* MHz */
.unk2 = 3407,
- RADIOREGS(0x71, 0x01, 0xFF, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0xFF, 0x01, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F,
0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D),
- PHYREGS(0x0008, 0xFC07, 0xF807, 0x0002, 0x0102, 0x0202),
+ PHYREGS(0x0800, 0x07FC, 0x07F8, 0x0200, 0x0201, 0x0202),
},
{ .channel = 224,
.freq = 5120, /* MHz */
.unk2 = 3413,
- RADIOREGS(0x71, 0x02, 0x00, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x00, 0x02, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F,
0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C),
- PHYREGS(0x0408, 0x0008, 0xFC07, 0xFF01, 0x0002, 0x0102),
+ PHYREGS(0x0804, 0x0800, 0x07FC, 0x01FF, 0x0200, 0x0201),
},
{ .channel = 226,
.freq = 5130, /* MHz */
.unk2 = 3420,
- RADIOREGS(0x71, 0x02, 0x01, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x01, 0x02, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F,
0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C),
- PHYREGS(0x0808, 0x0408, 0x0008, 0xFE01, 0xFF01, 0x0002),
+ PHYREGS(0x0808, 0x0804, 0x0800, 0x01FE, 0x01FF, 0x0200),
},
{ .channel = 228,
.freq = 5140, /* MHz */
@@ -483,815 +483,815 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
RADIOREGS(0x71, 0x02, 0x02, 0x0C, 0xC6, 0x01, 0x04, 0x0A,
0x00, 0x8D, 0x99, 0x99, 0xDD, 0x00, 0x0C, 0x0E,
0x8B, 0xDD, 0x00, 0x0C, 0x0E, 0x8B),
- PHYREGS(0x0C08, 0x0808, 0x0408, 0xFD01, 0xFE01, 0xFF01),
+ PHYREGS(0x080C, 0x0808, 0x0804, 0x01FD, 0x01FE, 0x01FF),
},
{ .channel = 32,
.freq = 5160, /* MHz */
.unk2 = 3440,
- RADIOREGS(0x71, 0x02, 0x04, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x04, 0x02, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D,
0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A),
- PHYREGS(0x1408, 0x1008, 0x0C08, 0xFB01, 0xFC01, 0xFD01),
+ PHYREGS(0x0814, 0x0810, 0x080C, 0x01FB, 0x01FC, 0x01FD),
},
{ .channel = 34,
.freq = 5170, /* MHz */
.unk2 = 3447,
- RADIOREGS(0x71, 0x02, 0x05, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x05, 0x02, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D,
0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A),
- PHYREGS(0x1808, 0x1408, 0x1008, 0xFA01, 0xFB01, 0xFC01),
+ PHYREGS(0x0818, 0x0814, 0x0810, 0x01FA, 0x01FB, 0x01FC),
},
{ .channel = 36,
.freq = 5180, /* MHz */
.unk2 = 3453,
- RADIOREGS(0x71, 0x02, 0x06, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x06, 0x02, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C,
0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89),
- PHYREGS(0x1C08, 0x1808, 0x1408, 0xF901, 0xFA01, 0xFB01),
+ PHYREGS(0x081C, 0x0818, 0x0814, 0x01F9, 0x01FA, 0x01FB),
},
{ .channel = 38,
.freq = 5190, /* MHz */
.unk2 = 3460,
- RADIOREGS(0x71, 0x02, 0x07, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x07, 0x02, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C,
0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89),
- PHYREGS(0x2008, 0x1C08, 0x1808, 0xF801, 0xF901, 0xFA01),
+ PHYREGS(0x0820, 0x081C, 0x0818, 0x01F8, 0x01F9, 0x01FA),
},
{ .channel = 40,
.freq = 5200, /* MHz */
.unk2 = 3467,
- RADIOREGS(0x71, 0x02, 0x08, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x08, 0x02, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B,
0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89),
- PHYREGS(0x2408, 0x2008, 0x1C08, 0xF701, 0xF801, 0xF901),
+ PHYREGS(0x0824, 0x0820, 0x081C, 0x01F7, 0x01F8, 0x01F9),
},
{ .channel = 42,
.freq = 5210, /* MHz */
.unk2 = 3473,
- RADIOREGS(0x71, 0x02, 0x09, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x09, 0x02, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B,
0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89),
- PHYREGS(0x2808, 0x2408, 0x2008, 0xF601, 0xF701, 0xF801),
+ PHYREGS(0x0828, 0x0824, 0x0820, 0x01F6, 0x01F7, 0x01F8),
},
{ .channel = 44,
.freq = 5220, /* MHz */
.unk2 = 3480,
- RADIOREGS(0x71, 0x02, 0x0A, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x0A, 0x02, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A,
0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88),
- PHYREGS(0x2C08, 0x2808, 0x2408, 0xF501, 0xF601, 0xF701),
+ PHYREGS(0x082C, 0x0828, 0x0824, 0x01F5, 0x01F6, 0x01F7),
},
{ .channel = 46,
.freq = 5230, /* MHz */
.unk2 = 3487,
- RADIOREGS(0x71, 0x02, 0x0B, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x0B, 0x02, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A,
0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88),
- PHYREGS(0x3008, 0x2C08, 0x2808, 0xF401, 0xF501, 0xF601),
+ PHYREGS(0x0830, 0x082C, 0x0828, 0x01F4, 0x01F5, 0x01F6),
},
{ .channel = 48,
.freq = 5240, /* MHz */
.unk2 = 3493,
- RADIOREGS(0x71, 0x02, 0x0C, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x0C, 0x02, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A,
0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87),
- PHYREGS(0x3408, 0x3008, 0x2C08, 0xF301, 0xF401, 0xF501),
+ PHYREGS(0x0834, 0x0830, 0x082C, 0x01F3, 0x01F4, 0x01F5),
},
{ .channel = 50,
.freq = 5250, /* MHz */
.unk2 = 3500,
- RADIOREGS(0x71, 0x02, 0x0D, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x0D, 0x02, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A,
0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87),
- PHYREGS(0x3808, 0x3408, 0x3008, 0xF201, 0xF301, 0xF401),
+ PHYREGS(0x0838, 0x0834, 0x0830, 0x01F2, 0x01F3, 0x01F4),
},
{ .channel = 52,
.freq = 5260, /* MHz */
.unk2 = 3507,
- RADIOREGS(0x71, 0x02, 0x0E, 0x0A, 0x98, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x0E, 0x02, 0x0A, 0x98, 0x01, 0x04, 0x0A,
0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09,
0x87, 0xAA, 0x00, 0x08, 0x09, 0x87),
- PHYREGS(0x3C08, 0x3808, 0x3408, 0xF101, 0xF201, 0xF301),
+ PHYREGS(0x083C, 0x0838, 0x0834, 0x01F1, 0x01F2, 0x01F3),
},
{ .channel = 54,
.freq = 5270, /* MHz */
.unk2 = 3513,
- RADIOREGS(0x71, 0x02, 0x0F, 0x0A, 0x98, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x0F, 0x02, 0x0A, 0x98, 0x01, 0x04, 0x0A,
0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09,
0x87, 0xAA, 0x00, 0x08, 0x09, 0x87),
- PHYREGS(0x4008, 0x3C08, 0x3808, 0xF001, 0xF101, 0xF201),
+ PHYREGS(0x0840, 0x083C, 0x0838, 0x01F0, 0x01F1, 0x01F2),
},
{ .channel = 56,
.freq = 5280, /* MHz */
.unk2 = 3520,
- RADIOREGS(0x71, 0x02, 0x10, 0x09, 0x91, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x10, 0x02, 0x09, 0x91, 0x01, 0x04, 0x0A,
0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08,
0x86, 0x99, 0x00, 0x08, 0x08, 0x86),
- PHYREGS(0x4408, 0x4008, 0x3C08, 0xF001, 0xF001, 0xF101),
+ PHYREGS(0x0844, 0x0840, 0x083C, 0x01F0, 0x01F0, 0x01F1),
},
{ .channel = 58,
.freq = 5290, /* MHz */
.unk2 = 3527,
- RADIOREGS(0x71, 0x02, 0x11, 0x09, 0x91, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x11, 0x02, 0x09, 0x91, 0x01, 0x04, 0x0A,
0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08,
0x86, 0x99, 0x00, 0x08, 0x08, 0x86),
- PHYREGS(0x4808, 0x4408, 0x4008, 0xEF01, 0xF001, 0xF001),
+ PHYREGS(0x0848, 0x0844, 0x0840, 0x01EF, 0x01F0, 0x01F0),
},
{ .channel = 60,
.freq = 5300, /* MHz */
.unk2 = 3533,
- RADIOREGS(0x71, 0x02, 0x12, 0x09, 0x8A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x12, 0x02, 0x09, 0x8A, 0x01, 0x04, 0x0A,
0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07,
0x85, 0x99, 0x00, 0x08, 0x07, 0x85),
- PHYREGS(0x4C08, 0x4808, 0x4408, 0xEE01, 0xEF01, 0xF001),
+ PHYREGS(0x084C, 0x0848, 0x0844, 0x01EE, 0x01EF, 0x01F0),
},
{ .channel = 62,
.freq = 5310, /* MHz */
.unk2 = 3540,
- RADIOREGS(0x71, 0x02, 0x13, 0x09, 0x8A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x13, 0x02, 0x09, 0x8A, 0x01, 0x04, 0x0A,
0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07,
0x85, 0x99, 0x00, 0x08, 0x07, 0x85),
- PHYREGS(0x5008, 0x4C08, 0x4808, 0xED01, 0xEE01, 0xEF01),
+ PHYREGS(0x0850, 0x084C, 0x0848, 0x01ED, 0x01EE, 0x01EF),
},
{ .channel = 64,
.freq = 5320, /* MHz */
.unk2 = 3547,
- RADIOREGS(0x71, 0x02, 0x14, 0x09, 0x83, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x14, 0x02, 0x09, 0x83, 0x01, 0x04, 0x0A,
0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07,
0x84, 0x88, 0x00, 0x07, 0x07, 0x84),
- PHYREGS(0x5408, 0x5008, 0x4C08, 0xEC01, 0xED01, 0xEE01),
+ PHYREGS(0x0854, 0x0850, 0x084C, 0x01EC, 0x01ED, 0x01EE),
},
{ .channel = 66,
.freq = 5330, /* MHz */
.unk2 = 3553,
- RADIOREGS(0x71, 0x02, 0x15, 0x09, 0x83, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x15, 0x02, 0x09, 0x83, 0x01, 0x04, 0x0A,
0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07,
0x84, 0x88, 0x00, 0x07, 0x07, 0x84),
- PHYREGS(0x5808, 0x5408, 0x5008, 0xEB01, 0xEC01, 0xED01),
+ PHYREGS(0x0858, 0x0854, 0x0850, 0x01EB, 0x01EC, 0x01ED),
},
{ .channel = 68,
.freq = 5340, /* MHz */
.unk2 = 3560,
- RADIOREGS(0x71, 0x02, 0x16, 0x08, 0x7C, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x16, 0x02, 0x08, 0x7C, 0x01, 0x04, 0x0A,
0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06,
0x84, 0x88, 0x00, 0x07, 0x06, 0x84),
- PHYREGS(0x5C08, 0x5808, 0x5408, 0xEA01, 0xEB01, 0xEC01),
+ PHYREGS(0x085C, 0x0858, 0x0854, 0x01EA, 0x01EB, 0x01EC),
},
{ .channel = 70,
.freq = 5350, /* MHz */
.unk2 = 3567,
- RADIOREGS(0x71, 0x02, 0x17, 0x08, 0x7C, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x17, 0x02, 0x08, 0x7C, 0x01, 0x04, 0x0A,
0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06,
0x84, 0x88, 0x00, 0x07, 0x06, 0x84),
- PHYREGS(0x6008, 0x5C08, 0x5808, 0xE901, 0xEA01, 0xEB01),
+ PHYREGS(0x0860, 0x085C, 0x0858, 0x01E9, 0x01EA, 0x01EB),
},
{ .channel = 72,
.freq = 5360, /* MHz */
.unk2 = 3573,
- RADIOREGS(0x71, 0x02, 0x18, 0x08, 0x75, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x18, 0x02, 0x08, 0x75, 0x01, 0x04, 0x0A,
0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05,
0x83, 0x77, 0x00, 0x06, 0x05, 0x83),
- PHYREGS(0x6408, 0x6008, 0x5C08, 0xE801, 0xE901, 0xEA01),
+ PHYREGS(0x0864, 0x0860, 0x085C, 0x01E8, 0x01E9, 0x01EA),
},
{ .channel = 74,
.freq = 5370, /* MHz */
.unk2 = 3580,
- RADIOREGS(0x71, 0x02, 0x19, 0x08, 0x75, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x19, 0x02, 0x08, 0x75, 0x01, 0x04, 0x0A,
0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05,
0x83, 0x77, 0x00, 0x06, 0x05, 0x83),
- PHYREGS(0x6808, 0x6408, 0x6008, 0xE701, 0xE801, 0xE901),
+ PHYREGS(0x0868, 0x0864, 0x0860, 0x01E7, 0x01E8, 0x01E9),
},
{ .channel = 76,
.freq = 5380, /* MHz */
.unk2 = 3587,
- RADIOREGS(0x71, 0x02, 0x1A, 0x08, 0x6E, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x1A, 0x02, 0x08, 0x6E, 0x01, 0x04, 0x0A,
0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04,
0x82, 0x77, 0x00, 0x06, 0x04, 0x82),
- PHYREGS(0x6C08, 0x6808, 0x6408, 0xE601, 0xE701, 0xE801),
+ PHYREGS(0x086C, 0x0868, 0x0864, 0x01E6, 0x01E7, 0x01E8),
},
{ .channel = 78,
.freq = 5390, /* MHz */
.unk2 = 3593,
- RADIOREGS(0x71, 0x02, 0x1B, 0x08, 0x6E, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x1B, 0x02, 0x08, 0x6E, 0x01, 0x04, 0x0A,
0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04,
0x82, 0x77, 0x00, 0x06, 0x04, 0x82),
- PHYREGS(0x7008, 0x6C08, 0x6808, 0xE501, 0xE601, 0xE701),
+ PHYREGS(0x0870, 0x086C, 0x0868, 0x01E5, 0x01E6, 0x01E7),
},
{ .channel = 80,
.freq = 5400, /* MHz */
.unk2 = 3600,
- RADIOREGS(0x71, 0x02, 0x1C, 0x07, 0x67, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x1C, 0x02, 0x07, 0x67, 0x01, 0x04, 0x0A,
0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04,
0x81, 0x66, 0x00, 0x05, 0x04, 0x81),
- PHYREGS(0x7408, 0x7008, 0x6C08, 0xE501, 0xE501, 0xE601),
+ PHYREGS(0x0874, 0x0870, 0x086C, 0x01E5, 0x01E5, 0x01E6),
},
{ .channel = 82,
.freq = 5410, /* MHz */
.unk2 = 3607,
- RADIOREGS(0x71, 0x02, 0x1D, 0x07, 0x67, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x1D, 0x02, 0x07, 0x67, 0x01, 0x04, 0x0A,
0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04,
0x81, 0x66, 0x00, 0x05, 0x04, 0x81),
- PHYREGS(0x7808, 0x7408, 0x7008, 0xE401, 0xE501, 0xE501),
+ PHYREGS(0x0878, 0x0874, 0x0870, 0x01E4, 0x01E5, 0x01E5),
},
{ .channel = 84,
.freq = 5420, /* MHz */
.unk2 = 3613,
- RADIOREGS(0x71, 0x02, 0x1E, 0x07, 0x61, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x1E, 0x02, 0x07, 0x61, 0x01, 0x04, 0x0A,
0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03,
0x80, 0x66, 0x00, 0x05, 0x03, 0x80),
- PHYREGS(0x7C08, 0x7808, 0x7408, 0xE301, 0xE401, 0xE501),
+ PHYREGS(0x087C, 0x0878, 0x0874, 0x01E3, 0x01E4, 0x01E5),
},
{ .channel = 86,
.freq = 5430, /* MHz */
.unk2 = 3620,
- RADIOREGS(0x71, 0x02, 0x1F, 0x07, 0x61, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x1F, 0x02, 0x07, 0x61, 0x01, 0x04, 0x0A,
0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03,
0x80, 0x66, 0x00, 0x05, 0x03, 0x80),
- PHYREGS(0x8008, 0x7C08, 0x7808, 0xE201, 0xE301, 0xE401),
+ PHYREGS(0x0880, 0x087C, 0x0878, 0x01E2, 0x01E3, 0x01E4),
},
{ .channel = 88,
.freq = 5440, /* MHz */
.unk2 = 3627,
- RADIOREGS(0x71, 0x02, 0x20, 0x07, 0x5A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x20, 0x02, 0x07, 0x5A, 0x01, 0x04, 0x0A,
0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02,
0x80, 0x55, 0x00, 0x04, 0x02, 0x80),
- PHYREGS(0x8408, 0x8008, 0x7C08, 0xE101, 0xE201, 0xE301),
+ PHYREGS(0x0884, 0x0880, 0x087C, 0x01E1, 0x01E2, 0x01E3),
},
{ .channel = 90,
.freq = 5450, /* MHz */
.unk2 = 3633,
- RADIOREGS(0x71, 0x02, 0x21, 0x07, 0x5A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x21, 0x02, 0x07, 0x5A, 0x01, 0x04, 0x0A,
0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02,
0x80, 0x55, 0x00, 0x04, 0x02, 0x80),
- PHYREGS(0x8808, 0x8408, 0x8008, 0xE001, 0xE101, 0xE201),
+ PHYREGS(0x0888, 0x0884, 0x0880, 0x01E0, 0x01E1, 0x01E2),
},
{ .channel = 92,
.freq = 5460, /* MHz */
.unk2 = 3640,
- RADIOREGS(0x71, 0x02, 0x22, 0x06, 0x53, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x22, 0x02, 0x06, 0x53, 0x01, 0x04, 0x0A,
0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01,
0x80, 0x55, 0x00, 0x04, 0x01, 0x80),
- PHYREGS(0x8C08, 0x8808, 0x8408, 0xDF01, 0xE001, 0xE101),
+ PHYREGS(0x088C, 0x0888, 0x0884, 0x01DF, 0x01E0, 0x01E1),
},
{ .channel = 94,
.freq = 5470, /* MHz */
.unk2 = 3647,
- RADIOREGS(0x71, 0x02, 0x23, 0x06, 0x53, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x23, 0x02, 0x06, 0x53, 0x01, 0x04, 0x0A,
0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01,
0x80, 0x55, 0x00, 0x04, 0x01, 0x80),
- PHYREGS(0x9008, 0x8C08, 0x8808, 0xDE01, 0xDF01, 0xE001),
+ PHYREGS(0x0890, 0x088C, 0x0888, 0x01DE, 0x01DF, 0x01E0),
},
{ .channel = 96,
.freq = 5480, /* MHz */
.unk2 = 3653,
- RADIOREGS(0x71, 0x02, 0x24, 0x06, 0x4D, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x24, 0x02, 0x06, 0x4D, 0x01, 0x04, 0x0A,
0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00,
0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
- PHYREGS(0x9408, 0x9008, 0x8C08, 0xDD01, 0xDE01, 0xDF01),
+ PHYREGS(0x0894, 0x0890, 0x088C, 0x01DD, 0x01DE, 0x01DF),
},
{ .channel = 98,
.freq = 5490, /* MHz */
.unk2 = 3660,
- RADIOREGS(0x71, 0x02, 0x25, 0x06, 0x4D, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x25, 0x02, 0x06, 0x4D, 0x01, 0x04, 0x0A,
0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00,
0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
- PHYREGS(0x9808, 0x9408, 0x9008, 0xDD01, 0xDD01, 0xDE01),
+ PHYREGS(0x0898, 0x0894, 0x0890, 0x01DD, 0x01DD, 0x01DE),
},
{ .channel = 100,
.freq = 5500, /* MHz */
.unk2 = 3667,
- RADIOREGS(0x71, 0x02, 0x26, 0x06, 0x47, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x26, 0x02, 0x06, 0x47, 0x01, 0x04, 0x0A,
0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00,
0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
- PHYREGS(0x9C08, 0x9808, 0x9408, 0xDC01, 0xDD01, 0xDD01),
+ PHYREGS(0x089C, 0x0898, 0x0894, 0x01DC, 0x01DD, 0x01DD),
},
{ .channel = 102,
.freq = 5510, /* MHz */
.unk2 = 3673,
- RADIOREGS(0x71, 0x02, 0x27, 0x06, 0x47, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x27, 0x02, 0x06, 0x47, 0x01, 0x04, 0x0A,
0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00,
0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
- PHYREGS(0xA008, 0x9C08, 0x9808, 0xDB01, 0xDC01, 0xDD01),
+ PHYREGS(0x08A0, 0x089C, 0x0898, 0x01DB, 0x01DC, 0x01DD),
},
{ .channel = 104,
.freq = 5520, /* MHz */
.unk2 = 3680,
- RADIOREGS(0x71, 0x02, 0x28, 0x05, 0x40, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x28, 0x02, 0x05, 0x40, 0x01, 0x04, 0x0A,
0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
- PHYREGS(0xA408, 0xA008, 0x9C08, 0xDA01, 0xDB01, 0xDC01),
+ PHYREGS(0x08A4, 0x08A0, 0x089C, 0x01DA, 0x01DB, 0x01DC),
},
{ .channel = 106,
.freq = 5530, /* MHz */
.unk2 = 3687,
- RADIOREGS(0x71, 0x02, 0x29, 0x05, 0x40, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x29, 0x02, 0x05, 0x40, 0x01, 0x04, 0x0A,
0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
- PHYREGS(0xA808, 0xA408, 0xA008, 0xD901, 0xDA01, 0xDB01),
+ PHYREGS(0x08A8, 0x08A4, 0x08A0, 0x01D9, 0x01DA, 0x01DB),
},
{ .channel = 108,
.freq = 5540, /* MHz */
.unk2 = 3693,
- RADIOREGS(0x71, 0x02, 0x2A, 0x05, 0x3A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x2A, 0x02, 0x05, 0x3A, 0x01, 0x04, 0x0A,
0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
- PHYREGS(0xAC08, 0xA808, 0xA408, 0xD801, 0xD901, 0xDA01),
+ PHYREGS(0x08AC, 0x08A8, 0x08A4, 0x01D8, 0x01D9, 0x01DA),
},
{ .channel = 110,
.freq = 5550, /* MHz */
.unk2 = 3700,
- RADIOREGS(0x71, 0x02, 0x2B, 0x05, 0x3A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x2B, 0x02, 0x05, 0x3A, 0x01, 0x04, 0x0A,
0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
- PHYREGS(0xB008, 0xAC08, 0xA808, 0xD701, 0xD801, 0xD901),
+ PHYREGS(0x08B0, 0x08AC, 0x08A8, 0x01D7, 0x01D8, 0x01D9),
},
{ .channel = 112,
.freq = 5560, /* MHz */
.unk2 = 3707,
- RADIOREGS(0x71, 0x02, 0x2C, 0x05, 0x34, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x2C, 0x02, 0x05, 0x34, 0x01, 0x04, 0x0A,
0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
- PHYREGS(0xB408, 0xB008, 0xAC08, 0xD701, 0xD701, 0xD801),
+ PHYREGS(0x08B4, 0x08B0, 0x08AC, 0x01D7, 0x01D7, 0x01D8),
},
{ .channel = 114,
.freq = 5570, /* MHz */
.unk2 = 3713,
- RADIOREGS(0x71, 0x02, 0x2D, 0x05, 0x34, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x2D, 0x02, 0x05, 0x34, 0x01, 0x04, 0x0A,
0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
- PHYREGS(0xB808, 0xB408, 0xB008, 0xD601, 0xD701, 0xD701),
+ PHYREGS(0x08B8, 0x08B4, 0x08B0, 0x01D6, 0x01D7, 0x01D7),
},
{ .channel = 116,
.freq = 5580, /* MHz */
.unk2 = 3720,
- RADIOREGS(0x71, 0x02, 0x2E, 0x04, 0x2E, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x2E, 0x02, 0x04, 0x2E, 0x01, 0x04, 0x0A,
0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
- PHYREGS(0xBC08, 0xB808, 0xB408, 0xD501, 0xD601, 0xD701),
+ PHYREGS(0x08BC, 0x08B8, 0x08B4, 0x01D5, 0x01D6, 0x01D7),
},
{ .channel = 118,
.freq = 5590, /* MHz */
.unk2 = 3727,
- RADIOREGS(0x71, 0x02, 0x2F, 0x04, 0x2E, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x2F, 0x02, 0x04, 0x2E, 0x01, 0x04, 0x0A,
0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
- PHYREGS(0xC008, 0xBC08, 0xB808, 0xD401, 0xD501, 0xD601),
+ PHYREGS(0x08C0, 0x08BC, 0x08B8, 0x01D4, 0x01D5, 0x01D6),
},
{ .channel = 120,
.freq = 5600, /* MHz */
.unk2 = 3733,
- RADIOREGS(0x71, 0x02, 0x30, 0x04, 0x28, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x30, 0x02, 0x04, 0x28, 0x01, 0x04, 0x0A,
0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00,
0x80, 0x11, 0x00, 0x01, 0x00, 0x80),
- PHYREGS(0xC408, 0xC008, 0xBC08, 0xD301, 0xD401, 0xD501),
+ PHYREGS(0x08C4, 0x08C0, 0x08BC, 0x01D3, 0x01D4, 0x01D5),
},
{ .channel = 122,
.freq = 5610, /* MHz */
.unk2 = 3740,
- RADIOREGS(0x71, 0x02, 0x31, 0x04, 0x28, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x31, 0x02, 0x04, 0x28, 0x01, 0x04, 0x0A,
0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00,
0x80, 0x11, 0x00, 0x01, 0x00, 0x80),
- PHYREGS(0xC808, 0xC408, 0xC008, 0xD201, 0xD301, 0xD401),
+ PHYREGS(0x08C8, 0x08C4, 0x08C0, 0x01D2, 0x01D3, 0x01D4),
},
{ .channel = 124,
.freq = 5620, /* MHz */
.unk2 = 3747,
- RADIOREGS(0x71, 0x02, 0x32, 0x04, 0x21, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x32, 0x02, 0x04, 0x21, 0x01, 0x04, 0x0A,
0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
0x80, 0x11, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xCC08, 0xC808, 0xC408, 0xD201, 0xD201, 0xD301),
+ PHYREGS(0x08CC, 0x08C8, 0x08C4, 0x01D2, 0x01D2, 0x01D3),
},
{ .channel = 126,
.freq = 5630, /* MHz */
.unk2 = 3753,
- RADIOREGS(0x71, 0x02, 0x33, 0x04, 0x21, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x33, 0x02, 0x04, 0x21, 0x01, 0x04, 0x0A,
0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
0x80, 0x11, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xD008, 0xCC08, 0xC808, 0xD101, 0xD201, 0xD201),
+ PHYREGS(0x08D0, 0x08CC, 0x08C8, 0x01D1, 0x01D2, 0x01D2),
},
{ .channel = 128,
.freq = 5640, /* MHz */
.unk2 = 3760,
- RADIOREGS(0x71, 0x02, 0x34, 0x03, 0x1C, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x34, 0x02, 0x03, 0x1C, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xD408, 0xD008, 0xCC08, 0xD001, 0xD101, 0xD201),
+ PHYREGS(0x08D4, 0x08D0, 0x08CC, 0x01D0, 0x01D1, 0x01D2),
},
{ .channel = 130,
.freq = 5650, /* MHz */
.unk2 = 3767,
- RADIOREGS(0x71, 0x02, 0x35, 0x03, 0x1C, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x35, 0x02, 0x03, 0x1C, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xD808, 0xD408, 0xD008, 0xCF01, 0xD001, 0xD101),
+ PHYREGS(0x08D8, 0x08D4, 0x08D0, 0x01CF, 0x01D0, 0x01D1),
},
{ .channel = 132,
.freq = 5660, /* MHz */
.unk2 = 3773,
- RADIOREGS(0x71, 0x02, 0x36, 0x03, 0x16, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x36, 0x02, 0x03, 0x16, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xDC08, 0xD808, 0xD408, 0xCE01, 0xCF01, 0xD001),
+ PHYREGS(0x08DC, 0x08D8, 0x08D4, 0x01CE, 0x01CF, 0x01D0),
},
{ .channel = 134,
.freq = 5670, /* MHz */
.unk2 = 3780,
- RADIOREGS(0x71, 0x02, 0x37, 0x03, 0x16, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x37, 0x02, 0x03, 0x16, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xE008, 0xDC08, 0xD808, 0xCE01, 0xCE01, 0xCF01),
+ PHYREGS(0x08E0, 0x08DC, 0x08D8, 0x01CE, 0x01CE, 0x01CF),
},
{ .channel = 136,
.freq = 5680, /* MHz */
.unk2 = 3787,
- RADIOREGS(0x71, 0x02, 0x38, 0x03, 0x10, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x38, 0x02, 0x03, 0x10, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xE408, 0xE008, 0xDC08, 0xCD01, 0xCE01, 0xCE01),
+ PHYREGS(0x08E4, 0x08E0, 0x08DC, 0x01CD, 0x01CE, 0x01CE),
},
{ .channel = 138,
.freq = 5690, /* MHz */
.unk2 = 3793,
- RADIOREGS(0x71, 0x02, 0x39, 0x03, 0x10, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x39, 0x02, 0x03, 0x10, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xE808, 0xE408, 0xE008, 0xCC01, 0xCD01, 0xCE01),
+ PHYREGS(0x08E8, 0x08E4, 0x08E0, 0x01CC, 0x01CD, 0x01CE),
},
{ .channel = 140,
.freq = 5700, /* MHz */
.unk2 = 3800,
- RADIOREGS(0x71, 0x02, 0x3A, 0x02, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x3A, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xEC08, 0xE808, 0xE408, 0xCB01, 0xCC01, 0xCD01),
+ PHYREGS(0x08EC, 0x08E8, 0x08E4, 0x01CB, 0x01CC, 0x01CD),
},
{ .channel = 142,
.freq = 5710, /* MHz */
.unk2 = 3807,
- RADIOREGS(0x71, 0x02, 0x3B, 0x02, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x3B, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xF008, 0xEC08, 0xE808, 0xCA01, 0xCB01, 0xCC01),
+ PHYREGS(0x08F0, 0x08EC, 0x08E8, 0x01CA, 0x01CB, 0x01CC),
},
{ .channel = 144,
.freq = 5720, /* MHz */
.unk2 = 3813,
- RADIOREGS(0x71, 0x02, 0x3C, 0x02, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x3C, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xF408, 0xF008, 0xEC08, 0xC901, 0xCA01, 0xCB01),
+ PHYREGS(0x08F4, 0x08F0, 0x08EC, 0x01C9, 0x01CA, 0x01CB),
},
{ .channel = 145,
.freq = 5725, /* MHz */
.unk2 = 3817,
- RADIOREGS(0x72, 0x04, 0x79, 0x02, 0x03, 0x01, 0x03, 0x14,
+ RADIOREGS(0x72, 0x79, 0x04, 0x02, 0x03, 0x01, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xF608, 0xF208, 0xEE08, 0xC901, 0xCA01, 0xCB01),
+ PHYREGS(0x08F6, 0x08F2, 0x08EE, 0x01C9, 0x01CA, 0x01CB),
},
{ .channel = 146,
.freq = 5730, /* MHz */
.unk2 = 3820,
- RADIOREGS(0x71, 0x02, 0x3D, 0x02, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x3D, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xF808, 0xF408, 0xF008, 0xC901, 0xC901, 0xCA01),
+ PHYREGS(0x08F8, 0x08F4, 0x08F0, 0x01C9, 0x01C9, 0x01CA),
},
{ .channel = 147,
.freq = 5735, /* MHz */
.unk2 = 3823,
- RADIOREGS(0x72, 0x04, 0x7B, 0x02, 0x03, 0x01, 0x03, 0x14,
+ RADIOREGS(0x72, 0x7B, 0x04, 0x02, 0x03, 0x01, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xFA08, 0xF608, 0xF208, 0xC801, 0xC901, 0xCA01),
+ PHYREGS(0x08FA, 0x08F6, 0x08F2, 0x01C8, 0x01C9, 0x01CA),
},
{ .channel = 148,
.freq = 5740, /* MHz */
.unk2 = 3827,
- RADIOREGS(0x71, 0x02, 0x3E, 0x02, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x3E, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xFC08, 0xF808, 0xF408, 0xC801, 0xC901, 0xC901),
+ PHYREGS(0x08FC, 0x08F8, 0x08F4, 0x01C8, 0x01C9, 0x01C9),
},
{ .channel = 149,
.freq = 5745, /* MHz */
.unk2 = 3830,
- RADIOREGS(0x72, 0x04, 0x7D, 0x02, 0xFE, 0x00, 0x03, 0x14,
+ RADIOREGS(0x72, 0x7D, 0x04, 0x02, 0xFE, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0xFE08, 0xFA08, 0xF608, 0xC801, 0xC801, 0xC901),
+ PHYREGS(0x08FE, 0x08FA, 0x08F6, 0x01C8, 0x01C8, 0x01C9),
},
{ .channel = 150,
.freq = 5750, /* MHz */
.unk2 = 3833,
- RADIOREGS(0x71, 0x02, 0x3F, 0x02, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x3F, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x0009, 0xFC08, 0xF808, 0xC701, 0xC801, 0xC901),
+ PHYREGS(0x0900, 0x08FC, 0x08F8, 0x01C7, 0x01C8, 0x01C9),
},
{ .channel = 151,
.freq = 5755, /* MHz */
.unk2 = 3837,
- RADIOREGS(0x72, 0x04, 0x7F, 0x02, 0xFE, 0x00, 0x03, 0x14,
+ RADIOREGS(0x72, 0x7F, 0x04, 0x02, 0xFE, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x0209, 0xFE08, 0xFA08, 0xC701, 0xC801, 0xC801),
+ PHYREGS(0x0902, 0x08FE, 0x08FA, 0x01C7, 0x01C8, 0x01C8),
},
{ .channel = 152,
.freq = 5760, /* MHz */
.unk2 = 3840,
- RADIOREGS(0x71, 0x02, 0x40, 0x02, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x40, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x0409, 0x0009, 0xFC08, 0xC601, 0xC701, 0xC801),
+ PHYREGS(0x0904, 0x0900, 0x08FC, 0x01C6, 0x01C7, 0x01C8),
},
{ .channel = 153,
.freq = 5765, /* MHz */
.unk2 = 3843,
- RADIOREGS(0x72, 0x04, 0x81, 0x02, 0xF8, 0x00, 0x03, 0x14,
+ RADIOREGS(0x72, 0x81, 0x04, 0x02, 0xF8, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x0609, 0x0209, 0xFE08, 0xC601, 0xC701, 0xC801),
+ PHYREGS(0x0906, 0x0902, 0x08FE, 0x01C6, 0x01C7, 0x01C8),
},
{ .channel = 154,
.freq = 5770, /* MHz */
.unk2 = 3847,
- RADIOREGS(0x71, 0x02, 0x41, 0x02, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x41, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x0809, 0x0409, 0x0009, 0xC601, 0xC601, 0xC701),
+ PHYREGS(0x0908, 0x0904, 0x0900, 0x01C6, 0x01C6, 0x01C7),
},
{ .channel = 155,
.freq = 5775, /* MHz */
.unk2 = 3850,
- RADIOREGS(0x72, 0x04, 0x83, 0x02, 0xF8, 0x00, 0x03, 0x14,
+ RADIOREGS(0x72, 0x83, 0x04, 0x02, 0xF8, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x0A09, 0x0609, 0x0209, 0xC501, 0xC601, 0xC701),
+ PHYREGS(0x090A, 0x0906, 0x0902, 0x01C5, 0x01C6, 0x01C7),
},
{ .channel = 156,
.freq = 5780, /* MHz */
.unk2 = 3853,
- RADIOREGS(0x71, 0x02, 0x42, 0x02, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x42, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x0C09, 0x0809, 0x0409, 0xC501, 0xC601, 0xC601),
+ PHYREGS(0x090C, 0x0908, 0x0904, 0x01C5, 0x01C6, 0x01C6),
},
{ .channel = 157,
.freq = 5785, /* MHz */
.unk2 = 3857,
- RADIOREGS(0x72, 0x04, 0x85, 0x02, 0xF2, 0x00, 0x03, 0x14,
+ RADIOREGS(0x72, 0x85, 0x04, 0x02, 0xF2, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x0E09, 0x0A09, 0x0609, 0xC401, 0xC501, 0xC601),
+ PHYREGS(0x090E, 0x090A, 0x0906, 0x01C4, 0x01C5, 0x01C6),
},
{ .channel = 158,
.freq = 5790, /* MHz */
.unk2 = 3860,
- RADIOREGS(0x71, 0x02, 0x43, 0x02, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x43, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x1009, 0x0C09, 0x0809, 0xC401, 0xC501, 0xC601),
+ PHYREGS(0x0910, 0x090C, 0x0908, 0x01C4, 0x01C5, 0x01C6),
},
{ .channel = 159,
.freq = 5795, /* MHz */
.unk2 = 3863,
- RADIOREGS(0x72, 0x04, 0x87, 0x02, 0xF2, 0x00, 0x03, 0x14,
+ RADIOREGS(0x72, 0x87, 0x04, 0x02, 0xF2, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x1209, 0x0E09, 0x0A09, 0xC401, 0xC401, 0xC501),
+ PHYREGS(0x0912, 0x090E, 0x090A, 0x01C4, 0x01C4, 0x01C5),
},
{ .channel = 160,
.freq = 5800, /* MHz */
.unk2 = 3867,
- RADIOREGS(0x71, 0x02, 0x44, 0x01, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x44, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x1409, 0x1009, 0x0C09, 0xC301, 0xC401, 0xC501),
+ PHYREGS(0x0914, 0x0910, 0x090C, 0x01C3, 0x01C4, 0x01C5),
},
{ .channel = 161,
.freq = 5805, /* MHz */
.unk2 = 3870,
- RADIOREGS(0x72, 0x04, 0x89, 0x01, 0xED, 0x00, 0x03, 0x14,
+ RADIOREGS(0x72, 0x89, 0x04, 0x01, 0xED, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x1609, 0x1209, 0x0E09, 0xC301, 0xC401, 0xC401),
+ PHYREGS(0x0916, 0x0912, 0x090E, 0x01C3, 0x01C4, 0x01C4),
},
{ .channel = 162,
.freq = 5810, /* MHz */
.unk2 = 3873,
- RADIOREGS(0x71, 0x02, 0x45, 0x01, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x45, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x1809, 0x1409, 0x1009, 0xC201, 0xC301, 0xC401),
+ PHYREGS(0x0918, 0x0914, 0x0910, 0x01C2, 0x01C3, 0x01C4),
},
{ .channel = 163,
.freq = 5815, /* MHz */
.unk2 = 3877,
- RADIOREGS(0x72, 0x04, 0x8B, 0x01, 0xED, 0x00, 0x03, 0x14,
+ RADIOREGS(0x72, 0x8B, 0x04, 0x01, 0xED, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x1A09, 0x1609, 0x1209, 0xC201, 0xC301, 0xC401),
+ PHYREGS(0x091A, 0x0916, 0x0912, 0x01C2, 0x01C3, 0x01C4),
},
{ .channel = 164,
.freq = 5820, /* MHz */
.unk2 = 3880,
- RADIOREGS(0x71, 0x02, 0x46, 0x01, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x46, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x1C09, 0x1809, 0x1409, 0xC201, 0xC201, 0xC301),
+ PHYREGS(0x091C, 0x0918, 0x0914, 0x01C2, 0x01C2, 0x01C3),
},
{ .channel = 165,
.freq = 5825, /* MHz */
.unk2 = 3883,
- RADIOREGS(0x72, 0x04, 0x8D, 0x01, 0xED, 0x00, 0x03, 0x14,
+ RADIOREGS(0x72, 0x8D, 0x04, 0x01, 0xED, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x1E09, 0x1A09, 0x1609, 0xC101, 0xC201, 0xC301),
+ PHYREGS(0x091E, 0x091A, 0x0916, 0x01C1, 0x01C2, 0x01C3),
},
{ .channel = 166,
.freq = 5830, /* MHz */
.unk2 = 3887,
- RADIOREGS(0x71, 0x02, 0x47, 0x01, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x47, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x2009, 0x1C09, 0x1809, 0xC101, 0xC201, 0xC201),
+ PHYREGS(0x0920, 0x091C, 0x0918, 0x01C1, 0x01C2, 0x01C2),
},
{ .channel = 168,
.freq = 5840, /* MHz */
.unk2 = 3893,
- RADIOREGS(0x71, 0x02, 0x48, 0x01, 0x0A, 0x01, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x48, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x2409, 0x2009, 0x1C09, 0xC001, 0xC101, 0xC201),
+ PHYREGS(0x0924, 0x0920, 0x091C, 0x01C0, 0x01C1, 0x01C2),
},
{ .channel = 170,
.freq = 5850, /* MHz */
.unk2 = 3900,
- RADIOREGS(0x71, 0x02, 0x49, 0x01, 0xE0, 0x00, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x49, 0x02, 0x01, 0xE0, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x2809, 0x2409, 0x2009, 0xBF01, 0xC001, 0xC101),
+ PHYREGS(0x0928, 0x0924, 0x0920, 0x01BF, 0x01C0, 0x01C1),
},
{ .channel = 172,
.freq = 5860, /* MHz */
.unk2 = 3907,
- RADIOREGS(0x71, 0x02, 0x4A, 0x01, 0xDE, 0x00, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x4A, 0x02, 0x01, 0xDE, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x2C09, 0x2809, 0x2409, 0xBF01, 0xBF01, 0xC001),
+ PHYREGS(0x092C, 0x0928, 0x0924, 0x01BF, 0x01BF, 0x01C0),
},
{ .channel = 174,
.freq = 5870, /* MHz */
.unk2 = 3913,
- RADIOREGS(0x71, 0x02, 0x4B, 0x00, 0xDB, 0x00, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x4B, 0x02, 0x00, 0xDB, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x3009, 0x2C09, 0x2809, 0xBE01, 0xBF01, 0xBF01),
+ PHYREGS(0x0930, 0x092C, 0x0928, 0x01BE, 0x01BF, 0x01BF),
},
{ .channel = 176,
.freq = 5880, /* MHz */
.unk2 = 3920,
- RADIOREGS(0x71, 0x02, 0x4C, 0x00, 0xD8, 0x00, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x4C, 0x02, 0x00, 0xD8, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x3409, 0x3009, 0x2C09, 0xBD01, 0xBE01, 0xBF01),
+ PHYREGS(0x0934, 0x0930, 0x092C, 0x01BD, 0x01BE, 0x01BF),
},
{ .channel = 178,
.freq = 5890, /* MHz */
.unk2 = 3927,
- RADIOREGS(0x71, 0x02, 0x4D, 0x00, 0xD6, 0x00, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x4D, 0x02, 0x00, 0xD6, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x3809, 0x3409, 0x3009, 0xBC01, 0xBD01, 0xBE01),
+ PHYREGS(0x0938, 0x0934, 0x0930, 0x01BC, 0x01BD, 0x01BE),
},
{ .channel = 180,
.freq = 5900, /* MHz */
.unk2 = 3933,
- RADIOREGS(0x71, 0x02, 0x4E, 0x00, 0xD3, 0x00, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x4E, 0x02, 0x00, 0xD3, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x3C09, 0x3809, 0x3409, 0xBC01, 0xBC01, 0xBD01),
+ PHYREGS(0x093C, 0x0938, 0x0934, 0x01BC, 0x01BC, 0x01BD),
},
{ .channel = 182,
.freq = 5910, /* MHz */
.unk2 = 3940,
- RADIOREGS(0x71, 0x02, 0x4F, 0x00, 0xD6, 0x00, 0x04, 0x0A,
+ RADIOREGS(0x71, 0x4F, 0x02, 0x00, 0xD6, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
- PHYREGS(0x4009, 0x3C09, 0x3809, 0xBB01, 0xBC01, 0xBC01),
+ PHYREGS(0x0940, 0x093C, 0x0938, 0x01BB, 0x01BC, 0x01BC),
},
{ .channel = 1,
.freq = 2412, /* MHz */
.unk2 = 3216,
- RADIOREGS(0x73, 0x09, 0x6C, 0x0F, 0x00, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0x6C, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0D, 0x0C,
0x80, 0xFF, 0x88, 0x0D, 0x0C, 0x80),
- PHYREGS(0xC903, 0xC503, 0xC103, 0x3A04, 0x3F04, 0x4304),
+ PHYREGS(0x03C9, 0x03C5, 0x03C1, 0x043A, 0x043F, 0x0443),
},
{ .channel = 2,
.freq = 2417, /* MHz */
.unk2 = 3223,
- RADIOREGS(0x73, 0x09, 0x71, 0x0F, 0x00, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0x71, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0B,
0x80, 0xFF, 0x88, 0x0C, 0x0B, 0x80),
- PHYREGS(0xCB03, 0xC703, 0xC303, 0x3804, 0x3D04, 0x4104),
+ PHYREGS(0x03CB, 0x03C7, 0x03C3, 0x0438, 0x043D, 0x0441),
},
{ .channel = 3,
.freq = 2422, /* MHz */
.unk2 = 3229,
- RADIOREGS(0x73, 0x09, 0x76, 0x0F, 0x00, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0x76, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A,
0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80),
- PHYREGS(0xCD03, 0xC903, 0xC503, 0x3604, 0x3A04, 0x3F04),
+ PHYREGS(0x03CD, 0x03C9, 0x03C5, 0x0436, 0x043A, 0x043F),
},
{ .channel = 4,
.freq = 2427, /* MHz */
.unk2 = 3236,
- RADIOREGS(0x73, 0x09, 0x7B, 0x0F, 0x00, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0x7B, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A,
0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80),
- PHYREGS(0xCF03, 0xCB03, 0xC703, 0x3404, 0x3804, 0x3D04),
+ PHYREGS(0x03CF, 0x03CB, 0x03C7, 0x0434, 0x0438, 0x043D),
},
{ .channel = 5,
.freq = 2432, /* MHz */
.unk2 = 3243,
- RADIOREGS(0x73, 0x09, 0x80, 0x0F, 0x00, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0x80, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x09,
0x80, 0xFF, 0x88, 0x0C, 0x09, 0x80),
- PHYREGS(0xD103, 0xCD03, 0xC903, 0x3104, 0x3604, 0x3A04),
+ PHYREGS(0x03D1, 0x03CD, 0x03C9, 0x0431, 0x0436, 0x043A),
},
{ .channel = 6,
.freq = 2437, /* MHz */
.unk2 = 3249,
- RADIOREGS(0x73, 0x09, 0x85, 0x0F, 0x00, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0x85, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0B, 0x08,
0x80, 0xFF, 0x88, 0x0B, 0x08, 0x80),
- PHYREGS(0xD303, 0xCF03, 0xCB03, 0x2F04, 0x3404, 0x3804),
+ PHYREGS(0x03D3, 0x03CF, 0x03CB, 0x042F, 0x0434, 0x0438),
},
{ .channel = 7,
.freq = 2442, /* MHz */
.unk2 = 3256,
- RADIOREGS(0x73, 0x09, 0x8A, 0x0F, 0x00, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0x8A, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x07,
0x80, 0xFF, 0x88, 0x0A, 0x07, 0x80),
- PHYREGS(0xD503, 0xD103, 0xCD03, 0x2D04, 0x3104, 0x3604),
+ PHYREGS(0x03D5, 0x03D1, 0x03CD, 0x042D, 0x0431, 0x0436),
},
{ .channel = 8,
.freq = 2447, /* MHz */
.unk2 = 3263,
- RADIOREGS(0x73, 0x09, 0x8F, 0x0F, 0x00, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0x8F, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x06,
0x80, 0xFF, 0x88, 0x0A, 0x06, 0x80),
- PHYREGS(0xD703, 0xD303, 0xCF03, 0x2B04, 0x2F04, 0x3404),
+ PHYREGS(0x03D7, 0x03D3, 0x03CF, 0x042B, 0x042F, 0x0434),
},
{ .channel = 9,
.freq = 2452, /* MHz */
.unk2 = 3269,
- RADIOREGS(0x73, 0x09, 0x94, 0x0F, 0x00, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0x94, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x09, 0x06,
0x80, 0xFF, 0x88, 0x09, 0x06, 0x80),
- PHYREGS(0xD903, 0xD503, 0xD103, 0x2904, 0x2D04, 0x3104),
+ PHYREGS(0x03D9, 0x03D5, 0x03D1, 0x0429, 0x042D, 0x0431),
},
{ .channel = 10,
.freq = 2457, /* MHz */
.unk2 = 3276,
- RADIOREGS(0x73, 0x09, 0x99, 0x0F, 0x00, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0x99, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x05,
0x80, 0xFF, 0x88, 0x08, 0x05, 0x80),
- PHYREGS(0xDB03, 0xD703, 0xD303, 0x2704, 0x2B04, 0x2F04),
+ PHYREGS(0x03DB, 0x03D7, 0x03D3, 0x0427, 0x042B, 0x042F),
},
{ .channel = 11,
.freq = 2462, /* MHz */
.unk2 = 3283,
- RADIOREGS(0x73, 0x09, 0x9E, 0x0F, 0x00, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0x9E, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x04,
0x80, 0xFF, 0x88, 0x08, 0x04, 0x80),
- PHYREGS(0xDD03, 0xD903, 0xD503, 0x2404, 0x2904, 0x2D04),
+ PHYREGS(0x03DD, 0x03D9, 0x03D5, 0x0424, 0x0429, 0x042D),
},
{ .channel = 12,
.freq = 2467, /* MHz */
.unk2 = 3289,
- RADIOREGS(0x73, 0x09, 0xA3, 0x0F, 0x00, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0xA3, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x03,
0x80, 0xFF, 0x88, 0x08, 0x03, 0x80),
- PHYREGS(0xDF03, 0xDB03, 0xD703, 0x2204, 0x2704, 0x2B04),
+ PHYREGS(0x03DF, 0x03DB, 0x03D7, 0x0422, 0x0427, 0x042B),
},
{ .channel = 13,
.freq = 2472, /* MHz */
.unk2 = 3296,
- RADIOREGS(0x73, 0x09, 0xA8, 0x0F, 0x00, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0xA8, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x03,
0x80, 0xFF, 0x88, 0x07, 0x03, 0x80),
- PHYREGS(0xE103, 0xDD03, 0xD903, 0x2004, 0x2404, 0x2904),
+ PHYREGS(0x03E1, 0x03DD, 0x03D9, 0x0420, 0x0424, 0x0429),
},
{ .channel = 14,
.freq = 2484, /* MHz */
.unk2 = 3312,
- RADIOREGS(0x73, 0x09, 0xB4, 0x0F, 0xFF, 0x01, 0x07, 0x15,
+ RADIOREGS(0x73, 0xB4, 0x09, 0x0F, 0xFF, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x01,
0x80, 0xFF, 0x88, 0x07, 0x01, 0x80),
- PHYREGS(0xE603, 0xE203, 0xDE03, 0x1B04, 0x1F04, 0x2404),
+ PHYREGS(0x03E6, 0x03E2, 0x03DE, 0x041B, 0x041F, 0x0424),
},
};
@@ -1299,7 +1299,7 @@ void b2055_upload_inittab(struct b43_wldev *dev,
bool ghz5, bool ignore_uploadflag)
{
const struct b2055_inittab_entry *e;
- unsigned int i;
+ unsigned int i, writes = 0;
u16 value;
for (i = 0; i < ARRAY_SIZE(b2055_inittab); i++) {
@@ -1312,6 +1312,8 @@ void b2055_upload_inittab(struct b43_wldev *dev,
else
value = e->ghz2;
b43_radio_write16(dev, i, value);
+ if (++writes % 4 == 0)
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush */
}
}
}
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index d8563192ce56..8890df067029 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -24,17 +24,9073 @@
#include "radio_2056.h"
#include "phy_common.h"
+struct b2056_inittab_entry {
+ /* Value to write if we use the 5GHz band. */
+ u16 ghz5;
+ /* Value to write if we use the 2.4GHz band. */
+ u16 ghz2;
+ /* Flags */
+ u8 flags;
+};
+#define B2056_INITTAB_ENTRY_OK 0x01
+#define B2056_INITTAB_UPLOAD 0x02
+#define UPLOAD .flags = B2056_INITTAB_ENTRY_OK | B2056_INITTAB_UPLOAD
+#define NOUPLOAD .flags = B2056_INITTAB_ENTRY_OK
+
+struct b2056_inittabs_pts {
+ const struct b2056_inittab_entry *syn;
+ unsigned int syn_length;
+ const struct b2056_inittab_entry *tx;
+ unsigned int tx_length;
+ const struct b2056_inittab_entry *rx;
+ unsigned int rx_length;
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
+ [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_PU] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_GPIO_MASTER1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_GPIO_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TOPBIAS_MASTER] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
+ [B2056_SYN_TOPBIAS_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_AFEREG] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSEIDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSERCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LPO] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_STATUS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCAL_CODE_OUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL0] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL1] = { .ghz5 = 0x001f, .ghz2 = 0x001f, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL2] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL3] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_ZCAL_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_ZCAL_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST1] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST2] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST3] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
+ [B2056_SYN_PLL_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL1] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL3] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
+ [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCO1] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
+ [B2056_SYN_PLL_MONITOR1] = { .ghz5 = 0x00b4, .ghz2 = 0x00b4, NOUPLOAD, },
+ [B2056_SYN_PLL_MONITOR2] = { .ghz5 = 0x00d2, .ghz2 = 0x00d2, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL4] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL6] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL7] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_SYN_PLL_VCOCAL13] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_SYN_PLL_VREG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU2] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU8] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RCCR1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF1] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER2] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
+ [B2056_SYN_LOGEN_BUF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF4] = { .ghz5 = 0x00cc, .ghz2 = 0x00cc, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV1] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV2] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV3] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLOUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_CALEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PEAKDET1] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CORE_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF2_OVRVAL]= { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER3_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF5_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF6_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL_WAITCNT] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CORE_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_CMOS_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
+ [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_IQCAL_GAIN_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_LOFT_FINE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_FINE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_COARSE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_COARSE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_TX_COM_MASTER1] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_TX_COM_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RXIQCAL_TXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_TX_SSI_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_IQCAL_VCM_HG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_IQCAL_IDAC] = { .ghz5 = 0x0037, .ghz2 = 0x0037, NOUPLOAD, },
+ [B2056_TX_TSSI_VCM] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_TX_AMP_DET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TX_SSI_MUX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSIA] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_TSSIG] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PA_SPARE1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_TX_PA_SPARE2] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_TX_INTPAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_TX_INTPAA_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x002d, .ghz2 = 0x002d, NOUPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
+ [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAA_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAG_IAUX_STAT] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAG_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_IMAIN_STAT] = { .ghz5 = 0x001e, .ghz2 = 0x001e, NOUPLOAD, },
+ [B2056_TX_INTPAG_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
+ [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAG_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PADA_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_PADA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADA_BOOST_TUNE] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
+ [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PADG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PADG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_PADG_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_PADG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PGAA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PGAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAA_BOOST_TUNE] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
+ [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PGAG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_PGAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAG_MISC] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_MIXA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_MIXA_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_TX_MIXG] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_MIXG_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_TX_BB_GM_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_GM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0074, .ghz2 = 0x0074, UPLOAD, },
+ [B2056_TX_TXLPF_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_BW] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_0] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_1] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_2] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_4] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_5] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_6] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
+ [B2056_TX_TXLPF_OPAMP_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_TX_TXLPF_MISC] = { .ghz5 = 0x005b, .ghz2 = 0x005b, NOUPLOAD, },
+ [B2056_TX_TXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_INTPA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_PAD_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_PGA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_GM_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_TXLPF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
+ [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXIQCAL_RXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_RX_RSSI_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RSSI_SEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RSSI_GAIN] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
+ [B2056_RX_RSSI_NB_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2I_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2I_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2Q_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2Q_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
+ [B2056_RX_RSSI_POLE] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_RSSI_WB1_IDAC] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_RX_RSSI_MISC] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
+ [B2056_RX_LNAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_LNAA_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_RX_LNAA_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_RX_LNA_A_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_RX_LNA1A_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_LNAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_LNAG_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_RX_LNAG_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_RX_LNA_G_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_RX_LNA1G_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_MIXA_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0099, .ghz2 = 0x0099, NOUPLOAD, },
+ [B2056_RX_MIXA_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0044, .ghz2 = 0x0044, UPLOAD, },
+ [B2056_RX_MIXA_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXA_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_MIXA_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_RX_MIXA_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_RX_MIXA_MAST_BIAS] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_RX_MIXG_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0099, .ghz2 = 0x0099, NOUPLOAD, },
+ [B2056_RX_MIXG_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_LOB_BIAS] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_RX_MIXG_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_RX_MIXG_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_RX_TIA_IMISC] = { .ghz5 = 0x0057, .ghz2 = 0x0057, NOUPLOAD, },
+ [B2056_RX_TIA_QMISC] = { .ghz5 = 0x0057, .ghz2 = 0x0057, NOUPLOAD, },
+ [B2056_RX_TIA_GAIN] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_TIA_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_BB_LPF_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_AACI_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_RXLPF_IDAC] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_RXLPF_OPAMPBIAS_LOWQ] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_RXLPF_OPAMPBIAS_HIGHQ]= { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_RXLPF_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
+ [B2056_RX_RXLPF_INVCM_BODY] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_RXLPF_CC_OP] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_RX_RXLPF_GAIN] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
+ [B2056_RX_RXLPF_Q_BW] = { .ghz5 = 0x0041, .ghz2 = 0x0041, NOUPLOAD, },
+ [B2056_RX_RXLPF_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_RX_RXLPF_RCCAL_HPC] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_RCCAL_LPC] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_UNUSED] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_VGA_MASTER] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGA_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_RX_VGA_GAIN] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_RX_VGA_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_RX_VGABUF_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGABUF_GAIN_BW] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_RX_TXFBMIX_A] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TXFBMIX_G] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_LNAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_LNAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_MIXTIA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_VGA_BUF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_Q] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_BUF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_VGA_HPC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
+ [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_PU] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_GPIO_MASTER1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_GPIO_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TOPBIAS_MASTER] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
+ [B2056_SYN_TOPBIAS_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_AFEREG] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSEIDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSERCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LPO] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_STATUS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCAL_CODE_OUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL0] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL1] = { .ghz5 = 0x001f, .ghz2 = 0x001f, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL2] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL3] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_ZCAL_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_ZCAL_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST1] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST2] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST3] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
+ [B2056_SYN_PLL_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL1] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL3] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
+ [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCO1] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
+ [B2056_SYN_PLL_MONITOR1] = { .ghz5 = 0x00b4, .ghz2 = 0x00b4, NOUPLOAD, },
+ [B2056_SYN_PLL_MONITOR2] = { .ghz5 = 0x00d2, .ghz2 = 0x00d2, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL4] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL6] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL7] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_SYN_PLL_VCOCAL13] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_SYN_PLL_VREG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU2] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU8] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RCCR1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF1] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER2] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
+ [B2056_SYN_LOGEN_BUF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF4] = { .ghz5 = 0x00cc, .ghz2 = 0x00cc, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV1] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV2] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV3] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLOUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_CALEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PEAKDET1] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CORE_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF2_OVRVAL]= { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER3_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF5_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF6_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL_WAITCNT] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CORE_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_CMOS_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
+ [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_IQCAL_GAIN_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_LOFT_FINE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_FINE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_COARSE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_COARSE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_TX_COM_MASTER1] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_TX_COM_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RXIQCAL_TXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_TX_SSI_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_IQCAL_VCM_HG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_IQCAL_IDAC] = { .ghz5 = 0x0037, .ghz2 = 0x0037, NOUPLOAD, },
+ [B2056_TX_TSSI_VCM] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_TX_AMP_DET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TX_SSI_MUX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSIA] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_TSSIG] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PA_SPARE1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_TX_PA_SPARE2] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_TX_INTPAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_TX_INTPAA_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x002d, .ghz2 = 0x002d, NOUPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
+ [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAA_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAG_IAUX_STAT] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAG_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_IMAIN_STAT] = { .ghz5 = 0x001e, .ghz2 = 0x001e, NOUPLOAD, },
+ [B2056_TX_INTPAG_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
+ [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAG_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PADA_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_PADA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADA_BOOST_TUNE] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
+ [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PADG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PADG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_PADG_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_PADG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PGAA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PGAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAA_BOOST_TUNE] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
+ [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PGAG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_PGAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAG_MISC] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_MIXA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_MIXA_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_TX_MIXG] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_MIXG_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_TX_BB_GM_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_GM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0072, .ghz2 = 0x0072, UPLOAD, },
+ [B2056_TX_TXLPF_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_BW] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_0] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_1] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_2] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_4] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_5] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_6] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
+ [B2056_TX_TXLPF_OPAMP_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_TX_TXLPF_MISC] = { .ghz5 = 0x005b, .ghz2 = 0x005b, NOUPLOAD, },
+ [B2056_TX_TXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_INTPA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_PAD_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_PGA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_GM_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_TXLPF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
+ [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXIQCAL_RXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_RX_RSSI_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RSSI_SEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RSSI_GAIN] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
+ [B2056_RX_RSSI_NB_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2I_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2I_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2Q_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2Q_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
+ [B2056_RX_RSSI_POLE] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_RSSI_WB1_IDAC] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_RX_RSSI_MISC] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
+ [B2056_RX_LNAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_LNAA_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_RX_LNAA_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_RX_LNA_A_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_RX_LNA1A_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_LNAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_LNAG_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_RX_LNAG_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_RX_LNA_G_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_RX_LNA1G_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_MIXA_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_MIXA_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0044, .ghz2 = 0x0044, UPLOAD, },
+ [B2056_RX_MIXA_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXA_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_MIXA_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_RX_MIXA_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_RX_MIXA_MAST_BIAS] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_RX_MIXG_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_MIXG_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_LOB_BIAS] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_RX_MIXG_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_RX_MIXG_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_TIA_GAIN] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_TIA_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_BB_LPF_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_AACI_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_RXLPF_IDAC] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_RXLPF_OPAMPBIAS_LOWQ] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_RXLPF_OPAMPBIAS_HIGHQ]= { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_RXLPF_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x002f, .ghz2 = 0x002f, UPLOAD, },
+ [B2056_RX_RXLPF_INVCM_BODY] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_RXLPF_CC_OP] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_RX_RXLPF_GAIN] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
+ [B2056_RX_RXLPF_Q_BW] = { .ghz5 = 0x0041, .ghz2 = 0x0041, NOUPLOAD, },
+ [B2056_RX_RXLPF_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_RX_RXLPF_RCCAL_HPC] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_RCCAL_LPC] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_UNUSED] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_VGA_MASTER] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGA_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_RX_VGA_GAIN] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_RX_VGA_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_RX_VGABUF_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGABUF_GAIN_BW] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_RX_TXFBMIX_A] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TXFBMIX_G] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_LNAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_LNAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_MIXTIA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_VGA_BUF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_Q] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_BUF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_VGA_HPC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
+ [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_PU] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_GPIO_MASTER1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_GPIO_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TOPBIAS_MASTER] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
+ [B2056_SYN_TOPBIAS_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_AFEREG] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSEIDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSERCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LPO] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_STATUS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCAL_CODE_OUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL1] = { .ghz5 = 0x001f, .ghz2 = 0x001f, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL2] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL3] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_ZCAL_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_ZCAL_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST1] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST2] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST3] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
+ [B2056_SYN_PLL_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL1] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL3] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
+ [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCO1] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
+ [B2056_SYN_PLL_MONITOR1] = { .ghz5 = 0x00b4, .ghz2 = 0x00b4, NOUPLOAD, },
+ [B2056_SYN_PLL_MONITOR2] = { .ghz5 = 0x00d2, .ghz2 = 0x00d2, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL4] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL6] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL7] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_SYN_PLL_VCOCAL13] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_SYN_PLL_VREG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU2] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU8] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RCCR1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF1] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER2] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
+ [B2056_SYN_LOGEN_BUF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF4] = { .ghz5 = 0x00cc, .ghz2 = 0x00cc, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV1] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV2] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV3] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLOUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_CALEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PEAKDET1] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CORE_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF2_OVRVAL]= { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER3_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF5_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF6_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL_WAITCNT] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CORE_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_CMOS_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
+ [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_IQCAL_GAIN_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_LOFT_FINE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_FINE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_COARSE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_COARSE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_TX_COM_MASTER1] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_TX_COM_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RXIQCAL_TXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_TX_SSI_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_IQCAL_VCM_HG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_IQCAL_IDAC] = { .ghz5 = 0x0037, .ghz2 = 0x0037, NOUPLOAD, },
+ [B2056_TX_TSSI_VCM] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_TX_AMP_DET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TX_SSI_MUX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSIA] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_TSSIG] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PA_SPARE1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_TX_PA_SPARE2] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_TX_INTPAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_TX_INTPAA_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x002d, .ghz2 = 0x002d, NOUPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
+ [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAA_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAG_IAUX_STAT] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAG_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_IMAIN_STAT] = { .ghz5 = 0x001e, .ghz2 = 0x001e, NOUPLOAD, },
+ [B2056_TX_INTPAG_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
+ [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAG_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PADA_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_PADA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADA_BOOST_TUNE] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
+ [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PADG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PADG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_PADG_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_PADG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PGAA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PGAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAA_BOOST_TUNE] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
+ [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PGAG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_PGAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAG_MISC] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_MIXA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_MIXA_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_TX_MIXG] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_MIXG_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_TX_BB_GM_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_GM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_TX_TXLPF_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_BW] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_0] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_1] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_2] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_4] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_5] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_6] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
+ [B2056_TX_TXLPF_OPAMP_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_TX_TXLPF_MISC] = { .ghz5 = 0x005b, .ghz2 = 0x005b, NOUPLOAD, },
+ [B2056_TX_TXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_INTPA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_PAD_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_PGA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_GM_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_TXLPF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC0] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC1] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC2] = { .ghz5 = 0x0071, .ghz2 = 0x0071, UPLOAD, },
+ [B2056_TX_GMBB_IDAC3] = { .ghz5 = 0x0071, .ghz2 = 0x0071, UPLOAD, },
+ [B2056_TX_GMBB_IDAC4] = { .ghz5 = 0x0072, .ghz2 = 0x0072, UPLOAD, },
+ [B2056_TX_GMBB_IDAC5] = { .ghz5 = 0x0073, .ghz2 = 0x0073, UPLOAD, },
+ [B2056_TX_GMBB_IDAC6] = { .ghz5 = 0x0074, .ghz2 = 0x0074, UPLOAD, },
+ [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
+ [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXIQCAL_RXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_RX_RSSI_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RSSI_SEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RSSI_GAIN] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
+ [B2056_RX_RSSI_NB_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2I_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2I_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2Q_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2Q_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
+ [B2056_RX_RSSI_POLE] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_RSSI_WB1_IDAC] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_RX_RSSI_MISC] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
+ [B2056_RX_LNAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_LNAA_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_RX_LNAA_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_RX_LNA_A_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_RX_LNA1A_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_LNAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_LNAG_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_RX_LNAG_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_RX_LNA_G_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_RX_LNA1G_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_MIXA_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_MIXA_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
+ [B2056_RX_MIXA_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXA_CMFB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_RX_MIXA_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_RX_MIXA_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_RX_MIXA_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_MIXG_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_LOB_BIAS] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_RX_MIXG_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_CMFB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_RX_MIXG_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_RX_MIXG_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_TIA_GAIN] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_TIA_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_BB_LPF_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_AACI_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_RXLPF_IDAC] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_RXLPF_OPAMPBIAS_LOWQ] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_RXLPF_OPAMPBIAS_HIGHQ]= { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_RXLPF_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
+ [B2056_RX_RXLPF_INVCM_BODY] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_RXLPF_CC_OP] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_RX_RXLPF_GAIN] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
+ [B2056_RX_RXLPF_Q_BW] = { .ghz5 = 0x0041, .ghz2 = 0x0041, NOUPLOAD, },
+ [B2056_RX_RXLPF_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_RX_RXLPF_RCCAL_HPC] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_RCCAL_LPC] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_UNUSED] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_VGA_MASTER] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGA_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_RX_VGA_GAIN] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_RX_VGA_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_RX_VGABUF_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGABUF_GAIN_BW] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_RX_TXFBMIX_A] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TXFBMIX_G] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_LNAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_LNAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_MIXTIA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_VGA_BUF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_Q] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_BUF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_VGA_HPC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
+ [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_PU] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_GPIO_MASTER1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_GPIO_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TOPBIAS_MASTER] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
+ [B2056_SYN_TOPBIAS_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_AFEREG] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSEIDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSERCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LPO] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_STATUS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCAL_CODE_OUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL1] = { .ghz5 = 0x001f, .ghz2 = 0x001f, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL2] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL3] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_ZCAL_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_ZCAL_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST1] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST2] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST3] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
+ [B2056_SYN_PLL_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL1] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL3] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
+ [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCO1] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
+ [B2056_SYN_PLL_MONITOR1] = { .ghz5 = 0x00b4, .ghz2 = 0x00b4, NOUPLOAD, },
+ [B2056_SYN_PLL_MONITOR2] = { .ghz5 = 0x00d2, .ghz2 = 0x00d2, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL4] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL6] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL7] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_SYN_PLL_VCOCAL13] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_SYN_PLL_VREG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU2] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU8] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RCCR1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF1] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER2] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
+ [B2056_SYN_LOGEN_BUF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF4] = { .ghz5 = 0x00cc, .ghz2 = 0x00cc, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV1] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV2] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV3] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLOUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_CALEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PEAKDET1] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CORE_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF2_OVRVAL]= { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER3_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF5_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF6_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL_WAITCNT] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CORE_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_CMOS_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
+ [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_IQCAL_GAIN_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_LOFT_FINE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_FINE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_COARSE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_COARSE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_TX_COM_MASTER1] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_TX_COM_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RXIQCAL_TXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_TX_SSI_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_IQCAL_VCM_HG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_IQCAL_IDAC] = { .ghz5 = 0x0037, .ghz2 = 0x0037, NOUPLOAD, },
+ [B2056_TX_TSSI_VCM] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_TX_AMP_DET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TX_SSI_MUX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSIA] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_TSSIG] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PA_SPARE1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_TX_PA_SPARE2] = { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
+ [B2056_TX_INTPAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_TX_INTPAA_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
+ [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAA_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAG_IAUX_STAT] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAG_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_IMAIN_STAT] = { .ghz5 = 0x001e, .ghz2 = 0x001e, NOUPLOAD, },
+ [B2056_TX_INTPAG_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
+ [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAG_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PADA_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_PADA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADA_BOOST_TUNE] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
+ [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PADG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PADG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_PADG_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_PADG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PGAA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PGAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAA_BOOST_TUNE] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
+ [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PGAG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_PGAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAG_MISC] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_MIXA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_MIXA_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_TX_MIXG] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_MIXG_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_TX_BB_GM_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_GM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_TX_TXLPF_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_BW] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_0] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_1] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_2] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_4] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_5] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_6] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
+ [B2056_TX_TXLPF_OPAMP_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_TX_TXLPF_MISC] = { .ghz5 = 0x005b, .ghz2 = 0x005b, NOUPLOAD, },
+ [B2056_TX_TXSPARE1] = { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
+ [B2056_TX_TXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_INTPA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_PAD_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_PGA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_GM_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_TXLPF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC0] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC1] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC2] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC3] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC4] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC5] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC6] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
+ [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXIQCAL_RXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_RX_RSSI_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RSSI_SEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RSSI_GAIN] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
+ [B2056_RX_RSSI_NB_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2I_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2I_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2Q_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2Q_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
+ [B2056_RX_RSSI_POLE] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_RSSI_WB1_IDAC] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_RX_RSSI_MISC] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
+ [B2056_RX_LNAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_LNAA_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_RX_LNAA_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_RX_LNA_A_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_RX_LNA1A_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_LNAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_LNAG_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_RX_LNAG_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_RX_LNA_G_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_RX_LNA1G_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_MIXA_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_MIXA_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
+ [B2056_RX_MIXA_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXA_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_RX_MIXA_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_RX_MIXA_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_RX_MIXA_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_MIXG_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_LOB_BIAS] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_RX_MIXG_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_RX_MIXG_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_TIA_GAIN] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_TIA_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_BB_LPF_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_AACI_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_RXLPF_IDAC] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_RXLPF_OPAMPBIAS_LOWQ] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_RXLPF_OPAMPBIAS_HIGHQ]= { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_RXLPF_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
+ [B2056_RX_RXLPF_INVCM_BODY] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_RXLPF_CC_OP] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_RX_RXLPF_GAIN] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
+ [B2056_RX_RXLPF_Q_BW] = { .ghz5 = 0x0041, .ghz2 = 0x0041, NOUPLOAD, },
+ [B2056_RX_RXLPF_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_RX_RXLPF_RCCAL_HPC] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_RCCAL_LPC] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_UNUSED] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_VGA_MASTER] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGA_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_RX_VGA_GAIN] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_RX_VGA_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_RX_VGABUF_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGABUF_GAIN_BW] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_RX_TXFBMIX_A] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TXFBMIX_G] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE3] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
+ [B2056_RX_RXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_LNAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_LNAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_MIXTIA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_VGA_BUF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_Q] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_BUF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_VGA_HPC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
+ [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_PU] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_GPIO_MASTER1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_GPIO_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TOPBIAS_MASTER] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
+ [B2056_SYN_TOPBIAS_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_AFEREG] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSEIDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSERCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LPO] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_STATUS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCAL_CODE_OUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL1] = { .ghz5 = 0x001f, .ghz2 = 0x001f, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL2] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL3] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_ZCAL_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_ZCAL_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST1] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST2] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST3] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
+ [B2056_SYN_PLL_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL1] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL3] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
+ [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCO1] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
+ [B2056_SYN_PLL_MONITOR1] = { .ghz5 = 0x00b4, .ghz2 = 0x00b4, NOUPLOAD, },
+ [B2056_SYN_PLL_MONITOR2] = { .ghz5 = 0x00d2, .ghz2 = 0x00d2, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL4] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL6] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL7] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_SYN_PLL_VCOCAL13] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_SYN_PLL_VREG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU2] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU8] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RCCR1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF1] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER2] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
+ [B2056_SYN_LOGEN_BUF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF4] = { .ghz5 = 0x00cc, .ghz2 = 0x00cc, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV1] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV2] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV3] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLOUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_CALEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PEAKDET1] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CORE_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF2_OVRVAL]= { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER3_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF5_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF6_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL_WAITCNT] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CORE_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_CMOS_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
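
[Editorial aside, not part of the patch.] Every table in this file follows the same pattern: each b2056_inittab_entry holds a value for 5 GHz operation, a value for 2.4 GHz operation, and an UPLOAD/NOUPLOAD marker, with the designated-initializer index doubling as the register offset. Only the UPLOAD entries appear to need programming at radio setup; the rest presumably match the chip's reset defaults. The following is a minimal, self-contained C sketch of how such a table could be walked. The struct layout, the bool flag, the write_radio_reg() stub, and the b2056_upload_table() helper are illustrative assumptions, not the driver's actual API.

    /* Editorial sketch only -- not the driver's real upload routine.
     * Entry layout mirrors the initializers in the tables above. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    struct b2056_inittab_entry {
            uint16_t ghz5;   /* value for 5 GHz operation */
            uint16_t ghz2;   /* value for 2.4 GHz operation */
            bool upload;     /* UPLOAD/NOUPLOAD flag from the tables */
    };

    /* Hypothetical register-write stub; a real driver would route this
     * through its own PHY/radio accessors. */
    static void write_radio_reg(uint16_t offset, uint16_t value)
    {
            printf("radio[0x%03x] <- 0x%04x\n", offset, value);
    }

    /* Program only the UPLOAD-flagged entries, choosing the per-band
     * value; NOUPLOAD entries are assumed to equal the reset defaults. */
    static void b2056_upload_table(const struct b2056_inittab_entry *tab,
                                   size_t len, bool is_5ghz)
    {
            for (size_t i = 0; i < len; i++) {
                    if (!tab[i].upload)
                            continue;
                    write_radio_reg((uint16_t)i,
                                    is_5ghz ? tab[i].ghz5 : tab[i].ghz2);
            }
    }

    int main(void)
    {
            /* Tiny stand-in table in the style of the initializers above. */
            static const struct b2056_inittab_entry demo[] = {
                    [1] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, .upload = true },
                    [2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, .upload = false },
            };

            b2056_upload_table(demo, sizeof(demo) / sizeof(demo[0]), true);
            return 0;
    }

Because gaps left by the designated initializers are zero-filled (and therefore NOUPLOAD), a walk over the whole array like the one above safely skips them.
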
+
+static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
+ [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_IQCAL_GAIN_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_LOFT_FINE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_FINE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_COARSE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_COARSE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_TX_COM_MASTER1] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_TX_COM_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RXIQCAL_TXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_TX_SSI_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_IQCAL_VCM_HG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_IQCAL_IDAC] = { .ghz5 = 0x0037, .ghz2 = 0x0037, NOUPLOAD, },
+ [B2056_TX_TSSI_VCM] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_TX_AMP_DET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TX_SSI_MUX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSIA] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_TSSIG] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PA_SPARE1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_TX_PA_SPARE2] = { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
+ [B2056_TX_INTPAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_TX_INTPAA_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
+ [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAA_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAG_IAUX_STAT] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAG_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_IMAIN_STAT] = { .ghz5 = 0x001e, .ghz2 = 0x001e, NOUPLOAD, },
+ [B2056_TX_INTPAG_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
+ [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAG_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PADA_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_PADA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADA_BOOST_TUNE] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
+ [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PADG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PADG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_PADG_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_PADG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PGAA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PGAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAA_BOOST_TUNE] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
+ [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PGAG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_PGAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAG_MISC] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_MIXA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_MIXA_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_TX_MIXG] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_MIXG_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_TX_BB_GM_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_GM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_TX_TXLPF_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_BW] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_0] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_1] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_2] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_4] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_5] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_6] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
+ [B2056_TX_TXLPF_OPAMP_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_TX_TXLPF_MISC] = { .ghz5 = 0x005b, .ghz2 = 0x005b, NOUPLOAD, },
+ [B2056_TX_TXSPARE1] = { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
+ [B2056_TX_TXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_INTPA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_PAD_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_PGA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_GM_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_TXLPF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC0] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC1] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC2] = { .ghz5 = 0x0071, .ghz2 = 0x0071, UPLOAD, },
+ [B2056_TX_GMBB_IDAC3] = { .ghz5 = 0x0071, .ghz2 = 0x0071, UPLOAD, },
+ [B2056_TX_GMBB_IDAC4] = { .ghz5 = 0x0072, .ghz2 = 0x0072, UPLOAD, },
+ [B2056_TX_GMBB_IDAC5] = { .ghz5 = 0x0073, .ghz2 = 0x0073, UPLOAD, },
+ [B2056_TX_GMBB_IDAC6] = { .ghz5 = 0x0074, .ghz2 = 0x0074, UPLOAD, },
+ [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
+ [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXIQCAL_RXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_RX_RSSI_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RSSI_SEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RSSI_GAIN] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
+ [B2056_RX_RSSI_NB_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2I_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2I_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2Q_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2Q_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
+ [B2056_RX_RSSI_POLE] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_RSSI_WB1_IDAC] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_RX_RSSI_MISC] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
+ [B2056_RX_LNAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_LNAA_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_RX_LNAA_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_RX_LNA_A_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_RX_LNA1A_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_LNAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_LNAG_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_RX_LNAG_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_RX_LNA_G_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_RX_LNA1G_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_MIXA_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_MIXA_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
+ [B2056_RX_MIXA_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXA_CMFB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_RX_MIXA_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_RX_MIXA_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_RX_MIXA_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_MIXG_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_LOB_BIAS] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_RX_MIXG_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_CMFB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_RX_MIXG_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_RX_MIXG_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_TIA_GAIN] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_TIA_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_BB_LPF_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_AACI_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_RXLPF_IDAC] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_RXLPF_OPAMPBIAS_LOWQ] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_RXLPF_OPAMPBIAS_HIGHQ]= { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_RXLPF_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
+ [B2056_RX_RXLPF_INVCM_BODY] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_RXLPF_CC_OP] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_RX_RXLPF_GAIN] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
+ [B2056_RX_RXLPF_Q_BW] = { .ghz5 = 0x0041, .ghz2 = 0x0041, NOUPLOAD, },
+ [B2056_RX_RXLPF_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_RX_RXLPF_RCCAL_HPC] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_RCCAL_LPC] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_UNUSED] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_VGA_MASTER] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGA_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_RX_VGA_GAIN] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_RX_VGA_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_RX_VGABUF_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGABUF_GAIN_BW] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_RX_TXFBMIX_A] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TXFBMIX_G] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_LNAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_LNAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_MIXTIA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_VGA_BUF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_Q] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_BUF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_VGA_HPC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
+ [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_PU] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_GPIO_MASTER1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_GPIO_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TOPBIAS_MASTER] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
+ [B2056_SYN_TOPBIAS_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_AFEREG] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSEIDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_TEMPPROCSENSERCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LPO] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_VDDCAL_STATUS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCAL_CODE_OUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL1] = { .ghz5 = 0x001f, .ghz2 = 0x001f, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL2] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL3] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_RCCAL_CTRL11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_ZCAL_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_ZCAL_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST1] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST2] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_PLL_MAST3] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
+ [B2056_SYN_PLL_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL1] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL3] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
+ [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
+ [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCO1] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
+ [B2056_SYN_PLL_MONITOR1] = { .ghz5 = 0x00b4, .ghz2 = 0x00b4, NOUPLOAD, },
+ [B2056_SYN_PLL_MONITOR2] = { .ghz5 = 0x00d2, .ghz2 = 0x00d2, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL4] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL6] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL7] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_SYN_PLL_VCOCAL13] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_SYN_PLL_VREG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_PLL_STATUS3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU2] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PU8] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RCCR1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF1] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER2] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
+ [B2056_SYN_LOGEN_BUF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF4] = { .ghz5 = 0x00cc, .ghz2 = 0x00cc, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV1] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV2] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_DIV3] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLOUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACLCAL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_CALEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_PEAKDET1] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CORE_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_VCOBUF2_OVRVAL]= { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_LOGEN_MIXER3_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF5_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_BUF6_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CBUFTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CMOSTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
+ [B2056_SYN_LOGEN_ACL_WAITCNT] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_SYN_LOGEN_CORE_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_RX_CMOS_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
+ [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_IQCAL_GAIN_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_LOFT_FINE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_FINE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_COARSE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_LOFT_COARSE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_TX_COM_MASTER1] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_TX_COM_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_RXIQCAL_TXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_TX_SSI_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_IQCAL_VCM_HG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_IQCAL_IDAC] = { .ghz5 = 0x0037, .ghz2 = 0x0037, NOUPLOAD, },
+ [B2056_TX_TSSI_VCM] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_TX_AMP_DET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TX_SSI_MUX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSIA] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_TSSIG] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TSSI_MISC3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PA_SPARE1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_TX_PA_SPARE2] = { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
+ [B2056_TX_INTPAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_TX_INTPAA_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+ [B2056_TX_INTPAA_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAA_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
+ [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAA_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAG_IAUX_STAT] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_TX_INTPAG_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_IMAIN_STAT] = { .ghz5 = 0x001e, .ghz2 = 0x001e, NOUPLOAD, },
+ [B2056_TX_INTPAG_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_INTPAG_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
+ [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_TX_INTPAG_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PADA_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_PADA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADA_BOOST_TUNE] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
+ [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PADG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PADG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_PADG_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_TX_PADG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PADG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+ [B2056_TX_PGAA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_TX_PGAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAA_BOOST_TUNE] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
+ [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_PGAG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_TX_PGAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_PGAG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+ [B2056_TX_PGAG_MISC] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_TX_MIXA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_MIXA_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_TX_MIXG] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_MIXG_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_TX_BB_GM_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_GM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_TX_TXLPF_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_RCCAL_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_BW] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_TX_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_0] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_1] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_2] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_4] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_5] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
+ [B2056_TX_TXLPF_IDAC_6] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
+ [B2056_TX_TXLPF_OPAMP_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_TX_TXLPF_MISC] = { .ghz5 = 0x005b, .ghz2 = 0x005b, NOUPLOAD, },
+ [B2056_TX_TXSPARE1] = { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
+ [B2056_TX_TXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_TXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_INTPA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_PAD_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_PGA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_GM_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_TXLPF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC0] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC1] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC2] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC3] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC4] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC5] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC6] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+ [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
+ [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXIQCAL_RXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
+ [B2056_RX_RSSI_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RSSI_SEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RSSI_GAIN] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
+ [B2056_RX_RSSI_NB_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2I_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2I_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2Q_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
+ [B2056_RX_RSSI_WB2Q_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
+ [B2056_RX_RSSI_POLE] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_RSSI_WB1_IDAC] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_RX_RSSI_MISC] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
+ [B2056_RX_LNAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_LNAA_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_RX_LNAA_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_RX_LNA_A_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+ [B2056_RX_LNA1A_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_LNAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_LNAG_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
+ [B2056_RX_LNAG_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
+ [B2056_RX_LNA_G_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
+ [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+ [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+ [B2056_RX_LNA1G_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
+ [B2056_RX_MIXA_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_MIXA_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
+ [B2056_RX_MIXA_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXA_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+ [B2056_RX_MIXA_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_RX_MIXA_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_RX_MIXA_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+ [B2056_RX_MIXG_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_LOB_BIAS] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
+ [B2056_RX_MIXG_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_MIXG_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_RX_MIXG_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_RX_MIXG_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+ [B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+ [B2056_RX_TIA_GAIN] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
+ [B2056_RX_TIA_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TIA_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_BB_LPF_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_AACI_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
+ [B2056_RX_RXLPF_IDAC] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_RXLPF_OPAMPBIAS_LOWQ] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_RXLPF_OPAMPBIAS_HIGHQ]= { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_RXLPF_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
+ [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
+ [B2056_RX_RXLPF_INVCM_BODY] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
+ [B2056_RX_RXLPF_CC_OP] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
+ [B2056_RX_RXLPF_GAIN] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
+ [B2056_RX_RXLPF_Q_BW] = { .ghz5 = 0x0041, .ghz2 = 0x0041, NOUPLOAD, },
+ [B2056_RX_RXLPF_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_RX_RXLPF_RCCAL_HPC] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXHPF_OFF7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_RCCAL_LPC] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXLPF_OFF_4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_UNUSED] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_VGA_MASTER] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGA_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+ [B2056_RX_VGA_GAIN] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
+ [B2056_RX_VGA_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
+ [B2056_RX_VGABUF_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
+ [B2056_RX_VGABUF_GAIN_BW] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_RX_TXFBMIX_A] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_TXFBMIX_G] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE3] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
+ [B2056_RX_RXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_RXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_LNAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_LNAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_MIXTIA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_VGA_BUF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_Q] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_BUF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_VGA_HPC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_RXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+ [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+};
+
+#define INITTABSPTS(prefix) \
+ .syn = prefix##_syn, \
+ .syn_length = ARRAY_SIZE(prefix##_syn), \
+ .tx = prefix##_tx, \
+ .tx_length = ARRAY_SIZE(prefix##_tx), \
+ .rx = prefix##_rx, \
+ .rx_length = ARRAY_SIZE(prefix##_rx)
+
+struct b2056_inittabs_pts b2056_inittabs[] = {
+ [3] = { INITTABSPTS(b2056_inittab_rev3) },
+ [4] = { INITTABSPTS(b2056_inittab_rev4) },
+ [5] = { INITTABSPTS(b2056_inittab_rev5) },
+ [6] = { INITTABSPTS(b2056_inittab_rev6) },
+ [7] = { INITTABSPTS(b2056_inittab_rev7) },
+ [8] = { INITTABSPTS(b2056_inittab_rev8) },
+ [9] = { INITTABSPTS(b2056_inittab_rev7) },
+};
+
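[Editor's note on the hunk above: the per-revision pointer array ties each 2056 radio revision to its SYN/TX/RX init tables, with revision 9 reusing the rev7 tables. A minimal sketch of how such a table could be consumed follows; it assumes the UPLOAD/NOUPLOAD markers expand to a .flags bit named B2056_INITTAB_UPLOAD (defined in the driver's header, not in this hunk) and uses a hypothetical b2056_write_reg() helper. It is not the driver's actual upload routine.]

static void b2056_upload_tx_sketch(unsigned int radio_rev, bool is_ghz5)
{
	const struct b2056_inittabs_pts *pts;
	unsigned int i;

	if (radio_rev >= ARRAY_SIZE(b2056_inittabs))
		return;				/* revision not covered above */
	pts = &b2056_inittabs[radio_rev];

	for (i = 0; i < pts->tx_length; i++) {
		const struct b2056_inittab_entry *e = &pts->tx[i];

		/* Entries marked NOUPLOAD keep the hardware default. */
		if (!(e->flags & B2056_INITTAB_UPLOAD))
			continue;
		/* Hypothetical helper: write the band-specific default value. */
		b2056_write_reg(i, is_ghz5 ? e->ghz5 : e->ghz2);
	}
}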
+#define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
+ r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
+ r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, \
+ r30, r31, r32, r33, r34, r35, r36) \
+ .radio_syn_pll_vcocal1 = r00, \
+ .radio_syn_pll_vcocal2 = r01, \
+ .radio_syn_pll_refdiv = r02, \
+ .radio_syn_pll_mmd2 = r03, \
+ .radio_syn_pll_mmd1 = r04, \
+ .radio_syn_pll_loopfilter1 = r05, \
+ .radio_syn_pll_loopfilter2 = r06, \
+ .radio_syn_pll_loopfilter3 = r07, \
+ .radio_syn_pll_loopfilter4 = r08, \
+ .radio_syn_pll_loopfilter5 = r09, \
+ .radio_syn_reserved_addr27 = r10, \
+ .radio_syn_reserved_addr28 = r11, \
+ .radio_syn_reserved_addr29 = r12, \
+ .radio_syn_logen_vcobuf1 = r13, \
+ .radio_syn_logen_mixer2 = r14, \
+ .radio_syn_logen_buf3 = r15, \
+ .radio_syn_logen_buf4 = r16, \
+ .radio_rx0_lnaa_tune = r17, \
+ .radio_rx0_lnag_tune = r18, \
+ .radio_tx0_intpaa_boost_tune = r19, \
+ .radio_tx0_intpag_boost_tune = r20, \
+ .radio_tx0_pada_boost_tune = r21, \
+ .radio_tx0_padg_boost_tune = r22, \
+ .radio_tx0_pgaa_boost_tune = r23, \
+ .radio_tx0_pgag_boost_tune = r24, \
+ .radio_tx0_mixa_boost_tune = r25, \
+ .radio_tx0_mixg_boost_tune = r26, \
+ .radio_rx1_lnaa_tune = r27, \
+ .radio_rx1_lnag_tune = r28, \
+ .radio_tx1_intpaa_boost_tune = r29, \
+ .radio_tx1_intpag_boost_tune = r30, \
+ .radio_tx1_pada_boost_tune = r31, \
+ .radio_tx1_padg_boost_tune = r32, \
+ .radio_tx1_pgaa_boost_tune = r33, \
+ .radio_tx1_pgag_boost_tune = r34, \
+ .radio_tx1_mixa_boost_tune = r35, \
+ .radio_tx1_mixg_boost_tune = r36
+
+#define PHYREGS(r0, r1, r2, r3, r4, r5) \
+ .phy_regs.phy_bw1a = r0, \
+ .phy_regs.phy_bw2 = r1, \
+ .phy_regs.phy_bw3 = r2, \
+ .phy_regs.phy_bw4 = r3, \
+ .phy_regs.phy_bw5 = r4, \
+ .phy_regs.phy_bw6 = r5
+
+/* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = {
+ { .freq = 4920,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
+ },
+ { .freq = 4930,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
+ },
+ { .freq = 4940,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
+ },
+ { .freq = 4950,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
+ },
+ { .freq = 4960,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
+ },
+ { .freq = 4970,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
+ },
+ { .freq = 4980,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
+ },
+ { .freq = 4990,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
+ },
+ { .freq = 5000,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
+ },
+ { .freq = 5010,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
+ },
+ { .freq = 5020,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
+ },
+ { .freq = 5030,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
+ },
+ { .freq = 5040,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
+ },
+ { .freq = 5050,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
+ },
+ { .freq = 5060,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
+ },
+ { .freq = 5070,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
+ },
+ { .freq = 5080,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
+ },
+ { .freq = 5090,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
+ },
+ { .freq = 5100,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xff, 0x00),
+ PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
+ },
+ { .freq = 5110,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
+ },
+ { .freq = 5120,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
+ },
+ { .freq = 5130,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
+ },
+ { .freq = 5140,
+ RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xfc, 0x00),
+ PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
+ },
+ { .freq = 5160,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
+ },
+ { .freq = 5170,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
+ },
+ { .freq = 5180,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xef, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xfc, 0x00, 0xef, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xfc, 0x00),
+ PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
+ },
+ { .freq = 5190,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xef, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xfc, 0x00, 0xef, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
+ },
+ { .freq = 5200,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xef, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xfc, 0x00, 0xef, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
+ },
+ { .freq = 5210,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xdf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
+ },
+ { .freq = 5220,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xdf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xfc, 0x00),
+ PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
+ },
+ { .freq = 5230,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xdf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
+ },
+ { .freq = 5240,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xcf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
+ },
+ { .freq = 5250,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xcf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
+ },
+ { .freq = 5260,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xcf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xfc, 0x00),
+ PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
+ },
+ { .freq = 5270,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
+ 0xff, 0xcf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
+ },
+ { .freq = 5280,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
+ 0xff, 0xbf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
+ },
+ { .freq = 5290,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
+ 0xff, 0xbf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
+ },
+ { .freq = 5300,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xbf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x05, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xfc, 0x00),
+ PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
+ },
+ { .freq = 5310,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xbf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x05, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
+ },
+ { .freq = 5320,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xbf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x05, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
+ },
+ { .freq = 5330,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xaf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xfa, 0x00, 0xaf, 0x00, 0x05, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
+ },
+ { .freq = 5340,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xaf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xfa, 0x00, 0xaf, 0x00, 0x05, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xfa, 0x00),
+ PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
+ },
+ { .freq = 5350,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0x9f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x05, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
+ },
+ { .freq = 5360,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0x9f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x05, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
+ },
+ { .freq = 5370,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0x9f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x05, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
+ },
+ { .freq = 5380,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0x9f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x05, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xfa, 0x00),
+ PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
+ },
+ { .freq = 5390,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0x8f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xfa, 0x00, 0x8f, 0x00, 0x05, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
+ },
+ { .freq = 5400,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x8f, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
+ 0x00, 0xfa, 0x00, 0x8f, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x08, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
+ },
+ { .freq = 5410,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x8f, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
+ 0x00, 0xfa, 0x00, 0x8f, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x08, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
+ },
+ { .freq = 5420,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x8e, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
+ 0x00, 0xfa, 0x00, 0x8e, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x08, 0x00, 0xfa, 0x00),
+ PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
+ },
+ { .freq = 5430,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x8e, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
+ 0x00, 0xfa, 0x00, 0x8e, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x08, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
+ },
+ { .freq = 5440,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x7e, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
+ 0x00, 0xfa, 0x00, 0x7e, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x08, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
+ },
+ { .freq = 5450,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x7d, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
+ 0x00, 0xfa, 0x00, 0x7d, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x08, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
+ },
+ { .freq = 5460,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x6d, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
+ 0x00, 0xf8, 0x00, 0x6d, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x08, 0x00, 0xf8, 0x00),
+ PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
+ },
+ { .freq = 5470,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x6d, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
+ 0x00, 0xf8, 0x00, 0x6d, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x08, 0x00, 0xf8, 0x00),
+ PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
+ },
+ { .freq = 5480,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x5d, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
+ 0x00, 0xf8, 0x00, 0x5d, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x08, 0x00, 0xf8, 0x00),
+ PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
+ },
+ { .freq = 5490,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x5c, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
+ 0x00, 0xf8, 0x00, 0x5c, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x08, 0x00, 0xf8, 0x00),
+ PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
+ },
+ { .freq = 5500,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x5c, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x5c, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
+ },
+ { .freq = 5510,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x4c, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x4c, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
+ },
+ { .freq = 5520,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x4c, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x4c, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
+ },
+ { .freq = 5530,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x3b, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x3b, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
+ },
+ { .freq = 5540,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x3b, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x3b, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
+ },
+ { .freq = 5550,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x3b, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x3b, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
+ },
+ { .freq = 5560,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x2b, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x2b, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
+ },
+ { .freq = 5570,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x2a, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x2a, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
+ },
+ { .freq = 5580,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x1a, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x1a, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
+ },
+ { .freq = 5590,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x1a, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x1a, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
+ },
+ { .freq = 5600,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x1a, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x1a, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
+ },
+ { .freq = 5610,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x19, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x19, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
+ },
+ { .freq = 5620,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x19, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x19, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
+ },
+ { .freq = 5630,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x09, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x09, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
+ },
+ { .freq = 5640,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x09, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x09, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
+ },
+ { .freq = 5650,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x08, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf8, 0x00, 0x08, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf8, 0x00),
+ PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
+ },
+ { .freq = 5660,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x08, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf6, 0x00, 0x08, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
+ },
+ { .freq = 5670,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x08, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf6, 0x00, 0x08, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
+ },
+ { .freq = 5680,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x08, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf6, 0x00, 0x08, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
+ },
+ { .freq = 5690,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x07, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf6, 0x00, 0x07, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
+ },
+ { .freq = 5700,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x07, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf6, 0x00, 0x07, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
+ },
+ { .freq = 5710,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x07, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x07, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
+ },
+ { .freq = 5720,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x07, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x07, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
+ },
+ { .freq = 5725,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
+ },
+ { .freq = 5730,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
+ },
+ { .freq = 5735,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
+ },
+ { .freq = 5740,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
+ },
+ { .freq = 5745,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
+ },
+ { .freq = 5750,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
+ },
+ { .freq = 5755,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
+ },
+ { .freq = 5760,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
+ },
+ { .freq = 5765,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
+ },
+ { .freq = 5770,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
+ },
+ { .freq = 5775,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
+ },
+ { .freq = 5780,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
+ },
+ { .freq = 5785,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
+ 0x40, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
+ },
+ { .freq = 5790,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
+ 0x40, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
+ },
+ { .freq = 5795,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
+ 0x40, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
+ },
+ { .freq = 5800,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x04, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
+ },
+ { .freq = 5805,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x04, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
+ },
+ { .freq = 5810,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x04, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
+ },
+ { .freq = 5815,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x04, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
+ },
+ { .freq = 5820,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
+ },
+ { .freq = 5825,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
+ },
+ { .freq = 5830,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
+ },
+ { .freq = 5840,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
+ },
+ { .freq = 5850,
+ RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf4, 0x00),
+ PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
+ },
+ { .freq = 5860,
+ RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf2, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf2, 0x00),
+ PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
+ },
+ { .freq = 5870,
+ RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf2, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf2, 0x00),
+ PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
+ },
+ { .freq = 5880,
+ RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf2, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf2, 0x00),
+ PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
+ },
+ { .freq = 5890,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
+ 0x00, 0xf2, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x06, 0x00, 0xf2, 0x00),
+ PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
+ },
+ { .freq = 5900,
+ RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x05,
+ 0x00, 0xf2, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x05, 0x00, 0xf2, 0x00),
+ PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
+ },
+ { .freq = 5910,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x05,
+ 0x00, 0xf2, 0x00, 0x01, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x05, 0x00, 0xf2, 0x00),
+ PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
+ },
+ { .freq = 2412,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xff, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0f, 0x00, 0xff, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0f),
+ PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+ },
+ { .freq = 2417,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xff, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0f, 0x00, 0xff, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0f),
+ PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+ },
+ { .freq = 2422,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xff, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0f, 0x00, 0xff, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0f),
+ PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+ },
+ { .freq = 2427,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xfd, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0f, 0x00, 0xfd, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0f),
+ PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+ },
+ { .freq = 2432,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xfb, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0f, 0x00, 0xfb, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0f),
+ PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+ },
+ { .freq = 2437,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xfa, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0f, 0x00, 0xfa, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0f),
+ PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+ },
+ { .freq = 2442,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf8, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0f, 0x00, 0xf8, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0f),
+ PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+ },
+ { .freq = 2447,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf7, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0f, 0x00, 0xf7, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0f),
+ PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+ },
+ { .freq = 2452,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf6, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0f, 0x00, 0xf6, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0f),
+ PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+ },
+ { .freq = 2457,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf5, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0d, 0x00, 0xf5, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0d),
+ PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+ },
+ { .freq = 2462,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf4, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0d, 0x00, 0xf4, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0d),
+ PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+ },
+ { .freq = 2467,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf3, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0d, 0x00, 0xf3, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0d),
+ PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+ },
+ { .freq = 2472,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf2, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0d, 0x00, 0xf2, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0d),
+ PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+ },
+ { .freq = 2484,
+ RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf0, 0x00, 0x05, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0d, 0x00, 0xf0, 0x00, 0x05, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0d),
+ PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
+ },
+};
+
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] = {
+ { .freq = 4920,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
+ },
+ { .freq = 4930,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
+ },
+ { .freq = 4940,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
+ },
+ { .freq = 4950,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
+ },
+ { .freq = 4960,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
+ },
+ { .freq = 4970,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
+ },
+ { .freq = 4980,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
+ },
+ { .freq = 4990,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
+ },
+ { .freq = 5000,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
+ },
+ { .freq = 5010,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
+ },
+ { .freq = 5020,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
+ },
+ { .freq = 5030,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
+ },
+ { .freq = 5040,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
+ },
+ { .freq = 5050,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
+ },
+ { .freq = 5060,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
+ },
+ { .freq = 5070,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
+ },
+ { .freq = 5080,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
+ },
+ { .freq = 5090,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xff, 0x00),
+ PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
+ },
+ { .freq = 5100,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfe, 0x00),
+ PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
+ },
+ { .freq = 5110,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfe, 0x00),
+ PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
+ },
+ { .freq = 5120,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfe, 0x00),
+ PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
+ },
+ { .freq = 5130,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfe, 0x00),
+ PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
+ },
+ { .freq = 5140,
+ RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfe, 0x00),
+ PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
+ },
+ { .freq = 5160,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfe, 0x00),
+ PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
+ },
+ { .freq = 5170,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfe, 0x00),
+ PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
+ },
+ { .freq = 5180,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xef, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfe, 0x00, 0xef, 0x00, 0x0c, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfe, 0x00),
+ PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
+ },
+ { .freq = 5190,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xef, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfe, 0x00, 0xef, 0x00, 0x0c, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfe, 0x00),
+ PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
+ },
+ { .freq = 5200,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xef, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfc, 0x00, 0xef, 0x00, 0x0a, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
+ },
+ { .freq = 5210,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xdf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x0a, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
+ },
+ { .freq = 5220,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xdf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x0a, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfc, 0x00),
+ PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
+ },
+ { .freq = 5230,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xdf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x0a, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
+ },
+ { .freq = 5240,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xcf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x0a, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
+ },
+ { .freq = 5250,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xcf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x0a, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
+ },
+ { .freq = 5260,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xcf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x0a, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfc, 0x00),
+ PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
+ },
+ { .freq = 5270,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
+ 0xff, 0xcf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x0a, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
+ },
+ { .freq = 5280,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
+ 0xff, 0xbf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x0a, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
+ },
+ { .freq = 5290,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
+ 0xff, 0xbf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x0a, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfc, 0x00),
+ PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
+ },
+ { .freq = 5300,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xbf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfa, 0x00),
+ PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
+ },
+ { .freq = 5310,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xbf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
+ },
+ { .freq = 5320,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xbf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
+ },
+ { .freq = 5330,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xaf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfa, 0x00, 0xaf, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
+ },
+ { .freq = 5340,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xaf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfa, 0x00, 0xaf, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfa, 0x00),
+ PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
+ },
+ { .freq = 5350,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0x9f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
+ },
+ { .freq = 5360,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0x9f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
+ },
+ { .freq = 5370,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0x9f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
+ },
+ { .freq = 5380,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0x9f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfa, 0x00),
+ PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
+ },
+ { .freq = 5390,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0x8f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xfa, 0x00, 0x8f, 0x00, 0x08, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xfa, 0x00),
+ PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
+ },
+ { .freq = 5400,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x8f, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xf8, 0x00, 0x8f, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xf8, 0x00),
+ PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
+ },
+ { .freq = 5410,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x8f, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xf8, 0x00, 0x8f, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xf8, 0x00),
+ PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
+ },
+ { .freq = 5420,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x8e, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xf8, 0x00, 0x8e, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xf8, 0x00),
+ PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
+ },
+ { .freq = 5430,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x8e, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xf8, 0x00, 0x8e, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xf8, 0x00),
+ PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
+ },
+ { .freq = 5440,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x7e, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xf8, 0x00, 0x7e, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xf8, 0x00),
+ PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
+ },
+ { .freq = 5450,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x7d, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xf8, 0x00, 0x7d, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xf8, 0x00),
+ PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
+ },
+ { .freq = 5460,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x6d, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xf8, 0x00, 0x6d, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xf8, 0x00),
+ PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
+ },
+ { .freq = 5470,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x6d, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xf8, 0x00, 0x6d, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xf8, 0x00),
+ PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
+ },
+ { .freq = 5480,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x5d, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xf8, 0x00, 0x5d, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xf8, 0x00),
+ PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
+ },
+ { .freq = 5490,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x5c, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
+ 0x00, 0xf8, 0x00, 0x5c, 0x00, 0x07, 0x00, 0x7f,
+ 0x00, 0x0f, 0x00, 0xf8, 0x00),
+ PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
+ },
+ { .freq = 5500,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x5c, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
+ 0x00, 0xf6, 0x00, 0x5c, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0d, 0x00, 0xf6, 0x00),
+ PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
+ },
+ { .freq = 5510,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x4c, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
+ 0x00, 0xf6, 0x00, 0x4c, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0d, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
+ },
+ { .freq = 5520,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x4c, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
+ 0x00, 0xf6, 0x00, 0x4c, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0d, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
+ },
+ { .freq = 5530,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x3b, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
+ 0x00, 0xf6, 0x00, 0x3b, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0d, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
+ },
+ { .freq = 5540,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x3b, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
+ 0x00, 0xf6, 0x00, 0x3b, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0d, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
+ },
+ { .freq = 5550,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x3b, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
+ 0x00, 0xf6, 0x00, 0x3b, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0d, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
+ },
+ { .freq = 5560,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x2b, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
+ 0x00, 0xf6, 0x00, 0x2b, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0d, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
+ },
+ { .freq = 5570,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x2a, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
+ 0x00, 0xf6, 0x00, 0x2a, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0d, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
+ },
+ { .freq = 5580,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x1a, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
+ 0x00, 0xf6, 0x00, 0x1a, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0d, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
+ },
+ { .freq = 5590,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x1a, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
+ 0x00, 0xf6, 0x00, 0x1a, 0x00, 0x06, 0x00, 0x7f,
+ 0x00, 0x0d, 0x00, 0xf6, 0x00),
+ PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
+ },
+ { .freq = 5600,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x1a, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xf4, 0x00, 0x1a, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
+ },
+ { .freq = 5610,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x19, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xf4, 0x00, 0x19, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
+ },
+ { .freq = 5620,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x19, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xf4, 0x00, 0x19, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
+ },
+ { .freq = 5630,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x09, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xf4, 0x00, 0x09, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
+ },
+ { .freq = 5640,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x09, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xf4, 0x00, 0x09, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
+ },
+ { .freq = 5650,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x08, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xf4, 0x00, 0x08, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
+ },
+ { .freq = 5660,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x08, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xf4, 0x00, 0x08, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
+ },
+ { .freq = 5670,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x08, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xf4, 0x00, 0x08, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
+ },
+ { .freq = 5680,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x08, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xf4, 0x00, 0x08, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
+ },
+ { .freq = 5690,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x07, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
+ 0x00, 0xf4, 0x00, 0x07, 0x00, 0x04, 0x00, 0x7f,
+ 0x00, 0x0b, 0x00, 0xf4, 0x00),
+ PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
+ },
+ { .freq = 5700,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x07, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x07, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
+ },
+ { .freq = 5710,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x07, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x07, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
+ },
+ { .freq = 5720,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x07, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x07, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
+ },
+ { .freq = 5725,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
+ },
+ { .freq = 5730,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
+ },
+ { .freq = 5735,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
+ },
+ { .freq = 5740,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
+ },
+ { .freq = 5745,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
+ },
+ { .freq = 5750,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
+ },
+ { .freq = 5755,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
+ },
+ { .freq = 5760,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
+ },
+ { .freq = 5765,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
+ },
+ { .freq = 5770,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
+ },
+ { .freq = 5775,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
+ },
+ { .freq = 5780,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
+ },
+ { .freq = 5785,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
+ 0x40, 0x04, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x04, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
+ },
+ { .freq = 5790,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
+ 0x40, 0x04, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x04, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
+ },
+ { .freq = 5795,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
+ 0x40, 0x04, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
+ 0x00, 0xf2, 0x00, 0x04, 0x00, 0x03, 0x00, 0x7f,
+ 0x00, 0x0a, 0x00, 0xf2, 0x00),
+ PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
+ },
+ { .freq = 5800,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xf0, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xf0, 0x00),
+ PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
+ },
+ { .freq = 5805,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xf0, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xf0, 0x00),
+ PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
+ },
+ { .freq = 5810,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xf0, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xf0, 0x00),
+ PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
+ },
+ { .freq = 5815,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xf0, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xf0, 0x00),
+ PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
+ },
+ { .freq = 5820,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xf0, 0x00),
+ PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
+ },
+ { .freq = 5825,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xf0, 0x00),
+ PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
+ },
+ { .freq = 5830,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xf0, 0x00),
+ PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
+ },
+ { .freq = 5840,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xf0, 0x00),
+ PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
+ },
+ { .freq = 5850,
+ RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xf0, 0x00),
+ PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
+ },
+ { .freq = 5860,
+ RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xf0, 0x00),
+ PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
+ },
+ { .freq = 5870,
+ RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x02, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xf0, 0x00, 0x02, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xf0, 0x00),
+ PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
+ },
+ { .freq = 5880,
+ RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x02, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xf0, 0x00, 0x02, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xf0, 0x00),
+ PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
+ },
+ { .freq = 5890,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x02, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
+ 0x00, 0xf0, 0x00, 0x02, 0x00, 0x02, 0x00, 0x7f,
+ 0x00, 0x09, 0x00, 0xf0, 0x00),
+ PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
+ },
+ { .freq = 5900,
+ RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf0, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf0, 0x00),
+ PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
+ },
+ { .freq = 5910,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x07,
+ 0x00, 0xf0, 0x00, 0x01, 0x00, 0x00, 0x00, 0x7f,
+ 0x00, 0x07, 0x00, 0xf0, 0x00),
+ PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
+ },
+ { .freq = 2412,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xff, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xff, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+ },
+ { .freq = 2417,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xff, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xff, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+ },
+ { .freq = 2422,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xff, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xff, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+ },
+ { .freq = 2427,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xfd, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xfd, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+ },
+ { .freq = 2432,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xfb, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xfb, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+ },
+ { .freq = 2437,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xfa, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xfa, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+ },
+ { .freq = 2442,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf8, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xf8, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+ },
+ { .freq = 2447,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf7, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xf7, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+ },
+ { .freq = 2452,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf6, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xf6, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+ },
+ { .freq = 2457,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf5, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xf5, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+ },
+ { .freq = 2462,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf4, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xf4, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+ },
+ { .freq = 2467,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf3, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xf3, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+ },
+ { .freq = 2472,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf2, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xf2, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+ },
+ { .freq = 2484,
+ RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0xf0, 0x00, 0x04, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0e, 0x00, 0xf0, 0x00, 0x04, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0e),
+ PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
+ },
+};
+
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] = {
+ { .freq = 4920,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0f,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
+ },
+ { .freq = 4930,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
+ },
+ { .freq = 4940,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
+ },
+ { .freq = 4950,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
+ },
+ { .freq = 4960,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0e,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
+ },
+ { .freq = 4970,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
+ },
+ { .freq = 4980,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
+ },
+ { .freq = 4990,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
+ },
+ { .freq = 5000,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
+ },
+ { .freq = 5010,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
+ },
+ { .freq = 5020,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0d,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x09, 0x00, 0x70,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
+ },
+ { .freq = 5030,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x09, 0x00, 0x70,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
+ },
+ { .freq = 5040,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
+ 0x00, 0x9f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x70,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
+ },
+ { .freq = 5050,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
+ 0x00, 0x9f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x70,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
+ },
+ { .freq = 5060,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
+ 0x00, 0x9f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x70,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
+ },
+ { .freq = 5070,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
+ 0x00, 0x9f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
+ },
+ { .freq = 5080,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
+ 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
+ },
+ { .freq = 5090,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
+ 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
+ },
+ { .freq = 5100,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
+ 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
+ },
+ { .freq = 5110,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
+ 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
+ },
+ { .freq = 5120,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
+ 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
+ },
+ { .freq = 5130,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfb, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0a,
+ 0x00, 0x9f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
+ },
+ { .freq = 5140,
+ RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfb, 0x00, 0x07, 0x00, 0x70, 0x00, 0x0a,
+ 0x00, 0x9f, 0x00, 0xfb, 0x00, 0x07, 0x00, 0x70,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
+ },
+ { .freq = 5160,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfb, 0x00, 0x07, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfb, 0x00, 0x07, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
+ },
+ { .freq = 5170,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfb, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfb, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
+ },
+ { .freq = 5180,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
+ },
+ { .freq = 5190,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
+ },
+ { .freq = 5200,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
+ },
+ { .freq = 5210,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
+ },
+ { .freq = 5220,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
+ },
+ { .freq = 5230,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xea, 0x00, 0x06, 0x00, 0x70, 0x00, 0x08,
+ 0x00, 0x9e, 0x00, 0xea, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x08, 0x00, 0x6e, 0x00),
+ PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
+ },
+ { .freq = 5240,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xe9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
+ 0x00, 0x9d, 0x00, 0xe9, 0x00, 0x05, 0x00, 0x70,
+ 0x00, 0x08, 0x00, 0x6d, 0x00),
+ PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
+ },
+ { .freq = 5250,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xe9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
+ 0x00, 0x9d, 0x00, 0xe9, 0x00, 0x05, 0x00, 0x70,
+ 0x00, 0x08, 0x00, 0x6d, 0x00),
+ PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
+ },
+ { .freq = 5260,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xd9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
+ 0x00, 0x9d, 0x00, 0xd9, 0x00, 0x05, 0x00, 0x70,
+ 0x00, 0x08, 0x00, 0x6d, 0x00),
+ PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
+ },
+ { .freq = 5270,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
+ 0xff, 0xd8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9c, 0x00, 0xd8, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
+ },
+ { .freq = 5280,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
+ 0xff, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
+ },
+ { .freq = 5290,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
+ 0xff, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
+ },
+ { .freq = 5300,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6c, 0x00),
+ PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
+ },
+ { .freq = 5310,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
+ },
+ { .freq = 5320,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xb8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9c, 0x00, 0xb8, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
+ },
+ { .freq = 5330,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xb7, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9b, 0x00, 0xb7, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
+ },
+ { .freq = 5340,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xb7, 0x00, 0x03, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9b, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6b, 0x00),
+ PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
+ },
+ { .freq = 5350,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xa7, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
+ 0x00, 0x9b, 0x00, 0xa7, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x06, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
+ },
+ { .freq = 5360,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xa6, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
+ 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x06, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
+ },
+ { .freq = 5370,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0xa6, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
+ 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x06, 0x00, 0x5b, 0x00),
+ PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
+ },
+ { .freq = 5380,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0x96, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
+ 0x00, 0x9a, 0x00, 0x96, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x06, 0x00, 0x5a, 0x00),
+ PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
+ },
+ { .freq = 5390,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
+ 0xff, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
+ 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x06, 0x00, 0x5a, 0x00),
+ PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
+ },
+ { .freq = 5400,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
+ 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x06, 0x00, 0x5a, 0x00),
+ PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
+ },
+ { .freq = 5410,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x05,
+ 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x05, 0x00, 0x5a, 0x00),
+ PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
+ },
+ { .freq = 5420,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x05,
+ 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x05, 0x00, 0x5a, 0x00),
+ PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
+ },
+ { .freq = 5430,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x85, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
+ 0x00, 0x99, 0x00, 0x85, 0x00, 0x02, 0x00, 0x70,
+ 0x00, 0x05, 0x00, 0x59, 0x00),
+ PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
+ },
+ { .freq = 5440,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
+ 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
+ 0x00, 0x05, 0x00, 0x59, 0x00),
+ PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
+ },
+ { .freq = 5450,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
+ 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
+ 0x00, 0x05, 0x00, 0x59, 0x00),
+ PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
+ },
+ { .freq = 5460,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
+ },
+ { .freq = 5470,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x74, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x99, 0x00, 0x74, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
+ },
+ { .freq = 5480,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
+ },
+ { .freq = 5490,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
+ 0xc8, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
+ },
+ { .freq = 5500,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x78, 0x00),
+ PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
+ },
+ { .freq = 5510,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x78, 0x00),
+ PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
+ },
+ { .freq = 5520,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x78, 0x00),
+ PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
+ },
+ { .freq = 5530,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x63, 0x00, 0x01, 0x00, 0x70, 0x00, 0x03,
+ 0x00, 0x98, 0x00, 0x63, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x03, 0x00, 0x78, 0x00),
+ PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
+ },
+ { .freq = 5540,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
+ 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x03, 0x00, 0x77, 0x00),
+ PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
+ },
+ { .freq = 5550,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
+ 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x03, 0x00, 0x77, 0x00),
+ PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
+ },
+ { .freq = 5560,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
+ 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x03, 0x00, 0x77, 0x00),
+ PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
+ },
+ { .freq = 5570,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x52, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x52, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x76, 0x00),
+ PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
+ },
+ { .freq = 5580,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x52, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x52, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x76, 0x00),
+ PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
+ },
+ { .freq = 5590,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
+ 0x84, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x76, 0x00),
+ PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
+ },
+ { .freq = 5600,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x76, 0x00),
+ PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
+ },
+ { .freq = 5610,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x76, 0x00),
+ PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
+ },
+ { .freq = 5620,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x76, 0x00),
+ PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
+ },
+ { .freq = 5630,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x76, 0x00),
+ PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
+ },
+ { .freq = 5640,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x95, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x75, 0x00),
+ PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
+ },
+ { .freq = 5650,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x50, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x95, 0x00, 0x50, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x75, 0x00),
+ PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
+ },
+ { .freq = 5660,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x50, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x95, 0x00, 0x50, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x75, 0x00),
+ PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
+ },
+ { .freq = 5670,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x74, 0x00),
+ PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
+ },
+ { .freq = 5680,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x74, 0x00),
+ PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
+ },
+ { .freq = 5690,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
+ 0x70, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x74, 0x00),
+ PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
+ },
+ { .freq = 5700,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x74, 0x00),
+ PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
+ },
+ { .freq = 5710,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x74, 0x00),
+ PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
+ },
+ { .freq = 5720,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x74, 0x00),
+ PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
+ },
+ { .freq = 5725,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x74, 0x00),
+ PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
+ },
+ { .freq = 5730,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x84, 0x00),
+ PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
+ },
+ { .freq = 5735,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x83, 0x00),
+ PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
+ },
+ { .freq = 5740,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x83, 0x00),
+ PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
+ },
+ { .freq = 5745,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x83, 0x00),
+ PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
+ },
+ { .freq = 5750,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x83, 0x00),
+ PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
+ },
+ { .freq = 5755,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x83, 0x00),
+ PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
+ },
+ { .freq = 5760,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x93, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x83, 0x00),
+ PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
+ },
+ { .freq = 5765,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x82, 0x00),
+ PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
+ },
+ { .freq = 5770,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x82, 0x00),
+ PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
+ },
+ { .freq = 5775,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x82, 0x00),
+ PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
+ },
+ { .freq = 5780,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
+ 0x40, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x82, 0x00),
+ PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
+ },
+ { .freq = 5785,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
+ 0x40, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x82, 0x00),
+ PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
+ },
+ { .freq = 5790,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
+ 0x40, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x82, 0x00),
+ PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
+ },
+ { .freq = 5795,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
+ 0x40, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x82, 0x00),
+ PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
+ },
+ { .freq = 5800,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x82, 0x00),
+ PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
+ },
+ { .freq = 5805,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x82, 0x00),
+ PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
+ },
+ { .freq = 5810,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x82, 0x00),
+ PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
+ },
+ { .freq = 5815,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x82, 0x00),
+ PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
+ },
+ { .freq = 5820,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x82, 0x00),
+ PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
+ },
+ { .freq = 5825,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x82, 0x00),
+ PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
+ },
+ { .freq = 5830,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x72, 0x00),
+ PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
+ },
+ { .freq = 5840,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x72, 0x00),
+ PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
+ },
+ { .freq = 5850,
+ RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x72, 0x00),
+ PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
+ },
+ { .freq = 5860,
+ RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x72, 0x00),
+ PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
+ },
+ { .freq = 5870,
+ RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x71, 0x00),
+ PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
+ },
+ { .freq = 5880,
+ RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x71, 0x00),
+ PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
+ },
+ { .freq = 5890,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x71, 0x00),
+ PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
+ },
+ { .freq = 5900,
+ RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x71, 0x00),
+ PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
+ },
+ { .freq = 5910,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x71, 0x00),
+ PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
+ },
+ { .freq = 2412,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x1f, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0b, 0x00, 0x1f, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0b),
+ PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+ },
+ { .freq = 2417,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x1f, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0a, 0x00, 0x1f, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0a),
+ PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+ },
+ { .freq = 2422,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x0e, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0a, 0x00, 0x0e, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0a),
+ PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+ },
+ { .freq = 2427,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x0d, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0e, 0x00, 0x0a, 0x00, 0x0d, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0e, 0x00, 0x0a),
+ PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+ },
+ { .freq = 2432,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x0c, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0e, 0x00, 0x0a, 0x00, 0x0c, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0e, 0x00, 0x0a),
+ PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+ },
+ { .freq = 2437,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x0b, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0e, 0x00, 0x0a, 0x00, 0x0b, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0e, 0x00, 0x0a),
+ PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+ },
+ { .freq = 2442,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x09, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0e, 0x00, 0x0a, 0x00, 0x09, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0e, 0x00, 0x0a),
+ PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+ },
+ { .freq = 2447,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x08, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0e, 0x00, 0x09, 0x00, 0x08, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0e, 0x00, 0x09),
+ PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+ },
+ { .freq = 2452,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x07, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0e, 0x00, 0x09, 0x00, 0x07, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0e, 0x00, 0x09),
+ PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+ },
+ { .freq = 2457,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x06, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0d, 0x00, 0x09, 0x00, 0x06, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0d, 0x00, 0x09),
+ PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+ },
+ { .freq = 2462,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x05, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0d, 0x00, 0x09, 0x00, 0x05, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0d, 0x00, 0x09),
+ PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+ },
+ { .freq = 2467,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x04, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0d, 0x00, 0x08, 0x00, 0x04, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0d, 0x00, 0x08),
+ PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+ },
+ { .freq = 2472,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x03, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0d, 0x00, 0x08, 0x00, 0x03, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0d, 0x00, 0x08),
+ PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+ },
+ { .freq = 2484,
+ RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0d, 0x00, 0x08, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0d, 0x00, 0x08),
+ PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
+ },
+};
+
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] = {
+ { .freq = 4920,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
+ },
+ { .freq = 4930,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
+ },
+ { .freq = 4940,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
+ },
+ { .freq = 4950,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
+ },
+ { .freq = 4960,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
+ },
+ { .freq = 4970,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
+ },
+ { .freq = 4980,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
+ },
+ { .freq = 4990,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
+ },
+ { .freq = 5000,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
+ },
+ { .freq = 5010,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
+ },
+ { .freq = 5020,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
+ },
+ { .freq = 5030,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
+ },
+ { .freq = 5040,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
+ },
+ { .freq = 5050,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
+ },
+ { .freq = 5060,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
+ },
+ { .freq = 5070,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
+ },
+ { .freq = 5080,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
+ },
+ { .freq = 5090,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
+ },
+ { .freq = 5100,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
+ },
+ { .freq = 5110,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
+ },
+ { .freq = 5120,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
+ },
+ { .freq = 5130,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
+ },
+ { .freq = 5140,
+ RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfb, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
+ },
+ { .freq = 5160,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
+ 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
+ },
+ { .freq = 5170,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
+ 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
+ },
+ { .freq = 5180,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
+ },
+ { .freq = 5190,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
+ },
+ { .freq = 5200,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
+ },
+ { .freq = 5210,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
+ },
+ { .freq = 5220,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
+ },
+ { .freq = 5230,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xee, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
+ },
+ { .freq = 5240,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xee, 0xc8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xc8, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
+ },
+ { .freq = 5250,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xed, 0xc7, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
+ },
+ { .freq = 5260,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
+ 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
+ },
+ { .freq = 5270,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
+ 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
+ },
+ { .freq = 5280,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
+ },
+ { .freq = 5290,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
+ },
+ { .freq = 5300,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
+ },
+ { .freq = 5310,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
+ },
+ { .freq = 5320,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdb, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
+ },
+ { .freq = 5330,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xcb, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+ 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
+ },
+ { .freq = 5340,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+ 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
+ },
+ { .freq = 5350,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+ 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
+ },
+ { .freq = 5360,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
+ },
+ { .freq = 5370,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
+ },
+ { .freq = 5380,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
+ },
+ { .freq = 5390,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
+ },
+ { .freq = 5400,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
+ },
+ { .freq = 5410,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
+ },
+ { .freq = 5420,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xa7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
+ },
+ { .freq = 5430,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
+ 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
+ },
+ { .freq = 5440,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
+ },
+ { .freq = 5450,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
+ },
+ { .freq = 5460,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
+ },
+ { .freq = 5470,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x94, 0x73, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x01, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
+ },
+ { .freq = 5480,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x84, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
+ },
+ { .freq = 5490,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x83, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
+ },
+ { .freq = 5500,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
+ },
+ { .freq = 5510,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
+ },
+ { .freq = 5520,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
+ },
+ { .freq = 5530,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+ 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
+ },
+ { .freq = 5540,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+ 0x71, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
+ },
+ { .freq = 5550,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
+ },
+ { .freq = 5560,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
+ },
+ { .freq = 5570,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
+ },
+ { .freq = 5580,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+ 0x60, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
+ },
+ { .freq = 5590,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+ 0x50, 0x61, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x61, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
+ },
+ { .freq = 5600,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
+ },
+ { .freq = 5610,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
+ },
+ { .freq = 5620,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
+ },
+ { .freq = 5630,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
+ },
+ { .freq = 5640,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
+ },
+ { .freq = 5650,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
+ },
+ { .freq = 5660,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
+ },
+ { .freq = 5670,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
+ },
+ { .freq = 5680,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
+ },
+ { .freq = 5690,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
+ },
+ { .freq = 5700,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
+ },
+ { .freq = 5710,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
+ },
+ { .freq = 5720,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
+ },
+ { .freq = 5725,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
+ },
+ { .freq = 5730,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
+ },
+ { .freq = 5735,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6d, 0x00),
+ PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
+ },
+ { .freq = 5740,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6d, 0x00),
+ PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
+ },
+ { .freq = 5745,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6d, 0x00),
+ PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
+ },
+ { .freq = 5750,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6d, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6d, 0x00),
+ PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
+ },
+ { .freq = 5755,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
+ },
+ { .freq = 5760,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+ 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
+ },
+ { .freq = 5765,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
+ },
+ { .freq = 5770,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
+ },
+ { .freq = 5775,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
+ },
+ { .freq = 5780,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
+ },
+ { .freq = 5785,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
+ },
+ { .freq = 5790,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
+ },
+ { .freq = 5795,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
+ },
+ { .freq = 5800,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
+ },
+ { .freq = 5805,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
+ },
+ { .freq = 5810,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
+ },
+ { .freq = 5815,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
+ },
+ { .freq = 5820,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
+ },
+ { .freq = 5825,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x69, 0x00),
+ PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
+ },
+ { .freq = 5830,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x69, 0x00),
+ PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
+ },
+ { .freq = 5840,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
+ },
+ { .freq = 5850,
+ RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
+ },
+ { .freq = 5860,
+ RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
+ },
+ { .freq = 5870,
+ RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
+ },
+ { .freq = 5880,
+ RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
+ },
+ { .freq = 5890,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
+ },
+ { .freq = 5900,
+ RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
+ },
+ { .freq = 5910,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
+ },
+ { .freq = 2412,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+ },
+ { .freq = 2417,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+ },
+ { .freq = 2422,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x67, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+ },
+ { .freq = 2427,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x57, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+ },
+ { .freq = 2432,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x56, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+ },
+ { .freq = 2437,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x46, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+ },
+ { .freq = 2442,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x45, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+ },
+ { .freq = 2447,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+ },
+ { .freq = 2452,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x23, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+ },
+ { .freq = 2457,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x12, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+ },
+ { .freq = 2462,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x02, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+ },
+ { .freq = 2467,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x01, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+ },
+ { .freq = 2472,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x01, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+ },
+ { .freq = 2484,
+ RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
+ },
+};
+
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[] = {
+ { .freq = 4920,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0f,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
+ },
+ { .freq = 4930,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
+ },
+ { .freq = 4940,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
+ },
+ { .freq = 4950,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
+ },
+ { .freq = 4960,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0e,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
+ },
+ { .freq = 4970,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
+ },
+ { .freq = 4980,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
+ },
+ { .freq = 4990,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
+ },
+ { .freq = 5000,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
+ },
+ { .freq = 5010,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
+ },
+ { .freq = 5020,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0d,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x09, 0x00, 0x70,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
+ },
+ { .freq = 5030,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xff, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
+ 0x00, 0x9f, 0x00, 0xff, 0x00, 0x09, 0x00, 0x70,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
+ },
+ { .freq = 5040,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
+ 0x00, 0x9f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x70,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
+ },
+ { .freq = 5050,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
+ 0x00, 0x9f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x70,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
+ },
+ { .freq = 5060,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
+ 0x00, 0x9f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x70,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
+ },
+ { .freq = 5070,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
+ 0x00, 0x9f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
+ },
+ { .freq = 5080,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
+ 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
+ },
+ { .freq = 5090,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
+ 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
+ },
+ { .freq = 5100,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
+ 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
+ },
+ { .freq = 5110,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
+ 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
+ },
+ { .freq = 5120,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
+ 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
+ },
+ { .freq = 5130,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfb, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0a,
+ 0x00, 0x9f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x70,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
+ },
+ { .freq = 5140,
+ RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfb, 0x00, 0x07, 0x00, 0x70, 0x00, 0x0a,
+ 0x00, 0x9f, 0x00, 0xfb, 0x00, 0x07, 0x00, 0x70,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
+ },
+ { .freq = 5160,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfb, 0x00, 0x07, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfb, 0x00, 0x07, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
+ },
+ { .freq = 5170,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfb, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfb, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
+ },
+ { .freq = 5180,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
+ },
+ { .freq = 5190,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
+ },
+ { .freq = 5200,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
+ },
+ { .freq = 5210,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
+ },
+ { .freq = 5220,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xfe, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
+ 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x09, 0x00, 0x6e, 0x00),
+ PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
+ },
+ { .freq = 5230,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xee, 0xea, 0x00, 0x06, 0x00, 0x70, 0x00, 0x08,
+ 0x00, 0x9e, 0x00, 0xea, 0x00, 0x06, 0x00, 0x70,
+ 0x00, 0x08, 0x00, 0x6e, 0x00),
+ PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
+ },
+ { .freq = 5240,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xee, 0xe9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
+ 0x00, 0x9d, 0x00, 0xe9, 0x00, 0x05, 0x00, 0x70,
+ 0x00, 0x08, 0x00, 0x6d, 0x00),
+ PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
+ },
+ { .freq = 5250,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xed, 0xe9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
+ 0x00, 0x9d, 0x00, 0xe9, 0x00, 0x05, 0x00, 0x70,
+ 0x00, 0x08, 0x00, 0x6d, 0x00),
+ PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
+ },
+ { .freq = 5260,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
+ 0xed, 0xd9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
+ 0x00, 0x9d, 0x00, 0xd9, 0x00, 0x05, 0x00, 0x70,
+ 0x00, 0x08, 0x00, 0x6d, 0x00),
+ PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
+ },
+ { .freq = 5270,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
+ 0xed, 0xd8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9c, 0x00, 0xd8, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
+ },
+ { .freq = 5280,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
+ },
+ { .freq = 5290,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
+ },
+ { .freq = 5300,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6c, 0x00),
+ PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
+ },
+ { .freq = 5310,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
+ },
+ { .freq = 5320,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdb, 0xb8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9c, 0x00, 0xb8, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
+ },
+ { .freq = 5330,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xcb, 0xb7, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9b, 0x00, 0xb7, 0x00, 0x04, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
+ },
+ { .freq = 5340,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xca, 0xb7, 0x00, 0x03, 0x00, 0x70, 0x00, 0x07,
+ 0x00, 0x9b, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x07, 0x00, 0x6b, 0x00),
+ PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
+ },
+ { .freq = 5350,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xca, 0xa7, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
+ 0x00, 0x9b, 0x00, 0xa7, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x06, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
+ },
+ { .freq = 5360,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xc9, 0xa6, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
+ 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x06, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
+ },
+ { .freq = 5370,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xc9, 0xa6, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
+ 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x06, 0x00, 0x7b, 0x00),
+ PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
+ },
+ { .freq = 5380,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x96, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
+ 0x00, 0x9a, 0x00, 0x96, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x06, 0x00, 0x7a, 0x00),
+ PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
+ },
+ { .freq = 5390,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
+ 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x06, 0x00, 0x7a, 0x00),
+ PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
+ },
+ { .freq = 5400,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
+ 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x06, 0x00, 0x7a, 0x00),
+ PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
+ },
+ { .freq = 5410,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb7, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x05,
+ 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x05, 0x00, 0x7a, 0x00),
+ PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
+ },
+ { .freq = 5420,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xa7, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x05,
+ 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
+ 0x00, 0x05, 0x00, 0x7a, 0x00),
+ PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
+ },
+ { .freq = 5430,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
+ 0xa6, 0x85, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
+ 0x00, 0x99, 0x00, 0x85, 0x00, 0x02, 0x00, 0x70,
+ 0x00, 0x05, 0x00, 0x79, 0x00),
+ PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
+ },
+ { .freq = 5440,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0xa6, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
+ 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
+ 0x00, 0x05, 0x00, 0x79, 0x00),
+ PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
+ },
+ { .freq = 5450,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x95, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
+ 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
+ 0x00, 0x05, 0x00, 0x79, 0x00),
+ PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
+ },
+ { .freq = 5460,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x95, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x79, 0x00),
+ PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
+ },
+ { .freq = 5470,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x94, 0x74, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x99, 0x00, 0x74, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x79, 0x00),
+ PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
+ },
+ { .freq = 5480,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x84, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x78, 0x00),
+ PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
+ },
+ { .freq = 5490,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x83, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x78, 0x00),
+ PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
+ },
+ { .freq = 5500,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x82, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x78, 0x00),
+ PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
+ },
+ { .freq = 5510,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x82, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x78, 0x00),
+ PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
+ },
+ { .freq = 5520,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x72, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
+ 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x04, 0x00, 0x78, 0x00),
+ PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
+ },
+ { .freq = 5530,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+ 0x72, 0x63, 0x00, 0x01, 0x00, 0x70, 0x00, 0x03,
+ 0x00, 0x98, 0x00, 0x63, 0x00, 0x01, 0x00, 0x70,
+ 0x00, 0x03, 0x00, 0x78, 0x00),
+ PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
+ },
+ { .freq = 5540,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+ 0x71, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
+ 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x03, 0x00, 0x77, 0x00),
+ PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
+ },
+ { .freq = 5550,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
+ 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x03, 0x00, 0x77, 0x00),
+ PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
+ },
+ { .freq = 5560,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
+ 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x03, 0x00, 0x77, 0x00),
+ PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
+ },
+ { .freq = 5570,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x52, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x52, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x76, 0x00),
+ PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
+ },
+ { .freq = 5580,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+ 0x60, 0x52, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x52, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x86, 0x00),
+ PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
+ },
+ { .freq = 5590,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+ 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x86, 0x00),
+ PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
+ },
+ { .freq = 5600,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x86, 0x00),
+ PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
+ },
+ { .freq = 5610,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x86, 0x00),
+ PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
+ },
+ { .freq = 5620,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x86, 0x00),
+ PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
+ },
+ { .freq = 5630,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x86, 0x00),
+ PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
+ },
+ { .freq = 5640,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
+ 0x00, 0x95, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x02, 0x00, 0x85, 0x00),
+ PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
+ },
+ { .freq = 5650,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x50, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x95, 0x00, 0x50, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x85, 0x00),
+ PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
+ },
+ { .freq = 5660,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x50, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x95, 0x00, 0x50, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x85, 0x00),
+ PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
+ },
+ { .freq = 5670,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x84, 0x00),
+ PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
+ },
+ { .freq = 5680,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x84, 0x00),
+ PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
+ },
+ { .freq = 5690,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x94, 0x00),
+ PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
+ },
+ { .freq = 5700,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x94, 0x00),
+ PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
+ },
+ { .freq = 5710,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x94, 0x00),
+ PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
+ },
+ { .freq = 5720,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x94, 0x00),
+ PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
+ },
+ { .freq = 5725,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x94, 0x00),
+ PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
+ },
+ { .freq = 5730,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
+ 0x00, 0x94, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x01, 0x00, 0x94, 0x00),
+ PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
+ },
+ { .freq = 5735,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x93, 0x00),
+ PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
+ },
+ { .freq = 5740,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x93, 0x00),
+ PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
+ },
+ { .freq = 5745,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x93, 0x00),
+ PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
+ },
+ { .freq = 5750,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x93, 0x00),
+ PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
+ },
+ { .freq = 5755,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x10, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x93, 0x00),
+ PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
+ },
+ { .freq = 5760,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+ 0x10, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x93, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x93, 0x00),
+ PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
+ },
+ { .freq = 5765,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+ 0x10, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
+ },
+ { .freq = 5770,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
+ },
+ { .freq = 5775,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
+ },
+ { .freq = 5780,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
+ },
+ { .freq = 5785,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
+ },
+ { .freq = 5790,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
+ },
+ { .freq = 5795,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
+ },
+ { .freq = 5800,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
+ },
+ { .freq = 5805,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
+ },
+ { .freq = 5810,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
+ },
+ { .freq = 5815,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
+ },
+ { .freq = 5820,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
+ },
+ { .freq = 5825,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
+ },
+ { .freq = 5830,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
+ },
+ { .freq = 5840,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
+ },
+ { .freq = 5850,
+ RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
+ },
+ { .freq = 5860,
+ RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x92, 0x00),
+ PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
+ },
+ { .freq = 5870,
+ RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x91, 0x00),
+ PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
+ },
+ { .freq = 5880,
+ RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x91, 0x00),
+ PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
+ },
+ { .freq = 5890,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x91, 0x00),
+ PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
+ },
+ { .freq = 5900,
+ RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x91, 0x00),
+ PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
+ },
+ { .freq = 5910,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
+ 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
+ 0x00, 0x00, 0x00, 0x91, 0x00),
+ PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
+ },
+ { .freq = 2412,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x89, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0b, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0b),
+ PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+ },
+ { .freq = 2417,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x89, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0a),
+ PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+ },
+ { .freq = 2422,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x89, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0f, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0f, 0x00, 0x0a),
+ PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+ },
+ { .freq = 2427,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0e, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0e, 0x00, 0x0a),
+ PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+ },
+ { .freq = 2432,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x77, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0e, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0e, 0x00, 0x0a),
+ PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+ },
+ { .freq = 2437,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x76, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0e, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0e, 0x00, 0x0a),
+ PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+ },
+ { .freq = 2442,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x66, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0e, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0e, 0x00, 0x0a),
+ PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+ },
+ { .freq = 2447,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x55, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0e, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0e, 0x00, 0x09),
+ PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+ },
+ { .freq = 2452,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0e, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0e, 0x00, 0x09),
+ PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+ },
+ { .freq = 2457,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0d, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0d, 0x00, 0x09),
+ PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+ },
+ { .freq = 2462,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0d, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0d, 0x00, 0x09),
+ PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+ },
+ { .freq = 2467,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x22, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0d, 0x00, 0x08, 0x00, 0x22, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0d, 0x00, 0x08),
+ PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+ },
+ { .freq = 2472,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0d, 0x00, 0x08, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0d, 0x00, 0x08),
+ PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+ },
+ { .freq = 2484,
+ RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0d, 0x00, 0x08, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0d, 0x00, 0x08),
+ PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
+ },
};
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] = {
+ { .freq = 4920,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
+ },
+ { .freq = 4930,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
+ },
+ { .freq = 4940,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
+ },
+ { .freq = 4950,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
+ },
+ { .freq = 4960,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
+ },
+ { .freq = 4970,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
+ },
+ { .freq = 4980,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
+ },
+ { .freq = 4990,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
+ },
+ { .freq = 5000,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
+ },
+ { .freq = 5010,
+ RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
+ },
+ { .freq = 5020,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
+ },
+ { .freq = 5030,
+ RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
+ },
+ { .freq = 5040,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
+ },
+ { .freq = 5050,
+ RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
+ },
+ { .freq = 5060,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
+ },
+ { .freq = 5070,
+ RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
+ },
+ { .freq = 5080,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
+ },
+ { .freq = 5090,
+ RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
+ },
+ { .freq = 5100,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfd, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
+ },
+ { .freq = 5110,
+ RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
+ },
+ { .freq = 5120,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
+ },
+ { .freq = 5130,
+ RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
+ },
+ { .freq = 5140,
+ RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfb, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
+ 0x00, 0x6f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x77,
+ 0x00, 0x0f, 0x00, 0x6f, 0x00),
+ PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
+ },
+ { .freq = 5160,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
+ 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
+ },
+ { .freq = 5170,
+ RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
+ 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
+ },
+ { .freq = 5180,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+ 0x00, 0x0e, 0x00, 0x6f, 0x00),
+ PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
+ },
+ { .freq = 5190,
+ RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
+ },
+ { .freq = 5200,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
+ },
+ { .freq = 5210,
+ RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+ 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
+ },
+ { .freq = 5220,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
+ },
+ { .freq = 5230,
+ RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xee, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
+ },
+ { .freq = 5240,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xee, 0xc8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xc8, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
+ },
+ { .freq = 5250,
+ RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+ 0xed, 0xc7, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x05, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
+ },
+ { .freq = 5260,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
+ 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0d,
+ 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
+ 0x00, 0x0d, 0x00, 0x6f, 0x00),
+ PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
+ },
+ { .freq = 5270,
+ RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
+ 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
+ },
+ { .freq = 5280,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
+ },
+ { .freq = 5290,
+ RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
+ },
+ { .freq = 5300,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
+ },
+ { .freq = 5310,
+ RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
+ },
+ { .freq = 5320,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
+ 0xdb, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
+ 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0c, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
+ },
+ { .freq = 5330,
+ RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xcb, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+ 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
+ },
+ { .freq = 5340,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
+ 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+ 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
+ },
+ { .freq = 5350,
+ RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
+ 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0b, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
+ },
+ { .freq = 5360,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
+ },
+ { .freq = 5370,
+ RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
+ 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
+ },
+ { .freq = 5380,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
+ },
+ { .freq = 5390,
+ RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
+ },
+ { .freq = 5400,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
+ },
+ { .freq = 5410,
+ RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xb7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
+ },
+ { .freq = 5420,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
+ 0xa7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
+ },
+ { .freq = 5430,
+ RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
+ 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x0a, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
+ },
+ { .freq = 5440,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
+ },
+ { .freq = 5450,
+ RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
+ },
+ { .freq = 5460,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
+ },
+ { .freq = 5470,
+ RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
+ 0x94, 0x73, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x01, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
+ },
+ { .freq = 5480,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x84, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
+ },
+ { .freq = 5490,
+ RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x83, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
+ },
+ { .freq = 5500,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
+ },
+ { .freq = 5510,
+ RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
+ },
+ { .freq = 5520,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
+ 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
+ },
+ { .freq = 5530,
+ RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+ 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
+ },
+ { .freq = 5540,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
+ 0x71, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
+ },
+ { .freq = 5550,
+ RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
+ },
+ { .freq = 5560,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
+ },
+ { .freq = 5570,
+ RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
+ 0x61, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
+ 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x09, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
+ },
+ { .freq = 5580,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+ 0x60, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
+ },
+ { .freq = 5590,
+ RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
+ 0x50, 0x61, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x61, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
+ },
+ { .freq = 5600,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
+ },
+ { .freq = 5610,
+ RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
+ 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x08, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
+ },
+ { .freq = 5620,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
+ 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
+ },
+ { .freq = 5630,
+ RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
+ },
+ { .freq = 5640,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
+ },
+ { .freq = 5650,
+ RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
+ 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x07, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
+ },
+ { .freq = 5660,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
+ },
+ { .freq = 5670,
+ RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
+ 0x40, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
+ },
+ { .freq = 5680,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
+ },
+ { .freq = 5690,
+ RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6f, 0x00),
+ PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
+ },
+ { .freq = 5700,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
+ },
+ { .freq = 5710,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
+ },
+ { .freq = 5720,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
+ },
+ { .freq = 5725,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
+ 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
+ },
+ { .freq = 5730,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6e, 0x00),
+ PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
+ },
+ { .freq = 5735,
+ RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6d, 0x00),
+ PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
+ },
+ { .freq = 5740,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6d, 0x00),
+ PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
+ },
+ { .freq = 5745,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+ 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x06, 0x00, 0x6d, 0x00),
+ PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
+ },
+ { .freq = 5750,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6d, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6d, 0x00),
+ PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
+ },
+ { .freq = 5755,
+ RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+ 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
+ },
+ { .freq = 5760,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+ 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
+ },
+ { .freq = 5765,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6c, 0x00),
+ PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
+ },
+ { .freq = 5770,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
+ },
+ { .freq = 5775,
+ RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
+ },
+ { .freq = 5780,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
+ 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
+ },
+ { .freq = 5785,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
+ },
+ { .freq = 5790,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
+ },
+ { .freq = 5795,
+ RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
+ },
+ { .freq = 5800,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6b, 0x00),
+ PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
+ },
+ { .freq = 5805,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
+ },
+ { .freq = 5810,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
+ },
+ { .freq = 5815,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
+ },
+ { .freq = 5820,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x6a, 0x00),
+ PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
+ },
+ { .freq = 5825,
+ RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
+ 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x69, 0x00),
+ PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
+ },
+ { .freq = 5830,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x05, 0x00, 0x69, 0x00),
+ PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
+ },
+ { .freq = 5840,
+ RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
+ },
+ { .freq = 5850,
+ RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
+ },
+ { .freq = 5860,
+ RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x69, 0x00),
+ PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
+ },
+ { .freq = 5870,
+ RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
+ },
+ { .freq = 5880,
+ RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
+ },
+ { .freq = 5890,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
+ },
+ { .freq = 5900,
+ RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
+ },
+ { .freq = 5910,
+ RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
+ 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
+ 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+ 0x00, 0x04, 0x00, 0x68, 0x00),
+ PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
+ },
+ { .freq = 2412,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+ },
+ { .freq = 2417,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+ },
+ { .freq = 2422,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0b, 0x00, 0x0a),
+ PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+ },
+ { .freq = 2427,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+ },
+ { .freq = 2432,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+ },
+ { .freq = 2437,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+ },
+ { .freq = 2442,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x0a),
+ PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+ },
+ { .freq = 2447,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+ },
+ { .freq = 2452,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+ },
+ { .freq = 2457,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x0a, 0x00, 0x09),
+ PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+ },
+ { .freq = 2462,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+ },
+ { .freq = 2467,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x22, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+ },
+ { .freq = 2472,
+ RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x11, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+ },
+ { .freq = 2484,
+ RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
+ 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
+ 0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x70, 0x00, 0x09, 0x00, 0x09),
+ PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
+ },
+};
+
+static void b2056_upload_inittab(struct b43_wldev *dev, bool ghz5,
+ bool ignore_uploadflag, u16 routing,
+ const struct b2056_inittab_entry *e,
+ unsigned int length)
+{
+ unsigned int i;
+ u16 value;
+
+ for (i = 0; i < length; i++, e++) {
+ if (!(e->flags & B2056_INITTAB_ENTRY_OK))
+ continue;
+ if ((e->flags & B2056_INITTAB_UPLOAD) || ignore_uploadflag) {
+ if (ghz5)
+ value = e->ghz5;
+ else
+ value = e->ghz2;
+ b43_radio_write(dev, routing | i, value);
+ }
+ }
+}
+
+void b2056_upload_inittabs(struct b43_wldev *dev,
+ bool ghz5, bool ignore_uploadflag)
+{
+ struct b2056_inittabs_pts *pts;
+
+ if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+ B43_WARN_ON(1);
+ return;
+ }
+ pts = &b2056_inittabs[dev->phy.rev];
+
+ b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
+ B2056_SYN, pts->syn, pts->syn_length);
+ b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
+ B2056_TX0, pts->tx, pts->tx_length);
+ b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
+ B2056_TX1, pts->tx, pts->tx_length);
+ b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
+ B2056_RX0, pts->rx, pts->rx_length);
+ b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
+ B2056_RX1, pts->rx, pts->rx_length);
+}
+
const struct b43_nphy_channeltab_entry_rev3 *
b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
{
const struct b43_nphy_channeltab_entry_rev3 *e;
- unsigned int i;
+ unsigned int length, i;
+
+ switch (dev->phy.rev) {
+ case 3:
+ e = b43_nphy_channeltab_rev3;
+ length = ARRAY_SIZE(b43_nphy_channeltab_rev3);
+ break;
+ case 4:
+ e = b43_nphy_channeltab_rev4;
+ length = ARRAY_SIZE(b43_nphy_channeltab_rev4);
+ break;
+ case 5:
+ e = b43_nphy_channeltab_rev5;
+ length = ARRAY_SIZE(b43_nphy_channeltab_rev5);
+ break;
+ case 6:
+ e = b43_nphy_channeltab_rev6;
+ length = ARRAY_SIZE(b43_nphy_channeltab_rev6);
+ break;
+ case 7:
+ case 9:
+ e = b43_nphy_channeltab_rev7_9;
+ length = ARRAY_SIZE(b43_nphy_channeltab_rev7_9);
+ break;
+ case 8:
+ e = b43_nphy_channeltab_rev8;
+ length = ARRAY_SIZE(b43_nphy_channeltab_rev8);
+ break;
+ default:
+ B43_WARN_ON(1);
+ return NULL;
+ }
- for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab_rev3); i++) {
- e = &(b43_nphy_channeltab_rev3[i]);
+ for (i = 0; i < length; i++, e++) {
if (e->freq == freq)
return e;
}
diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/b43/radio_2056.h
index fda6dafecb8c..d601f6e7e313 100644
--- a/drivers/net/wireless/b43/radio_2056.h
+++ b/drivers/net/wireless/b43/radio_2056.h
@@ -4,6 +4,9 @@
Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
+ Some parts of the code in this file are derived from the brcm80211
+ driver Copyright (c) 2010 Broadcom Corporation
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
@@ -28,15 +31,1090 @@
#include "tables_nphy.h"
+#define B2056_SYN (0x0 << 12)
+#define B2056_TX0 (0x2 << 12)
+#define B2056_TX1 (0x3 << 12)
+#define B2056_RX0 (0x6 << 12)
+#define B2056_RX1 (0x7 << 12)
+#define B2056_ALLTX (0xE << 12)
+#define B2056_ALLRX (0xF << 12)
+
+#define B2056_SYN_RESERVED_ADDR0 0x00
+#define B2056_SYN_IDCODE 0x01
+#define B2056_SYN_RESERVED_ADDR2 0x02
+#define B2056_SYN_RESERVED_ADDR3 0x03
+#define B2056_SYN_RESERVED_ADDR4 0x04
+#define B2056_SYN_RESERVED_ADDR5 0x05
+#define B2056_SYN_RESERVED_ADDR6 0x06
+#define B2056_SYN_RESERVED_ADDR7 0x07
+#define B2056_SYN_COM_CTRL 0x08
+#define B2056_SYN_COM_PU 0x09
+#define B2056_SYN_COM_OVR 0x0A
+#define B2056_SYN_COM_RESET 0x0B
+#define B2056_SYN_COM_RCAL 0x0C
+#define B2056_SYN_COM_RC_RXLPF 0x0D
+#define B2056_SYN_COM_RC_TXLPF 0x0E
+#define B2056_SYN_COM_RC_RXHPF 0x0F
+#define B2056_SYN_RESERVED_ADDR16 0x10
+#define B2056_SYN_RESERVED_ADDR17 0x11
+#define B2056_SYN_RESERVED_ADDR18 0x12
+#define B2056_SYN_RESERVED_ADDR19 0x13
+#define B2056_SYN_RESERVED_ADDR20 0x14
+#define B2056_SYN_RESERVED_ADDR21 0x15
+#define B2056_SYN_RESERVED_ADDR22 0x16
+#define B2056_SYN_RESERVED_ADDR23 0x17
+#define B2056_SYN_RESERVED_ADDR24 0x18
+#define B2056_SYN_RESERVED_ADDR25 0x19
+#define B2056_SYN_RESERVED_ADDR26 0x1A
+#define B2056_SYN_RESERVED_ADDR27 0x1B
+#define B2056_SYN_RESERVED_ADDR28 0x1C
+#define B2056_SYN_RESERVED_ADDR29 0x1D
+#define B2056_SYN_RESERVED_ADDR30 0x1E
+#define B2056_SYN_RESERVED_ADDR31 0x1F
+#define B2056_SYN_GPIO_MASTER1 0x20
+#define B2056_SYN_GPIO_MASTER2 0x21
+#define B2056_SYN_TOPBIAS_MASTER 0x22
+#define B2056_SYN_TOPBIAS_RCAL 0x23
+#define B2056_SYN_AFEREG 0x24
+#define B2056_SYN_TEMPPROCSENSE 0x25
+#define B2056_SYN_TEMPPROCSENSEIDAC 0x26
+#define B2056_SYN_TEMPPROCSENSERCAL 0x27
+#define B2056_SYN_LPO 0x28
+#define B2056_SYN_VDDCAL_MASTER 0x29
+#define B2056_SYN_VDDCAL_IDAC 0x2A
+#define B2056_SYN_VDDCAL_STATUS 0x2B
+#define B2056_SYN_RCAL_MASTER 0x2C
+#define B2056_SYN_RCAL_CODE_OUT 0x2D
+#define B2056_SYN_RCCAL_CTRL0 0x2E
+#define B2056_SYN_RCCAL_CTRL1 0x2F
+#define B2056_SYN_RCCAL_CTRL2 0x30
+#define B2056_SYN_RCCAL_CTRL3 0x31
+#define B2056_SYN_RCCAL_CTRL4 0x32
+#define B2056_SYN_RCCAL_CTRL5 0x33
+#define B2056_SYN_RCCAL_CTRL6 0x34
+#define B2056_SYN_RCCAL_CTRL7 0x35
+#define B2056_SYN_RCCAL_CTRL8 0x36
+#define B2056_SYN_RCCAL_CTRL9 0x37
+#define B2056_SYN_RCCAL_CTRL10 0x38
+#define B2056_SYN_RCCAL_CTRL11 0x39
+#define B2056_SYN_ZCAL_SPARE1 0x3A
+#define B2056_SYN_ZCAL_SPARE2 0x3B
+#define B2056_SYN_PLL_MAST1 0x3C
+#define B2056_SYN_PLL_MAST2 0x3D
+#define B2056_SYN_PLL_MAST3 0x3E
+#define B2056_SYN_PLL_BIAS_RESET 0x3F
+#define B2056_SYN_PLL_XTAL0 0x40
+#define B2056_SYN_PLL_XTAL1 0x41
+#define B2056_SYN_PLL_XTAL3 0x42
+#define B2056_SYN_PLL_XTAL4 0x43
+#define B2056_SYN_PLL_XTAL5 0x44
+#define B2056_SYN_PLL_XTAL6 0x45
+#define B2056_SYN_PLL_REFDIV 0x46
+#define B2056_SYN_PLL_PFD 0x47
+#define B2056_SYN_PLL_CP1 0x48
+#define B2056_SYN_PLL_CP2 0x49
+#define B2056_SYN_PLL_CP3 0x4A
+#define B2056_SYN_PLL_LOOPFILTER1 0x4B
+#define B2056_SYN_PLL_LOOPFILTER2 0x4C
+#define B2056_SYN_PLL_LOOPFILTER3 0x4D
+#define B2056_SYN_PLL_LOOPFILTER4 0x4E
+#define B2056_SYN_PLL_LOOPFILTER5 0x4F
+#define B2056_SYN_PLL_MMD1 0x50
+#define B2056_SYN_PLL_MMD2 0x51
+#define B2056_SYN_PLL_VCO1 0x52
+#define B2056_SYN_PLL_VCO2 0x53
+#define B2056_SYN_PLL_MONITOR1 0x54
+#define B2056_SYN_PLL_MONITOR2 0x55
+#define B2056_SYN_PLL_VCOCAL1 0x56
+#define B2056_SYN_PLL_VCOCAL2 0x57
+#define B2056_SYN_PLL_VCOCAL4 0x58
+#define B2056_SYN_PLL_VCOCAL5 0x59
+#define B2056_SYN_PLL_VCOCAL6 0x5A
+#define B2056_SYN_PLL_VCOCAL7 0x5B
+#define B2056_SYN_PLL_VCOCAL8 0x5C
+#define B2056_SYN_PLL_VCOCAL9 0x5D
+#define B2056_SYN_PLL_VCOCAL10 0x5E
+#define B2056_SYN_PLL_VCOCAL11 0x5F
+#define B2056_SYN_PLL_VCOCAL12 0x60
+#define B2056_SYN_PLL_VCOCAL13 0x61
+#define B2056_SYN_PLL_VREG 0x62
+#define B2056_SYN_PLL_STATUS1 0x63
+#define B2056_SYN_PLL_STATUS2 0x64
+#define B2056_SYN_PLL_STATUS3 0x65
+#define B2056_SYN_LOGEN_PU0 0x66
+#define B2056_SYN_LOGEN_PU1 0x67
+#define B2056_SYN_LOGEN_PU2 0x68
+#define B2056_SYN_LOGEN_PU3 0x69
+#define B2056_SYN_LOGEN_PU5 0x6A
+#define B2056_SYN_LOGEN_PU6 0x6B
+#define B2056_SYN_LOGEN_PU7 0x6C
+#define B2056_SYN_LOGEN_PU8 0x6D
+#define B2056_SYN_LOGEN_BIAS_RESET 0x6E
+#define B2056_SYN_LOGEN_RCCR1 0x6F
+#define B2056_SYN_LOGEN_VCOBUF1 0x70
+#define B2056_SYN_LOGEN_MIXER1 0x71
+#define B2056_SYN_LOGEN_MIXER2 0x72
+#define B2056_SYN_LOGEN_BUF1 0x73
+#define B2056_SYN_LOGENBUF2 0x74
+#define B2056_SYN_LOGEN_BUF3 0x75
+#define B2056_SYN_LOGEN_BUF4 0x76
+#define B2056_SYN_LOGEN_DIV1 0x77
+#define B2056_SYN_LOGEN_DIV2 0x78
+#define B2056_SYN_LOGEN_DIV3 0x79
+#define B2056_SYN_LOGEN_ACL1 0x7A
+#define B2056_SYN_LOGEN_ACL2 0x7B
+#define B2056_SYN_LOGEN_ACL3 0x7C
+#define B2056_SYN_LOGEN_ACL4 0x7D
+#define B2056_SYN_LOGEN_ACL5 0x7E
+#define B2056_SYN_LOGEN_ACL6 0x7F
+#define B2056_SYN_LOGEN_ACLOUT 0x80
+#define B2056_SYN_LOGEN_ACLCAL1 0x81
+#define B2056_SYN_LOGEN_ACLCAL2 0x82
+#define B2056_SYN_LOGEN_ACLCAL3 0x83
+#define B2056_SYN_CALEN 0x84
+#define B2056_SYN_LOGEN_PEAKDET1 0x85
+#define B2056_SYN_LOGEN_CORE_ACL_OVR 0x86
+#define B2056_SYN_LOGEN_RX_DIFF_ACL_OVR 0x87
+#define B2056_SYN_LOGEN_TX_DIFF_ACL_OVR 0x88
+#define B2056_SYN_LOGEN_RX_CMOS_ACL_OVR 0x89
+#define B2056_SYN_LOGEN_TX_CMOS_ACL_OVR 0x8A
+#define B2056_SYN_LOGEN_VCOBUF2 0x8B
+#define B2056_SYN_LOGEN_MIXER3 0x8C
+#define B2056_SYN_LOGEN_BUF5 0x8D
+#define B2056_SYN_LOGEN_BUF6 0x8E
+#define B2056_SYN_LOGEN_CBUFRX1 0x8F
+#define B2056_SYN_LOGEN_CBUFRX2 0x90
+#define B2056_SYN_LOGEN_CBUFRX3 0x91
+#define B2056_SYN_LOGEN_CBUFRX4 0x92
+#define B2056_SYN_LOGEN_CBUFTX1 0x93
+#define B2056_SYN_LOGEN_CBUFTX2 0x94
+#define B2056_SYN_LOGEN_CBUFTX3 0x95
+#define B2056_SYN_LOGEN_CBUFTX4 0x96
+#define B2056_SYN_LOGEN_CMOSRX1 0x97
+#define B2056_SYN_LOGEN_CMOSRX2 0x98
+#define B2056_SYN_LOGEN_CMOSRX3 0x99
+#define B2056_SYN_LOGEN_CMOSRX4 0x9A
+#define B2056_SYN_LOGEN_CMOSTX1 0x9B
+#define B2056_SYN_LOGEN_CMOSTX2 0x9C
+#define B2056_SYN_LOGEN_CMOSTX3 0x9D
+#define B2056_SYN_LOGEN_CMOSTX4 0x9E
+#define B2056_SYN_LOGEN_VCOBUF2_OVRVAL 0x9F
+#define B2056_SYN_LOGEN_MIXER3_OVRVAL 0xA0
+#define B2056_SYN_LOGEN_BUF5_OVRVAL 0xA1
+#define B2056_SYN_LOGEN_BUF6_OVRVAL 0xA2
+#define B2056_SYN_LOGEN_CBUFRX1_OVRVAL 0xA3
+#define B2056_SYN_LOGEN_CBUFRX2_OVRVAL 0xA4
+#define B2056_SYN_LOGEN_CBUFRX3_OVRVAL 0xA5
+#define B2056_SYN_LOGEN_CBUFRX4_OVRVAL 0xA6
+#define B2056_SYN_LOGEN_CBUFTX1_OVRVAL 0xA7
+#define B2056_SYN_LOGEN_CBUFTX2_OVRVAL 0xA8
+#define B2056_SYN_LOGEN_CBUFTX3_OVRVAL 0xA9
+#define B2056_SYN_LOGEN_CBUFTX4_OVRVAL 0xAA
+#define B2056_SYN_LOGEN_CMOSRX1_OVRVAL 0xAB
+#define B2056_SYN_LOGEN_CMOSRX2_OVRVAL 0xAC
+#define B2056_SYN_LOGEN_CMOSRX3_OVRVAL 0xAD
+#define B2056_SYN_LOGEN_CMOSRX4_OVRVAL 0xAE
+#define B2056_SYN_LOGEN_CMOSTX1_OVRVAL 0xAF
+#define B2056_SYN_LOGEN_CMOSTX2_OVRVAL 0xB0
+#define B2056_SYN_LOGEN_CMOSTX3_OVRVAL 0xB1
+#define B2056_SYN_LOGEN_CMOSTX4_OVRVAL 0xB2
+#define B2056_SYN_LOGEN_ACL_WAITCNT 0xB3
+#define B2056_SYN_LOGEN_CORE_CALVALID 0xB4
+#define B2056_SYN_LOGEN_RX_CMOS_CALVALID 0xB5
+#define B2056_SYN_LOGEN_TX_CMOS_VALID 0xB6
+
+#define B2056_TX_RESERVED_ADDR0 0x00
+#define B2056_TX_IDCODE 0x01
+#define B2056_TX_RESERVED_ADDR2 0x02
+#define B2056_TX_RESERVED_ADDR3 0x03
+#define B2056_TX_RESERVED_ADDR4 0x04
+#define B2056_TX_RESERVED_ADDR5 0x05
+#define B2056_TX_RESERVED_ADDR6 0x06
+#define B2056_TX_RESERVED_ADDR7 0x07
+#define B2056_TX_COM_CTRL 0x08
+#define B2056_TX_COM_PU 0x09
+#define B2056_TX_COM_OVR 0x0A
+#define B2056_TX_COM_RESET 0x0B
+#define B2056_TX_COM_RCAL 0x0C
+#define B2056_TX_COM_RC_RXLPF 0x0D
+#define B2056_TX_COM_RC_TXLPF 0x0E
+#define B2056_TX_COM_RC_RXHPF 0x0F
+#define B2056_TX_RESERVED_ADDR16 0x10
+#define B2056_TX_RESERVED_ADDR17 0x11
+#define B2056_TX_RESERVED_ADDR18 0x12
+#define B2056_TX_RESERVED_ADDR19 0x13
+#define B2056_TX_RESERVED_ADDR20 0x14
+#define B2056_TX_RESERVED_ADDR21 0x15
+#define B2056_TX_RESERVED_ADDR22 0x16
+#define B2056_TX_RESERVED_ADDR23 0x17
+#define B2056_TX_RESERVED_ADDR24 0x18
+#define B2056_TX_RESERVED_ADDR25 0x19
+#define B2056_TX_RESERVED_ADDR26 0x1A
+#define B2056_TX_RESERVED_ADDR27 0x1B
+#define B2056_TX_RESERVED_ADDR28 0x1C
+#define B2056_TX_RESERVED_ADDR29 0x1D
+#define B2056_TX_RESERVED_ADDR30 0x1E
+#define B2056_TX_RESERVED_ADDR31 0x1F
+#define B2056_TX_IQCAL_GAIN_BW 0x20
+#define B2056_TX_LOFT_FINE_I 0x21
+#define B2056_TX_LOFT_FINE_Q 0x22
+#define B2056_TX_LOFT_COARSE_I 0x23
+#define B2056_TX_LOFT_COARSE_Q 0x24
+#define B2056_TX_TX_COM_MASTER1 0x25
+#define B2056_TX_TX_COM_MASTER2 0x26
+#define B2056_TX_RXIQCAL_TXMUX 0x27
+#define B2056_TX_TX_SSI_MASTER 0x28
+#define B2056_TX_IQCAL_VCM_HG 0x29
+#define B2056_TX_IQCAL_IDAC 0x2A
+#define B2056_TX_TSSI_VCM 0x2B
+#define B2056_TX_TX_AMP_DET 0x2C
+#define B2056_TX_TX_SSI_MUX 0x2D
+#define B2056_TX_TSSIA 0x2E
+#define B2056_TX_TSSIG 0x2F
+#define B2056_TX_TSSI_MISC1 0x30
+#define B2056_TX_TSSI_MISC2 0x31
+#define B2056_TX_TSSI_MISC3 0x32
+#define B2056_TX_PA_SPARE1 0x33
+#define B2056_TX_PA_SPARE2 0x34
+#define B2056_TX_INTPAA_MASTER 0x35
+#define B2056_TX_INTPAA_GAIN 0x36
+#define B2056_TX_INTPAA_BOOST_TUNE 0x37
+#define B2056_TX_INTPAA_IAUX_STAT 0x38
+#define B2056_TX_INTPAA_IAUX_DYN 0x39
+#define B2056_TX_INTPAA_IMAIN_STAT 0x3A
+#define B2056_TX_INTPAA_IMAIN_DYN 0x3B
+#define B2056_TX_INTPAA_CASCBIAS 0x3C
+#define B2056_TX_INTPAA_PASLOPE 0x3D
+#define B2056_TX_INTPAA_PA_MISC 0x3E
+#define B2056_TX_INTPAG_MASTER 0x3F
+#define B2056_TX_INTPAG_GAIN 0x40
+#define B2056_TX_INTPAG_BOOST_TUNE 0x41
+#define B2056_TX_INTPAG_IAUX_STAT 0x42
+#define B2056_TX_INTPAG_IAUX_DYN 0x43
+#define B2056_TX_INTPAG_IMAIN_STAT 0x44
+#define B2056_TX_INTPAG_IMAIN_DYN 0x45
+#define B2056_TX_INTPAG_CASCBIAS 0x46
+#define B2056_TX_INTPAG_PASLOPE 0x47
+#define B2056_TX_INTPAG_PA_MISC 0x48
+#define B2056_TX_PADA_MASTER 0x49
+#define B2056_TX_PADA_IDAC 0x4A
+#define B2056_TX_PADA_CASCBIAS 0x4B
+#define B2056_TX_PADA_GAIN 0x4C
+#define B2056_TX_PADA_BOOST_TUNE 0x4D
+#define B2056_TX_PADA_SLOPE 0x4E
+#define B2056_TX_PADG_MASTER 0x4F
+#define B2056_TX_PADG_IDAC 0x50
+#define B2056_TX_PADG_CASCBIAS 0x51
+#define B2056_TX_PADG_GAIN 0x52
+#define B2056_TX_PADG_BOOST_TUNE 0x53
+#define B2056_TX_PADG_SLOPE 0x54
+#define B2056_TX_PGAA_MASTER 0x55
+#define B2056_TX_PGAA_IDAC 0x56
+#define B2056_TX_PGAA_GAIN 0x57
+#define B2056_TX_PGAA_BOOST_TUNE 0x58
+#define B2056_TX_PGAA_SLOPE 0x59
+#define B2056_TX_PGAA_MISC 0x5A
+#define B2056_TX_PGAG_MASTER 0x5B
+#define B2056_TX_PGAG_IDAC 0x5C
+#define B2056_TX_PGAG_GAIN 0x5D
+#define B2056_TX_PGAG_BOOST_TUNE 0x5E
+#define B2056_TX_PGAG_SLOPE 0x5F
+#define B2056_TX_PGAG_MISC 0x60
+#define B2056_TX_MIXA_MASTER 0x61
+#define B2056_TX_MIXA_BOOST_TUNE 0x62
+#define B2056_TX_MIXG 0x63
+#define B2056_TX_MIXG_BOOST_TUNE 0x64
+#define B2056_TX_BB_GM_MASTER 0x65
+#define B2056_TX_GMBB_GM 0x66
+#define B2056_TX_GMBB_IDAC 0x67
+#define B2056_TX_TXLPF_MASTER 0x68
+#define B2056_TX_TXLPF_RCCAL 0x69
+#define B2056_TX_TXLPF_RCCAL_OFF0 0x6A
+#define B2056_TX_TXLPF_RCCAL_OFF1 0x6B
+#define B2056_TX_TXLPF_RCCAL_OFF2 0x6C
+#define B2056_TX_TXLPF_RCCAL_OFF3 0x6D
+#define B2056_TX_TXLPF_RCCAL_OFF4 0x6E
+#define B2056_TX_TXLPF_RCCAL_OFF5 0x6F
+#define B2056_TX_TXLPF_RCCAL_OFF6 0x70
+#define B2056_TX_TXLPF_BW 0x71
+#define B2056_TX_TXLPF_GAIN 0x72
+#define B2056_TX_TXLPF_IDAC 0x73
+#define B2056_TX_TXLPF_IDAC_0 0x74
+#define B2056_TX_TXLPF_IDAC_1 0x75
+#define B2056_TX_TXLPF_IDAC_2 0x76
+#define B2056_TX_TXLPF_IDAC_3 0x77
+#define B2056_TX_TXLPF_IDAC_4 0x78
+#define B2056_TX_TXLPF_IDAC_5 0x79
+#define B2056_TX_TXLPF_IDAC_6 0x7A
+#define B2056_TX_TXLPF_OPAMP_IDAC 0x7B
+#define B2056_TX_TXLPF_MISC 0x7C
+#define B2056_TX_TXSPARE1 0x7D
+#define B2056_TX_TXSPARE2 0x7E
+#define B2056_TX_TXSPARE3 0x7F
+#define B2056_TX_TXSPARE4 0x80
+#define B2056_TX_TXSPARE5 0x81
+#define B2056_TX_TXSPARE6 0x82
+#define B2056_TX_TXSPARE7 0x83
+#define B2056_TX_TXSPARE8 0x84
+#define B2056_TX_TXSPARE9 0x85
+#define B2056_TX_TXSPARE10 0x86
+#define B2056_TX_TXSPARE11 0x87
+#define B2056_TX_TXSPARE12 0x88
+#define B2056_TX_TXSPARE13 0x89
+#define B2056_TX_TXSPARE14 0x8A
+#define B2056_TX_TXSPARE15 0x8B
+#define B2056_TX_TXSPARE16 0x8C
+#define B2056_TX_STATUS_INTPA_GAIN 0x8D
+#define B2056_TX_STATUS_PAD_GAIN 0x8E
+#define B2056_TX_STATUS_PGA_GAIN 0x8F
+#define B2056_TX_STATUS_GM_TXLPF_GAIN 0x90
+#define B2056_TX_STATUS_TXLPF_BW 0x91
+#define B2056_TX_STATUS_TXLPF_RC 0x92
+#define B2056_TX_GMBB_IDAC0 0x93
+#define B2056_TX_GMBB_IDAC1 0x94
+#define B2056_TX_GMBB_IDAC2 0x95
+#define B2056_TX_GMBB_IDAC3 0x96
+#define B2056_TX_GMBB_IDAC4 0x97
+#define B2056_TX_GMBB_IDAC5 0x98
+#define B2056_TX_GMBB_IDAC6 0x99
+#define B2056_TX_GMBB_IDAC7 0x9A
+
+#define B2056_RX_RESERVED_ADDR0 0x00
+#define B2056_RX_IDCODE 0x01
+#define B2056_RX_RESERVED_ADDR2 0x02
+#define B2056_RX_RESERVED_ADDR3 0x03
+#define B2056_RX_RESERVED_ADDR4 0x04
+#define B2056_RX_RESERVED_ADDR5 0x05
+#define B2056_RX_RESERVED_ADDR6 0x06
+#define B2056_RX_RESERVED_ADDR7 0x07
+#define B2056_RX_COM_CTRL 0x08
+#define B2056_RX_COM_PU 0x09
+#define B2056_RX_COM_OVR 0x0A
+#define B2056_RX_COM_RESET 0x0B
+#define B2056_RX_COM_RCAL 0x0C
+#define B2056_RX_COM_RC_RXLPF 0x0D
+#define B2056_RX_COM_RC_TXLPF 0x0E
+#define B2056_RX_COM_RC_RXHPF 0x0F
+#define B2056_RX_RESERVED_ADDR16 0x10
+#define B2056_RX_RESERVED_ADDR17 0x11
+#define B2056_RX_RESERVED_ADDR18 0x12
+#define B2056_RX_RESERVED_ADDR19 0x13
+#define B2056_RX_RESERVED_ADDR20 0x14
+#define B2056_RX_RESERVED_ADDR21 0x15
+#define B2056_RX_RESERVED_ADDR22 0x16
+#define B2056_RX_RESERVED_ADDR23 0x17
+#define B2056_RX_RESERVED_ADDR24 0x18
+#define B2056_RX_RESERVED_ADDR25 0x19
+#define B2056_RX_RESERVED_ADDR26 0x1A
+#define B2056_RX_RESERVED_ADDR27 0x1B
+#define B2056_RX_RESERVED_ADDR28 0x1C
+#define B2056_RX_RESERVED_ADDR29 0x1D
+#define B2056_RX_RESERVED_ADDR30 0x1E
+#define B2056_RX_RESERVED_ADDR31 0x1F
+#define B2056_RX_RXIQCAL_RXMUX 0x20
+#define B2056_RX_RSSI_PU 0x21
+#define B2056_RX_RSSI_SEL 0x22
+#define B2056_RX_RSSI_GAIN 0x23
+#define B2056_RX_RSSI_NB_IDAC 0x24
+#define B2056_RX_RSSI_WB2I_IDAC_1 0x25
+#define B2056_RX_RSSI_WB2I_IDAC_2 0x26
+#define B2056_RX_RSSI_WB2Q_IDAC_1 0x27
+#define B2056_RX_RSSI_WB2Q_IDAC_2 0x28
+#define B2056_RX_RSSI_POLE 0x29
+#define B2056_RX_RSSI_WB1_IDAC 0x2A
+#define B2056_RX_RSSI_MISC 0x2B
+#define B2056_RX_LNAA_MASTER 0x2C
+#define B2056_RX_LNAA_TUNE 0x2D
+#define B2056_RX_LNAA_GAIN 0x2E
+#define B2056_RX_LNA_A_SLOPE 0x2F
+#define B2056_RX_BIASPOLE_LNAA1_IDAC 0x30
+#define B2056_RX_LNAA2_IDAC 0x31
+#define B2056_RX_LNA1A_MISC 0x32
+#define B2056_RX_LNAG_MASTER 0x33
+#define B2056_RX_LNAG_TUNE 0x34
+#define B2056_RX_LNAG_GAIN 0x35
+#define B2056_RX_LNA_G_SLOPE 0x36
+#define B2056_RX_BIASPOLE_LNAG1_IDAC 0x37
+#define B2056_RX_LNAG2_IDAC 0x38
+#define B2056_RX_LNA1G_MISC 0x39
+#define B2056_RX_MIXA_MASTER 0x3A
+#define B2056_RX_MIXA_VCM 0x3B
+#define B2056_RX_MIXA_CTRLPTAT 0x3C
+#define B2056_RX_MIXA_LOB_BIAS 0x3D
+#define B2056_RX_MIXA_CORE_IDAC 0x3E
+#define B2056_RX_MIXA_CMFB_IDAC 0x3F
+#define B2056_RX_MIXA_BIAS_AUX 0x40
+#define B2056_RX_MIXA_BIAS_MAIN 0x41
+#define B2056_RX_MIXA_BIAS_MISC 0x42
+#define B2056_RX_MIXA_MAST_BIAS 0x43
+#define B2056_RX_MIXG_MASTER 0x44
+#define B2056_RX_MIXG_VCM 0x45
+#define B2056_RX_MIXG_CTRLPTAT 0x46
+#define B2056_RX_MIXG_LOB_BIAS 0x47
+#define B2056_RX_MIXG_CORE_IDAC 0x48
+#define B2056_RX_MIXG_CMFB_IDAC 0x49
+#define B2056_RX_MIXG_BIAS_AUX 0x4A
+#define B2056_RX_MIXG_BIAS_MAIN 0x4B
+#define B2056_RX_MIXG_BIAS_MISC 0x4C
+#define B2056_RX_MIXG_MAST_BIAS 0x4D
+#define B2056_RX_TIA_MASTER 0x4E
+#define B2056_RX_TIA_IOPAMP 0x4F
+#define B2056_RX_TIA_QOPAMP 0x50
+#define B2056_RX_TIA_IMISC 0x51
+#define B2056_RX_TIA_QMISC 0x52
+#define B2056_RX_TIA_GAIN 0x53
+#define B2056_RX_TIA_SPARE1 0x54
+#define B2056_RX_TIA_SPARE2 0x55
+#define B2056_RX_BB_LPF_MASTER 0x56
+#define B2056_RX_AACI_MASTER 0x57
+#define B2056_RX_RXLPF_IDAC 0x58
+#define B2056_RX_RXLPF_OPAMPBIAS_LOWQ 0x59
+#define B2056_RX_RXLPF_OPAMPBIAS_HIGHQ 0x5A
+#define B2056_RX_RXLPF_BIAS_DCCANCEL 0x5B
+#define B2056_RX_RXLPF_OUTVCM 0x5C
+#define B2056_RX_RXLPF_INVCM_BODY 0x5D
+#define B2056_RX_RXLPF_CC_OP 0x5E
+#define B2056_RX_RXLPF_GAIN 0x5F
+#define B2056_RX_RXLPF_Q_BW 0x60
+#define B2056_RX_RXLPF_HP_CORNER_BW 0x61
+#define B2056_RX_RXLPF_RCCAL_HPC 0x62
+#define B2056_RX_RXHPF_OFF0 0x63
+#define B2056_RX_RXHPF_OFF1 0x64
+#define B2056_RX_RXHPF_OFF2 0x65
+#define B2056_RX_RXHPF_OFF3 0x66
+#define B2056_RX_RXHPF_OFF4 0x67
+#define B2056_RX_RXHPF_OFF5 0x68
+#define B2056_RX_RXHPF_OFF6 0x69
+#define B2056_RX_RXHPF_OFF7 0x6A
+#define B2056_RX_RXLPF_RCCAL_LPC 0x6B
+#define B2056_RX_RXLPF_OFF_0 0x6C
+#define B2056_RX_RXLPF_OFF_1 0x6D
+#define B2056_RX_RXLPF_OFF_2 0x6E
+#define B2056_RX_RXLPF_OFF_3 0x6F
+#define B2056_RX_RXLPF_OFF_4 0x70
+#define B2056_RX_UNUSED 0x71
+#define B2056_RX_VGA_MASTER 0x72
+#define B2056_RX_VGA_BIAS 0x73
+#define B2056_RX_VGA_BIAS_DCCANCEL 0x74
+#define B2056_RX_VGA_GAIN 0x75
+#define B2056_RX_VGA_HP_CORNER_BW 0x76
+#define B2056_RX_VGABUF_BIAS 0x77
+#define B2056_RX_VGABUF_GAIN_BW 0x78
+#define B2056_RX_TXFBMIX_A 0x79
+#define B2056_RX_TXFBMIX_G 0x7A
+#define B2056_RX_RXSPARE1 0x7B
+#define B2056_RX_RXSPARE2 0x7C
+#define B2056_RX_RXSPARE3 0x7D
+#define B2056_RX_RXSPARE4 0x7E
+#define B2056_RX_RXSPARE5 0x7F
+#define B2056_RX_RXSPARE6 0x80
+#define B2056_RX_RXSPARE7 0x81
+#define B2056_RX_RXSPARE8 0x82
+#define B2056_RX_RXSPARE9 0x83
+#define B2056_RX_RXSPARE10 0x84
+#define B2056_RX_RXSPARE11 0x85
+#define B2056_RX_RXSPARE12 0x86
+#define B2056_RX_RXSPARE13 0x87
+#define B2056_RX_RXSPARE14 0x88
+#define B2056_RX_RXSPARE15 0x89
+#define B2056_RX_RXSPARE16 0x8A
+#define B2056_RX_STATUS_LNAA_GAIN 0x8B
+#define B2056_RX_STATUS_LNAG_GAIN 0x8C
+#define B2056_RX_STATUS_MIXTIA_GAIN 0x8D
+#define B2056_RX_STATUS_RXLPF_GAIN 0x8E
+#define B2056_RX_STATUS_VGA_BUF_GAIN 0x8F
+#define B2056_RX_STATUS_RXLPF_Q 0x90
+#define B2056_RX_STATUS_RXLPF_BUF_BW 0x91
+#define B2056_RX_STATUS_RXLPF_VGA_HPC 0x92
+#define B2056_RX_STATUS_RXLPF_RC 0x93
+#define B2056_RX_STATUS_HPC_RC 0x94
+
+#define B2056_LNA1_A_PU 0x01
+#define B2056_LNA2_A_PU 0x02
+#define B2056_LNA1_G_PU 0x01
+#define B2056_LNA2_G_PU 0x02
+#define B2056_MIXA_PU_I 0x01
+#define B2056_MIXA_PU_Q 0x02
+#define B2056_MIXA_PU_GM 0x10
+#define B2056_MIXG_PU_I 0x01
+#define B2056_MIXG_PU_Q 0x02
+#define B2056_MIXG_PU_GM 0x10
+#define B2056_TIA_PU 0x01
+#define B2056_BB_LPF_PU 0x20
+#define B2056_W1_PU 0x02
+#define B2056_W2_PU 0x04
+#define B2056_NB_PU 0x08
+#define B2056_RSSI_W1_SEL 0x02
+#define B2056_RSSI_W2_SEL 0x04
+#define B2056_RSSI_NB_SEL 0x08
+#define B2056_VCM_MASK 0x1C
+#define B2056_RSSI_VCM_SHIFT 0x02
+
+#define B2056_SYN (0x0 << 12)
+#define B2056_TX0 (0x2 << 12)
+#define B2056_TX1 (0x3 << 12)
+#define B2056_RX0 (0x6 << 12)
+#define B2056_RX1 (0x7 << 12)
+#define B2056_ALLTX (0xE << 12)
+#define B2056_ALLRX (0xF << 12)
+
+#define B2056_SYN_RESERVED_ADDR0 0x00
+#define B2056_SYN_IDCODE 0x01
+#define B2056_SYN_RESERVED_ADDR2 0x02
+#define B2056_SYN_RESERVED_ADDR3 0x03
+#define B2056_SYN_RESERVED_ADDR4 0x04
+#define B2056_SYN_RESERVED_ADDR5 0x05
+#define B2056_SYN_RESERVED_ADDR6 0x06
+#define B2056_SYN_RESERVED_ADDR7 0x07
+#define B2056_SYN_COM_CTRL 0x08
+#define B2056_SYN_COM_PU 0x09
+#define B2056_SYN_COM_OVR 0x0A
+#define B2056_SYN_COM_RESET 0x0B
+#define B2056_SYN_COM_RCAL 0x0C
+#define B2056_SYN_COM_RC_RXLPF 0x0D
+#define B2056_SYN_COM_RC_TXLPF 0x0E
+#define B2056_SYN_COM_RC_RXHPF 0x0F
+#define B2056_SYN_RESERVED_ADDR16 0x10
+#define B2056_SYN_RESERVED_ADDR17 0x11
+#define B2056_SYN_RESERVED_ADDR18 0x12
+#define B2056_SYN_RESERVED_ADDR19 0x13
+#define B2056_SYN_RESERVED_ADDR20 0x14
+#define B2056_SYN_RESERVED_ADDR21 0x15
+#define B2056_SYN_RESERVED_ADDR22 0x16
+#define B2056_SYN_RESERVED_ADDR23 0x17
+#define B2056_SYN_RESERVED_ADDR24 0x18
+#define B2056_SYN_RESERVED_ADDR25 0x19
+#define B2056_SYN_RESERVED_ADDR26 0x1A
+#define B2056_SYN_RESERVED_ADDR27 0x1B
+#define B2056_SYN_RESERVED_ADDR28 0x1C
+#define B2056_SYN_RESERVED_ADDR29 0x1D
+#define B2056_SYN_RESERVED_ADDR30 0x1E
+#define B2056_SYN_RESERVED_ADDR31 0x1F
+#define B2056_SYN_GPIO_MASTER1 0x20
+#define B2056_SYN_GPIO_MASTER2 0x21
+#define B2056_SYN_TOPBIAS_MASTER 0x22
+#define B2056_SYN_TOPBIAS_RCAL 0x23
+#define B2056_SYN_AFEREG 0x24
+#define B2056_SYN_TEMPPROCSENSE 0x25
+#define B2056_SYN_TEMPPROCSENSEIDAC 0x26
+#define B2056_SYN_TEMPPROCSENSERCAL 0x27
+#define B2056_SYN_LPO 0x28
+#define B2056_SYN_VDDCAL_MASTER 0x29
+#define B2056_SYN_VDDCAL_IDAC 0x2A
+#define B2056_SYN_VDDCAL_STATUS 0x2B
+#define B2056_SYN_RCAL_MASTER 0x2C
+#define B2056_SYN_RCAL_CODE_OUT 0x2D
+#define B2056_SYN_RCCAL_CTRL0 0x2E
+#define B2056_SYN_RCCAL_CTRL1 0x2F
+#define B2056_SYN_RCCAL_CTRL2 0x30
+#define B2056_SYN_RCCAL_CTRL3 0x31
+#define B2056_SYN_RCCAL_CTRL4 0x32
+#define B2056_SYN_RCCAL_CTRL5 0x33
+#define B2056_SYN_RCCAL_CTRL6 0x34
+#define B2056_SYN_RCCAL_CTRL7 0x35
+#define B2056_SYN_RCCAL_CTRL8 0x36
+#define B2056_SYN_RCCAL_CTRL9 0x37
+#define B2056_SYN_RCCAL_CTRL10 0x38
+#define B2056_SYN_RCCAL_CTRL11 0x39
+#define B2056_SYN_ZCAL_SPARE1 0x3A
+#define B2056_SYN_ZCAL_SPARE2 0x3B
+#define B2056_SYN_PLL_MAST1 0x3C
+#define B2056_SYN_PLL_MAST2 0x3D
+#define B2056_SYN_PLL_MAST3 0x3E
+#define B2056_SYN_PLL_BIAS_RESET 0x3F
+#define B2056_SYN_PLL_XTAL0 0x40
+#define B2056_SYN_PLL_XTAL1 0x41
+#define B2056_SYN_PLL_XTAL3 0x42
+#define B2056_SYN_PLL_XTAL4 0x43
+#define B2056_SYN_PLL_XTAL5 0x44
+#define B2056_SYN_PLL_XTAL6 0x45
+#define B2056_SYN_PLL_REFDIV 0x46
+#define B2056_SYN_PLL_PFD 0x47
+#define B2056_SYN_PLL_CP1 0x48
+#define B2056_SYN_PLL_CP2 0x49
+#define B2056_SYN_PLL_CP3 0x4A
+#define B2056_SYN_PLL_LOOPFILTER1 0x4B
+#define B2056_SYN_PLL_LOOPFILTER2 0x4C
+#define B2056_SYN_PLL_LOOPFILTER3 0x4D
+#define B2056_SYN_PLL_LOOPFILTER4 0x4E
+#define B2056_SYN_PLL_LOOPFILTER5 0x4F
+#define B2056_SYN_PLL_MMD1 0x50
+#define B2056_SYN_PLL_MMD2 0x51
+#define B2056_SYN_PLL_VCO1 0x52
+#define B2056_SYN_PLL_VCO2 0x53
+#define B2056_SYN_PLL_MONITOR1 0x54
+#define B2056_SYN_PLL_MONITOR2 0x55
+#define B2056_SYN_PLL_VCOCAL1 0x56
+#define B2056_SYN_PLL_VCOCAL2 0x57
+#define B2056_SYN_PLL_VCOCAL4 0x58
+#define B2056_SYN_PLL_VCOCAL5 0x59
+#define B2056_SYN_PLL_VCOCAL6 0x5A
+#define B2056_SYN_PLL_VCOCAL7 0x5B
+#define B2056_SYN_PLL_VCOCAL8 0x5C
+#define B2056_SYN_PLL_VCOCAL9 0x5D
+#define B2056_SYN_PLL_VCOCAL10 0x5E
+#define B2056_SYN_PLL_VCOCAL11 0x5F
+#define B2056_SYN_PLL_VCOCAL12 0x60
+#define B2056_SYN_PLL_VCOCAL13 0x61
+#define B2056_SYN_PLL_VREG 0x62
+#define B2056_SYN_PLL_STATUS1 0x63
+#define B2056_SYN_PLL_STATUS2 0x64
+#define B2056_SYN_PLL_STATUS3 0x65
+#define B2056_SYN_LOGEN_PU0 0x66
+#define B2056_SYN_LOGEN_PU1 0x67
+#define B2056_SYN_LOGEN_PU2 0x68
+#define B2056_SYN_LOGEN_PU3 0x69
+#define B2056_SYN_LOGEN_PU5 0x6A
+#define B2056_SYN_LOGEN_PU6 0x6B
+#define B2056_SYN_LOGEN_PU7 0x6C
+#define B2056_SYN_LOGEN_PU8 0x6D
+#define B2056_SYN_LOGEN_BIAS_RESET 0x6E
+#define B2056_SYN_LOGEN_RCCR1 0x6F
+#define B2056_SYN_LOGEN_VCOBUF1 0x70
+#define B2056_SYN_LOGEN_MIXER1 0x71
+#define B2056_SYN_LOGEN_MIXER2 0x72
+#define B2056_SYN_LOGEN_BUF1 0x73
+#define B2056_SYN_LOGENBUF2 0x74
+#define B2056_SYN_LOGEN_BUF3 0x75
+#define B2056_SYN_LOGEN_BUF4 0x76
+#define B2056_SYN_LOGEN_DIV1 0x77
+#define B2056_SYN_LOGEN_DIV2 0x78
+#define B2056_SYN_LOGEN_DIV3 0x79
+#define B2056_SYN_LOGEN_ACL1 0x7A
+#define B2056_SYN_LOGEN_ACL2 0x7B
+#define B2056_SYN_LOGEN_ACL3 0x7C
+#define B2056_SYN_LOGEN_ACL4 0x7D
+#define B2056_SYN_LOGEN_ACL5 0x7E
+#define B2056_SYN_LOGEN_ACL6 0x7F
+#define B2056_SYN_LOGEN_ACLOUT 0x80
+#define B2056_SYN_LOGEN_ACLCAL1 0x81
+#define B2056_SYN_LOGEN_ACLCAL2 0x82
+#define B2056_SYN_LOGEN_ACLCAL3 0x83
+#define B2056_SYN_CALEN 0x84
+#define B2056_SYN_LOGEN_PEAKDET1 0x85
+#define B2056_SYN_LOGEN_CORE_ACL_OVR 0x86
+#define B2056_SYN_LOGEN_RX_DIFF_ACL_OVR 0x87
+#define B2056_SYN_LOGEN_TX_DIFF_ACL_OVR 0x88
+#define B2056_SYN_LOGEN_RX_CMOS_ACL_OVR 0x89
+#define B2056_SYN_LOGEN_TX_CMOS_ACL_OVR 0x8A
+#define B2056_SYN_LOGEN_VCOBUF2 0x8B
+#define B2056_SYN_LOGEN_MIXER3 0x8C
+#define B2056_SYN_LOGEN_BUF5 0x8D
+#define B2056_SYN_LOGEN_BUF6 0x8E
+#define B2056_SYN_LOGEN_CBUFRX1 0x8F
+#define B2056_SYN_LOGEN_CBUFRX2 0x90
+#define B2056_SYN_LOGEN_CBUFRX3 0x91
+#define B2056_SYN_LOGEN_CBUFRX4 0x92
+#define B2056_SYN_LOGEN_CBUFTX1 0x93
+#define B2056_SYN_LOGEN_CBUFTX2 0x94
+#define B2056_SYN_LOGEN_CBUFTX3 0x95
+#define B2056_SYN_LOGEN_CBUFTX4 0x96
+#define B2056_SYN_LOGEN_CMOSRX1 0x97
+#define B2056_SYN_LOGEN_CMOSRX2 0x98
+#define B2056_SYN_LOGEN_CMOSRX3 0x99
+#define B2056_SYN_LOGEN_CMOSRX4 0x9A
+#define B2056_SYN_LOGEN_CMOSTX1 0x9B
+#define B2056_SYN_LOGEN_CMOSTX2 0x9C
+#define B2056_SYN_LOGEN_CMOSTX3 0x9D
+#define B2056_SYN_LOGEN_CMOSTX4 0x9E
+#define B2056_SYN_LOGEN_VCOBUF2_OVRVAL 0x9F
+#define B2056_SYN_LOGEN_MIXER3_OVRVAL 0xA0
+#define B2056_SYN_LOGEN_BUF5_OVRVAL 0xA1
+#define B2056_SYN_LOGEN_BUF6_OVRVAL 0xA2
+#define B2056_SYN_LOGEN_CBUFRX1_OVRVAL 0xA3
+#define B2056_SYN_LOGEN_CBUFRX2_OVRVAL 0xA4
+#define B2056_SYN_LOGEN_CBUFRX3_OVRVAL 0xA5
+#define B2056_SYN_LOGEN_CBUFRX4_OVRVAL 0xA6
+#define B2056_SYN_LOGEN_CBUFTX1_OVRVAL 0xA7
+#define B2056_SYN_LOGEN_CBUFTX2_OVRVAL 0xA8
+#define B2056_SYN_LOGEN_CBUFTX3_OVRVAL 0xA9
+#define B2056_SYN_LOGEN_CBUFTX4_OVRVAL 0xAA
+#define B2056_SYN_LOGEN_CMOSRX1_OVRVAL 0xAB
+#define B2056_SYN_LOGEN_CMOSRX2_OVRVAL 0xAC
+#define B2056_SYN_LOGEN_CMOSRX3_OVRVAL 0xAD
+#define B2056_SYN_LOGEN_CMOSRX4_OVRVAL 0xAE
+#define B2056_SYN_LOGEN_CMOSTX1_OVRVAL 0xAF
+#define B2056_SYN_LOGEN_CMOSTX2_OVRVAL 0xB0
+#define B2056_SYN_LOGEN_CMOSTX3_OVRVAL 0xB1
+#define B2056_SYN_LOGEN_CMOSTX4_OVRVAL 0xB2
+#define B2056_SYN_LOGEN_ACL_WAITCNT 0xB3
+#define B2056_SYN_LOGEN_CORE_CALVALID 0xB4
+#define B2056_SYN_LOGEN_RX_CMOS_CALVALID 0xB5
+#define B2056_SYN_LOGEN_TX_CMOS_VALID 0xB6
+
+#define B2056_TX_RESERVED_ADDR0 0x00
+#define B2056_TX_IDCODE 0x01
+#define B2056_TX_RESERVED_ADDR2 0x02
+#define B2056_TX_RESERVED_ADDR3 0x03
+#define B2056_TX_RESERVED_ADDR4 0x04
+#define B2056_TX_RESERVED_ADDR5 0x05
+#define B2056_TX_RESERVED_ADDR6 0x06
+#define B2056_TX_RESERVED_ADDR7 0x07
+#define B2056_TX_COM_CTRL 0x08
+#define B2056_TX_COM_PU 0x09
+#define B2056_TX_COM_OVR 0x0A
+#define B2056_TX_COM_RESET 0x0B
+#define B2056_TX_COM_RCAL 0x0C
+#define B2056_TX_COM_RC_RXLPF 0x0D
+#define B2056_TX_COM_RC_TXLPF 0x0E
+#define B2056_TX_COM_RC_RXHPF 0x0F
+#define B2056_TX_RESERVED_ADDR16 0x10
+#define B2056_TX_RESERVED_ADDR17 0x11
+#define B2056_TX_RESERVED_ADDR18 0x12
+#define B2056_TX_RESERVED_ADDR19 0x13
+#define B2056_TX_RESERVED_ADDR20 0x14
+#define B2056_TX_RESERVED_ADDR21 0x15
+#define B2056_TX_RESERVED_ADDR22 0x16
+#define B2056_TX_RESERVED_ADDR23 0x17
+#define B2056_TX_RESERVED_ADDR24 0x18
+#define B2056_TX_RESERVED_ADDR25 0x19
+#define B2056_TX_RESERVED_ADDR26 0x1A
+#define B2056_TX_RESERVED_ADDR27 0x1B
+#define B2056_TX_RESERVED_ADDR28 0x1C
+#define B2056_TX_RESERVED_ADDR29 0x1D
+#define B2056_TX_RESERVED_ADDR30 0x1E
+#define B2056_TX_RESERVED_ADDR31 0x1F
+#define B2056_TX_IQCAL_GAIN_BW 0x20
+#define B2056_TX_LOFT_FINE_I 0x21
+#define B2056_TX_LOFT_FINE_Q 0x22
+#define B2056_TX_LOFT_COARSE_I 0x23
+#define B2056_TX_LOFT_COARSE_Q 0x24
+#define B2056_TX_TX_COM_MASTER1 0x25
+#define B2056_TX_TX_COM_MASTER2 0x26
+#define B2056_TX_RXIQCAL_TXMUX 0x27
+#define B2056_TX_TX_SSI_MASTER 0x28
+#define B2056_TX_IQCAL_VCM_HG 0x29
+#define B2056_TX_IQCAL_IDAC 0x2A
+#define B2056_TX_TSSI_VCM 0x2B
+#define B2056_TX_TX_AMP_DET 0x2C
+#define B2056_TX_TX_SSI_MUX 0x2D
+#define B2056_TX_TSSIA 0x2E
+#define B2056_TX_TSSIG 0x2F
+#define B2056_TX_TSSI_MISC1 0x30
+#define B2056_TX_TSSI_MISC2 0x31
+#define B2056_TX_TSSI_MISC3 0x32
+#define B2056_TX_PA_SPARE1 0x33
+#define B2056_TX_PA_SPARE2 0x34
+#define B2056_TX_INTPAA_MASTER 0x35
+#define B2056_TX_INTPAA_GAIN 0x36
+#define B2056_TX_INTPAA_BOOST_TUNE 0x37
+#define B2056_TX_INTPAA_IAUX_STAT 0x38
+#define B2056_TX_INTPAA_IAUX_DYN 0x39
+#define B2056_TX_INTPAA_IMAIN_STAT 0x3A
+#define B2056_TX_INTPAA_IMAIN_DYN 0x3B
+#define B2056_TX_INTPAA_CASCBIAS 0x3C
+#define B2056_TX_INTPAA_PASLOPE 0x3D
+#define B2056_TX_INTPAA_PA_MISC 0x3E
+#define B2056_TX_INTPAG_MASTER 0x3F
+#define B2056_TX_INTPAG_GAIN 0x40
+#define B2056_TX_INTPAG_BOOST_TUNE 0x41
+#define B2056_TX_INTPAG_IAUX_STAT 0x42
+#define B2056_TX_INTPAG_IAUX_DYN 0x43
+#define B2056_TX_INTPAG_IMAIN_STAT 0x44
+#define B2056_TX_INTPAG_IMAIN_DYN 0x45
+#define B2056_TX_INTPAG_CASCBIAS 0x46
+#define B2056_TX_INTPAG_PASLOPE 0x47
+#define B2056_TX_INTPAG_PA_MISC 0x48
+#define B2056_TX_PADA_MASTER 0x49
+#define B2056_TX_PADA_IDAC 0x4A
+#define B2056_TX_PADA_CASCBIAS 0x4B
+#define B2056_TX_PADA_GAIN 0x4C
+#define B2056_TX_PADA_BOOST_TUNE 0x4D
+#define B2056_TX_PADA_SLOPE 0x4E
+#define B2056_TX_PADG_MASTER 0x4F
+#define B2056_TX_PADG_IDAC 0x50
+#define B2056_TX_PADG_CASCBIAS 0x51
+#define B2056_TX_PADG_GAIN 0x52
+#define B2056_TX_PADG_BOOST_TUNE 0x53
+#define B2056_TX_PADG_SLOPE 0x54
+#define B2056_TX_PGAA_MASTER 0x55
+#define B2056_TX_PGAA_IDAC 0x56
+#define B2056_TX_PGAA_GAIN 0x57
+#define B2056_TX_PGAA_BOOST_TUNE 0x58
+#define B2056_TX_PGAA_SLOPE 0x59
+#define B2056_TX_PGAA_MISC 0x5A
+#define B2056_TX_PGAG_MASTER 0x5B
+#define B2056_TX_PGAG_IDAC 0x5C
+#define B2056_TX_PGAG_GAIN 0x5D
+#define B2056_TX_PGAG_BOOST_TUNE 0x5E
+#define B2056_TX_PGAG_SLOPE 0x5F
+#define B2056_TX_PGAG_MISC 0x60
+#define B2056_TX_MIXA_MASTER 0x61
+#define B2056_TX_MIXA_BOOST_TUNE 0x62
+#define B2056_TX_MIXG 0x63
+#define B2056_TX_MIXG_BOOST_TUNE 0x64
+#define B2056_TX_BB_GM_MASTER 0x65
+#define B2056_TX_GMBB_GM 0x66
+#define B2056_TX_GMBB_IDAC 0x67
+#define B2056_TX_TXLPF_MASTER 0x68
+#define B2056_TX_TXLPF_RCCAL 0x69
+#define B2056_TX_TXLPF_RCCAL_OFF0 0x6A
+#define B2056_TX_TXLPF_RCCAL_OFF1 0x6B
+#define B2056_TX_TXLPF_RCCAL_OFF2 0x6C
+#define B2056_TX_TXLPF_RCCAL_OFF3 0x6D
+#define B2056_TX_TXLPF_RCCAL_OFF4 0x6E
+#define B2056_TX_TXLPF_RCCAL_OFF5 0x6F
+#define B2056_TX_TXLPF_RCCAL_OFF6 0x70
+#define B2056_TX_TXLPF_BW 0x71
+#define B2056_TX_TXLPF_GAIN 0x72
+#define B2056_TX_TXLPF_IDAC 0x73
+#define B2056_TX_TXLPF_IDAC_0 0x74
+#define B2056_TX_TXLPF_IDAC_1 0x75
+#define B2056_TX_TXLPF_IDAC_2 0x76
+#define B2056_TX_TXLPF_IDAC_3 0x77
+#define B2056_TX_TXLPF_IDAC_4 0x78
+#define B2056_TX_TXLPF_IDAC_5 0x79
+#define B2056_TX_TXLPF_IDAC_6 0x7A
+#define B2056_TX_TXLPF_OPAMP_IDAC 0x7B
+#define B2056_TX_TXLPF_MISC 0x7C
+#define B2056_TX_TXSPARE1 0x7D
+#define B2056_TX_TXSPARE2 0x7E
+#define B2056_TX_TXSPARE3 0x7F
+#define B2056_TX_TXSPARE4 0x80
+#define B2056_TX_TXSPARE5 0x81
+#define B2056_TX_TXSPARE6 0x82
+#define B2056_TX_TXSPARE7 0x83
+#define B2056_TX_TXSPARE8 0x84
+#define B2056_TX_TXSPARE9 0x85
+#define B2056_TX_TXSPARE10 0x86
+#define B2056_TX_TXSPARE11 0x87
+#define B2056_TX_TXSPARE12 0x88
+#define B2056_TX_TXSPARE13 0x89
+#define B2056_TX_TXSPARE14 0x8A
+#define B2056_TX_TXSPARE15 0x8B
+#define B2056_TX_TXSPARE16 0x8C
+#define B2056_TX_STATUS_INTPA_GAIN 0x8D
+#define B2056_TX_STATUS_PAD_GAIN 0x8E
+#define B2056_TX_STATUS_PGA_GAIN 0x8F
+#define B2056_TX_STATUS_GM_TXLPF_GAIN 0x90
+#define B2056_TX_STATUS_TXLPF_BW 0x91
+#define B2056_TX_STATUS_TXLPF_RC 0x92
+#define B2056_TX_GMBB_IDAC0 0x93
+#define B2056_TX_GMBB_IDAC1 0x94
+#define B2056_TX_GMBB_IDAC2 0x95
+#define B2056_TX_GMBB_IDAC3 0x96
+#define B2056_TX_GMBB_IDAC4 0x97
+#define B2056_TX_GMBB_IDAC5 0x98
+#define B2056_TX_GMBB_IDAC6 0x99
+#define B2056_TX_GMBB_IDAC7 0x9A
+
+#define B2056_RX_RESERVED_ADDR0 0x00
+#define B2056_RX_IDCODE 0x01
+#define B2056_RX_RESERVED_ADDR2 0x02
+#define B2056_RX_RESERVED_ADDR3 0x03
+#define B2056_RX_RESERVED_ADDR4 0x04
+#define B2056_RX_RESERVED_ADDR5 0x05
+#define B2056_RX_RESERVED_ADDR6 0x06
+#define B2056_RX_RESERVED_ADDR7 0x07
+#define B2056_RX_COM_CTRL 0x08
+#define B2056_RX_COM_PU 0x09
+#define B2056_RX_COM_OVR 0x0A
+#define B2056_RX_COM_RESET 0x0B
+#define B2056_RX_COM_RCAL 0x0C
+#define B2056_RX_COM_RC_RXLPF 0x0D
+#define B2056_RX_COM_RC_TXLPF 0x0E
+#define B2056_RX_COM_RC_RXHPF 0x0F
+#define B2056_RX_RESERVED_ADDR16 0x10
+#define B2056_RX_RESERVED_ADDR17 0x11
+#define B2056_RX_RESERVED_ADDR18 0x12
+#define B2056_RX_RESERVED_ADDR19 0x13
+#define B2056_RX_RESERVED_ADDR20 0x14
+#define B2056_RX_RESERVED_ADDR21 0x15
+#define B2056_RX_RESERVED_ADDR22 0x16
+#define B2056_RX_RESERVED_ADDR23 0x17
+#define B2056_RX_RESERVED_ADDR24 0x18
+#define B2056_RX_RESERVED_ADDR25 0x19
+#define B2056_RX_RESERVED_ADDR26 0x1A
+#define B2056_RX_RESERVED_ADDR27 0x1B
+#define B2056_RX_RESERVED_ADDR28 0x1C
+#define B2056_RX_RESERVED_ADDR29 0x1D
+#define B2056_RX_RESERVED_ADDR30 0x1E
+#define B2056_RX_RESERVED_ADDR31 0x1F
+#define B2056_RX_RXIQCAL_RXMUX 0x20
+#define B2056_RX_RSSI_PU 0x21
+#define B2056_RX_RSSI_SEL 0x22
+#define B2056_RX_RSSI_GAIN 0x23
+#define B2056_RX_RSSI_NB_IDAC 0x24
+#define B2056_RX_RSSI_WB2I_IDAC_1 0x25
+#define B2056_RX_RSSI_WB2I_IDAC_2 0x26
+#define B2056_RX_RSSI_WB2Q_IDAC_1 0x27
+#define B2056_RX_RSSI_WB2Q_IDAC_2 0x28
+#define B2056_RX_RSSI_POLE 0x29
+#define B2056_RX_RSSI_WB1_IDAC 0x2A
+#define B2056_RX_RSSI_MISC 0x2B
+#define B2056_RX_LNAA_MASTER 0x2C
+#define B2056_RX_LNAA_TUNE 0x2D
+#define B2056_RX_LNAA_GAIN 0x2E
+#define B2056_RX_LNA_A_SLOPE 0x2F
+#define B2056_RX_BIASPOLE_LNAA1_IDAC 0x30
+#define B2056_RX_LNAA2_IDAC 0x31
+#define B2056_RX_LNA1A_MISC 0x32
+#define B2056_RX_LNAG_MASTER 0x33
+#define B2056_RX_LNAG_TUNE 0x34
+#define B2056_RX_LNAG_GAIN 0x35
+#define B2056_RX_LNA_G_SLOPE 0x36
+#define B2056_RX_BIASPOLE_LNAG1_IDAC 0x37
+#define B2056_RX_LNAG2_IDAC 0x38
+#define B2056_RX_LNA1G_MISC 0x39
+#define B2056_RX_MIXA_MASTER 0x3A
+#define B2056_RX_MIXA_VCM 0x3B
+#define B2056_RX_MIXA_CTRLPTAT 0x3C
+#define B2056_RX_MIXA_LOB_BIAS 0x3D
+#define B2056_RX_MIXA_CORE_IDAC 0x3E
+#define B2056_RX_MIXA_CMFB_IDAC 0x3F
+#define B2056_RX_MIXA_BIAS_AUX 0x40
+#define B2056_RX_MIXA_BIAS_MAIN 0x41
+#define B2056_RX_MIXA_BIAS_MISC 0x42
+#define B2056_RX_MIXA_MAST_BIAS 0x43
+#define B2056_RX_MIXG_MASTER 0x44
+#define B2056_RX_MIXG_VCM 0x45
+#define B2056_RX_MIXG_CTRLPTAT 0x46
+#define B2056_RX_MIXG_LOB_BIAS 0x47
+#define B2056_RX_MIXG_CORE_IDAC 0x48
+#define B2056_RX_MIXG_CMFB_IDAC 0x49
+#define B2056_RX_MIXG_BIAS_AUX 0x4A
+#define B2056_RX_MIXG_BIAS_MAIN 0x4B
+#define B2056_RX_MIXG_BIAS_MISC 0x4C
+#define B2056_RX_MIXG_MAST_BIAS 0x4D
+#define B2056_RX_TIA_MASTER 0x4E
+#define B2056_RX_TIA_IOPAMP 0x4F
+#define B2056_RX_TIA_QOPAMP 0x50
+#define B2056_RX_TIA_IMISC 0x51
+#define B2056_RX_TIA_QMISC 0x52
+#define B2056_RX_TIA_GAIN 0x53
+#define B2056_RX_TIA_SPARE1 0x54
+#define B2056_RX_TIA_SPARE2 0x55
+#define B2056_RX_BB_LPF_MASTER 0x56
+#define B2056_RX_AACI_MASTER 0x57
+#define B2056_RX_RXLPF_IDAC 0x58
+#define B2056_RX_RXLPF_OPAMPBIAS_LOWQ 0x59
+#define B2056_RX_RXLPF_OPAMPBIAS_HIGHQ 0x5A
+#define B2056_RX_RXLPF_BIAS_DCCANCEL 0x5B
+#define B2056_RX_RXLPF_OUTVCM 0x5C
+#define B2056_RX_RXLPF_INVCM_BODY 0x5D
+#define B2056_RX_RXLPF_CC_OP 0x5E
+#define B2056_RX_RXLPF_GAIN 0x5F
+#define B2056_RX_RXLPF_Q_BW 0x60
+#define B2056_RX_RXLPF_HP_CORNER_BW 0x61
+#define B2056_RX_RXLPF_RCCAL_HPC 0x62
+#define B2056_RX_RXHPF_OFF0 0x63
+#define B2056_RX_RXHPF_OFF1 0x64
+#define B2056_RX_RXHPF_OFF2 0x65
+#define B2056_RX_RXHPF_OFF3 0x66
+#define B2056_RX_RXHPF_OFF4 0x67
+#define B2056_RX_RXHPF_OFF5 0x68
+#define B2056_RX_RXHPF_OFF6 0x69
+#define B2056_RX_RXHPF_OFF7 0x6A
+#define B2056_RX_RXLPF_RCCAL_LPC 0x6B
+#define B2056_RX_RXLPF_OFF_0 0x6C
+#define B2056_RX_RXLPF_OFF_1 0x6D
+#define B2056_RX_RXLPF_OFF_2 0x6E
+#define B2056_RX_RXLPF_OFF_3 0x6F
+#define B2056_RX_RXLPF_OFF_4 0x70
+#define B2056_RX_UNUSED 0x71
+#define B2056_RX_VGA_MASTER 0x72
+#define B2056_RX_VGA_BIAS 0x73
+#define B2056_RX_VGA_BIAS_DCCANCEL 0x74
+#define B2056_RX_VGA_GAIN 0x75
+#define B2056_RX_VGA_HP_CORNER_BW 0x76
+#define B2056_RX_VGABUF_BIAS 0x77
+#define B2056_RX_VGABUF_GAIN_BW 0x78
+#define B2056_RX_TXFBMIX_A 0x79
+#define B2056_RX_TXFBMIX_G 0x7A
+#define B2056_RX_RXSPARE1 0x7B
+#define B2056_RX_RXSPARE2 0x7C
+#define B2056_RX_RXSPARE3 0x7D
+#define B2056_RX_RXSPARE4 0x7E
+#define B2056_RX_RXSPARE5 0x7F
+#define B2056_RX_RXSPARE6 0x80
+#define B2056_RX_RXSPARE7 0x81
+#define B2056_RX_RXSPARE8 0x82
+#define B2056_RX_RXSPARE9 0x83
+#define B2056_RX_RXSPARE10 0x84
+#define B2056_RX_RXSPARE11 0x85
+#define B2056_RX_RXSPARE12 0x86
+#define B2056_RX_RXSPARE13 0x87
+#define B2056_RX_RXSPARE14 0x88
+#define B2056_RX_RXSPARE15 0x89
+#define B2056_RX_RXSPARE16 0x8A
+#define B2056_RX_STATUS_LNAA_GAIN 0x8B
+#define B2056_RX_STATUS_LNAG_GAIN 0x8C
+#define B2056_RX_STATUS_MIXTIA_GAIN 0x8D
+#define B2056_RX_STATUS_RXLPF_GAIN 0x8E
+#define B2056_RX_STATUS_VGA_BUF_GAIN 0x8F
+#define B2056_RX_STATUS_RXLPF_Q 0x90
+#define B2056_RX_STATUS_RXLPF_BUF_BW 0x91
+#define B2056_RX_STATUS_RXLPF_VGA_HPC 0x92
+#define B2056_RX_STATUS_RXLPF_RC 0x93
+#define B2056_RX_STATUS_HPC_RC 0x94
+
+#define B2056_LNA1_A_PU 0x01
+#define B2056_LNA2_A_PU 0x02
+#define B2056_LNA1_G_PU 0x01
+#define B2056_LNA2_G_PU 0x02
+#define B2056_MIXA_PU_I 0x01
+#define B2056_MIXA_PU_Q 0x02
+#define B2056_MIXA_PU_GM 0x10
+#define B2056_MIXG_PU_I 0x01
+#define B2056_MIXG_PU_Q 0x02
+#define B2056_MIXG_PU_GM 0x10
+#define B2056_TIA_PU 0x01
+#define B2056_BB_LPF_PU 0x20
+#define B2056_W1_PU 0x02
+#define B2056_W2_PU 0x04
+#define B2056_NB_PU 0x08
+#define B2056_RSSI_W1_SEL 0x02
+#define B2056_RSSI_W2_SEL 0x04
+#define B2056_RSSI_NB_SEL 0x08
+#define B2056_VCM_MASK 0x1C
+#define B2056_RSSI_VCM_SHIFT 0x02
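
The B2056_VCM_MASK / B2056_RSSI_VCM_SHIFT pair above is a conventional mask-and-shift field descriptor. As a rough illustration only (not part of the patch), a read-modify-write of that field could look like the sketch below; it assumes the driver's existing 16-bit radio accessors b43_radio_read16()/b43_radio_write16(), and the target register (B2056_RX_RSSI_MISC on RX core 0) is a purely illustrative guess.

/*
 * Illustrative sketch, not from this patch: program a 3-bit VCM value
 * into bits [4:2] of a 2056 RSSI register on RX core 0.  The target
 * offset is hypothetical; b43_radio_read16()/b43_radio_write16() are
 * the driver's existing radio register accessors.
 */
static void b2056_set_rssi_vcm_sketch(struct b43_wldev *dev, u8 vcm)
{
	u16 offset = B2056_RX0 | B2056_RX_RSSI_MISC;	/* hypothetical target */
	u16 val = b43_radio_read16(dev, offset);

	val &= ~B2056_VCM_MASK;
	val |= ((u16)vcm << B2056_RSSI_VCM_SHIFT) & B2056_VCM_MASK;
	b43_radio_write16(dev, offset, val);
}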
+
struct b43_nphy_channeltab_entry_rev3 {
- /* The channel number */
- u8 channel;
/* The channel frequency in MHz */
u16 freq;
/* Radio register values on channelswitch */
- /* TODO */
+ u8 radio_syn_pll_vcocal1;
+ u8 radio_syn_pll_vcocal2;
+ u8 radio_syn_pll_refdiv;
+ u8 radio_syn_pll_mmd2;
+ u8 radio_syn_pll_mmd1;
+ u8 radio_syn_pll_loopfilter1;
+ u8 radio_syn_pll_loopfilter2;
+ u8 radio_syn_pll_loopfilter3;
+ u8 radio_syn_pll_loopfilter4;
+ u8 radio_syn_pll_loopfilter5;
+ u8 radio_syn_reserved_addr27;
+ u8 radio_syn_reserved_addr28;
+ u8 radio_syn_reserved_addr29;
+ u8 radio_syn_logen_vcobuf1;
+ u8 radio_syn_logen_mixer2;
+ u8 radio_syn_logen_buf3;
+ u8 radio_syn_logen_buf4;
+ u8 radio_rx0_lnaa_tune;
+ u8 radio_rx0_lnag_tune;
+ u8 radio_tx0_intpaa_boost_tune;
+ u8 radio_tx0_intpag_boost_tune;
+ u8 radio_tx0_pada_boost_tune;
+ u8 radio_tx0_padg_boost_tune;
+ u8 radio_tx0_pgaa_boost_tune;
+ u8 radio_tx0_pgag_boost_tune;
+ u8 radio_tx0_mixa_boost_tune;
+ u8 radio_tx0_mixg_boost_tune;
+ u8 radio_rx1_lnaa_tune;
+ u8 radio_rx1_lnag_tune;
+ u8 radio_tx1_intpaa_boost_tune;
+ u8 radio_tx1_intpag_boost_tune;
+ u8 radio_tx1_pada_boost_tune;
+ u8 radio_tx1_padg_boost_tune;
+ u8 radio_tx1_pgaa_boost_tune;
+ u8 radio_tx1_pgag_boost_tune;
+ u8 radio_tx1_mixa_boost_tune;
+ u8 radio_tx1_mixg_boost_tune;
/* PHY register values on channelswitch */
struct b43_phy_n_sfo_cfg phy_regs;
};
+void b2056_upload_inittabs(struct b43_wldev *dev,
+ bool ghz5, bool ignore_uploadflag);
+
#endif /* B43_RADIO_2056_H_ */
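
To show how the new struct and register map fit together: each per-channel field of b43_nphy_channeltab_entry_rev3 is simply a value for one 2056 register, addressed by combining a block base (B2056_SYN, B2056_TX0/1, B2056_RX0/1) with the per-block offset defined above. The following is a minimal sketch, not part of this patch, assuming the driver's 16-bit radio write helper b43_radio_write16(); the phy_regs member would go through the PHY register path instead and is omitted.

/*
 * Illustrative sketch only: push a few fields of a rev3 channel-tab
 * entry to the 2056 radio.  Each offset is OR'ed with its core base.
 */
static void b2056_chantab_upload_sketch(struct b43_wldev *dev,
		const struct b43_nphy_channeltab_entry_rev3 *e)
{
	/* Synthesizer/PLL registers live in the SYN block (base 0x0000). */
	b43_radio_write16(dev, B2056_SYN | B2056_SYN_PLL_VCOCAL1,
			  e->radio_syn_pll_vcocal1);
	b43_radio_write16(dev, B2056_SYN | B2056_SYN_PLL_REFDIV,
			  e->radio_syn_pll_refdiv);

	/* Per-core RX tuning: same offset, different core base. */
	b43_radio_write16(dev, B2056_RX0 | B2056_RX_LNAA_TUNE,
			  e->radio_rx0_lnaa_tune);
	b43_radio_write16(dev, B2056_RX1 | B2056_RX_LNAA_TUNE,
			  e->radio_rx1_lnaa_tune);

	/* Per-core TX PA boost tuning. */
	b43_radio_write16(dev, B2056_TX0 | B2056_TX_INTPAA_BOOST_TUNE,
			  e->radio_tx0_intpaa_boost_tune);
	b43_radio_write16(dev, B2056_TX1 | B2056_TX_INTPAA_BOOST_TUNE,
			  e->radio_tx1_intpaa_boost_tune);
	/* ...the remaining fields follow the same pattern... */
}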
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index 78016ae21c50..86bc0a0f735c 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -28,23 +28,8 @@
/* Returns TRUE, if the radio is enabled in hardware. */
bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
{
- if (dev->phy.rev >= 3 || dev->phy.type == B43_PHYTYPE_LP) {
- if (!(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI)
- & B43_MMIO_RADIO_HWENABLED_HI_MASK))
- return 1;
- } else {
- /* To prevent CPU fault on PPC, do not read a register
- * unless the interface is started; however, on resume
- * for hibernation, this routine is entered early. When
- * that happens, unconditionally return TRUE.
- */
- if (b43_status(dev) < B43_STAT_STARTED)
- return 1;
- if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
- & B43_MMIO_RADIO_HWENABLED_LO_MASK)
- return 1;
- }
- return 0;
+ return !(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI)
+ & B43_MMIO_RADIO_HWENABLED_HI_MASK);
}
/* The poll callback for the hardware button. */
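
For context, a minimal sketch (illustrative only, not the driver's actual poll callback) of how the simplified helper above is typically consumed: the poll path reads the hardware switch and reports it through cfg80211's wiphy_rfkill_set_hw_state(), which takes "blocked", i.e. the inverse of "enabled". The real b43_rfkill_poll() in this file additionally tracks state changes.

/*
 * Illustrative sketch only: report the hardware rfkill switch state
 * read by b43_is_hw_radio_enabled() to cfg80211.
 */
static void b43_report_hw_rfkill_sketch(struct ieee80211_hw *hw,
					struct b43_wldev *dev)
{
	bool enabled = b43_is_hw_radio_enabled(dev);

	/* cfg80211 expects "blocked", the inverse of "enabled". */
	wiphy_rfkill_set_hw_state(hw->wiphy, !enabled);
}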
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index d60db078eae2..2de483b3d3ba 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -28,41 +28,41 @@
#include "phy_n.h"
static const u8 b43_ntab_adjustpower0[] = {
- 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
- 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
- 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
- 0x06, 0x06, 0x06, 0x06, 0x07, 0x07, 0x07, 0x07,
- 0x08, 0x08, 0x08, 0x08, 0x09, 0x09, 0x09, 0x09,
- 0x0A, 0x0A, 0x0A, 0x0A, 0x0B, 0x0B, 0x0B, 0x0B,
- 0x0C, 0x0C, 0x0C, 0x0C, 0x0D, 0x0D, 0x0D, 0x0D,
- 0x0E, 0x0E, 0x0E, 0x0E, 0x0F, 0x0F, 0x0F, 0x0F,
- 0x10, 0x10, 0x10, 0x10, 0x11, 0x11, 0x11, 0x11,
- 0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13,
- 0x14, 0x14, 0x14, 0x14, 0x15, 0x15, 0x15, 0x15,
- 0x16, 0x16, 0x16, 0x16, 0x17, 0x17, 0x17, 0x17,
- 0x18, 0x18, 0x18, 0x18, 0x19, 0x19, 0x19, 0x19,
- 0x1A, 0x1A, 0x1A, 0x1A, 0x1B, 0x1B, 0x1B, 0x1B,
- 0x1C, 0x1C, 0x1C, 0x1C, 0x1D, 0x1D, 0x1D, 0x1D,
- 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
static const u8 b43_ntab_adjustpower1[] = {
- 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
- 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
- 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
- 0x06, 0x06, 0x06, 0x06, 0x07, 0x07, 0x07, 0x07,
- 0x08, 0x08, 0x08, 0x08, 0x09, 0x09, 0x09, 0x09,
- 0x0A, 0x0A, 0x0A, 0x0A, 0x0B, 0x0B, 0x0B, 0x0B,
- 0x0C, 0x0C, 0x0C, 0x0C, 0x0D, 0x0D, 0x0D, 0x0D,
- 0x0E, 0x0E, 0x0E, 0x0E, 0x0F, 0x0F, 0x0F, 0x0F,
- 0x10, 0x10, 0x10, 0x10, 0x11, 0x11, 0x11, 0x11,
- 0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13,
- 0x14, 0x14, 0x14, 0x14, 0x15, 0x15, 0x15, 0x15,
- 0x16, 0x16, 0x16, 0x16, 0x17, 0x17, 0x17, 0x17,
- 0x18, 0x18, 0x18, 0x18, 0x19, 0x19, 0x19, 0x19,
- 0x1A, 0x1A, 0x1A, 0x1A, 0x1B, 0x1B, 0x1B, 0x1B,
- 0x1C, 0x1C, 0x1C, 0x1C, 0x1D, 0x1D, 0x1D, 0x1D,
- 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
static const u16 b43_ntab_bdi[] = {
@@ -130,8 +130,8 @@ static const u32 b43_ntab_framestruct[] = {
0x09804506, 0x00100030, 0x09804507, 0x00100030,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x08004A0C, 0x00100008, 0x01000A0D, 0x00100028,
- 0x0980450E, 0x00100038, 0x0980450F, 0x00100038,
+ 0x08004A0C, 0x00100004, 0x01000A0D, 0x00100024,
+ 0x0980450E, 0x00100034, 0x0980450F, 0x00100034,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000A04, 0x00100000, 0x11008A05, 0x00100020,
@@ -202,13 +202,13 @@ static const u32 b43_ntab_framestruct[] = {
0x53028A06, 0x01900060, 0x53028A07, 0x01900060,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x4002140C, 0x000F4810, 0x6203140D, 0x00100050,
- 0x53028A0E, 0x01900070, 0x53028A0F, 0x01900070,
+ 0x4002140C, 0x000F4808, 0x6203140D, 0x00100048,
+ 0x53028A0E, 0x01900068, 0x53028A0F, 0x01900068,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000A0C, 0x00100008, 0x11008A0D, 0x00100028,
- 0x1980C50E, 0x00100038, 0x2181050E, 0x00100038,
- 0x2181050E, 0x00100038, 0x0180050C, 0x00100038,
+ 0x00000A0C, 0x00100004, 0x11008A0D, 0x00100024,
+ 0x1980C50E, 0x00100034, 0x2181050E, 0x00100034,
+ 0x2181050E, 0x00100034, 0x0180050C, 0x00100038,
0x1180850D, 0x00100038, 0x1181850D, 0x00100038,
0x2981450F, 0x01100038, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -238,9 +238,9 @@ static const u32 b43_ntab_framestruct[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x4002140C, 0x00100010, 0x0200140D, 0x00100050,
- 0x0B004A0E, 0x01900070, 0x13008A0E, 0x01900070,
- 0x13008A0E, 0x01900070, 0x43020A0C, 0x00100070,
+ 0x4002140C, 0x00100008, 0x0200140D, 0x00100048,
+ 0x0B004A0E, 0x01900068, 0x13008A0E, 0x01900068,
+ 0x13008A0E, 0x01900068, 0x43020A0C, 0x00100070,
0x1B00CA0D, 0x00100070, 0x1B014A0D, 0x00100070,
0x23010A0F, 0x01500070, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -337,73 +337,73 @@ static const u32 b43_ntab_framestruct[] = {
};
static const u32 b43_ntab_gainctl0[] = {
- 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
- 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
- 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
- 0x00730C39, 0x00720D39, 0x00710E38, 0x00700F38,
- 0x006F0037, 0x006E0137, 0x006D0236, 0x006C0336,
- 0x006B0435, 0x006A0535, 0x00690634, 0x00680734,
- 0x00670833, 0x00660933, 0x00650A32, 0x00640B32,
- 0x00630C31, 0x00620D31, 0x00610E30, 0x00600F30,
- 0x005F002F, 0x005E012F, 0x005D022E, 0x005C032E,
- 0x005B042D, 0x005A052D, 0x0059062C, 0x0058072C,
- 0x0057082B, 0x0056092B, 0x00550A2A, 0x00540B2A,
- 0x00530C29, 0x00520D29, 0x00510E28, 0x00500F28,
- 0x004F0027, 0x004E0127, 0x004D0226, 0x004C0326,
- 0x004B0425, 0x004A0525, 0x00490624, 0x00480724,
- 0x00470823, 0x00460923, 0x00450A22, 0x00440B22,
- 0x00430C21, 0x00420D21, 0x00410E20, 0x00400F20,
- 0x003F001F, 0x003E011F, 0x003D021E, 0x003C031E,
- 0x003B041D, 0x003A051D, 0x0039061C, 0x0038071C,
- 0x0037081B, 0x0036091B, 0x00350A1A, 0x00340B1A,
- 0x00330C19, 0x00320D19, 0x00310E18, 0x00300F18,
- 0x002F0017, 0x002E0117, 0x002D0216, 0x002C0316,
- 0x002B0415, 0x002A0515, 0x00290614, 0x00280714,
- 0x00270813, 0x00260913, 0x00250A12, 0x00240B12,
- 0x00230C11, 0x00220D11, 0x00210E10, 0x00200F10,
- 0x001F000F, 0x001E010F, 0x001D020E, 0x001C030E,
- 0x001B040D, 0x001A050D, 0x0019060C, 0x0018070C,
- 0x0017080B, 0x0016090B, 0x00150A0A, 0x00140B0A,
- 0x00130C09, 0x00120D09, 0x00110E08, 0x00100F08,
- 0x000F0007, 0x000E0107, 0x000D0206, 0x000C0306,
- 0x000B0405, 0x000A0505, 0x00090604, 0x00080704,
- 0x00070803, 0x00060903, 0x00050A02, 0x00040B02,
- 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
+ 0x03CC2B44, 0x03CC2B42, 0x03CC2B40, 0x03CC2B3E,
+ 0x03CC2B3D, 0x03CC2B3B, 0x03C82B44, 0x03C82B42,
+ 0x03C82B40, 0x03C82B3E, 0x03C82B3D, 0x03C82B3B,
+ 0x03C82B39, 0x03C82B38, 0x03C82B36, 0x03C82B34,
+ 0x03C42B44, 0x03C42B42, 0x03C42B40, 0x03C42B3E,
+ 0x03C42B3D, 0x03C42B3B, 0x03C42B39, 0x03C42B38,
+ 0x03C42B36, 0x03C42B34, 0x03C42B33, 0x03C42B32,
+ 0x03C42B30, 0x03C42B2F, 0x03C42B2D, 0x03C02B44,
+ 0x03C02B42, 0x03C02B40, 0x03C02B3E, 0x03C02B3D,
+ 0x03C02B3B, 0x03C02B39, 0x03C02B38, 0x03C02B36,
+ 0x03C02B34, 0x03B02B44, 0x03B02B42, 0x03B02B40,
+ 0x03B02B3E, 0x03B02B3D, 0x03B02B3B, 0x03B02B39,
+ 0x03B02B38, 0x03B02B36, 0x03B02B34, 0x03B02B33,
+ 0x03B02B32, 0x03B02B30, 0x03B02B2F, 0x03B02B2D,
+ 0x03A02B44, 0x03A02B42, 0x03A02B40, 0x03A02B3E,
+ 0x03A02B3D, 0x03A02B3B, 0x03A02B39, 0x03A02B38,
+ 0x03A02B36, 0x03A02B34, 0x03902B44, 0x03902B42,
+ 0x03902B40, 0x03902B3E, 0x03902B3D, 0x03902B3B,
+ 0x03902B39, 0x03902B38, 0x03902B36, 0x03902B34,
+ 0x03902B33, 0x03902B32, 0x03902B30, 0x03802B44,
+ 0x03802B42, 0x03802B40, 0x03802B3E, 0x03802B3D,
+ 0x03802B3B, 0x03802B39, 0x03802B38, 0x03802B36,
+ 0x03802B34, 0x03802B33, 0x03802B32, 0x03802B30,
+ 0x03802B2F, 0x03802B2D, 0x03802B2C, 0x03802B2B,
+ 0x03802B2A, 0x03802B29, 0x03802B27, 0x03802B26,
+ 0x03802B25, 0x03802B24, 0x03802B23, 0x03802B22,
+ 0x03802B21, 0x03802B20, 0x03802B1F, 0x03802B1E,
+ 0x03802B1E, 0x03802B1D, 0x03802B1C, 0x03802B1B,
+ 0x03802B1A, 0x03802B1A, 0x03802B19, 0x03802B18,
+ 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18,
+ 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18,
+ 0x03802B18, 0x03802B18, 0x03802B18, 0x00002B00,
};
static const u32 b43_ntab_gainctl1[] = {
- 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
- 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
- 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
- 0x00730C39, 0x00720D39, 0x00710E38, 0x00700F38,
- 0x006F0037, 0x006E0137, 0x006D0236, 0x006C0336,
- 0x006B0435, 0x006A0535, 0x00690634, 0x00680734,
- 0x00670833, 0x00660933, 0x00650A32, 0x00640B32,
- 0x00630C31, 0x00620D31, 0x00610E30, 0x00600F30,
- 0x005F002F, 0x005E012F, 0x005D022E, 0x005C032E,
- 0x005B042D, 0x005A052D, 0x0059062C, 0x0058072C,
- 0x0057082B, 0x0056092B, 0x00550A2A, 0x00540B2A,
- 0x00530C29, 0x00520D29, 0x00510E28, 0x00500F28,
- 0x004F0027, 0x004E0127, 0x004D0226, 0x004C0326,
- 0x004B0425, 0x004A0525, 0x00490624, 0x00480724,
- 0x00470823, 0x00460923, 0x00450A22, 0x00440B22,
- 0x00430C21, 0x00420D21, 0x00410E20, 0x00400F20,
- 0x003F001F, 0x003E011F, 0x003D021E, 0x003C031E,
- 0x003B041D, 0x003A051D, 0x0039061C, 0x0038071C,
- 0x0037081B, 0x0036091B, 0x00350A1A, 0x00340B1A,
- 0x00330C19, 0x00320D19, 0x00310E18, 0x00300F18,
- 0x002F0017, 0x002E0117, 0x002D0216, 0x002C0316,
- 0x002B0415, 0x002A0515, 0x00290614, 0x00280714,
- 0x00270813, 0x00260913, 0x00250A12, 0x00240B12,
- 0x00230C11, 0x00220D11, 0x00210E10, 0x00200F10,
- 0x001F000F, 0x001E010F, 0x001D020E, 0x001C030E,
- 0x001B040D, 0x001A050D, 0x0019060C, 0x0018070C,
- 0x0017080B, 0x0016090B, 0x00150A0A, 0x00140B0A,
- 0x00130C09, 0x00120D09, 0x00110E08, 0x00100F08,
- 0x000F0007, 0x000E0107, 0x000D0206, 0x000C0306,
- 0x000B0405, 0x000A0505, 0x00090604, 0x00080704,
- 0x00070803, 0x00060903, 0x00050A02, 0x00040B02,
- 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
+ 0x03CC2B44, 0x03CC2B42, 0x03CC2B40, 0x03CC2B3E,
+ 0x03CC2B3D, 0x03CC2B3B, 0x03C82B44, 0x03C82B42,
+ 0x03C82B40, 0x03C82B3E, 0x03C82B3D, 0x03C82B3B,
+ 0x03C82B39, 0x03C82B38, 0x03C82B36, 0x03C82B34,
+ 0x03C42B44, 0x03C42B42, 0x03C42B40, 0x03C42B3E,
+ 0x03C42B3D, 0x03C42B3B, 0x03C42B39, 0x03C42B38,
+ 0x03C42B36, 0x03C42B34, 0x03C42B33, 0x03C42B32,
+ 0x03C42B30, 0x03C42B2F, 0x03C42B2D, 0x03C02B44,
+ 0x03C02B42, 0x03C02B40, 0x03C02B3E, 0x03C02B3D,
+ 0x03C02B3B, 0x03C02B39, 0x03C02B38, 0x03C02B36,
+ 0x03C02B34, 0x03B02B44, 0x03B02B42, 0x03B02B40,
+ 0x03B02B3E, 0x03B02B3D, 0x03B02B3B, 0x03B02B39,
+ 0x03B02B38, 0x03B02B36, 0x03B02B34, 0x03B02B33,
+ 0x03B02B32, 0x03B02B30, 0x03B02B2F, 0x03B02B2D,
+ 0x03A02B44, 0x03A02B42, 0x03A02B40, 0x03A02B3E,
+ 0x03A02B3D, 0x03A02B3B, 0x03A02B39, 0x03A02B38,
+ 0x03A02B36, 0x03A02B34, 0x03902B44, 0x03902B42,
+ 0x03902B40, 0x03902B3E, 0x03902B3D, 0x03902B3B,
+ 0x03902B39, 0x03902B38, 0x03902B36, 0x03902B34,
+ 0x03902B33, 0x03902B32, 0x03902B30, 0x03802B44,
+ 0x03802B42, 0x03802B40, 0x03802B3E, 0x03802B3D,
+ 0x03802B3B, 0x03802B39, 0x03802B38, 0x03802B36,
+ 0x03802B34, 0x03802B33, 0x03802B32, 0x03802B30,
+ 0x03802B2F, 0x03802B2D, 0x03802B2C, 0x03802B2B,
+ 0x03802B2A, 0x03802B29, 0x03802B27, 0x03802B26,
+ 0x03802B25, 0x03802B24, 0x03802B23, 0x03802B22,
+ 0x03802B21, 0x03802B20, 0x03802B1F, 0x03802B1E,
+ 0x03802B1E, 0x03802B1D, 0x03802B1C, 0x03802B1B,
+ 0x03802B1A, 0x03802B1A, 0x03802B19, 0x03802B18,
+ 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18,
+ 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18,
+ 0x03802B18, 0x03802B18, 0x03802B18, 0x00002B00,
};
static const u32 b43_ntab_intlevel[] = {
@@ -1097,6 +1097,1080 @@ static const u32 b43_ntab_tmap[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* static tables, PHY revision >= 3 */
+static const u32 b43_ntab_framestruct_r3[] = {
+ 0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
+ 0x09804506, 0x00100030, 0x09804507, 0x00100030,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x08004a0c, 0x00100004, 0x01000a0d, 0x00100024,
+ 0x0980450e, 0x00100034, 0x0980450f, 0x00100034,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000a04, 0x00100000, 0x11008a05, 0x00100020,
+ 0x1980c506, 0x00100030, 0x21810506, 0x00100030,
+ 0x21810506, 0x00100030, 0x01800504, 0x00100030,
+ 0x11808505, 0x00100030, 0x29814507, 0x01100030,
+ 0x00000a04, 0x00100000, 0x11008a05, 0x00100020,
+ 0x21810506, 0x00100030, 0x21810506, 0x00100030,
+ 0x29814507, 0x01100030, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
+ 0x1980c50e, 0x00100038, 0x2181050e, 0x00100038,
+ 0x2181050e, 0x00100038, 0x0180050c, 0x00100038,
+ 0x1180850d, 0x00100038, 0x2981450f, 0x01100038,
+ 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
+ 0x2181050e, 0x00100038, 0x2181050e, 0x00100038,
+ 0x2981450f, 0x01100038, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
+ 0x1980c506, 0x00100030, 0x1980c506, 0x00100030,
+ 0x11808504, 0x00100030, 0x3981ca05, 0x00100030,
+ 0x29814507, 0x01100030, 0x00000000, 0x00000000,
+ 0x10008a04, 0x00100000, 0x3981ca05, 0x00100030,
+ 0x1980c506, 0x00100030, 0x29814507, 0x01100030,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x08004a0c, 0x00100008, 0x01000a0d, 0x00100028,
+ 0x1980c50e, 0x00100038, 0x1980c50e, 0x00100038,
+ 0x1180850c, 0x00100038, 0x3981ca0d, 0x00100038,
+ 0x2981450f, 0x01100038, 0x00000000, 0x00000000,
+ 0x10008a0c, 0x00100008, 0x3981ca0d, 0x00100038,
+ 0x1980c50e, 0x00100038, 0x2981450f, 0x01100038,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x40021404, 0x00100000, 0x02001405, 0x00100040,
+ 0x0b004a06, 0x01900060, 0x13008a06, 0x01900060,
+ 0x13008a06, 0x01900060, 0x43020a04, 0x00100060,
+ 0x1b00ca05, 0x00100060, 0x23010a07, 0x01500060,
+ 0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
+ 0x13008a06, 0x01900060, 0x13008a06, 0x01900060,
+ 0x23010a07, 0x01500060, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x4002140c, 0x00100010, 0x0200140d, 0x00100050,
+ 0x0b004a0e, 0x01900070, 0x13008a0e, 0x01900070,
+ 0x13008a0e, 0x01900070, 0x43020a0c, 0x00100070,
+ 0x1b00ca0d, 0x00100070, 0x23010a0f, 0x01500070,
+ 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+ 0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070,
+ 0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x50029404, 0x00100000, 0x32019405, 0x00100040,
+ 0x0b004a06, 0x01900060, 0x0b004a06, 0x01900060,
+ 0x5b02ca04, 0x00100060, 0x3b01d405, 0x00100060,
+ 0x23010a07, 0x01500060, 0x00000000, 0x00000000,
+ 0x5802d404, 0x00100000, 0x3b01d405, 0x00100060,
+ 0x0b004a06, 0x01900060, 0x23010a07, 0x01500060,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x5002940c, 0x00100010, 0x3201940d, 0x00100050,
+ 0x0b004a0e, 0x01900070, 0x0b004a0e, 0x01900070,
+ 0x5b02ca0c, 0x00100070, 0x3b01d40d, 0x00100070,
+ 0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
+ 0x5802d40c, 0x00100010, 0x3b01d40d, 0x00100070,
+ 0x0b004a0e, 0x01900070, 0x23010a0f, 0x01500070,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x40021404, 0x000f4800, 0x62031405, 0x00100040,
+ 0x53028a06, 0x01900060, 0x53028a07, 0x01900060,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x4002140c, 0x000f4808, 0x6203140d, 0x00100048,
+ 0x53028a0e, 0x01900068, 0x53028a0f, 0x01900068,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000a0c, 0x00100004, 0x11008a0d, 0x00100024,
+ 0x1980c50e, 0x00100034, 0x2181050e, 0x00100034,
+ 0x2181050e, 0x00100034, 0x0180050c, 0x00100038,
+ 0x1180850d, 0x00100038, 0x1181850d, 0x00100038,
+ 0x2981450f, 0x01100038, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028,
+ 0x2181050e, 0x00100038, 0x2181050e, 0x00100038,
+ 0x1181850d, 0x00100038, 0x2981450f, 0x01100038,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x08004a04, 0x00100000, 0x01000a05, 0x00100020,
+ 0x0180c506, 0x00100030, 0x0180c506, 0x00100030,
+ 0x2180c50c, 0x00100030, 0x49820a0d, 0x0016a130,
+ 0x41824a0d, 0x0016a130, 0x2981450f, 0x01100030,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x2000ca0c, 0x00100000, 0x49820a0d, 0x0016a130,
+ 0x1980c50e, 0x00100030, 0x41824a0d, 0x0016a130,
+ 0x2981450f, 0x01100030, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x4002140c, 0x00100008, 0x0200140d, 0x00100048,
+ 0x0b004a0e, 0x01900068, 0x13008a0e, 0x01900068,
+ 0x13008a0e, 0x01900068, 0x43020a0c, 0x00100070,
+ 0x1b00ca0d, 0x00100070, 0x1b014a0d, 0x00100070,
+ 0x23010a0f, 0x01500070, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+ 0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070,
+ 0x1b014a0d, 0x00100070, 0x23010a0f, 0x01500070,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x50029404, 0x00100000, 0x32019405, 0x00100040,
+ 0x03004a06, 0x01900060, 0x03004a06, 0x01900060,
+ 0x6b030a0c, 0x00100060, 0x4b02140d, 0x0016a160,
+ 0x4302540d, 0x0016a160, 0x23010a0f, 0x01500060,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x6b03140c, 0x00100060, 0x4b02140d, 0x0016a160,
+ 0x0b004a0e, 0x01900060, 0x4302540d, 0x0016a160,
+ 0x23010a0f, 0x01500060, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
+ 0x53028a06, 0x01900060, 0x5b02ca06, 0x01900060,
+ 0x5b02ca06, 0x01900060, 0x43020a04, 0x00100060,
+ 0x1b00ca05, 0x00100060, 0x53028a07, 0x0190c060,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+ 0x53028a0e, 0x01900070, 0x5b02ca0e, 0x01900070,
+ 0x5b02ca0e, 0x01900070, 0x43020a0c, 0x00100070,
+ 0x1b00ca0d, 0x00100070, 0x53028a0f, 0x0190c070,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x40021404, 0x00100000, 0x1a00d405, 0x00100040,
+ 0x5b02ca06, 0x01900060, 0x5b02ca06, 0x01900060,
+ 0x53028a07, 0x0190c060, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050,
+ 0x5b02ca0e, 0x01900070, 0x5b02ca0e, 0x01900070,
+ 0x53028a0f, 0x0190c070, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u16 b43_ntab_pilot_r3[] = {
+ 0xff08, 0xff08, 0xff08, 0xff08, 0xff08, 0xff08,
+ 0xff08, 0xff08, 0x80d5, 0x80d5, 0x80d5, 0x80d5,
+ 0x80d5, 0x80d5, 0x80d5, 0x80d5, 0xff0a, 0xff82,
+ 0xffa0, 0xff28, 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xff82, 0xffa0, 0xff28, 0xff0a, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xf83f, 0xfa1f, 0xfa97, 0xfab5,
+ 0xf2bd, 0xf0bf, 0xffff, 0xffff, 0xf017, 0xf815,
+ 0xf215, 0xf095, 0xf035, 0xf01d, 0xffff, 0xffff,
+ 0xff08, 0xff02, 0xff80, 0xff20, 0xff08, 0xff02,
+ 0xff80, 0xff20, 0xf01f, 0xf817, 0xfa15, 0xf295,
+ 0xf0b5, 0xf03d, 0xffff, 0xffff, 0xf82a, 0xfa0a,
+ 0xfa82, 0xfaa0, 0xf2a8, 0xf0aa, 0xffff, 0xffff,
+ 0xf002, 0xf800, 0xf200, 0xf080, 0xf020, 0xf008,
+ 0xffff, 0xffff, 0xf00a, 0xf802, 0xfa00, 0xf280,
+ 0xf0a0, 0xf028, 0xffff, 0xffff,
+};
+
+static const u32 b43_ntab_tmap_r3[] = {
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
+ 0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x000aa888,
+ 0x88880000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+ 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+ 0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
+ 0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
+ 0xf1111110, 0x11111111, 0x11f11111, 0x00011111,
+ 0x11110000, 0x1111f111, 0x11111111, 0x111111f1,
+ 0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00088aaa,
+ 0xaaaa0000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+ 0xaaa8aaa0, 0x8aaa8aaa, 0xaa8a8a8a, 0x000aaa88,
+ 0x8aaa0000, 0xaaa8a888, 0x8aa88a8a, 0x8a88a888,
+ 0x08080a00, 0x0a08080a, 0x080a0a08, 0x00080808,
+ 0x080a0000, 0x080a0808, 0x080a0808, 0x0a0a0a08,
+ 0xa0a0a0a0, 0x80a0a080, 0x8080a0a0, 0x00008080,
+ 0x80a00000, 0x80a080a0, 0xa080a0a0, 0x8080a0a0,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x99999000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+ 0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
+ 0x22000000, 0x2222b222, 0x22222222, 0x222222b2,
+ 0xb2222220, 0x22222222, 0x22d22222, 0x00000222,
+ 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+ 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+ 0x33000000, 0x3333b333, 0x33333333, 0x333333b3,
+ 0xb3333330, 0x33333333, 0x33d33333, 0x00000333,
+ 0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
+ 0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
+ 0x99b99b00, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+ 0x9b99bb99, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
+ 0x22222200, 0x2222f222, 0x22222222, 0x222222f2,
+ 0x22222222, 0x22222222, 0x22f22222, 0x00000222,
+ 0x11000000, 0x1111f111, 0x11111111, 0x11111111,
+ 0xf1111111, 0x11111111, 0x11f11111, 0x01111111,
+ 0xbb9bb900, 0xb9b9bb99, 0xb99bbbbb, 0xbbbb9b9b,
+ 0xb9bb99bb, 0xb99999b9, 0xb9b9b99b, 0x00000bbb,
+ 0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+ 0xa8aa88aa, 0xa88888a8, 0xa8a8a88a, 0x0a888aaa,
+ 0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+ 0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00000aaa,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0xbbbbbb00, 0x999bbbbb, 0x9bb99b9b, 0xb9b9b9bb,
+ 0xb9b99bbb, 0xb9b9b9bb, 0xb9bb9b99, 0x00000999,
+ 0x8a000000, 0xaa88a888, 0xa88888aa, 0xa88a8a88,
+ 0xa88aa88a, 0x88a8aaaa, 0xa8aa8aaa, 0x0888a88a,
+ 0x0b0b0b00, 0x090b0b0b, 0x0b090b0b, 0x0909090b,
+ 0x09090b0b, 0x09090b0b, 0x09090b09, 0x00000909,
+ 0x0a000000, 0x0a080808, 0x080a080a, 0x080a0a08,
+ 0x080a080a, 0x0808080a, 0x0a0a0a08, 0x0808080a,
+ 0xb0b0b000, 0x9090b0b0, 0x90b09090, 0xb0b0b090,
+ 0xb0b090b0, 0x90b0b0b0, 0xb0b09090, 0x00000090,
+ 0x80000000, 0xa080a080, 0xa08080a0, 0xa0808080,
+ 0xa080a080, 0x80a0a0a0, 0xa0a080a0, 0x00a0a0a0,
+ 0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
+ 0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
+ 0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
+ 0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
+ 0x33000000, 0x3333f333, 0x33333333, 0x333333f3,
+ 0xf3333330, 0x33333333, 0x33f33333, 0x00000333,
+ 0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
+ 0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
+ 0x99000000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+ 0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0x88888000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
+ 0x88a88a00, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
+ 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+ 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+ 0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+ 0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+ 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_intlevel_r3[] = {
+ 0x00802070, 0x0671188d, 0x0a60192c, 0x0a300e46,
+ 0x00c1188d, 0x080024d2, 0x00000070,
+};
+
+static const u32 b43_ntab_tdtrn_r3[] = {
+ 0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6,
+ 0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68,
+ 0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52,
+ 0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050,
+ 0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6,
+ 0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68,
+ 0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52,
+ 0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050,
+ 0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246,
+ 0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c,
+ 0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61,
+ 0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d,
+ 0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246,
+ 0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c,
+ 0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61,
+ 0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d,
+ 0xfa58fa58, 0xf895043b, 0xff4c09c0, 0xfbc6ffa8,
+ 0xfb84f384, 0x0798f6f9, 0x05760122, 0x058409f6,
+ 0x0b500000, 0x05b7f542, 0x08860432, 0x06ddfee7,
+ 0xfb84f384, 0xf9d90664, 0xf7e8025c, 0x00fff7bd,
+ 0x05a805a8, 0xf7bd00ff, 0x025cf7e8, 0x0664f9d9,
+ 0xf384fb84, 0xfee706dd, 0x04320886, 0xf54205b7,
+ 0x00000b50, 0x09f60584, 0x01220576, 0xf6f90798,
+ 0xf384fb84, 0xffa8fbc6, 0x09c0ff4c, 0x043bf895,
+ 0x02d402d4, 0x07de0270, 0xfc96079c, 0xf90afe94,
+ 0xfe00ff2c, 0x02d4065d, 0x092a0096, 0x0014fbb8,
+ 0xfd2cfd2c, 0x076afb3c, 0x0096f752, 0xf991fd87,
+ 0xfb2c0200, 0xfeb8f960, 0x08e0fc96, 0x049802a8,
+ 0xfd2cfd2c, 0x02a80498, 0xfc9608e0, 0xf960feb8,
+ 0x0200fb2c, 0xfd87f991, 0xf7520096, 0xfb3c076a,
+ 0xfd2cfd2c, 0xfbb80014, 0x0096092a, 0x065d02d4,
+ 0xff2cfe00, 0xfe94f90a, 0x079cfc96, 0x027007de,
+ 0x02d402d4, 0x027007de, 0x079cfc96, 0xfe94f90a,
+ 0xff2cfe00, 0x065d02d4, 0x0096092a, 0xfbb80014,
+ 0xfd2cfd2c, 0xfb3c076a, 0xf7520096, 0xfd87f991,
+ 0x0200fb2c, 0xf960feb8, 0xfc9608e0, 0x02a80498,
+ 0xfd2cfd2c, 0x049802a8, 0x08e0fc96, 0xfeb8f960,
+ 0xfb2c0200, 0xf991fd87, 0x0096f752, 0x076afb3c,
+ 0xfd2cfd2c, 0x0014fbb8, 0x092a0096, 0x02d4065d,
+ 0xfe00ff2c, 0xf90afe94, 0xfc96079c, 0x07de0270,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d,
+ 0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf,
+ 0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8,
+ 0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7,
+ 0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3,
+ 0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841,
+ 0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608,
+ 0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759,
+ 0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d,
+ 0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf,
+ 0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8,
+ 0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7,
+ 0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3,
+ 0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841,
+ 0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608,
+ 0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759,
+ 0x061c061c, 0xff30009d, 0xffb21141, 0xfd87fb54,
+ 0xf65dfe59, 0x02eef99e, 0x0166f03c, 0xfff809b6,
+ 0x000008a4, 0x000af42b, 0x00eff577, 0xfa840bf2,
+ 0xfc02ff51, 0x08260f67, 0xfff0036f, 0x0842f9c3,
+ 0x00000000, 0x063df7be, 0xfc910010, 0xf099f7da,
+ 0x00af03fe, 0xf40e057c, 0x0a89ff11, 0x0bd5fff6,
+ 0xf75c0000, 0xf64a0008, 0x0fc4fe9a, 0x0662fd12,
+ 0x01a709a3, 0x04ac0279, 0xeebf004e, 0xff6300d0,
+ 0xf9e4f9e4, 0x00d0ff63, 0x004eeebf, 0x027904ac,
+ 0x09a301a7, 0xfd120662, 0xfe9a0fc4, 0x0008f64a,
+ 0x0000f75c, 0xfff60bd5, 0xff110a89, 0x057cf40e,
+ 0x03fe00af, 0xf7daf099, 0x0010fc91, 0xf7be063d,
+ 0x00000000, 0xf9c30842, 0x036ffff0, 0x0f670826,
+ 0xff51fc02, 0x0bf2fa84, 0xf57700ef, 0xf42b000a,
+ 0x08a40000, 0x09b6fff8, 0xf03c0166, 0xf99e02ee,
+ 0xfe59f65d, 0xfb54fd87, 0x1141ffb2, 0x009dff30,
+ 0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59,
+ 0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766,
+ 0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986,
+ 0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb,
+ 0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7,
+ 0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a,
+ 0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a,
+ 0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705,
+ 0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59,
+ 0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766,
+ 0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986,
+ 0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb,
+ 0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7,
+ 0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a,
+ 0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a,
+ 0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705,
+ 0xfa58fa58, 0xf8f0fe00, 0x0448073d, 0xfdc9fe46,
+ 0xf9910258, 0x089d0407, 0xfd5cf71a, 0x02affde0,
+ 0x083e0496, 0xff5a0740, 0xff7afd97, 0x00fe01f1,
+ 0x0009082e, 0xfa94ff75, 0xfecdf8ea, 0xffb0f693,
+ 0xfd2cfa58, 0x0433ff16, 0xfba405dd, 0xfa610341,
+ 0x06a606cb, 0x0039fd2d, 0x0677fa97, 0x01fa05e0,
+ 0xf896003e, 0x075a068b, 0x012cfc3e, 0xfa23f98d,
+ 0xfc7cfd43, 0xff90fc0d, 0x01c10982, 0x00c601d6,
+ 0xfd2cfd2c, 0x01d600c6, 0x098201c1, 0xfc0dff90,
+ 0xfd43fc7c, 0xf98dfa23, 0xfc3e012c, 0x068b075a,
+ 0x003ef896, 0x05e001fa, 0xfa970677, 0xfd2d0039,
+ 0x06cb06a6, 0x0341fa61, 0x05ddfba4, 0xff160433,
+ 0xfa58fd2c, 0xf693ffb0, 0xf8eafecd, 0xff75fa94,
+ 0x082e0009, 0x01f100fe, 0xfd97ff7a, 0x0740ff5a,
+ 0x0496083e, 0xfde002af, 0xf71afd5c, 0x0407089d,
+ 0x0258f991, 0xfe46fdc9, 0x073d0448, 0xfe00f8f0,
+ 0xfd2cfd2c, 0xfce00500, 0xfc09fddc, 0xfe680157,
+ 0x04c70571, 0xfc3aff21, 0xfcd70228, 0x056d0277,
+ 0x0200fe00, 0x0022f927, 0xfe3c032b, 0xfc44ff3c,
+ 0x03e9fbdb, 0x04570313, 0x04c9ff5c, 0x000d03b8,
+ 0xfa580000, 0xfbe900d2, 0xf9d0fe0b, 0x0125fdf9,
+ 0x042501bf, 0x0328fa2b, 0xffa902f0, 0xfa250157,
+ 0x0200fe00, 0x03740438, 0xff0405fd, 0x030cfe52,
+ 0x0037fb39, 0xff6904c5, 0x04f8fd23, 0xfd31fc1b,
+ 0xfd2cfd2c, 0xfc1bfd31, 0xfd2304f8, 0x04c5ff69,
+ 0xfb390037, 0xfe52030c, 0x05fdff04, 0x04380374,
+ 0xfe000200, 0x0157fa25, 0x02f0ffa9, 0xfa2b0328,
+ 0x01bf0425, 0xfdf90125, 0xfe0bf9d0, 0x00d2fbe9,
+ 0x0000fa58, 0x03b8000d, 0xff5c04c9, 0x03130457,
+ 0xfbdb03e9, 0xff3cfc44, 0x032bfe3c, 0xf9270022,
+ 0xfe000200, 0x0277056d, 0x0228fcd7, 0xff21fc3a,
+ 0x057104c7, 0x0157fe68, 0xfddcfc09, 0x0500fce0,
+ 0xfd2cfd2c, 0x0500fce0, 0xfddcfc09, 0x0157fe68,
+ 0x057104c7, 0xff21fc3a, 0x0228fcd7, 0x0277056d,
+ 0xfe000200, 0xf9270022, 0x032bfe3c, 0xff3cfc44,
+ 0xfbdb03e9, 0x03130457, 0xff5c04c9, 0x03b8000d,
+ 0x0000fa58, 0x00d2fbe9, 0xfe0bf9d0, 0xfdf90125,
+ 0x01bf0425, 0xfa2b0328, 0x02f0ffa9, 0x0157fa25,
+ 0xfe000200, 0x04380374, 0x05fdff04, 0xfe52030c,
+ 0xfb390037, 0x04c5ff69, 0xfd2304f8, 0xfc1bfd31,
+ 0xfd2cfd2c, 0xfd31fc1b, 0x04f8fd23, 0xff6904c5,
+ 0x0037fb39, 0x030cfe52, 0xff0405fd, 0x03740438,
+ 0x0200fe00, 0xfa250157, 0xffa902f0, 0x0328fa2b,
+ 0x042501bf, 0x0125fdf9, 0xf9d0fe0b, 0xfbe900d2,
+ 0xfa580000, 0x000d03b8, 0x04c9ff5c, 0x04570313,
+ 0x03e9fbdb, 0xfc44ff3c, 0xfe3c032b, 0x0022f927,
+ 0x0200fe00, 0x056d0277, 0xfcd70228, 0xfc3aff21,
+ 0x04c70571, 0xfe680157, 0xfc09fddc, 0xfce00500,
+ 0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e,
+ 0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c,
+ 0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926,
+ 0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942,
+ 0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382,
+ 0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4,
+ 0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da,
+ 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
+ 0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e,
+ 0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c,
+ 0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926,
+ 0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942,
+ 0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382,
+ 0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4,
+ 0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da,
+ 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
+};
+
+static const u32 b43_ntab_noisevar0_r3[] = {
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+};
+
+static const u32 b43_ntab_noisevar1_r3[] = {
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+ 0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
+};
+
+static const u16 b43_ntab_mcs_r3[] = {
+ 0x0000, 0x0008, 0x000a, 0x0010, 0x0012, 0x0019,
+ 0x001a, 0x001c, 0x0080, 0x0088, 0x008a, 0x0090,
+ 0x0092, 0x0099, 0x009a, 0x009c, 0x0100, 0x0108,
+ 0x010a, 0x0110, 0x0112, 0x0119, 0x011a, 0x011c,
+ 0x0180, 0x0188, 0x018a, 0x0190, 0x0192, 0x0199,
+ 0x019a, 0x019c, 0x0000, 0x0098, 0x00a0, 0x00a8,
+ 0x009a, 0x00a2, 0x00aa, 0x0120, 0x0128, 0x0128,
+ 0x0130, 0x0138, 0x0138, 0x0140, 0x0122, 0x012a,
+ 0x012a, 0x0132, 0x013a, 0x013a, 0x0142, 0x01a8,
+ 0x01b0, 0x01b8, 0x01b0, 0x01b8, 0x01c0, 0x01c8,
+ 0x01c0, 0x01c8, 0x01d0, 0x01d0, 0x01d8, 0x01aa,
+ 0x01b2, 0x01ba, 0x01b2, 0x01ba, 0x01c2, 0x01ca,
+ 0x01c2, 0x01ca, 0x01d2, 0x01d2, 0x01da, 0x0001,
+ 0x0002, 0x0004, 0x0009, 0x000c, 0x0011, 0x0014,
+ 0x0018, 0x0020, 0x0021, 0x0022, 0x0024, 0x0081,
+ 0x0082, 0x0084, 0x0089, 0x008c, 0x0091, 0x0094,
+ 0x0098, 0x00a0, 0x00a1, 0x00a2, 0x00a4, 0x0007,
+ 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+ 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+ 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+ 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+ 0x0007, 0x0007,
+};
+
+static const u32 b43_ntab_tdi20a0_r3[] = {
+ 0x00091226, 0x000a1429, 0x000b56ad, 0x000c58b0,
+ 0x000d5ab3, 0x000e9cb6, 0x000f9eba, 0x0000c13d,
+ 0x00020301, 0x00030504, 0x00040708, 0x0005090b,
+ 0x00064b8e, 0x00095291, 0x000a5494, 0x000b9718,
+ 0x000c9927, 0x000d9b2a, 0x000edd2e, 0x000fdf31,
+ 0x000101b4, 0x000243b7, 0x000345bb, 0x000447be,
+ 0x00058982, 0x00068c05, 0x00099309, 0x000a950c,
+ 0x000bd78f, 0x000cd992, 0x000ddb96, 0x000f1d99,
+ 0x00005fa8, 0x0001422c, 0x0002842f, 0x00038632,
+ 0x00048835, 0x0005ca38, 0x0006ccbc, 0x0009d3bf,
+ 0x000b1603, 0x000c1806, 0x000d1a0a, 0x000e1c0d,
+ 0x000f5e10, 0x00008093, 0x00018297, 0x0002c49a,
+ 0x0003c680, 0x0004c880, 0x00060b00, 0x00070d00,
+ 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_tdi20a1_r3[] = {
+ 0x00014b26, 0x00028d29, 0x000393ad, 0x00049630,
+ 0x0005d833, 0x0006da36, 0x00099c3a, 0x000a9e3d,
+ 0x000bc081, 0x000cc284, 0x000dc488, 0x000f068b,
+ 0x0000488e, 0x00018b91, 0x0002d214, 0x0003d418,
+ 0x0004d6a7, 0x000618aa, 0x00071aae, 0x0009dcb1,
+ 0x000b1eb4, 0x000c0137, 0x000d033b, 0x000e053e,
+ 0x000f4702, 0x00008905, 0x00020c09, 0x0003128c,
+ 0x0004148f, 0x00051712, 0x00065916, 0x00091b19,
+ 0x000a1d28, 0x000b5f2c, 0x000c41af, 0x000d43b2,
+ 0x000e85b5, 0x000f87b8, 0x0000c9bc, 0x00024cbf,
+ 0x00035303, 0x00045506, 0x0005978a, 0x0006998d,
+ 0x00095b90, 0x000a5d93, 0x000b9f97, 0x000c821a,
+ 0x000d8400, 0x000ec600, 0x000fc800, 0x00010a00,
+ 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_tdi40a0_r3[] = {
+ 0x0011a346, 0x00136ccf, 0x0014f5d9, 0x001641e2,
+ 0x0017cb6b, 0x00195475, 0x001b2383, 0x001cad0c,
+ 0x001e7616, 0x0000821f, 0x00020ba8, 0x0003d4b2,
+ 0x00056447, 0x00072dd0, 0x0008b6da, 0x000a02e3,
+ 0x000b8c6c, 0x000d15f6, 0x0011e484, 0x0013ae0d,
+ 0x00153717, 0x00168320, 0x00180ca9, 0x00199633,
+ 0x001b6548, 0x001ceed1, 0x001eb7db, 0x0000c3e4,
+ 0x00024d6d, 0x000416f7, 0x0005a585, 0x00076f0f,
+ 0x0008f818, 0x000a4421, 0x000bcdab, 0x000d9734,
+ 0x00122649, 0x0013efd2, 0x001578dc, 0x0016c4e5,
+ 0x00184e6e, 0x001a17f8, 0x001ba686, 0x001d3010,
+ 0x001ef999, 0x00010522, 0x00028eac, 0x00045835,
+ 0x0005e74a, 0x0007b0d3, 0x00093a5d, 0x000a85e6,
+ 0x000c0f6f, 0x000dd8f9, 0x00126787, 0x00143111,
+ 0x0015ba9a, 0x00170623, 0x00188fad, 0x001a5936,
+ 0x001be84b, 0x001db1d4, 0x001f3b5e, 0x000146e7,
+ 0x00031070, 0x000499fa, 0x00062888, 0x0007f212,
+ 0x00097b9b, 0x000ac7a4, 0x000c50ae, 0x000e1a37,
+ 0x0012a94c, 0x001472d5, 0x0015fc5f, 0x00174868,
+ 0x0018d171, 0x001a9afb, 0x001c2989, 0x001df313,
+ 0x001f7c9c, 0x000188a5, 0x000351af, 0x0004db38,
+ 0x0006aa4d, 0x000833d7, 0x0009bd60, 0x000b0969,
+ 0x000c9273, 0x000e5bfc, 0x00132a8a, 0x0014b414,
+ 0x00163d9d, 0x001789a6, 0x001912b0, 0x001adc39,
+ 0x001c6bce, 0x001e34d8, 0x001fbe61, 0x0001ca6a,
+ 0x00039374, 0x00051cfd, 0x0006ec0b, 0x00087515,
+ 0x0009fe9e, 0x000b4aa7, 0x000cd3b1, 0x000e9d3a,
+ 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_tdi40a1_r3[] = {
+ 0x001edb36, 0x000129ca, 0x0002b353, 0x00047cdd,
+ 0x0005c8e6, 0x000791ef, 0x00091bf9, 0x000aaa07,
+ 0x000c3391, 0x000dfd1a, 0x00120923, 0x0013d22d,
+ 0x00155c37, 0x0016eacb, 0x00187454, 0x001a3dde,
+ 0x001b89e7, 0x001d12f0, 0x001f1cfa, 0x00016b88,
+ 0x00033492, 0x0004be1b, 0x00060a24, 0x0007d32e,
+ 0x00095d38, 0x000aec4c, 0x000c7555, 0x000e3edf,
+ 0x00124ae8, 0x001413f1, 0x0015a37b, 0x00172c89,
+ 0x0018b593, 0x001a419c, 0x001bcb25, 0x001d942f,
+ 0x001f63b9, 0x0001ad4d, 0x00037657, 0x0004c260,
+ 0x00068be9, 0x000814f3, 0x0009a47c, 0x000b2d8a,
+ 0x000cb694, 0x000e429d, 0x00128c26, 0x001455b0,
+ 0x0015e4ba, 0x00176e4e, 0x0018f758, 0x001a8361,
+ 0x001c0cea, 0x001dd674, 0x001fa57d, 0x0001ee8b,
+ 0x0003b795, 0x0005039e, 0x0006cd27, 0x000856b1,
+ 0x0009e5c6, 0x000b6f4f, 0x000cf859, 0x000e8462,
+ 0x00130deb, 0x00149775, 0x00162603, 0x0017af8c,
+ 0x00193896, 0x001ac49f, 0x001c4e28, 0x001e17b2,
+ 0x0000a6c7, 0x00023050, 0x0003f9da, 0x00054563,
+ 0x00070eec, 0x00089876, 0x000a2704, 0x000bb08d,
+ 0x000d3a17, 0x001185a0, 0x00134f29, 0x0014d8b3,
+ 0x001667c8, 0x0017f151, 0x00197adb, 0x001b0664,
+ 0x001c8fed, 0x001e5977, 0x0000e805, 0x0002718f,
+ 0x00043b18, 0x000586a1, 0x0007502b, 0x0008d9b4,
+ 0x000a68c9, 0x000bf252, 0x000dbbdc, 0x0011c7e5,
+ 0x001390ee, 0x00151a78, 0x0016a906, 0x00183290,
+ 0x0019bc19, 0x001b4822, 0x001cd12c, 0x001e9ab5,
+ 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_pilotlt_r3[] = {
+ 0x76540213, 0x62407351, 0x76543210, 0x76540213,
+ 0x76540213, 0x76430521,
+};
+
+static const u32 b43_ntab_channelest_r3[] = {
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+};
+
+static const u8 b43_ntab_framelookup_r3[] = {
+ 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16,
+ 0x0a, 0x0c, 0x1c, 0x1c, 0x0b, 0x0d, 0x1e, 0x1e,
+ 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1a, 0x1a,
+ 0x0e, 0x10, 0x20, 0x28, 0x0f, 0x11, 0x22, 0x2a,
+};
+
+static const u8 b43_ntab_estimatepowerlt0_r3[] = {
+ 0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51,
+ 0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c,
+ 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46,
+ 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f,
+ 0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36,
+ 0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b,
+ 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a,
+ 0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd,
+};
+
+static const u8 b43_ntab_estimatepowerlt1_r3[] = {
+ 0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51,
+ 0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c,
+ 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46,
+ 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f,
+ 0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36,
+ 0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b,
+ 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a,
+ 0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd,
+};
+
+static const u8 b43_ntab_adjustpower0_r3[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 b43_ntab_adjustpower1_r3[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u32 b43_ntab_gainctl0_r3[] = {
+ 0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
+ 0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
+ 0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
+ 0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
+ 0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
+ 0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
+ 0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
+ 0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
+ 0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
+ 0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
+ 0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
+ 0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
+ 0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
+ 0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
+ 0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
+ 0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
+ 0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
+ 0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
+ 0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
+ 0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
+ 0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
+ 0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
+ 0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
+ 0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
+ 0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
+ 0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
+ 0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
+ 0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
+ 0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
+ 0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
+ 0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
+ 0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
+};
+
+static const u32 b43_ntab_gainctl1_r3[] = {
+ 0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
+ 0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
+ 0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
+ 0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
+ 0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
+ 0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
+ 0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
+ 0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
+ 0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
+ 0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
+ 0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
+ 0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
+ 0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
+ 0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
+ 0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
+ 0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
+ 0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
+ 0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
+ 0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
+ 0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
+ 0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
+ 0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
+ 0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
+ 0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
+ 0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
+ 0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
+ 0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
+ 0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
+ 0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
+ 0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
+ 0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
+ 0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
+};
+
+static const u32 b43_ntab_iqlt0_r3[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_ntab_iqlt1_r3[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u16 b43_ntab_loftlt0_r3[] = {
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000,
+};
+
+static const u16 b43_ntab_loftlt1_r3[] = {
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000,
+};
+
+/* TX gain tables */
const u32 b43_ntab_tx_gain_rev0_1_2[] = {
0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
@@ -1635,6 +2709,79 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
{ 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
};
+struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
+ { /* 2GHz */
+ { /* PHY rev 3 */
+ { 7, 11, 16, 23 },
+ { -5, 6, 10, 14 },
+ { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+ { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+ 0x627E,
+ { 0x613F, 0x613F, 0x613F, 0x613F },
+ 0x107E, 0x0066, 0x0074,
+ 0x18, 0x18, 0x18,
+ 0x020D, 0x5,
+ },
+ { /* PHY rev 4 */
+ { 8, 12, 17, 25 },
+ { -5, 6, 10, 14 },
+ { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+ { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+ 0x527E,
+ { 0x513F, 0x513F, 0x513F, 0x513F },
+ 0x007E, 0x0066, 0x0074,
+ 0x18, 0x18, 0x18,
+ 0x01A1, 0x5,
+ },
+ { /* PHY rev 5+ */
+ { 9, 13, 18, 26 },
+ { -3, 7, 11, 16 },
+ { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+ { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+ 0x427E, /* invalid for external LNA! */
+ { 0x413F, 0x413F, 0x413F, 0x413F }, /* invalid for external LNA! */
+ 0x1076, 0x0066, 0x106A,
+ 0xC, 0xC, 0xC,
+ 0x01D0, 0x5,
+ },
+ },
+ { /* 5GHz */
+ { /* PHY rev 3 */
+ { 7, 11, 17, 23 },
+ { -6, 2, 6, 10 },
+ { 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 },
+ { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 },
+ 0x52DE,
+ { 0x516F, 0x516F, 0x516F, 0x516F },
+ 0x00DE, 0x00CA, 0x00CC,
+ 0x1E, 0x1E, 0x1E,
+ 0x01A1, 25,
+ },
+ { /* PHY rev 4 */
+ { 8, 12, 18, 23 },
+ { -5, 2, 6, 10 },
+ { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
+ { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
+ 0x629E,
+ { 0x614F, 0x614F, 0x614F, 0x614F },
+ 0x029E, 0x1084, 0x0086,
+ 0x24, 0x24, 0x24,
+ 0x0107, 25,
+ },
+ { /* PHY rev 5+ */
+ { 6, 10, 16, 21 },
+ { -7, 0, 4, 8 },
+ { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
+ { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
+ 0x729E,
+ { 0x714F, 0x714F, 0x714F, 0x714F },
+ 0x029E, 0x2084, 0x2086,
+ 0x24, 0x24, 0x24,
+ 0x00A9, 25,
+ },
+ },
+};
+
static inline void assert_ntab_array_sizes(void)
{
#undef check
@@ -1811,11 +2958,8 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
}
#define ntab_upload(dev, offset, data) do { \
- unsigned int i; \
- for (i = 0; i < (offset##_SIZE); i++) \
- b43_ntab_write(dev, (offset) + i, (data)[i]); \
+ b43_ntab_write_bulk(dev, offset, offset##_SIZE, data); \
} while (0)
-
void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
{
/* Static tables */
@@ -1825,35 +2969,94 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
- ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
- ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
-
- /* Volatile tables */
ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
+
+ /* Volatile tables */
+ ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
+ ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
+ ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
+ ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
- ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
- ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
}
+#define ntab_upload_r3(dev, offset, data) do { \
+ b43_ntab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \
+ } while (0)
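+
The rev3 upload helper sizes each transfer from the array itself via ARRAY_SIZE(), so the rev3 tables need no per-table *_SIZE constants, unlike ntab_upload above which pastes offset##_SIZE. As a minimal illustration (not part of the patch), a call such as

	ntab_upload_r3(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);

expands to

	b43_ntab_write_bulk(dev, B43_NTAB_MCS_R3,
			    ARRAY_SIZE(b43_ntab_mcs_r3), b43_ntab_mcs_r3);

so the transfer length always tracks the array definition.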
void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
{
/* Static tables */
- /* TODO */
+ ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
+ ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
+ ntab_upload_r3(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
+ ntab_upload_r3(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
+ ntab_upload_r3(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
+ ntab_upload_r3(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
+ ntab_upload_r3(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
+ ntab_upload_r3(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
+ ntab_upload_r3(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
+ ntab_upload_r3(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
+ ntab_upload_r3(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
+ ntab_upload_r3(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
+ ntab_upload_r3(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
+ ntab_upload_r3(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
+ ntab_upload_r3(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
+ ntab_upload_r3(dev, B43_NTAB_C0_ESTPLT_R3,
+ b43_ntab_estimatepowerlt0_r3);
+ ntab_upload_r3(dev, B43_NTAB_C1_ESTPLT_R3,
+ b43_ntab_estimatepowerlt1_r3);
+ ntab_upload_r3(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
+ ntab_upload_r3(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
+ ntab_upload_r3(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
+ ntab_upload_r3(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
+ ntab_upload_r3(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
+ ntab_upload_r3(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
+ ntab_upload_r3(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
+ ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
/* Volatile tables */
/* TODO */
}
+
+struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
+ struct b43_wldev *dev, bool ghz5, bool ext_lna)
+{
+ struct nphy_gain_ctl_workaround_entry *e;
+ u8 phy_idx;
+
+ B43_WARN_ON(dev->phy.rev < 3);
+ if (dev->phy.rev >= 5)
+ phy_idx = 2;
+ else if (dev->phy.rev == 4)
+ phy_idx = 1;
+ else
+ phy_idx = 0;
+
+ e = &nphy_gain_ctl_workaround[ghz5][phy_idx];
+
+ /* Only one entry differs for the external LNA case, so instead of
+ * making the whole table twice as big, patch that single entry here.
+ */
+ if (!ghz5 && dev->phy.rev >= 5 && ext_lna) {
+ e->rfseq_init[0] &= 0x0FFF;
+ e->rfseq_init[1] &= 0x0FFF;
+ e->rfseq_init[2] &= 0x0FFF;
+ e->rfseq_init[3] &= 0x0FFF;
+ e->init_gain &= 0x0FFF;
+ }
+
+ return e;
+}
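+
A hedged usage sketch (hypothetical caller, not taken from this patch): the helper selects the entry by band and PHY revision, and for the 2 GHz rev5+ external-LNA case it masks the shared static entry in place before returning it.

	/* Sketch only: ghz5 and ext_lna reflect the current PHY configuration. */
	struct nphy_gain_ctl_workaround_entry *e;

	e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
	/* A caller would then program values such as e->init_gain and
	 * e->rfseq_init[] into the PHY; the exact registers are outside
	 * this patch. */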
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 4ec593ba3eef..18569367ce43 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -35,6 +35,31 @@ struct nphy_rf_control_override_rev3 {
u8 val_addr1;
};
+struct nphy_gain_ctl_workaround_entry {
+ s8 lna1_gain[4];
+ s8 lna2_gain[4];
+ u8 gain_db[10];
+ u8 gain_bits[10];
+
+ u16 init_gain;
+ u16 rfseq_init[4];
+
+ u16 cliphi_gain;
+ u16 clipmd_gain;
+ u16 cliplo_gain;
+
+ u16 crsmin;
+ u16 crsminl;
+ u16 crsminu;
+
+ u16 nbclip;
+ u16 wlclip;
+};
+
+/* Get the entry with workaround values for gain control. Never returns NULL. */
+struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
+ struct b43_wldev *dev, bool ghz5, bool ext_lna);
+
/* Get the NPHY Channel Switch Table entry for a channel.
* Returns NULL on failure to find an entry. */
const struct b43_nphy_channeltab_entry_rev2 *
@@ -109,6 +134,33 @@ b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq);
#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
#define B43_NTAB_C1_LOFEEDTH_SIZE 128
+/* Static N-PHY tables, PHY revision >= 3 */
+#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 000) /* frame struct */
+#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 000) /* pilot */
+#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 000) /* TM AP */
+#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 000) /* INT LV */
+#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 000) /* TD TRN */
+#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 000) /* noise variance 0 */
+#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
+#define B43_NTAB_MCS_R3 B43_NTAB16(18, 000) /* MCS */
+#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
+#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
+#define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */
+#define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */
+#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 000) /* PLT lookup */
+#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 000) /* channel estimate */
+#define B43_NTAB_FRAMELT_R3 B43_NTAB8 (24, 000) /* frame lookup */
+#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8 (26, 000) /* estimated power lookup 0 */
+#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8 (27, 000) /* estimated power lookup 1 */
+#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8 (26, 064) /* adjusted power lookup 0 */
+#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8 (27, 064) /* adjusted power lookup 1 */
+#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */
+#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */
+#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */
+#define B43_NTAB_C1_IQLT_R3 B43_NTAB32(27, 320) /* I/Q lookup 1 */
+#define B43_NTAB_C0_LOFEEDTH_R3 B43_NTAB16(26, 448) /* Local Oscillator Feed Through lookup 0 */
+#define B43_NTAB_C1_LOFEEDTH_R3 B43_NTAB16(27, 448) /* Local Oscillator Feed Through lookup 1 */
+
#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18
#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18
#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index e6b0528f3b52..e5be381c17bc 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -32,6 +32,36 @@
#include "dma.h"
#include "pio.h"
+static const struct b43_tx_legacy_rate_phy_ctl_entry b43_tx_legacy_rate_phy_ctl[] = {
+ { B43_CCK_RATE_1MB, 0x0, 0x0 },
+ { B43_CCK_RATE_2MB, 0x0, 0x1 },
+ { B43_CCK_RATE_5MB, 0x0, 0x2 },
+ { B43_CCK_RATE_11MB, 0x0, 0x3 },
+ { B43_OFDM_RATE_6MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_BPSK },
+ { B43_OFDM_RATE_9MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_BPSK },
+ { B43_OFDM_RATE_12MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QPSK },
+ { B43_OFDM_RATE_18MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QPSK },
+ { B43_OFDM_RATE_24MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QAM16 },
+ { B43_OFDM_RATE_36MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM16 },
+ { B43_OFDM_RATE_48MB, B43_TXH_PHY1_CRATE_2_3, B43_TXH_PHY1_MODUL_QAM64 },
+ { B43_OFDM_RATE_54MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM64 },
+};
+
+static const struct b43_tx_legacy_rate_phy_ctl_entry *
+b43_tx_legacy_rate_phy_ctl_ent(u8 bitrate)
+{
+ const struct b43_tx_legacy_rate_phy_ctl_entry *e;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(b43_tx_legacy_rate_phy_ctl); i++) {
+ e = &(b43_tx_legacy_rate_phy_ctl[i]);
+ if (e->bitrate == bitrate)
+ return e;
+ }
+
+ B43_WARN_ON(1);
+ return NULL;
+}
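+
The lookup is a linear search keyed on the legacy bitrate; each entry supplies the coding-rate and modulation bits for PHY control word 1. An illustrative call (not from this patch):

	const struct b43_tx_legacy_rate_phy_ctl_entry *e;

	e = b43_tx_legacy_rate_phy_ctl_ent(B43_OFDM_RATE_24MB);
	/* Per the table above: e->coding_rate == B43_TXH_PHY1_CRATE_1_2,
	 * e->modulation == B43_TXH_PHY1_MODUL_QAM16. */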
/* Extract the bitrate index out of a CCK PLCP header. */
static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp)
@@ -145,6 +175,34 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp,
}
}
+static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate)
+{
+ const struct b43_phy *phy = &dev->phy;
+ const struct b43_tx_legacy_rate_phy_ctl_entry *e;
+ u16 control = 0;
+ u16 bw;
+
+ if (phy->type == B43_PHYTYPE_LP)
+ bw = B43_TXH_PHY1_BW_20;
+ else /* FIXME */
+ bw = B43_TXH_PHY1_BW_20;
+
+ if (0) { /* FIXME: MIMO */
+ } else if (b43_is_cck_rate(bitrate) && phy->type != B43_PHYTYPE_LP) {
+ control = bw;
+ } else {
+ control = bw;
+ e = b43_tx_legacy_rate_phy_ctl_ent(bitrate);
+ if (e) {
+ control |= e->coding_rate;
+ control |= e->modulation;
+ }
+ control |= B43_TXH_PHY1_MODE_SISO;
+ }
+
+ return control;
+}
+
static u8 b43_calc_fallback_rate(u8 bitrate)
{
switch (bitrate) {
@@ -437,6 +495,14 @@ int b43_generate_txhdr(struct b43_wldev *dev,
extra_ft |= B43_TXH_EFT_RTSFB_OFDM;
else
extra_ft |= B43_TXH_EFT_RTSFB_CCK;
+
+ if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS &&
+ phy->type == B43_PHYTYPE_N) {
+ txhdr->phy_ctl1_rts = cpu_to_le16(
+ b43_generate_tx_phy_ctl1(dev, rts_rate));
+ txhdr->phy_ctl1_rts_fb = cpu_to_le16(
+ b43_generate_tx_phy_ctl1(dev, rts_rate_fb));
+ }
}
/* Magic cookie */
@@ -445,6 +511,13 @@ int b43_generate_txhdr(struct b43_wldev *dev,
else
txhdr->new_format.cookie = cpu_to_le16(cookie);
+ if (phy->type == B43_PHYTYPE_N) {
+ txhdr->phy_ctl1 =
+ cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate));
+ txhdr->phy_ctl1_fb =
+ cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate_fb));
+ }
+
/* Apply the bitfields */
txhdr->mac_ctl = cpu_to_le32(mac_ctl);
txhdr->phy_ctl = cpu_to_le16(phy_ctl);
@@ -652,7 +725,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
status.mactime += mactime;
if (low_mactime_now <= mactime)
status.mactime -= 0x10000;
- status.flag |= RX_FLAG_TSFT;
+ status.flag |= RX_FLAG_MACTIME_MPDU;
}
chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index d4cf9b390af3..42debb5cd6fa 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -73,6 +73,12 @@ struct b43_txhdr {
} __packed;
} __packed;
+struct b43_tx_legacy_rate_phy_ctl_entry {
+ u8 bitrate;
+ u16 coding_rate;
+ u16 modulation;
+};
+
/* MAC TX control */
#define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */
#define B43_TXH_MAC_KEYIDX 0x0FF00000 /* Security key index */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 67f18ecdb3bf..c7fd73e3ad76 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -181,52 +181,75 @@ static int b43legacy_ratelimit(struct b43legacy_wl *wl)
void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
if (!b43legacy_ratelimit(wl))
return;
+
va_start(args, fmt);
- printk(KERN_INFO "b43legacy-%s: ",
- (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan");
- vprintk(fmt, args);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_INFO "b43legacy-%s: %pV",
+ (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
+
va_end(args);
}
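
The conversion wraps the caller's format string and va_list in a struct va_format and prints it with %pV, so the "b43legacy-<phy>" prefix and the message go out in a single printk() call instead of two. A minimal sketch of the idiom (generic, not driver code):

	struct va_format vaf = { .fmt = fmt, .va = &args };

	printk(KERN_INFO "prefix: %pV", &vaf);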
void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
if (!b43legacy_ratelimit(wl))
return;
+
va_start(args, fmt);
- printk(KERN_ERR "b43legacy-%s ERROR: ",
- (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan");
- vprintk(fmt, args);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_ERR "b43legacy-%s ERROR: %pV",
+ (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
+
va_end(args);
}
void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
if (!b43legacy_ratelimit(wl))
return;
+
va_start(args, fmt);
- printk(KERN_WARNING "b43legacy-%s warning: ",
- (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan");
- vprintk(fmt, args);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_WARNING "b43legacy-%s warning: %pV",
+ (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
+
va_end(args);
}
#if B43legacy_DEBUG
void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
- printk(KERN_DEBUG "b43legacy-%s debug: ",
- (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan");
- vprintk(fmt, args);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_DEBUG "b43legacy-%s debug: %pV",
+ (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
+
va_end(args);
}
#endif /* DEBUG */
@@ -2419,8 +2442,8 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl)
return err;
}
-static int b43legacy_op_tx(struct ieee80211_hw *hw,
- struct sk_buff *skb)
+static void b43legacy_op_tx(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
@@ -2443,7 +2466,6 @@ out:
/* Drop the packet. */
dev_kfree_skb_any(skb);
}
- return NETDEV_TX_OK;
}
static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 35033dd342ce..28e477d01587 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -153,7 +153,7 @@ void b43legacy_phy_calibrate(struct b43legacy_wldev *dev)
phy->calibrated = 1;
}
-/* intialize B PHY power control
+/* initialize B PHY power control
* as described in http://bcm-specs.sipsolutions.net/InitPowerControl
*/
static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev)
diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/b43legacy/rfkill.c
index d579df72b783..b90f223fb31c 100644
--- a/drivers/net/wireless/b43legacy/rfkill.c
+++ b/drivers/net/wireless/b43legacy/rfkill.c
@@ -29,7 +29,7 @@
/* Returns TRUE, if the radio is enabled in hardware. */
bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
{
- if (dev->phy.rev >= 3) {
+ if (dev->dev->id.revision >= 3) {
if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI)
& B43legacy_MMIO_RADIO_HWENABLED_HI_MASK))
return 1;
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 7d177d97f1f7..3a95541708a6 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -572,7 +572,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
status.mactime += mactime;
if (low_mactime_now <= mactime)
status.mactime -= 0x10000;
- status.flag |= RX_FLAG_TSFT;
+ status.flag |= RX_FLAG_MACTIME_MPDU;
}
chanid = (chanstat & B43legacy_RX_CHAN_ID) >>
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index dbb986946e1a..18d63f57777d 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -858,7 +858,10 @@ void hostap_free_data(struct ap_data *ap)
return;
}
+ flush_work_sync(&ap->add_sta_proc_queue);
+
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+ flush_work_sync(&ap->wds_oper_queue);
if (ap->crypt)
ap->crypt->deinit(ap->crypt_priv);
ap->crypt = ap->crypt_priv = NULL;
diff --git a/drivers/net/wireless/hostap/hostap_config.h b/drivers/net/wireless/hostap/hostap_config.h
index 30acd39d76a2..2c8f71f0ed45 100644
--- a/drivers/net/wireless/hostap/hostap_config.h
+++ b/drivers/net/wireless/hostap/hostap_config.h
@@ -30,9 +30,9 @@
/* Following defines can be used to remove unneeded parts of the driver, e.g.,
* to limit the size of the kernel module. Definitions can be added here in
- * hostap_config.h or they can be added to make command with EXTRA_CFLAGS,
+ * hostap_config.h or they can be added to the make command with ccflags-y,
* e.g.,
- * 'make pccard EXTRA_CFLAGS="-DPRISM2_NO_DEBUG -DPRISM2_NO_PROCFS_DEBUG"'
+ * 'make pccard ccflags-y="-DPRISM2_NO_DEBUG -DPRISM2_NO_PROCFS_DEBUG"'
*/
/* Do not include debug messages into the driver */
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index bd8a4134edeb..2176edede39b 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -518,22 +518,21 @@ static int prism2_config(struct pcmcia_device *link)
hw_priv->link = link;
/*
- * Make sure the IRQ handler cannot proceed until at least
- * dev->base_addr is initialized.
+ * We enable the IRQ here, but the IRQ handler will not proceed
+ * until dev->base_addr is set below. This protects us from
+ * receiving interrupts while the driver is not yet initialized.
*/
- spin_lock_irqsave(&local->irq_init_lock, flags);
-
ret = pcmcia_request_irq(link, prism2_interrupt);
if (ret)
- goto failed_unlock;
+ goto failed;
ret = pcmcia_enable_device(link);
if (ret)
- goto failed_unlock;
+ goto failed;
+ spin_lock_irqsave(&local->irq_init_lock, flags);
dev->irq = link->irq;
dev->base_addr = link->resource[0]->start;
-
spin_unlock_irqrestore(&local->irq_init_lock, flags);
local->shutdown = 0;
@@ -546,8 +545,6 @@ static int prism2_config(struct pcmcia_device *link)
return ret;
- failed_unlock:
- spin_unlock_irqrestore(&local->irq_init_lock, flags);
failed:
kfree(hw_priv);
prism2_release((u_long)link);
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index b7cb165d612b..a8bddd81b4d1 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3317,7 +3317,13 @@ static void prism2_free_local_data(struct net_device *dev)
unregister_netdev(local->dev);
- flush_scheduled_work();
+ flush_work_sync(&local->reset_queue);
+ flush_work_sync(&local->set_multicast_list_queue);
+ flush_work_sync(&local->set_tim_queue);
+#ifndef PRISM2_NO_STATION_MODES
+ flush_work_sync(&local->info_queue);
+#endif
+ flush_work_sync(&local->comms_qual_update);
lib80211_crypt_info_free(&local->crypt_info);
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 25a2722c8a98..1d9aed645723 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -891,7 +891,6 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
SET_ETHTOOL_OPS(dev, &prism2_ethtool_ops);
- netif_stop_queue(dev);
}
static int hostap_enable_hostapd(local_info_t *local, int rtnl_locked)
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 61915f371416..4b97f918daff 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -706,11 +706,10 @@ static void schedule_reset(struct ipw2100_priv *priv)
netif_stop_queue(priv->net_dev);
priv->status |= STATUS_RESET_PENDING;
if (priv->reset_backoff)
- queue_delayed_work(priv->workqueue, &priv->reset_work,
- priv->reset_backoff * HZ);
+ schedule_delayed_work(&priv->reset_work,
+ priv->reset_backoff * HZ);
else
- queue_delayed_work(priv->workqueue, &priv->reset_work,
- 0);
+ schedule_delayed_work(&priv->reset_work, 0);
if (priv->reset_backoff < MAX_RESET_BACKOFF)
priv->reset_backoff++;
@@ -1397,7 +1396,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
}
/*
- * Send the CARD_DISABLE_PHY_OFF comamnd to the card to disable it
+ * Send the CARD_DISABLE_PHY_OFF command to the card to disable it
*
* After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent.
*
@@ -1474,7 +1473,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv)
if (priv->stop_hang_check) {
priv->stop_hang_check = 0;
- queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
+ schedule_delayed_work(&priv->hang_check, HZ / 2);
}
fail_up:
@@ -1808,8 +1807,8 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
if (priv->stop_rf_kill) {
priv->stop_rf_kill = 0;
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(HZ));
}
deferred = 1;
@@ -2086,7 +2085,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
priv->status |= STATUS_ASSOCIATING;
priv->connect_start = get_seconds();
- queue_delayed_work(priv->workqueue, &priv->wx_event_work, HZ / 10);
+ schedule_delayed_work(&priv->wx_event_work, HZ / 10);
}
static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
@@ -2166,9 +2165,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
return;
if (priv->status & STATUS_SECURITY_UPDATED)
- queue_delayed_work(priv->workqueue, &priv->security_work, 0);
+ schedule_delayed_work(&priv->security_work, 0);
- queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
+ schedule_delayed_work(&priv->wx_event_work, 0);
}
static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
@@ -2183,8 +2182,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
/* Make sure the RF Kill check timer is running */
priv->stop_rf_kill = 0;
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
}
static void send_scan_event(void *data)
@@ -2219,13 +2217,12 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
/* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) {
if (!delayed_work_pending(&priv->scan_event_later))
- queue_delayed_work(priv->workqueue,
- &priv->scan_event_later,
- round_jiffies_relative(msecs_to_jiffies(4000)));
+ schedule_delayed_work(&priv->scan_event_later,
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
priv->user_requested_scan = 0;
cancel_delayed_work(&priv->scan_event_later);
- queue_work(priv->workqueue, &priv->scan_event_now);
+ schedule_work(&priv->scan_event_now);
}
}
@@ -4329,8 +4326,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
/* Make sure the RF_KILL check timer is running */
priv->stop_rf_kill = 0;
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(HZ));
} else
schedule_reset(priv);
}
@@ -4461,20 +4458,17 @@ static void bd_queue_initialize(struct ipw2100_priv *priv,
IPW_DEBUG_INFO("exit\n");
}
-static void ipw2100_kill_workqueue(struct ipw2100_priv *priv)
+static void ipw2100_kill_works(struct ipw2100_priv *priv)
{
- if (priv->workqueue) {
- priv->stop_rf_kill = 1;
- priv->stop_hang_check = 1;
- cancel_delayed_work(&priv->reset_work);
- cancel_delayed_work(&priv->security_work);
- cancel_delayed_work(&priv->wx_event_work);
- cancel_delayed_work(&priv->hang_check);
- cancel_delayed_work(&priv->rf_kill);
- cancel_delayed_work(&priv->scan_event_later);
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- }
+ priv->stop_rf_kill = 1;
+ priv->stop_hang_check = 1;
+ cancel_delayed_work_sync(&priv->reset_work);
+ cancel_delayed_work_sync(&priv->security_work);
+ cancel_delayed_work_sync(&priv->wx_event_work);
+ cancel_delayed_work_sync(&priv->hang_check);
+ cancel_delayed_work_sync(&priv->rf_kill);
+ cancel_work_sync(&priv->scan_event_now);
+ cancel_delayed_work_sync(&priv->scan_event_later);
}
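
With the private workqueue gone, the work items run on the system workqueue, so teardown has to cancel each one explicitly with the _sync variants rather than destroying a queue. The general pattern being adopted, sketched with the driver's existing hang_check item:

	INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);

	schedule_delayed_work(&priv->hang_check, HZ / 2);	/* queued on the system workqueue */

	/* ... later, on teardown: */
	cancel_delayed_work_sync(&priv->hang_check);		/* waits for a running instance to finish */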
static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
@@ -6046,7 +6040,7 @@ static void ipw2100_hang_check(struct work_struct *work)
priv->last_rtc = rtc;
if (!priv->stop_hang_check)
- queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
+ schedule_delayed_work(&priv->hang_check, HZ / 2);
spin_unlock_irqrestore(&priv->low_lock, flags);
}
@@ -6062,8 +6056,8 @@ static void ipw2100_rf_kill(struct work_struct *work)
if (rf_kill_active(priv)) {
IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
if (!priv->stop_rf_kill)
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(HZ));
goto exit_unlock;
}
@@ -6209,8 +6203,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
INIT_LIST_HEAD(&priv->fw_pend_list);
INIT_STAT(&priv->fw_pend_stat);
- priv->workqueue = create_workqueue(DRV_NAME);
-
INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
@@ -6410,7 +6402,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
if (dev->irq)
free_irq(dev->irq, priv);
- ipw2100_kill_workqueue(priv);
+ ipw2100_kill_works(priv);
/* These are safe to call even if they weren't allocated */
ipw2100_queues_free(priv);
@@ -6460,9 +6452,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
* first, then close() will crash. */
unregister_netdev(dev);
- /* ipw2100_down will ensure that there is no more pending work
- * in the workqueue's, so we can safely remove them now. */
- ipw2100_kill_workqueue(priv);
+ ipw2100_kill_works(priv);
ipw2100_queues_free(priv);
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 838002b4881e..99cba968aa58 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -580,7 +580,6 @@ struct ipw2100_priv {
struct tasklet_struct irq_tasklet;
- struct workqueue_struct *workqueue;
struct delayed_work reset_work;
struct delayed_work security_work;
struct delayed_work wx_event_work;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d6ed5f6f46f..160881f234cc 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -894,9 +894,8 @@ static void ipw_led_link_on(struct ipw_priv *priv)
/* If we aren't associated, schedule turning the LED off */
if (!(priv->status & STATUS_ASSOCIATED))
- queue_delayed_work(priv->workqueue,
- &priv->led_link_off,
- LD_TIME_LINK_ON);
+ schedule_delayed_work(&priv->led_link_off,
+ LD_TIME_LINK_ON);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -939,8 +938,8 @@ static void ipw_led_link_off(struct ipw_priv *priv)
* turning the LED on (blink while unassociated) */
if (!(priv->status & STATUS_RF_KILL_MASK) &&
!(priv->status & STATUS_ASSOCIATED))
- queue_delayed_work(priv->workqueue, &priv->led_link_on,
- LD_TIME_LINK_OFF);
+ schedule_delayed_work(&priv->led_link_on,
+ LD_TIME_LINK_OFF);
}
@@ -980,13 +979,11 @@ static void __ipw_led_activity_on(struct ipw_priv *priv)
priv->status |= STATUS_LED_ACT_ON;
cancel_delayed_work(&priv->led_act_off);
- queue_delayed_work(priv->workqueue, &priv->led_act_off,
- LD_TIME_ACT_ON);
+ schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
} else {
/* Reschedule LED off for full time period */
cancel_delayed_work(&priv->led_act_off);
- queue_delayed_work(priv->workqueue, &priv->led_act_off,
- LD_TIME_ACT_ON);
+ schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
}
}
@@ -1795,13 +1792,11 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
if (disable_radio) {
priv->status |= STATUS_RF_KILL_SW;
- if (priv->workqueue) {
- cancel_delayed_work(&priv->request_scan);
- cancel_delayed_work(&priv->request_direct_scan);
- cancel_delayed_work(&priv->request_passive_scan);
- cancel_delayed_work(&priv->scan_event);
- }
- queue_work(priv->workqueue, &priv->down);
+ cancel_delayed_work(&priv->request_scan);
+ cancel_delayed_work(&priv->request_direct_scan);
+ cancel_delayed_work(&priv->request_passive_scan);
+ cancel_delayed_work(&priv->scan_event);
+ schedule_work(&priv->down);
} else {
priv->status &= ~STATUS_RF_KILL_SW;
if (rf_kill_active(priv)) {
@@ -1809,10 +1804,10 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
"disabled by HW switch\n");
/* Make sure the RF_KILL check timer is running */
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(2 * HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(2 * HZ));
} else
- queue_work(priv->workqueue, &priv->up);
+ schedule_work(&priv->up);
}
return 1;
@@ -1973,6 +1968,13 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
inta = ipw_read32(priv, IPW_INTA_RW);
inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
+
+ if (inta == 0xFFFFFFFF) {
+ /* Hardware disappeared */
+ IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
+ /* Only handle the cached INTA values */
+ inta = 0;
+ }
inta &= (IPW_INTA_MASK_ALL & inta_mask);
/* Add any cached INTA values that need to be handled */
@@ -2056,7 +2058,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
cancel_delayed_work(&priv->request_passive_scan);
cancel_delayed_work(&priv->scan_event);
schedule_work(&priv->link_down);
- queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
+ schedule_delayed_work(&priv->rf_kill, 2 * HZ);
handled |= IPW_INTA_BIT_RF_KILL_DONE;
}
@@ -2096,7 +2098,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
priv->status &= ~STATUS_HCMD_ACTIVE;
wake_up_interruptible(&priv->wait_command_queue);
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
handled |= IPW_INTA_BIT_FATAL_ERROR;
}
@@ -2316,11 +2318,6 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
}
-/*
- * NOTE: This must be executed from our workqueue as it results in udelay
- * being called which may corrupt the keyboard if executed on default
- * workqueue
- */
static void ipw_adapter_restart(void *adapter)
{
struct ipw_priv *priv = adapter;
@@ -2361,13 +2358,13 @@ static void ipw_scan_check(void *data)
IPW_DEBUG_SCAN("Scan completion watchdog resetting "
"adapter after (%dms).\n",
jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
} else if (priv->status & STATUS_SCANNING) {
IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
"after (%dms).\n",
jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
ipw_abort_scan(priv);
- queue_delayed_work(priv->workqueue, &priv->scan_check, HZ);
+ schedule_delayed_work(&priv->scan_check, HZ);
}
}
@@ -3936,7 +3933,7 @@ static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
if (priv->status & STATUS_ASSOCIATING) {
IPW_DEBUG_ASSOC("Disassociating while associating.\n");
- queue_work(priv->workqueue, &priv->disassociate);
+ schedule_work(&priv->disassociate);
return;
}
@@ -4353,8 +4350,7 @@ static void ipw_gather_stats(struct ipw_priv *priv)
priv->quality = quality;
- queue_delayed_work(priv->workqueue, &priv->gather_stats,
- IPW_STATS_INTERVAL);
+ schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
}
static void ipw_bg_gather_stats(struct work_struct *work)
@@ -4389,10 +4385,10 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
IPW_DL_STATE,
"Aborting scan with missed beacon.\n");
- queue_work(priv->workqueue, &priv->abort_scan);
+ schedule_work(&priv->abort_scan);
}
- queue_work(priv->workqueue, &priv->disassociate);
+ schedule_work(&priv->disassociate);
return;
}
@@ -4418,8 +4414,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
if (!(priv->status & STATUS_ROAMING)) {
priv->status |= STATUS_ROAMING;
if (!(priv->status & STATUS_SCANNING))
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
}
return;
}
@@ -4432,7 +4427,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
* channels..) */
IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
"Aborting scan with missed beacon.\n");
- queue_work(priv->workqueue, &priv->abort_scan);
+ schedule_work(&priv->abort_scan);
}
IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
@@ -4455,8 +4450,8 @@ static void handle_scan_event(struct ipw_priv *priv)
/* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) {
if (!delayed_work_pending(&priv->scan_event))
- queue_delayed_work(priv->workqueue, &priv->scan_event,
- round_jiffies_relative(msecs_to_jiffies(4000)));
+ schedule_delayed_work(&priv->scan_event,
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
union iwreq_data wrqu;
@@ -4509,20 +4504,17 @@ static void ipw_rx_notification(struct ipw_priv *priv,
IPW_DEBUG_ASSOC
("queueing adhoc check\n");
- queue_delayed_work(priv->
- workqueue,
- &priv->
- adhoc_check,
- le16_to_cpu(priv->
- assoc_request.
- beacon_interval));
+ schedule_delayed_work(
+ &priv->adhoc_check,
+ le16_to_cpu(priv->
+ assoc_request.
+ beacon_interval));
break;
}
priv->status &= ~STATUS_ASSOCIATING;
priv->status |= STATUS_ASSOCIATED;
- queue_work(priv->workqueue,
- &priv->system_config);
+ schedule_work(&priv->system_config);
#ifdef CONFIG_IPW2200_QOS
#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
@@ -4785,43 +4777,37 @@ static void ipw_rx_notification(struct ipw_priv *priv,
#ifdef CONFIG_IPW2200_MONITOR
if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
priv->status |= STATUS_SCAN_FORCED;
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
break;
}
priv->status &= ~STATUS_SCAN_FORCED;
#endif /* CONFIG_IPW2200_MONITOR */
/* Do queued direct scans first */
- if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
- queue_delayed_work(priv->workqueue,
- &priv->request_direct_scan, 0);
- }
+ if (priv->status & STATUS_DIRECT_SCAN_PENDING)
+ schedule_delayed_work(&priv->request_direct_scan, 0);
if (!(priv->status & (STATUS_ASSOCIATED |
STATUS_ASSOCIATING |
STATUS_ROAMING |
STATUS_DISASSOCIATING)))
- queue_work(priv->workqueue, &priv->associate);
+ schedule_work(&priv->associate);
else if (priv->status & STATUS_ROAMING) {
if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
/* If a scan completed and we are in roam mode, then
* the scan that completed was the one requested as a
* result of entering roam... so, schedule the
* roam work */
- queue_work(priv->workqueue,
- &priv->roam);
+ schedule_work(&priv->roam);
else
/* Don't schedule if we aborted the scan */
priv->status &= ~STATUS_ROAMING;
} else if (priv->status & STATUS_SCAN_PENDING)
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
else if (priv->config & CFG_BACKGROUND_SCAN
&& priv->status & STATUS_ASSOCIATED)
- queue_delayed_work(priv->workqueue,
- &priv->request_scan,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->request_scan,
+ round_jiffies_relative(HZ));
/* Send an empty event to user space.
* We don't send the received data on the event because
@@ -5185,7 +5171,7 @@ static void ipw_rx_queue_restock(struct ipw_priv *priv)
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
+ schedule_work(&priv->rx_replenish);
/* If we've added more space for the firmware to place data, tell it */
if (write != rxq->write)
@@ -6126,8 +6112,8 @@ static void ipw_adhoc_check(void *data)
return;
}
- queue_delayed_work(priv->workqueue, &priv->adhoc_check,
- le16_to_cpu(priv->assoc_request.beacon_interval));
+ schedule_delayed_work(&priv->adhoc_check,
+ le16_to_cpu(priv->assoc_request.beacon_interval));
}
static void ipw_bg_adhoc_check(struct work_struct *work)
@@ -6516,8 +6502,7 @@ send_request:
} else
priv->status &= ~STATUS_SCAN_PENDING;
- queue_delayed_work(priv->workqueue, &priv->scan_check,
- IPW_SCAN_CHECK_WATCHDOG);
+ schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
done:
mutex_unlock(&priv->mutex);
return err;
@@ -6987,8 +6972,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
!memcmp(network->ssid,
priv->assoc_network->ssid,
network->ssid_len)) {
- queue_work(priv->workqueue,
- &priv->merge_networks);
+ schedule_work(&priv->merge_networks);
}
}
@@ -7656,7 +7640,7 @@ static int ipw_associate(void *data)
if (priv->status & STATUS_DISASSOCIATING) {
IPW_DEBUG_ASSOC("Not attempting association (in "
"disassociating)\n ");
- queue_work(priv->workqueue, &priv->associate);
+ schedule_work(&priv->associate);
return 0;
}
@@ -7724,12 +7708,10 @@ static int ipw_associate(void *data)
if (!(priv->status & STATUS_SCANNING)) {
if (!(priv->config & CFG_SPEED_SCAN))
- queue_delayed_work(priv->workqueue,
- &priv->request_scan,
- SCAN_INTERVAL);
+ schedule_delayed_work(&priv->request_scan,
+ SCAN_INTERVAL);
else
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
}
return 0;
@@ -8892,7 +8874,7 @@ static int ipw_wx_set_mode(struct net_device *dev,
priv->ieee->iw_mode = wrqu->mode;
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
mutex_unlock(&priv->mutex);
return err;
}
@@ -9591,7 +9573,7 @@ static int ipw_wx_set_scan(struct net_device *dev,
IPW_DEBUG_WX("Start scan\n");
- queue_delayed_work(priv->workqueue, work, 0);
+ schedule_delayed_work(work, 0);
return 0;
}
@@ -9930,7 +9912,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
#else
priv->net_dev->type = ARPHRD_IEEE80211;
#endif
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
}
ipw_set_channel(priv, parms[1]);
@@ -9940,7 +9922,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
return 0;
}
priv->net_dev->type = ARPHRD_ETHER;
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
}
mutex_unlock(&priv->mutex);
return 0;
@@ -9954,7 +9936,7 @@ static int ipw_wx_reset(struct net_device *dev,
{
struct ipw_priv *priv = libipw_priv(dev);
IPW_DEBUG_WX("RESET\n");
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
return 0;
}
@@ -10544,7 +10526,7 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p)
memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
printk(KERN_INFO "%s: Setting MAC to %pM\n",
priv->net_dev->name, priv->mac_addr);
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
mutex_unlock(&priv->mutex);
return 0;
}
@@ -10677,9 +10659,7 @@ static void ipw_rf_kill(void *adapter)
if (rf_kill_active(priv)) {
IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
- if (priv->workqueue)
- queue_delayed_work(priv->workqueue,
- &priv->rf_kill, 2 * HZ);
+ schedule_delayed_work(&priv->rf_kill, 2 * HZ);
goto exit_unlock;
}
@@ -10690,7 +10670,7 @@ static void ipw_rf_kill(void *adapter)
"device\n");
/* we can not do an adapter restart while inside an irq lock */
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
} else
IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
"enabled\n");
@@ -10728,7 +10708,7 @@ static void ipw_link_up(struct ipw_priv *priv)
notify_wx_assoc_event(priv);
if (priv->config & CFG_BACKGROUND_SCAN)
- queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
+ schedule_delayed_work(&priv->request_scan, HZ);
}
static void ipw_bg_link_up(struct work_struct *work)
@@ -10757,7 +10737,7 @@ static void ipw_link_down(struct ipw_priv *priv)
if (!(priv->status & STATUS_EXIT_PENDING)) {
/* Queue up another scan... */
- queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
} else
cancel_delayed_work(&priv->scan_event);
}
@@ -10775,7 +10755,6 @@ static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
{
int ret = 0;
- priv->workqueue = create_workqueue(DRV_NAME);
init_waitqueue_head(&priv->wait_command_queue);
init_waitqueue_head(&priv->wait_state);
@@ -11332,8 +11311,7 @@ static int ipw_up(struct ipw_priv *priv)
IPW_WARNING("Radio Frequency Kill Switch is On:\n"
"Kill switch must be turned off for "
"wireless networking to work.\n");
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- 2 * HZ);
+ schedule_delayed_work(&priv->rf_kill, 2 * HZ);
return 0;
}
@@ -11343,8 +11321,7 @@ static int ipw_up(struct ipw_priv *priv)
/* If configure to try and auto-associate, kick
* off a scan. */
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
return 0;
}
@@ -11810,7 +11787,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
if (err) {
IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
- goto out_destroy_workqueue;
+ goto out_iounmap;
}
SET_NETDEV_DEV(net_dev, &pdev->dev);
@@ -11878,9 +11855,6 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
out_release_irq:
free_irq(pdev->irq, priv);
- out_destroy_workqueue:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
out_iounmap:
iounmap(priv->hw_base);
out_pci_release_regions:
@@ -11923,18 +11897,31 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
kfree(priv->cmdlog);
priv->cmdlog = NULL;
}
- /* ipw_down will ensure that there is no more pending work
- * in the workqueue's, so we can safely remove them now. */
- cancel_delayed_work(&priv->adhoc_check);
- cancel_delayed_work(&priv->gather_stats);
- cancel_delayed_work(&priv->request_scan);
- cancel_delayed_work(&priv->request_direct_scan);
- cancel_delayed_work(&priv->request_passive_scan);
- cancel_delayed_work(&priv->scan_event);
- cancel_delayed_work(&priv->rf_kill);
- cancel_delayed_work(&priv->scan_check);
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
+
+ /* make sure all works are inactive */
+ cancel_delayed_work_sync(&priv->adhoc_check);
+ cancel_work_sync(&priv->associate);
+ cancel_work_sync(&priv->disassociate);
+ cancel_work_sync(&priv->system_config);
+ cancel_work_sync(&priv->rx_replenish);
+ cancel_work_sync(&priv->adapter_restart);
+ cancel_delayed_work_sync(&priv->rf_kill);
+ cancel_work_sync(&priv->up);
+ cancel_work_sync(&priv->down);
+ cancel_delayed_work_sync(&priv->request_scan);
+ cancel_delayed_work_sync(&priv->request_direct_scan);
+ cancel_delayed_work_sync(&priv->request_passive_scan);
+ cancel_delayed_work_sync(&priv->scan_event);
+ cancel_delayed_work_sync(&priv->gather_stats);
+ cancel_work_sync(&priv->abort_scan);
+ cancel_work_sync(&priv->roam);
+ cancel_delayed_work_sync(&priv->scan_check);
+ cancel_work_sync(&priv->link_up);
+ cancel_work_sync(&priv->link_down);
+ cancel_delayed_work_sync(&priv->led_link_on);
+ cancel_delayed_work_sync(&priv->led_link_off);
+ cancel_delayed_work_sync(&priv->led_act_off);
+ cancel_work_sync(&priv->merge_networks);
/* Free MAC hash list for ADHOC */
for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
@@ -12022,7 +12009,7 @@ static int ipw_pci_resume(struct pci_dev *pdev)
priv->suspend_time = get_seconds() - priv->suspend_at;
/* Bring the device back up */
- queue_work(priv->workqueue, &priv->up);
+ schedule_work(&priv->up);
return 0;
}
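The ipw2200 hunks above all follow one pattern: every queue_work(priv->workqueue, ...) and queue_delayed_work(priv->workqueue, ...) becomes schedule_work()/schedule_delayed_work() on the system workqueue, and the remove path compensates by cancelling each work item synchronously before the driver's memory is freed. Below is a minimal sketch of that conversion pattern; the demo_* names are illustrative only and are not symbols from ipw2200.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_priv {
        struct work_struct restart;             /* one-shot work */
        struct delayed_work rf_kill_poll;       /* re-armed delayed work */
};

static void demo_restart(struct work_struct *work)
{
        struct demo_priv *priv = container_of(work, struct demo_priv, restart);

        /* restart the adapter here; priv is still valid because
         * demo_remove() syncs before freeing it */
        (void)priv;
}

static void demo_rf_kill_poll(struct work_struct *work)
{
        struct demo_priv *priv = container_of(to_delayed_work(work),
                                              struct demo_priv, rf_kill_poll);

        /* re-arm on the system workqueue instead of a private one */
        schedule_delayed_work(&priv->rf_kill_poll,
                              round_jiffies_relative(2 * HZ));
}

static void demo_start(struct demo_priv *priv)
{
        INIT_WORK(&priv->restart, demo_restart);
        INIT_DELAYED_WORK(&priv->rf_kill_poll, demo_rf_kill_poll);

        schedule_work(&priv->restart);
        schedule_delayed_work(&priv->rf_kill_poll, 2 * HZ);
}

static void demo_remove(struct demo_priv *priv)
{
        /* make sure every work item is idle before freeing priv */
        cancel_work_sync(&priv->restart);
        cancel_delayed_work_sync(&priv->rf_kill_poll);
}

With the concurrency-managed workqueue core, this kind of driver housekeeping (scan, LED and rf-kill handlers) can share the system workqueue; a dedicated workqueue is generally only justified by special ordering or forward-progress requirements.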
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index d7d049c7a4fa..91795b5a93c5 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -961,7 +961,7 @@ struct ipw_country_channel_info {
struct ipw_country_info {
u8 id;
u8 length;
- u8 country_str[3];
+ u8 country_str[IEEE80211_COUNTRY_STRING_LEN];
struct ipw_country_channel_info groups[7];
} __packed;
@@ -1299,8 +1299,6 @@ struct ipw_priv {
u8 direct_scan_ssid[IW_ESSID_MAX_SIZE];
u8 direct_scan_ssid_len;
- struct workqueue_struct *workqueue;
-
struct delayed_work adhoc_check;
struct work_struct associate;
struct work_struct disassociate;
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
new file mode 100644
index 000000000000..2a45dd44cc12
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -0,0 +1,116 @@
+config IWLWIFI_LEGACY
+ tristate "Intel Wireless WiFi legacy devices"
+ depends on PCI && MAC80211
+ select FW_LOADER
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LEDS_TRIGGERS
+ select MAC80211_LEDS
+
+menu "Debugging Options"
+ depends on IWLWIFI_LEGACY
+
+config IWLWIFI_LEGACY_DEBUG
+ bool "Enable full debugging output in 4965 and 3945 drivers"
+ depends on IWLWIFI_LEGACY
+ ---help---
+ This option will enable debug tracing output for the iwlwifilegacy
+ drivers.
+
+ This will result in the kernel module being ~100k larger. You can
+ control which debug output is sent to the kernel log by setting the
+ value in
+
+ /sys/class/net/wlan0/device/debug_level
+
+ This entry will only exist if this option is enabled.
+
+ To set a value, simply echo a hex bit-mask value to the same file:
+
+ % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
+
+ You can find the list of debug mask values in:
+ drivers/net/wireless/iwlegacy/iwl-debug.h
+
+ If this is your first time using this driver, you should say Y here
+ as the debug information can assist others in helping you resolve
+ any problems you may encounter.
+
+config IWLWIFI_LEGACY_DEBUGFS
+ bool "4965 and 3945 debugfs support"
+ depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
+ ---help---
+ Enable creation of debugfs files for the iwlwifilegacy drivers. This
+ is a low-impact option that allows getting insight into the
+ driver's state at runtime.
+
+config IWLWIFI_LEGACY_DEVICE_TRACING
+ bool "iwlwifilegacy legacy device access tracing"
+ depends on IWLWIFI_LEGACY
+ depends on EVENT_TRACING
+ help
+ Say Y here to trace all commands, including TX frames and IO
+ accesses, sent to the device. If you say yes, iwlwifilegacy will
+ register with the ftrace framework for event tracing and dump
+ all this information to the ringbuffer; you may need to
+ increase the ringbuffer size. See the ftrace documentation
+ for more information.
+
+ When tracing is not enabled, this option still has some
+ (though rather small) overhead.
+
+ If unsure, say Y so we can help you better when problems
+ occur.
+endmenu
+
+config IWL4965
+ tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
+ depends on IWLWIFI_LEGACY
+ ---help---
+ Select to build the driver supporting the:
+
+ Intel Wireless WiFi Link 4965AGN
+
+ This driver uses the kernel's mac80211 subsystem.
+
+ In order to use this driver, you will need a microcode (uCode)
+ image for it. You can obtain the microcode from:
+
+ <http://intellinuxwireless.org/>.
+
+ The microcode is typically installed in /lib/firmware. You can
+ look in the hotplug script /etc/hotplug/firmware.agent to
+ determine which directory FIRMWARE_DIR is set to when the script
+ runs.
+
+ If you want to compile the driver as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here and read <file:Documentation/kbuild/modules.txt>. The
+ module will be called iwl4965.
+
+config IWL3945
+ tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
+ depends on IWLWIFI_LEGACY
+ ---help---
+ Select to build the driver supporting the:
+
+ Intel PRO/Wireless 3945ABG/BG Network Connection
+
+ This driver uses the kernel's mac80211 subsystem.
+
+ In order to use this driver, you will need a microcode (uCode)
+ image for it. You can obtain the microcode from:
+
+ <http://intellinuxwireless.org/>.
+
+ The microcode is typically installed in /lib/firmware. You can
+ look in the hotplug script /etc/hotplug/firmware.agent to
+ determine which directory FIRMWARE_DIR is set to when the script
+ runs.
+
+ If you want to compile the driver as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here and read <file:Documentation/kbuild/modules.txt>. The
+ module will be called iwl3945.
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
new file mode 100644
index 000000000000..d56aeb38c211
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -0,0 +1,25 @@
+obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o
+iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
+iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o
+iwl-legacy-objs += iwl-scan.o iwl-led.o
+iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
+iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
+
+iwl-legacy-objs += $(iwl-legacy-m)
+
+CFLAGS_iwl-devtrace.o := -I$(src)
+
+# 4965
+obj-$(CONFIG_IWL4965) += iwl4965.o
+iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
+iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o
+iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
+iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
+iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
+
+# 3945
+obj-$(CONFIG_IWL3945) += iwl3945.o
+iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
+iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
+
+ccflags-y += -D__CHECK_ENDIAN__
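The CFLAGS_iwl-devtrace.o := -I$(src) line above is the usual companion of a TRACE_EVENT header: <trace/define_trace.h> re-includes the header from the directory named by TRACE_INCLUDE_PATH, so the object that instantiates the tracepoints needs its own source directory on the include path. A minimal sketch of such a header follows; the demo names are illustrative and are not the actual iwl-devtrace definitions. One .c file in the module defines CREATE_TRACE_POINTS immediately before including the header so the tracepoint bodies are emitted exactly once.

/* demo-trace.h - minimal TRACE_EVENT header (illustrative names only) */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(__DEMO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __DEMO_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(demo_iowrite32,
        TP_PROTO(u32 offs, u32 val),
        TP_ARGS(offs, val),
        TP_STRUCT__entry(
                __field(u32, offs)
                __field(u32, val)
        ),
        TP_fast_assign(
                __entry->offs = offs;
                __entry->val = val;
        ),
        TP_printk("write io[%#x] = %#x", __entry->offs, __entry->val)
);

#endif /* __DEMO_TRACE_H */

/* Outside the include guard: this re-expansion is what needs -I$(src) */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE demo-trace
#include <trace/define_trace.h>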
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
index ef0835b01b6b..cfabb38793ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -60,12 +60,13 @@ ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
ssize_t ret;
- struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
+ *max_ofdm;
struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
struct iwl39_statistics_rx_non_phy *general, *accum_general;
struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -335,7 +336,7 @@ ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
ssize_t ret;
struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -434,7 +435,7 @@ ssize_t iwl3945_ucode_general_stats_read(struct file *file,
struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
buf = kzalloc(bufsz, GFP_KERNEL);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
index 70809c53c215..8fef4b32b447 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
#include "iwl-core.h"
#include "iwl-debug.h"
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
index 2c9ed2b502a3..836c9919f82e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -185,4 +185,3 @@ struct iwl3945_tfd {
#endif /* __iwl_3945_fh_h__ */
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
index 65b5834da28c..779d3cb86e2c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -164,12 +164,11 @@ struct iwl3945_eeprom {
/*
* Per-channel regulatory data.
*
- * Each channel that *might* be supported by 3945 or 4965 has a fixed location
+ * Each channel that *might* be supported by 3945 has a fixed location
* in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
* txpower (MSB).
*
- * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
- * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ * Entries immediately below are for 20 MHz channel width.
*
* 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
*/
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
index abe2b739c4dc..abd923558d48 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -56,36 +56,9 @@ static int iwl3945_send_led_cmd(struct iwl_priv *priv,
.callback = NULL,
};
- return iwl_send_cmd(priv, &cmd);
-}
-
-/* Set led on command */
-static int iwl3945_led_on(struct iwl_priv *priv)
-{
- struct iwl_led_cmd led_cmd = {
- .id = IWL_LED_LINK,
- .on = IWL_LED_SOLID,
- .off = 0,
- .interval = IWL_DEF_LED_INTRVL
- };
- return iwl3945_send_led_cmd(priv, &led_cmd);
-}
-
-/* Set led off command */
-static int iwl3945_led_off(struct iwl_priv *priv)
-{
- struct iwl_led_cmd led_cmd = {
- .id = IWL_LED_LINK,
- .on = 0,
- .off = 0,
- .interval = IWL_DEF_LED_INTRVL
- };
- IWL_DEBUG_LED(priv, "led off\n");
- return iwl3945_send_led_cmd(priv, &led_cmd);
+ return iwl_legacy_send_cmd(priv, &cmd);
}
const struct iwl_led_ops iwl3945_led_ops = {
.cmd = iwl3945_send_led_cmd,
- .on = iwl3945_led_on,
- .off = iwl3945_led_off,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
index ce990adc51e7..96716276eb0d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 1f3e7e34fbc7..977bd2477c6a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -89,7 +89,7 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
};
#define IWL_RATE_MAX_WINDOW 62
-#define IWL_RATE_FLUSH (3*HZ)
+#define IWL_RATE_FLUSH (3*HZ)
#define IWL_RATE_WIN_FLUSH (HZ/2)
#define IWL39_RATE_HIGH_TH 11520
#define IWL_SUCCESS_UP_TH 8960
@@ -394,18 +394,18 @@ out:
IWL_DEBUG_INFO(priv, "leave\n");
}
-static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
return hw->priv;
}
/* rate scale requires free function to be implemented */
-static void rs_free(void *priv)
+static void iwl3945_rs_free(void *priv)
{
return;
}
-static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
+static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
{
struct iwl3945_rs_sta *rs_sta;
struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
@@ -423,7 +423,7 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
return rs_sta;
}
-static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
+static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
void *priv_sta)
{
struct iwl3945_rs_sta *rs_sta = priv_sta;
@@ -438,12 +438,12 @@ static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
/**
- * rs_tx_status - Update rate control values based on Tx results
+ * iwl3945_rs_tx_status - Update rate control values based on Tx results
*
* NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
* the hardware for each rate.
*/
-static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
+static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb)
{
@@ -612,7 +612,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
}
/**
- * rs_get_rate - find the rate for the requested packet
+ * iwl3945_rs_get_rate - find the rate for the requested packet
*
* Returns the ieee80211_rate structure allocated by the driver.
*
@@ -627,7 +627,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
* rate table and must reference the driver allocated rate table
*
*/
-static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
+static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
void *priv_sta, struct ieee80211_tx_rate_control *txrc)
{
struct ieee80211_supported_band *sband = txrc->sband;
@@ -644,7 +644,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
u32 fail_count;
s8 scale_action = 0;
unsigned long flags;
- u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0;
+ u16 rate_mask;
s8 max_rate_idx = -1;
struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -899,7 +899,8 @@ static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
* the station is added. Since mac80211 calls this function before a
* station is added we ignore it.
*/
-static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+static void iwl3945_rs_rate_init_stub(void *priv_r,
+ struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta)
{
}
@@ -907,13 +908,13 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba
static struct rate_control_ops rs_ops = {
.module = NULL,
.name = RS_NAME,
- .tx_status = rs_tx_status,
- .get_rate = rs_get_rate,
- .rate_init = rs_rate_init_stub,
- .alloc = rs_alloc,
- .free = rs_free,
- .alloc_sta = rs_alloc_sta,
- .free_sta = rs_free_sta,
+ .tx_status = iwl3945_rs_tx_status,
+ .get_rate = iwl3945_rs_get_rate,
+ .rate_init = iwl3945_rs_rate_init_stub,
+ .alloc = iwl3945_rs_alloc,
+ .free = iwl3945_rs_free,
+ .alloc_sta = iwl3945_rs_alloc_sta,
+ .free_sta = iwl3945_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = iwl3945_add_debugfs,
.remove_sta_debugfs = iwl3945_remove_debugfs,
@@ -991,5 +992,3 @@ void iwl3945_rate_control_unregister(void)
{
ieee80211_rate_control_unregister(&rs_ops);
}
-
-
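The rs_* callbacks renamed above form a mac80211 rate-control algorithm: mac80211 calls .alloc/.free once per device, .alloc_sta/.free_sta per station, .tx_status after each transmission and .get_rate to pick the rate for an outgoing frame, and the whole table is registered with ieee80211_rate_control_register(). The sketch below shows the shape of such an algorithm against the mac80211 API of this era; the demo_rs_* names and the always-lowest-rate policy are illustrative, not the iwl3945 logic.

#include <linux/slab.h>
#include <net/mac80211.h>

static void *demo_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
        return hw->priv;                /* per-device rate-control state */
}

static void demo_rs_free(void *priv)
{
}

static void *demo_rs_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
        return kzalloc(sizeof(u32), gfp);       /* per-station state */
}

static void demo_rs_free_sta(void *priv, struct ieee80211_sta *sta,
                             void *priv_sta)
{
        kfree(priv_sta);
}

static void demo_rs_rate_init(void *priv, struct ieee80211_supported_band *sband,
                              struct ieee80211_sta *sta, void *priv_sta)
{
        /* reset per-station statistics when a station is (re)initialized */
}

static void demo_rs_tx_status(void *priv, struct ieee80211_supported_band *sband,
                              struct ieee80211_sta *sta, void *priv_sta,
                              struct sk_buff *skb)
{
        /* inspect IEEE80211_SKB_CB(skb) here and update success counters */
}

static void demo_rs_get_rate(void *priv, struct ieee80211_sta *sta,
                             void *priv_sta,
                             struct ieee80211_tx_rate_control *txrc)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);

        info->control.rates[0].idx = 0;         /* always the lowest rate */
        info->control.rates[0].count = 1;
}

static struct rate_control_ops demo_rs_ops = {
        .module         = NULL,
        .name           = "demo_rs",
        .tx_status      = demo_rs_tx_status,
        .get_rate       = demo_rs_get_rate,
        .rate_init      = demo_rs_rate_init,
        .alloc          = demo_rs_alloc,
        .free           = demo_rs_free,
        .alloc_sta      = demo_rs_alloc_sta,
        .free_sta       = demo_rs_free_sta,
};

int demo_rate_control_register(void)
{
        return ieee80211_rate_control_register(&demo_rs_ops);
}

void demo_rate_control_unregister(void)
{
        ieee80211_rate_control_unregister(&demo_rs_ops);
}

A real algorithm keeps a per-station success/failure window (as the IWL_RATE_MAX_WINDOW bookkeeping above does) and moves the rate index up or down from .tx_status feedback rather than pinning it to index 0.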
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
index 176e52577673..d096dc28204d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -115,7 +115,7 @@ void iwl3945_disable_events(struct iwl_priv *priv)
u32 base; /* SRAM address of event log header */
u32 disable_ptr; /* SRAM address of event-disable bitmap array */
u32 array_size; /* # of u32 entries in array */
- u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
+ static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
0x00000000, /* 31 - 0 Event id numbers */
0x00000000, /* 63 - 32 */
0x00000000, /* 95 - 64 */
@@ -171,14 +171,14 @@ void iwl3945_disable_events(struct iwl_priv *priv)
return;
}
- disable_ptr = iwl_read_targ_mem(priv, base + (4 * sizeof(u32)));
- array_size = iwl_read_targ_mem(priv, base + (5 * sizeof(u32)));
+ disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
+ array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
disable_ptr);
for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
- iwl_write_targ_mem(priv,
+ iwl_legacy_write_targ_mem(priv,
disable_ptr + (i * sizeof(u32)),
evt_disable[i]);
@@ -201,7 +201,7 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
return -1;
}
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
static const char *iwl3945_get_tx_fail_reason(u32 status)
@@ -254,7 +254,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
break;
case IEEE80211_BAND_2GHZ:
if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
- iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
+ iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
if (rate == IWL_RATE_11M_INDEX)
next_rate = IWL_RATE_5M_INDEX;
}
@@ -284,8 +284,9 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
- for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
+ q->read_ptr != index;
+ q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
tx_info = &txq->txb[txq->q.read_ptr];
ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
@@ -293,10 +294,10 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
}
- if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
+ if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
(txq_id != IWL39_CMD_QUEUE_NUM) &&
priv->mac80211_registered)
- iwl_wake_queue(priv, txq_id);
+ iwl_legacy_wake_queue(priv, txq);
}
/**
@@ -316,7 +317,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
int rate_idx;
int fail;
- if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
+ if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
"is out of range [0-%d] %d %d\n", txq_id,
index, txq->q.n_bd, txq->q.write_ptr,
@@ -324,6 +325,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
return;
}
+ txq->time_stamp = jiffies;
info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
ieee80211_tx_info_clear_status(info);
@@ -361,12 +363,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
* RX handler implementations
*
*****************************************************************************/
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-/*
- * based on the assumption of all statistics counter are in DWORD
- * FIXME: This function is for debugging, do not deal with
- * the case of counters roll-over.
- */
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
__le32 *stats)
{
@@ -400,72 +397,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
}
#endif
-/**
- * iwl3945_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt)
-{
- bool rc = true;
- struct iwl3945_notif_statistics current_stat;
- int combined_plcp_delta;
- unsigned int plcp_msec;
- unsigned long plcp_received_jiffies;
-
- if (priv->cfg->base_params->plcp_delta_threshold ==
- IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
- IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
- return rc;
- }
- memcpy(&current_stat, pkt->u.raw, sizeof(struct
- iwl3945_notif_statistics));
- /*
- * check for plcp_err and trigger radio reset if it exceeds
- * the plcp error threshold plcp_delta.
- */
- plcp_received_jiffies = jiffies;
- plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
- (long) priv->plcp_jiffies);
- priv->plcp_jiffies = plcp_received_jiffies;
- /*
- * check to make sure plcp_msec is not 0 to prevent division
- * by zero.
- */
- if (plcp_msec) {
- combined_plcp_delta =
- (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
- le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
-
- if ((combined_plcp_delta > 0) &&
- ((combined_plcp_delta * 100) / plcp_msec) >
- priv->cfg->base_params->plcp_delta_threshold) {
- /*
- * if plcp_err exceed the threshold, the following
- * data is printed in csv format:
- * Text: plcp_err exceeded %d,
- * Received ofdm.plcp_err,
- * Current ofdm.plcp_err,
- * combined_plcp_delta,
- * plcp_msec
- */
- IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
- "%u, %d, %u mSecs\n",
- priv->cfg->base_params->plcp_delta_threshold,
- le32_to_cpu(current_stat.rx.ofdm.plcp_err),
- combined_plcp_delta, plcp_msec);
- /*
- * Reset the RF radio due to the high plcp
- * error rate
- */
- rc = false;
- }
- }
- return rc;
-}
-
void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
@@ -474,10 +405,10 @@ void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
(int)sizeof(struct iwl3945_notif_statistics),
le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
#endif
- iwl_recover_from_statistics(priv, pkt);
+ iwl_legacy_recover_from_statistics(priv, pkt);
memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
}
@@ -489,7 +420,7 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
__le32 *flag = (__le32 *)&pkt->u.raw;
if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
memset(&priv->_3945.accum_statistics, 0,
sizeof(struct iwl3945_notif_statistics));
memset(&priv->_3945.delta_statistics, 0,
@@ -560,14 +491,14 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
}
if (!iwl3945_mod_params.sw_crypto)
- iwl_set_decrypted_flag(priv,
+ iwl_legacy_set_decrypted_flag(priv,
(struct ieee80211_hdr *)rxb_addr(rxb),
le32_to_cpu(rx_end->status), stats);
skb_add_rx_frag(skb, 0, rxb->page,
(void *)rx_hdr->payload - (void *)pkt, len);
- iwl_update_stats(priv, false, fc, len);
+ iwl_legacy_update_stats(priv, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
@@ -592,10 +523,11 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
rx_status.flag = 0;
rx_status.mactime = le64_to_cpu(rx_end->timestamp);
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel));
rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
+ rx_status.band);
rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
if (rx_status.band == IEEE80211_BAND_5GHZ)
@@ -639,7 +571,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
rx_status.signal, rx_status.signal,
rx_status.rate_idx);
- iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header);
+ iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
+ header);
if (network_packet) {
priv->_3945.last_beacon_time =
@@ -759,8 +692,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
/* We need to figure out how to get the sta->supp_rates while
* in this running context */
- rate_mask = IWL_RATES_MASK;
-
+ rate_mask = IWL_RATES_MASK_3945;
/* Set retry limit on DATA packets and Probe Responses*/
if (ieee80211_is_probe_resp(fc))
@@ -808,7 +740,7 @@ static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
station->sta.rate_n_flags = cpu_to_le16(tx_rate);
station->sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_send_add_sta(priv, &station->sta, CMD_ASYNC);
+ iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
@@ -823,7 +755,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
* to set power to V_AUX, do
if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
~APMG_PS_CTRL_MSK_PWR_SRC);
@@ -833,7 +765,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
}
*/
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
~APMG_PS_CTRL_MSK_PWR_SRC);
@@ -843,10 +775,11 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
- iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
- iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
- iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
- iwl_write_direct32(priv, FH39_RCSR_CONFIG(0),
+ iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
+ iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
+ rxq->rb_stts_dma);
+ iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
+ iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
@@ -857,7 +790,7 @@ static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
/* fake read to flush all prev I/O */
- iwl_read_direct32(priv, FH39_RSSR_CTRL);
+ iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
return 0;
}
@@ -866,23 +799,23 @@ static int iwl3945_tx_reset(struct iwl_priv *priv)
{
/* bypass mode */
- iwl_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
+ iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
/* RA 0 is active */
- iwl_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
+ iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
/* all 6 fifo are active */
- iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
+ iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
- iwl_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
- iwl_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
- iwl_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
- iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
+ iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
+ iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
+ iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
+ iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
- iwl_write_direct32(priv, FH39_TSSR_CBB_BASE,
+ iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
priv->_3945.shared_phys);
- iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
+ iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
@@ -908,7 +841,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
iwl3945_hw_txq_ctx_free(priv);
/* allocate tx queue structure */
- rc = iwl_alloc_txq_mem(priv);
+ rc = iwl_legacy_alloc_txq_mem(priv);
if (rc)
return rc;
@@ -921,8 +854,8 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
- txq_id);
+ rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
+ slots_num, txq_id);
if (rc) {
IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
goto error;
@@ -939,21 +872,23 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
/*
* Start up 3945's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
* NOTE: This does not load uCode nor start the embedded processor
*/
static int iwl3945_apm_init(struct iwl_priv *priv)
{
- int ret = iwl_apm_init(priv);
+ int ret = iwl_legacy_apm_init(priv);
/* Clear APMG (NIC's internal power management) interrupts */
- iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
- iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
+ iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
+ iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
/* Reset radio chip */
- iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+ iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
udelay(5);
- iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+ iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
return ret;
}
@@ -962,30 +897,28 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
{
struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
unsigned long flags;
- u8 rev_id = 0;
+ u8 rev_id = priv->pci_dev->revision;
spin_lock_irqsave(&priv->lock, flags);
/* Determine HW type */
- pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
-
IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
IWL_DEBUG_INFO(priv, "RTP type\n");
else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
} else {
IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
}
if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
} else
IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
@@ -993,24 +926,24 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
if ((eeprom->board_revision & 0xF0) == 0xD0) {
IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
eeprom->board_revision);
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
} else {
IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
eeprom->board_revision);
- iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
}
if (eeprom->almgor_m_version <= 1) {
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
eeprom->almgor_m_version);
} else {
IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
eeprom->almgor_m_version);
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1038,7 +971,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
/* Allocate the RX queue, or reset if it is already allocated */
if (!rxq->bd) {
- rc = iwl_rx_queue_alloc(priv);
+ rc = iwl_legacy_rx_queue_alloc(priv);
if (rc) {
IWL_ERR(priv, "Unable to initialize Rx queue\n");
return -ENOMEM;
@@ -1053,10 +986,10 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
/* Look at using this instead:
rxq->need_update = 1;
- iwl_rx_queue_update_write_ptr(priv, rxq);
+ iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
*/
- iwl_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
+ iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
rc = iwl3945_txq_ctx_reset(priv);
if (rc)
@@ -1081,12 +1014,12 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
txq_id++)
if (txq_id == IWL39_CMD_QUEUE_NUM)
- iwl_cmd_queue_free(priv);
+ iwl_legacy_cmd_queue_free(priv);
else
- iwl_tx_queue_free(priv, txq_id);
+ iwl_legacy_tx_queue_free(priv, txq_id);
/* free tx queue structure */
- iwl_free_txq_mem(priv);
+ iwl_legacy_txq_mem(priv);
}
void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1094,12 +1027,12 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
int txq_id;
/* stop SCD */
- iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
- iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
+ iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
+ iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
/* reset TFD queues */
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
+ iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
1000);
@@ -1166,12 +1099,12 @@ static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
#define IWL_TEMPERATURE_LIMIT_TIMER 6
/**
- * is_temp_calib_needed - determines if new calibration is needed
+ * iwl3945_is_temp_calib_needed - determines if new calibration is needed
*
* records new temperature in tx_mgr->temperature.
* replaces tx_mgr->last_temperature *only* if calib needed
* (assumes caller will actually do the calibration!). */
-static int is_temp_calib_needed(struct iwl_priv *priv)
+static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
{
int temp_diff;
@@ -1402,9 +1335,6 @@ static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_in
* based on eeprom channel data) for this channel. */
power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
- /* further limit to user's max power preference.
- * FIXME: Other spectrum management power limitations do not
- * seem to apply?? */
power = min(power, priv->tx_power_user_lmt);
scan_power_info->requested_power = power;
@@ -1451,10 +1381,14 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
};
u16 chan;
+ if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
+ "TX Power requested while scanning!\n"))
+ return -EAGAIN;
+
chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
- ch_info = iwl_get_channel_info(priv, priv->band, chan);
+ ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
if (!ch_info) {
IWL_ERR(priv,
"Failed to get channel info for channel %d [%d]\n",
@@ -1462,7 +1396,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
return -EINVAL;
}
- if (!is_channel_valid(ch_info)) {
+ if (!iwl_legacy_is_channel_valid(ch_info)) {
IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
"non-Tx channel.\n");
return 0;
@@ -1497,7 +1431,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
txpower.power[i].rate);
}
- return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
+ return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
sizeof(struct iwl3945_txpowertable_cmd),
&txpower);
@@ -1631,7 +1565,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
/* set up new Tx power info for each and every channel, 2.4 and 5.x */
for (i = 0; i < priv->channel_count; i++) {
ch_info = &priv->channel_info[i];
- a_band = is_channel_a_band(ch_info);
+ a_band = iwl_legacy_is_channel_a_band(ch_info);
/* Get this chnlgrp's factory calibration temperature */
ref_temp = (s16)eeprom->groups[ch_info->group_index].
@@ -1643,7 +1577,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
ref_temp);
/* set tx power value for all rates, OFDM and CCK */
- for (rate_index = 0; rate_index < IWL_RATE_COUNT;
+ for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
rate_index++) {
int power_idx =
ch_info->power_info[rate_index].base_power_index;
@@ -1697,7 +1631,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
for (i = 0; i < priv->channel_count; i++) {
ch_info = &priv->channel_info[i];
- a_band = is_channel_a_band(ch_info);
+ a_band = iwl_legacy_is_channel_a_band(ch_info);
/* find minimum power of all user and regulatory constraints
* (does not consider h/w clipping limitations) */
@@ -1713,7 +1647,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
/* update txpower settings for all channels,
* send to NIC if associated. */
- is_temp_calib_needed(priv);
+ iwl3945_is_temp_calib_needed(priv);
iwl3945_hw_reg_comp_txpower_temp(priv);
return 0;
@@ -1731,8 +1665,8 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
.flags = CMD_WANT_SKB,
.data = &rxon_assoc,
};
- const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
- const struct iwl_rxon_cmd *rxon2 = &ctx->active;
+ const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
+ const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
if ((rxon1->flags == rxon2->flags) &&
(rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1748,7 +1682,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
rxon_assoc.reserved = 0;
- rc = iwl_send_cmd_sync(priv, &cmd);
+ rc = iwl_legacy_send_cmd_sync(priv, &cmd);
if (rc)
return rc;
@@ -1758,7 +1692,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
rc = -EIO;
}
- iwl_free_pages(priv, cmd.reply_page);
+ iwl_legacy_free_pages(priv, cmd.reply_page);
return rc;
}
@@ -1779,7 +1713,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
int rc = 0;
bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
- if (!iwl_is_alive(priv))
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return -EINVAL;
+
+ if (!iwl_legacy_is_alive(priv))
return -1;
/* always get timestamp with Rx frame */
@@ -1790,7 +1727,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
- rc = iwl_check_rxon_cmd(priv, ctx);
+ rc = iwl_legacy_check_rxon_cmd(priv, ctx);
if (rc) {
IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
return -EINVAL;
@@ -1799,8 +1736,9 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
/* If we don't need to send a full RXON, we can use
* iwl3945_rxon_assoc_cmd which is used to reconfigure filter
* and other flags for the current radio configuration. */
- if (!iwl_full_rxon_required(priv, &priv->contexts[IWL_RXON_CTX_BSS])) {
- rc = iwl_send_rxon_assoc(priv,
+ if (!iwl_legacy_full_rxon_required(priv,
+ &priv->contexts[IWL_RXON_CTX_BSS])) {
+ rc = iwl_legacy_send_rxon_assoc(priv,
&priv->contexts[IWL_RXON_CTX_BSS]);
if (rc) {
IWL_ERR(priv, "Error setting RXON_ASSOC "
@@ -1817,7 +1755,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* an RXON_ASSOC and the new config wants the associated mask enabled,
* we must clear the associated from the active configuration
* before we apply the new config */
- if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
+ if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -1827,7 +1765,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
*/
active_rxon->reserved4 = 0;
active_rxon->reserved5 = 0;
- rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
+ rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
sizeof(struct iwl3945_rxon_cmd),
&priv->contexts[IWL_RXON_CTX_BSS].active);
@@ -1839,9 +1777,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
"configuration (%d).\n", rc);
return rc;
}
- iwl_clear_ucode_stations(priv,
+ iwl_legacy_clear_ucode_stations(priv,
+ &priv->contexts[IWL_RXON_CTX_BSS]);
+ iwl_legacy_restore_stations(priv,
&priv->contexts[IWL_RXON_CTX_BSS]);
- iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
}
IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -1859,10 +1798,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
staging_rxon->reserved4 = 0;
staging_rxon->reserved5 = 0;
- iwl_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
+ iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
/* Apply the new configuration */
- rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
+ rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
sizeof(struct iwl3945_rxon_cmd),
staging_rxon);
if (rc) {
@@ -1873,14 +1812,15 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
if (!new_assoc) {
- iwl_clear_ucode_stations(priv,
+ iwl_legacy_clear_ucode_stations(priv,
&priv->contexts[IWL_RXON_CTX_BSS]);
- iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
+ iwl_legacy_restore_stations(priv,
+ &priv->contexts[IWL_RXON_CTX_BSS]);
}
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
- rc = priv->cfg->ops->lib->send_tx_power(priv);
+ rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
if (rc) {
IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
return rc;
@@ -1910,7 +1850,7 @@ void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
{
/* This will kick in the "brute force"
* iwl3945_hw_reg_comp_txpower_temp() below */
- if (!is_temp_calib_needed(priv))
+ if (!iwl3945_is_temp_calib_needed(priv))
goto reschedule;
/* Set up a new set of temp-adjusted TxPowers, send to NIC.
@@ -1957,7 +1897,7 @@ static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
u8 grp_channel;
/* Find the group index for the channel ... don't use index 1(?) */
- if (is_channel_a_band(ch_info)) {
+ if (iwl_legacy_is_channel_a_band(ch_info)) {
for (group = 1; group < 5; group++) {
grp_channel = ch_grp[group].group_channel;
if (ch_info->channel <= grp_channel) {
@@ -2137,8 +2077,8 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
/* initialize Tx power info for each and every channel, 2.4 and 5.x */
for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
i++, ch_info++) {
- a_band = is_channel_a_band(ch_info);
- if (!is_channel_valid(ch_info))
+ a_band = iwl_legacy_is_channel_a_band(ch_info);
+ if (!iwl_legacy_is_channel_valid(ch_info))
continue;
/* find this channel's channel group (*not* "band") index */
@@ -2241,7 +2181,7 @@ int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
{
int rc;
- iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
+ iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
if (rc < 0)
@@ -2258,10 +2198,10 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
- iwl_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
- iwl_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
+ iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
+ iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
- iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
+ iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
@@ -2290,7 +2230,8 @@ static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
}
-static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
+static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
+ u8 *data)
{
struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
addsta->mode = cmd->mode;
@@ -2318,7 +2259,7 @@ static int iwl3945_add_bssid_station(struct iwl_priv *priv,
if (sta_id_r)
*sta_id_r = IWL_INVALID_STATION;
- ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
+ ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
if (ret) {
IWL_ERR(priv, "Unable to add station %pM\n", addr);
return ret;
@@ -2353,7 +2294,7 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
return 0;
}
- return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+ return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
vif->bss_conf.bssid);
}
@@ -2404,7 +2345,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
* 1M CCK rates */
if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
- iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
+ iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
index = IWL_FIRST_CCK_RATE;
for (i = IWL_RATE_6M_INDEX_TABLE;
@@ -2425,14 +2366,14 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
/* Update the rate scaling for control frame Tx */
rate_cmd.table_id = 0;
- rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
+ rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
&rate_cmd);
if (rc)
return rc;
/* Update the rate scaling for data frame Tx */
rate_cmd.table_id = 1;
- return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
+ return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
&rate_cmd);
}
@@ -2532,11 +2473,11 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
/* verify BSM SRAM contents */
- val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
+ val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
for (reg = BSM_SRAM_LOWER_BOUND;
reg < BSM_SRAM_LOWER_BOUND + len;
reg += sizeof(u32), image++) {
- val = iwl_read_prph(priv, reg);
+ val = iwl_legacy_read_prph(priv, reg);
if (val != le32_to_cpu(*image)) {
IWL_ERR(priv, "BSM uCode verification failed at "
"addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
@@ -2569,7 +2510,7 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv)
*/
static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
{
- _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
+ _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
return 0;
}
@@ -2640,16 +2581,16 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
inst_len = priv->ucode_init.len;
data_len = priv->ucode_init_data.len;
- iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
- iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
/* Fill BSM memory with bootstrap instructions */
for (reg_offset = BSM_SRAM_LOWER_BOUND;
reg_offset < BSM_SRAM_LOWER_BOUND + len;
reg_offset += sizeof(u32), image++)
- _iwl_write_prph(priv, reg_offset,
+ _iwl_legacy_write_prph(priv, reg_offset,
le32_to_cpu(*image));
rc = iwl3945_verify_bsm(priv);
@@ -2657,19 +2598,19 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
return rc;
/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
- iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
- iwl_write_prph(priv, BSM_WR_MEM_DST_REG,
+ iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
+ iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
IWL39_RTC_INST_LOWER_BOUND);
- iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+ iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
/* Load bootstrap code into instruction SRAM now,
* to prepare to load "initialize" uCode */
- iwl_write_prph(priv, BSM_WR_CTRL_REG,
+ iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
BSM_WR_CTRL_REG_BIT_START);
/* Wait for load of bootstrap uCode to finish */
for (i = 0; i < 100; i++) {
- done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
+ done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
if (!(done & BSM_WR_CTRL_REG_BIT_START))
break;
udelay(10);
@@ -2683,7 +2624,7 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
/* Enable future boot loads whenever power management unit triggers it
* (e.g. when powering back up after power-save shutdown) */
- iwl_write_prph(priv, BSM_WR_CTRL_REG,
+ iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
BSM_WR_CTRL_REG_BIT_START_EN);
return 0;
@@ -2692,7 +2633,6 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
static struct iwl_hcmd_ops iwl3945_hcmd = {
.rxon_assoc = iwl3945_send_rxon_assoc,
.commit_rxon = iwl3945_commit_rxon,
- .send_bt_config = iwl_send_bt_config,
};
static struct iwl_lib_ops iwl3945_lib = {
@@ -2718,16 +2658,9 @@ static struct iwl_lib_ops iwl3945_lib = {
},
.acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
.release_semaphore = iwl3945_eeprom_release_semaphore,
- .query_addr = iwlcore_eeprom_query_addr,
},
.send_tx_power = iwl3945_send_tx_power,
.is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
- .post_associate = iwl3945_post_associate,
- .isr = iwl_isr_legacy,
- .config_ap = iwl3945_config_ap,
- .manage_ibss_station = iwl3945_manage_ibss_station,
- .recover_from_tx_stall = iwl_bg_monitor_recover,
- .check_plcp_health = iwl3945_good_plcp_health,
.debugfs_ops = {
.rx_stats_read = iwl3945_ucode_rx_stats_read,
@@ -2736,10 +2669,15 @@ static struct iwl_lib_ops iwl3945_lib = {
},
};
+static const struct iwl_legacy_ops iwl3945_legacy_ops = {
+ .post_associate = iwl3945_post_associate,
+ .config_ap = iwl3945_config_ap,
+ .manage_ibss_station = iwl3945_manage_ibss_station,
+};
+
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
.get_hcmd_size = iwl3945_get_hcmd_size,
.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
- .tx_cmd_protection = iwlcore_tx_cmd_protection,
.request_scan = iwl3945_request_scan,
.post_scan = iwl3945_post_scan,
};
@@ -2749,6 +2687,8 @@ static const struct iwl_ops iwl3945_ops = {
.hcmd = &iwl3945_hcmd,
.utils = &iwl3945_hcmd_utils,
.led = &iwl3945_led_ops,
+ .legacy = &iwl3945_legacy_ops,
+ .ieee80211_ops = &iwl3945_hw_ops,
};
static struct iwl_base_params iwl3945_base_params = {
@@ -2757,13 +2697,10 @@ static struct iwl_base_params iwl3945_base_params = {
.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
.set_l0s = false,
.use_bsm = true,
- .use_isr_legacy = true,
.led_compensation = 64,
- .broken_powersave = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
- .tx_power_by_driver = true,
};
static struct iwl_cfg iwl3945_bg_cfg = {
@@ -2776,6 +2713,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.ops = &iwl3945_ops,
.mod_params = &iwl3945_mod_params,
.base_params = &iwl3945_base_params,
+ .led_mode = IWL_LED_BLINK,
};
static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2788,6 +2726,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.ops = &iwl3945_ops,
.mod_params = &iwl3945_mod_params,
.base_params = &iwl3945_base_params,
+ .led_mode = IWL_LED_BLINK,
};
DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
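[Editorial note, not part of the patch] The decision made in iwl3945_commit_rxon() above -- send the lightweight RXON_ASSOC command when only filter/association flags changed, otherwise commit a full RXON -- can be sketched in isolation as below. The struct and field names are simplified stand-ins for the driver's iwl_legacy_rxon_cmd, and the "full RXON" criteria are reduced to two example fields.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-in for the driver's RXON command structure. */
struct example_rxon {
	uint32_t flags;
	uint32_t filter_flags;
	uint16_t channel;
	uint8_t  bssid[6];
};

/*
 * Return true when something other than flags/filter_flags differs,
 * i.e. when a full RXON commit (with station table rebuild) is needed
 * rather than the cheaper RXON_ASSOC reconfiguration.
 */
bool example_full_rxon_required(const struct example_rxon *staging,
				const struct example_rxon *active)
{
	return staging->channel != active->channel ||
	       memcmp(staging->bssid, active->bssid,
		      sizeof(staging->bssid)) != 0;
}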
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
index 09391f0ee61f..b118b59b71de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -108,7 +108,7 @@ struct iwl3945_rs_sta {
/*
* The common struct MUST be first because it is shared between
- * 3945 and agn!
+ * 3945 and 4965!
*/
struct iwl3945_sta_priv {
struct iwl_station_priv_common common;
@@ -201,7 +201,7 @@ struct iwl3945_ibss_seq {
/******************************************************************************
*
- * Functions implemented in iwl-base.c which are forward declared here
+ * Functions implemented in iwl3945-base.c which are forward declared here
* for use by iwl-*.c
*
*****************************************************************************/
@@ -209,7 +209,7 @@ extern int iwl3945_calc_db_from_ratio(int sig_ratio);
extern void iwl3945_rx_replenish(void *data);
extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,int left);
+ struct ieee80211_hdr *hdr, int left);
extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
char **buf, bool display);
extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
@@ -217,7 +217,7 @@ extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
/******************************************************************************
*
* Functions implemented in iwl-[34]*.c which are forward declared here
- * for use by iwl-base.c
+ * for use by iwl3945-base.c
*
* NOTE: The implementation of these functions are hardware specific
* which is why they are in the hardware specific files (vs. iwl-base.c)
@@ -264,10 +264,8 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
extern void iwl3945_disable_events(struct iwl_priv *priv);
extern int iwl4965_get_temperature(const struct iwl_priv *priv);
-extern void iwl3945_post_associate(struct iwl_priv *priv,
- struct ieee80211_vif *vif);
-extern void iwl3945_config_ap(struct iwl_priv *priv,
- struct ieee80211_vif *vif);
+extern void iwl3945_post_associate(struct iwl_priv *priv);
+extern void iwl3945_config_ap(struct iwl_priv *priv);
extern int iwl3945_commit_rxon(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
@@ -282,8 +280,10 @@ extern int iwl3945_commit_rxon(struct iwl_priv *priv,
*/
extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
+extern struct ieee80211_ops iwl3945_hw_ops;
+
/*
- * Forward declare iwl-3945.c functions for iwl-base.c
+ * Forward declare iwl-3945.c functions for iwl3945-base.c
*/
extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
new file mode 100644
index 000000000000..81d6a25eb04f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
@@ -0,0 +1,967 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-4965-calib.h"
+
+/*****************************************************************************
+ * INIT calibrations framework
+ *****************************************************************************/
+
+struct statistics_general_data {
+ u32 beacon_silence_rssi_a;
+ u32 beacon_silence_rssi_b;
+ u32 beacon_silence_rssi_c;
+ u32 beacon_energy_a;
+ u32 beacon_energy_b;
+ u32 beacon_energy_c;
+};
+
+void iwl4965_calib_free_results(struct iwl_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < IWL_CALIB_MAX; i++) {
+ kfree(priv->calib_results[i].buf);
+ priv->calib_results[i].buf = NULL;
+ priv->calib_results[i].buf_len = 0;
+ }
+}
+
+/*****************************************************************************
+ * RUNTIME calibrations framework
+ *****************************************************************************/
+
+/* "false alarms" are signals that our DSP tries to lock onto,
+ * but then determines that they are either noise, or transmissions
+ * from a distant wireless network (also "noise", really) that get
+ * "stepped on" by stronger transmissions within our own network.
+ * This algorithm attempts to set a sensitivity level that is high
+ * enough to receive all of our own network traffic, but not so
+ * high that our DSP gets too busy trying to lock onto non-network
+ * activity/noise. */
+static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
+ u32 norm_fa,
+ u32 rx_enable_time,
+ struct statistics_general_data *rx_info)
+{
+ u32 max_nrg_cck = 0;
+ int i = 0;
+ u8 max_silence_rssi = 0;
+ u32 silence_ref = 0;
+ u8 silence_rssi_a = 0;
+ u8 silence_rssi_b = 0;
+ u8 silence_rssi_c = 0;
+ u32 val;
+
+ /* "false_alarms" values below are cross-multiplications to assess the
+ * numbers of false alarms within the measured period of actual Rx
+ * (Rx is off when we're txing), vs the min/max expected false alarms
+ * (some should be expected if rx is sensitive enough) in a
+ * hypothetical listening period of 200 time units (TU), 204.8 msec:
+ *
+ * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
+ *
+ * */
+ u32 false_alarms = norm_fa * 200 * 1024;
+ u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
+ u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
+ struct iwl_sensitivity_data *data = NULL;
+ const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+ data = &(priv->sensitivity_data);
+
+ data->nrg_auto_corr_silence_diff = 0;
+
+ /* Find max silence rssi among all 3 receivers.
+ * This is background noise, which may include transmissions from other
+ * networks, measured during silence before our network's beacon */
+ silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
+ ALL_BAND_FILTER) >> 8);
+ silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
+ ALL_BAND_FILTER) >> 8);
+ silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
+ ALL_BAND_FILTER) >> 8);
+
+ val = max(silence_rssi_b, silence_rssi_c);
+ max_silence_rssi = max(silence_rssi_a, (u8) val);
+
+ /* Store silence rssi in 20-beacon history table */
+ data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
+ data->nrg_silence_idx++;
+ if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
+ data->nrg_silence_idx = 0;
+
+ /* Find max silence rssi across 20 beacon history */
+ for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
+ val = data->nrg_silence_rssi[i];
+ silence_ref = max(silence_ref, val);
+ }
+ IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
+ silence_rssi_a, silence_rssi_b, silence_rssi_c,
+ silence_ref);
+
+ /* Find max rx energy (min value!) among all 3 receivers,
+ * measured during beacon frame.
+ * Save it in 10-beacon history table. */
+ i = data->nrg_energy_idx;
+ val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
+ data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
+
+ data->nrg_energy_idx++;
+ if (data->nrg_energy_idx >= 10)
+ data->nrg_energy_idx = 0;
+
+ /* Find min rx energy (max value) across 10 beacon history.
+ * This is the minimum signal level that we want to receive well.
+ * Add backoff (margin so we don't miss slightly lower energy frames).
+ * This establishes an upper bound (min value) for energy threshold. */
+ max_nrg_cck = data->nrg_value[0];
+ for (i = 1; i < 10; i++)
+ max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
+ max_nrg_cck += 6;
+
+ IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
+ rx_info->beacon_energy_a, rx_info->beacon_energy_b,
+ rx_info->beacon_energy_c, max_nrg_cck - 6);
+
+ /* Count number of consecutive beacons with fewer-than-desired
+ * false alarms. */
+ if (false_alarms < min_false_alarms)
+ data->num_in_cck_no_fa++;
+ else
+ data->num_in_cck_no_fa = 0;
+ IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
+ data->num_in_cck_no_fa);
+
+ /* If we got too many false alarms this time, reduce sensitivity */
+ if ((false_alarms > max_false_alarms) &&
+ (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
+ IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
+ false_alarms, max_false_alarms);
+ IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
+ data->nrg_curr_state = IWL_FA_TOO_MANY;
+ /* Store for "fewer than desired" on later beacon */
+ data->nrg_silence_ref = silence_ref;
+
+ /* increase energy threshold (reduce nrg value)
+ * to decrease sensitivity */
+ data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
+ /* Else if we got fewer than desired, increase sensitivity */
+ } else if (false_alarms < min_false_alarms) {
+ data->nrg_curr_state = IWL_FA_TOO_FEW;
+
+ /* Compare silence level with silence level for most recent
+ * healthy number or too many false alarms */
+ data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
+ (s32)silence_ref;
+
+ IWL_DEBUG_CALIB(priv,
+ "norm FA %u < min FA %u, silence diff %d\n",
+ false_alarms, min_false_alarms,
+ data->nrg_auto_corr_silence_diff);
+
+ /* Increase value to increase sensitivity, but only if:
+ * 1a) previous beacon did *not* have *too many* false alarms
+ * 1b) AND there's a significant difference in Rx levels
+ * from a previous beacon with too many, or healthy # FAs
+ * OR 2) We've seen a lot of beacons (100) with too few
+ * false alarms */
+ if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
+ ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
+ (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+
+ IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
+ /* Increase nrg value to increase sensitivity */
+ val = data->nrg_th_cck + NRG_STEP_CCK;
+ data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
+ } else {
+ IWL_DEBUG_CALIB(priv,
+ "... but not changing sensitivity\n");
+ }
+
+ /* Else we got a healthy number of false alarms, keep status quo */
+ } else {
+ IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
+ data->nrg_curr_state = IWL_FA_GOOD_RANGE;
+
+ /* Store for use in "fewer than desired" with later beacon */
+ data->nrg_silence_ref = silence_ref;
+
+ /* If previous beacon had too many false alarms,
+ * give it some extra margin by reducing sensitivity again
+ * (but don't go below measured energy of desired Rx) */
+ if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
+ IWL_DEBUG_CALIB(priv, "... increasing margin\n");
+ if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
+ data->nrg_th_cck -= NRG_MARGIN;
+ else
+ data->nrg_th_cck = max_nrg_cck;
+ }
+ }
+
+ /* Make sure the energy threshold does not go above the measured
+ * energy of the desired Rx signals (reduced by backoff margin),
+ * or else we might start missing Rx frames.
+ * Lower value is higher energy, so we use max()!
+ */
+ data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
+ IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
+
+ data->nrg_prev_state = data->nrg_curr_state;
+
+ /* Auto-correlation CCK algorithm */
+ if (false_alarms > min_false_alarms) {
+
+ /* increase auto_corr values to decrease sensitivity
+ * so the DSP won't be disturbed by the noise
+ */
+ if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
+ data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
+ else {
+ val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
+ data->auto_corr_cck =
+ min((u32)ranges->auto_corr_max_cck, val);
+ }
+ val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
+ data->auto_corr_cck_mrc =
+ min((u32)ranges->auto_corr_max_cck_mrc, val);
+ } else if ((false_alarms < min_false_alarms) &&
+ ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
+ (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+
+ /* Decrease auto_corr values to increase sensitivity */
+ val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
+ data->auto_corr_cck =
+ max((u32)ranges->auto_corr_min_cck, val);
+ val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
+ data->auto_corr_cck_mrc =
+ max((u32)ranges->auto_corr_min_cck_mrc, val);
+ }
+
+ return 0;
+}
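[Editorial note, not part of the patch] The "cross-multiplication" in iwl4965_sens_energy_cck() above scales the raw false-alarm count to a hypothetical 200-TU (200 * 1024 usec) listening window so it can be compared against the min/max thresholds scaled by the actual Rx-enable time, avoiding any division. The sketch below is illustrative only; EXAMPLE_MIN_FA and EXAMPLE_MAX_FA are made-up bounds standing in for MIN_FA_CCK/MAX_FA_CCK.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MIN_FA  5	/* stand-in for MIN_FA_CCK */
#define EXAMPLE_MAX_FA  50	/* stand-in for MAX_FA_CCK */

/* Return -1 for "too few", 1 for "too many", 0 for "in the safe zone". */
int example_classify_fa(uint32_t norm_fa, uint32_t rx_enable_time)
{
	/* Scale the measured count to a 200 TU (200 * 1024 usec) window. */
	uint32_t false_alarms = norm_fa * 200 * 1024;
	uint32_t min_fa = EXAMPLE_MIN_FA * rx_enable_time;
	uint32_t max_fa = EXAMPLE_MAX_FA * rx_enable_time;

	if (false_alarms > max_fa)
		return 1;
	if (false_alarms < min_fa)
		return -1;
	return 0;
}

int main(void)
{
	/* e.g. 12 false alarms over 150 ms of actual Rx time */
	printf("%d\n", example_classify_fa(12, 150000));
	return 0;
}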
+
+
+static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
+ u32 norm_fa,
+ u32 rx_enable_time)
+{
+ u32 val;
+ u32 false_alarms = norm_fa * 200 * 1024;
+ u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
+ u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
+ struct iwl_sensitivity_data *data = NULL;
+ const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+ data = &(priv->sensitivity_data);
+
+ /* If we got too many false alarms this time, reduce sensitivity */
+ if (false_alarms > max_false_alarms) {
+
+ IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
+ false_alarms, max_false_alarms);
+
+ val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm =
+ min((u32)ranges->auto_corr_max_ofdm, val);
+
+ val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm_mrc =
+ min((u32)ranges->auto_corr_max_ofdm_mrc, val);
+
+ val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm_x1 =
+ min((u32)ranges->auto_corr_max_ofdm_x1, val);
+
+ val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm_mrc_x1 =
+ min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
+ }
+
+ /* Else if we got fewer than desired, increase sensitivity */
+ else if (false_alarms < min_false_alarms) {
+
+ IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
+ false_alarms, min_false_alarms);
+
+ val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm =
+ max((u32)ranges->auto_corr_min_ofdm, val);
+
+ val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm_mrc =
+ max((u32)ranges->auto_corr_min_ofdm_mrc, val);
+
+ val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm_x1 =
+ max((u32)ranges->auto_corr_min_ofdm_x1, val);
+
+ val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
+ data->auto_corr_ofdm_mrc_x1 =
+ max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
+ } else {
+ IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
+ min_false_alarms, false_alarms, max_false_alarms);
+ }
+ return 0;
+}
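[Editorial note, not part of the patch] The OFDM branch above repeatedly applies one pattern: step a threshold up or down by AUTO_CORR_STEP_OFDM and clamp it to the per-hardware min/max from iwl_sensitivity_ranges. A generic stand-alone version of that pattern might look like the following; the helper name and bounds are not from the driver.

#include <stdint.h>

/*
 * Step a sensitivity threshold up (less sensitive) or down (more
 * sensitive), clamped to [lo, hi].  Mirrors the min()/max() clamping
 * applied to each auto_corr_* field above.
 */
uint32_t example_step_clamped(uint32_t cur, int up, uint32_t step,
			      uint32_t lo, uint32_t hi)
{
	uint32_t val;

	if (up) {
		val = cur + step;
		return val > hi ? hi : val;
	}
	val = cur > step ? cur - step : 0;
	return val < lo ? lo : val;
}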
+
+static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
+ struct iwl_sensitivity_data *data,
+ __le16 *tbl)
+{
+ tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
+ cpu_to_le16((u16)data->auto_corr_ofdm);
+ tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
+ cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
+ tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
+ cpu_to_le16((u16)data->auto_corr_ofdm_x1);
+ tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
+ cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
+
+ tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
+ cpu_to_le16((u16)data->auto_corr_cck);
+ tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
+ cpu_to_le16((u16)data->auto_corr_cck_mrc);
+
+ tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
+ cpu_to_le16((u16)data->nrg_th_cck);
+ tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
+ cpu_to_le16((u16)data->nrg_th_ofdm);
+
+ tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
+ cpu_to_le16(data->barker_corr_th_min);
+ tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
+ cpu_to_le16(data->barker_corr_th_min_mrc);
+ tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
+ cpu_to_le16(data->nrg_th_cca);
+
+ IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
+ data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
+ data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
+ data->nrg_th_ofdm);
+
+ IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
+ data->auto_corr_cck, data->auto_corr_cck_mrc,
+ data->nrg_th_cck);
+}
+
+/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
+static int iwl4965_sensitivity_write(struct iwl_priv *priv)
+{
+ struct iwl_sensitivity_cmd cmd;
+ struct iwl_sensitivity_data *data = NULL;
+ struct iwl_host_cmd cmd_out = {
+ .id = SENSITIVITY_CMD,
+ .len = sizeof(struct iwl_sensitivity_cmd),
+ .flags = CMD_ASYNC,
+ .data = &cmd,
+ };
+
+ data = &(priv->sensitivity_data);
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
+
+ /* Update uCode's "work" table, and copy it to DSP */
+ cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
+
+ /* Don't send command to uCode if nothing has changed */
+ if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
+ sizeof(u16)*HD_TABLE_SIZE)) {
+ IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
+ return 0;
+ }
+
+ /* Copy table for comparison next time */
+ memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
+ sizeof(u16)*HD_TABLE_SIZE);
+
+ return iwl_legacy_send_cmd(priv, &cmd_out);
+}
+
+void iwl4965_init_sensitivity(struct iwl_priv *priv)
+{
+ int ret = 0;
+ int i;
+ struct iwl_sensitivity_data *data = NULL;
+ const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+ if (priv->disable_sens_cal)
+ return;
+
+ IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
+
+ /* Clear driver's sensitivity algo data */
+ data = &(priv->sensitivity_data);
+
+ if (ranges == NULL)
+ return;
+
+ memset(data, 0, sizeof(struct iwl_sensitivity_data));
+
+ data->num_in_cck_no_fa = 0;
+ data->nrg_curr_state = IWL_FA_TOO_MANY;
+ data->nrg_prev_state = IWL_FA_TOO_MANY;
+ data->nrg_silence_ref = 0;
+ data->nrg_silence_idx = 0;
+ data->nrg_energy_idx = 0;
+
+ for (i = 0; i < 10; i++)
+ data->nrg_value[i] = 0;
+
+ for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
+ data->nrg_silence_rssi[i] = 0;
+
+ data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
+ data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
+ data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
+ data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
+ data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
+ data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
+ data->nrg_th_cck = ranges->nrg_th_cck;
+ data->nrg_th_ofdm = ranges->nrg_th_ofdm;
+ data->barker_corr_th_min = ranges->barker_corr_th_min;
+ data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
+ data->nrg_th_cca = ranges->nrg_th_cca;
+
+ data->last_bad_plcp_cnt_ofdm = 0;
+ data->last_fa_cnt_ofdm = 0;
+ data->last_bad_plcp_cnt_cck = 0;
+ data->last_fa_cnt_cck = 0;
+
+ ret |= iwl4965_sensitivity_write(priv);
+ IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
+}
+
+void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
+{
+ u32 rx_enable_time;
+ u32 fa_cck;
+ u32 fa_ofdm;
+ u32 bad_plcp_cck;
+ u32 bad_plcp_ofdm;
+ u32 norm_fa_ofdm;
+ u32 norm_fa_cck;
+ struct iwl_sensitivity_data *data = NULL;
+ struct statistics_rx_non_phy *rx_info;
+ struct statistics_rx_phy *ofdm, *cck;
+ unsigned long flags;
+ struct statistics_general_data statis;
+
+ if (priv->disable_sens_cal)
+ return;
+
+ data = &(priv->sensitivity_data);
+
+ if (!iwl_legacy_is_any_associated(priv)) {
+ IWL_DEBUG_CALIB(priv, "<< - not associated\n");
+ return;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
+ ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
+ cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
+
+ if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+ IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+ }
+
+ /* Extract Statistics: */
+ rx_enable_time = le32_to_cpu(rx_info->channel_load);
+ fa_cck = le32_to_cpu(cck->false_alarm_cnt);
+ fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
+ bad_plcp_cck = le32_to_cpu(cck->plcp_err);
+ bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
+
+ statis.beacon_silence_rssi_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a);
+ statis.beacon_silence_rssi_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b);
+ statis.beacon_silence_rssi_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c);
+ statis.beacon_energy_a =
+ le32_to_cpu(rx_info->beacon_energy_a);
+ statis.beacon_energy_b =
+ le32_to_cpu(rx_info->beacon_energy_b);
+ statis.beacon_energy_c =
+ le32_to_cpu(rx_info->beacon_energy_c);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
+
+ if (!rx_enable_time) {
+ IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
+ return;
+ }
+
+ /* These statistics increase monotonically, and do not reset
+ * at each beacon. Calculate difference from last value, or just
+ * use the new statistics value if it has reset or wrapped around. */
+ if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
+ data->last_bad_plcp_cnt_cck = bad_plcp_cck;
+ else {
+ bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
+ data->last_bad_plcp_cnt_cck += bad_plcp_cck;
+ }
+
+ if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
+ data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
+ else {
+ bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
+ data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
+ }
+
+ if (data->last_fa_cnt_ofdm > fa_ofdm)
+ data->last_fa_cnt_ofdm = fa_ofdm;
+ else {
+ fa_ofdm -= data->last_fa_cnt_ofdm;
+ data->last_fa_cnt_ofdm += fa_ofdm;
+ }
+
+ if (data->last_fa_cnt_cck > fa_cck)
+ data->last_fa_cnt_cck = fa_cck;
+ else {
+ fa_cck -= data->last_fa_cnt_cck;
+ data->last_fa_cnt_cck += fa_cck;
+ }
+
+ /* Total aborted signal locks */
+ norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
+ norm_fa_cck = fa_cck + bad_plcp_cck;
+
+ IWL_DEBUG_CALIB(priv,
+ "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
+ bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
+
+ iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
+ iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
+
+ iwl4965_sensitivity_write(priv);
+}
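[Editorial note, not part of the patch] The block of if/else pairs above implements one small idea four times: the uCode counters are cumulative, so the code keeps the last seen value and works with the difference, falling back to the raw value when the counter has reset or wrapped. A stand-alone sketch of that pattern follows; the helper name is hypothetical.

#include <stdint.h>

/*
 * Return the increase in a cumulative counter since the last read.
 * If the new value is smaller, the counter reset or wrapped, so start
 * over from the new raw value, exactly as the driver code above does.
 */
uint32_t example_counter_delta(uint32_t *last_seen, uint32_t current)
{
	uint32_t delta;

	if (*last_seen > current) {
		*last_seen = current;
		return current;
	}
	delta = current - *last_seen;
	*last_seen += delta;	/* i.e. *last_seen = current */
	return delta;
}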
+
+static inline u8 iwl4965_find_first_chain(u8 mask)
+{
+ if (mask & ANT_A)
+ return CHAIN_A;
+ if (mask & ANT_B)
+ return CHAIN_B;
+ return CHAIN_C;
+}
+
+/**
+ * Run disconnected antenna algorithm to find out which antennas are
+ * disconnected.
+ */
+static void
+iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
+ struct iwl_chain_noise_data *data)
+{
+ u32 active_chains = 0;
+ u32 max_average_sig;
+ u16 max_average_sig_antenna_i;
+ u8 num_tx_chains;
+ u8 first_chain;
+ u16 i = 0;
+
+ average_sig[0] = data->chain_signal_a /
+ priv->cfg->base_params->chain_noise_num_beacons;
+ average_sig[1] = data->chain_signal_b /
+ priv->cfg->base_params->chain_noise_num_beacons;
+ average_sig[2] = data->chain_signal_c /
+ priv->cfg->base_params->chain_noise_num_beacons;
+
+ if (average_sig[0] >= average_sig[1]) {
+ max_average_sig = average_sig[0];
+ max_average_sig_antenna_i = 0;
+ active_chains = (1 << max_average_sig_antenna_i);
+ } else {
+ max_average_sig = average_sig[1];
+ max_average_sig_antenna_i = 1;
+ active_chains = (1 << max_average_sig_antenna_i);
+ }
+
+ if (average_sig[2] >= max_average_sig) {
+ max_average_sig = average_sig[2];
+ max_average_sig_antenna_i = 2;
+ active_chains = (1 << max_average_sig_antenna_i);
+ }
+
+ IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
+ average_sig[0], average_sig[1], average_sig[2]);
+ IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
+ max_average_sig, max_average_sig_antenna_i);
+
+ /* Compare signal strengths for all 3 receivers. */
+ for (i = 0; i < NUM_RX_CHAINS; i++) {
+ if (i != max_average_sig_antenna_i) {
+ s32 rssi_delta = (max_average_sig - average_sig[i]);
+
+ /* If signal is very weak, compared with
+ * strongest, mark it as disconnected. */
+ if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
+ data->disconn_array[i] = 1;
+ else
+ active_chains |= (1 << i);
+ IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
+ "disconn_array[i] = %d\n",
+ i, rssi_delta, data->disconn_array[i]);
+ }
+ }
+
+ /*
+ * The above algorithm sometimes fails when the ucode
+ * reports 0 for all chains. It's not clear why that
+ * happens to start with, but it is then causing trouble
+ * because this can make us enable more chains than the
+ * hardware really has.
+ *
+ * To be safe, simply mask out any chains that we know
+ * are not on the device.
+ */
+ active_chains &= priv->hw_params.valid_rx_ant;
+
+ num_tx_chains = 0;
+ for (i = 0; i < NUM_RX_CHAINS; i++) {
+ /* loops on all the bits of
+ * priv->hw_setting.valid_tx_ant */
+ u8 ant_msk = (1 << i);
+ if (!(priv->hw_params.valid_tx_ant & ant_msk))
+ continue;
+
+ num_tx_chains++;
+ if (data->disconn_array[i] == 0)
+ /* there is a Tx antenna connected */
+ break;
+ if (num_tx_chains == priv->hw_params.tx_chains_num &&
+ data->disconn_array[i]) {
+ /*
+ * If all chains are disconnected
+ * connect the first valid tx chain
+ */
+ first_chain =
+ iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
+ data->disconn_array[first_chain] = 0;
+ active_chains |= BIT(first_chain);
+ IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected \
+ W/A - declare %d as connected\n",
+ first_chain);
+ break;
+ }
+ }
+
+ if (active_chains != priv->hw_params.valid_rx_ant &&
+ active_chains != priv->chain_noise_data.active_chains)
+ IWL_DEBUG_CALIB(priv,
+ "Detected that not all antennas are connected! "
+ "Connected: %#x, valid: %#x.\n",
+ active_chains, priv->hw_params.valid_rx_ant);
+
+ /* Save for use within RXON, TX, SCAN commands, etc. */
+ data->active_chains = active_chains;
+ IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
+ active_chains);
+}
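[Editorial note, not part of the patch] The core of the disconnected-antenna decision above is: find the chain with the strongest averaged beacon RSSI, then flag every other chain whose average falls more than MAXIMUM_ALLOWED_PATHLOSS below it. A minimal sketch assuming three chains and an arbitrary example path-loss limit; the real constant and the valid-antenna masking are in the driver code above.

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_MAX_PATHLOSS 15	/* stand-in for MAXIMUM_ALLOWED_PATHLOSS */

void example_flag_disconnected(const uint32_t avg_sig[3], bool disconn[3])
{
	uint32_t max_sig = avg_sig[0];
	int strongest = 0;
	int i;

	/* Find the chain with the strongest averaged signal. */
	for (i = 1; i < 3; i++) {
		if (avg_sig[i] >= max_sig) {
			max_sig = avg_sig[i];
			strongest = i;
		}
	}

	/* Flag every other chain that is too far below the strongest one. */
	for (i = 0; i < 3; i++)
		disconn[i] = (i != strongest) &&
			     (max_sig - avg_sig[i] > EXAMPLE_MAX_PATHLOSS);
}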
+
+static void iwl4965_gain_computation(struct iwl_priv *priv,
+ u32 *average_noise,
+ u16 min_average_noise_antenna_i,
+ u32 min_average_noise,
+ u8 default_chain)
+{
+ int i, ret;
+ struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+
+ data->delta_gain_code[min_average_noise_antenna_i] = 0;
+
+ for (i = default_chain; i < NUM_RX_CHAINS; i++) {
+ s32 delta_g = 0;
+
+ if (!(data->disconn_array[i]) &&
+ (data->delta_gain_code[i] ==
+ CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
+ delta_g = average_noise[i] - min_average_noise;
+ data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
+ data->delta_gain_code[i] =
+ min(data->delta_gain_code[i],
+ (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+
+ data->delta_gain_code[i] =
+ (data->delta_gain_code[i] | (1 << 2));
+ } else {
+ data->delta_gain_code[i] = 0;
+ }
+ }
+ IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
+ data->delta_gain_code[0],
+ data->delta_gain_code[1],
+ data->delta_gain_code[2]);
+
+ /* Differential gain gets sent to uCode only once */
+ if (!data->radio_write) {
+ struct iwl_calib_diff_gain_cmd cmd;
+ data->radio_write = 1;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+ cmd.diff_gain_a = data->delta_gain_code[0];
+ cmd.diff_gain_b = data->delta_gain_code[1];
+ cmd.diff_gain_c = data->delta_gain_code[2];
+ ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_DEBUG_CALIB(priv, "fail sending cmd "
+ "REPLY_PHY_CALIBRATION_CMD\n");
+
+ /* TODO we might want recalculate
+ * rx_chain in rxon cmd */
+
+ /* Mark so we run this algo only once! */
+ data->state = IWL_CHAIN_NOISE_CALIBRATED;
+ }
+}
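[Editorial note, not part of the patch] In the gain computation above, each connected chain gets a delta-gain code derived from how much noisier it is than the quietest chain: the difference is scaled by 10/15, clamped to CHAIN_NOISE_MAX_DELTA_GAIN_CODE, and bit 2 is OR'd in before the codes are sent once via REPLY_PHY_CALIBRATION_CMD. A stand-alone sketch of that per-chain step; the clamp value here is an example rather than the driver's constant, and the bit-2 OR simply mirrors the code above.

#include <stdint.h>

#define EXAMPLE_MAX_GAIN_CODE 3	/* stand-in for CHAIN_NOISE_MAX_DELTA_GAIN_CODE */

uint8_t example_delta_gain_code(uint32_t chain_noise, uint32_t min_noise)
{
	uint32_t delta = chain_noise - min_noise;	/* chain_noise >= min_noise */
	uint8_t code = (uint8_t)((delta * 10) / 15);

	if (code > EXAMPLE_MAX_GAIN_CODE)
		code = EXAMPLE_MAX_GAIN_CODE;

	return code | (1 << 2);	/* OR in bit 2, as the driver does above */
}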
+
+
+
+/*
+ * Accumulate 16 beacons of signal and noise statistics for each of
+ * 3 receivers/antennas/rx-chains, then figure out:
+ * 1) Which antennas are connected.
+ * 2) Differential rx gain settings to balance the 3 receivers.
+ */
+void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
+{
+ struct iwl_chain_noise_data *data = NULL;
+
+ u32 chain_noise_a;
+ u32 chain_noise_b;
+ u32 chain_noise_c;
+ u32 chain_sig_a;
+ u32 chain_sig_b;
+ u32 chain_sig_c;
+ u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+ u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+ u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
+ u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
+ u16 i = 0;
+ u16 rxon_chnum = INITIALIZATION_VALUE;
+ u16 stat_chnum = INITIALIZATION_VALUE;
+ u8 rxon_band24;
+ u8 stat_band24;
+ unsigned long flags;
+ struct statistics_rx_non_phy *rx_info;
+
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+ if (priv->disable_chain_noise_cal)
+ return;
+
+ data = &(priv->chain_noise_data);
+
+ /*
+ * Accumulate just the first "chain_noise_num_beacons" after
+ * the first association, then we're done forever.
+ */
+ if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
+ if (data->state == IWL_CHAIN_NOISE_ALIVE)
+ IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
+ return;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
+ rx.general);
+
+ if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+ IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+ }
+
+ rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
+ rxon_chnum = le16_to_cpu(ctx->staging.channel);
+
+ stat_band24 = !!(((struct iwl_notif_statistics *)
+ stat_resp)->flag &
+ STATISTICS_REPLY_FLG_BAND_24G_MSK);
+ stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
+ stat_resp)->flag) >> 16;
+
+ /* Make sure we accumulate data for just the associated channel
+ * (even if scanning). */
+ if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
+ IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
+ rxon_chnum, rxon_band24);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+ }
+
+ /*
+ * Accumulate beacon statistics values across
+ * "chain_noise_num_beacons"
+ */
+ chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
+ IN_BAND_FILTER;
+ chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
+ IN_BAND_FILTER;
+ chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
+ IN_BAND_FILTER;
+
+ chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
+ chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
+ chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ data->beacon_count++;
+
+ data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
+ data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
+ data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
+
+ data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
+ data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
+ data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
+
+ IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
+ rxon_chnum, rxon_band24, data->beacon_count);
+ IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
+ chain_sig_a, chain_sig_b, chain_sig_c);
+ IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
+ chain_noise_a, chain_noise_b, chain_noise_c);
+
+ /* If this is the "chain_noise_num_beacons", determine:
+ * 1) Disconnected antennas (using signal strengths)
+ * 2) Differential gain (using silence noise) to balance receivers */
+ if (data->beacon_count !=
+ priv->cfg->base_params->chain_noise_num_beacons)
+ return;
+
+ /* Analyze signal for disconnected antenna */
+ iwl4965_find_disconn_antenna(priv, average_sig, data);
+
+ /* Analyze noise for rx balance */
+ average_noise[0] = data->chain_noise_a /
+ priv->cfg->base_params->chain_noise_num_beacons;
+ average_noise[1] = data->chain_noise_b /
+ priv->cfg->base_params->chain_noise_num_beacons;
+ average_noise[2] = data->chain_noise_c /
+ priv->cfg->base_params->chain_noise_num_beacons;
+
+ for (i = 0; i < NUM_RX_CHAINS; i++) {
+ if (!(data->disconn_array[i]) &&
+ (average_noise[i] <= min_average_noise)) {
+ /* This means that chain i is active and has
+ * lower noise values so far: */
+ min_average_noise = average_noise[i];
+ min_average_noise_antenna_i = i;
+ }
+ }
+
+ IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
+ average_noise[0], average_noise[1],
+ average_noise[2]);
+
+ IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
+ min_average_noise, min_average_noise_antenna_i);
+
+ iwl4965_gain_computation(priv, average_noise,
+ min_average_noise_antenna_i, min_average_noise,
+ iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
+
+ /* Some power changes may have been made during the calibration.
+ * Update and commit the RXON
+ */
+ if (priv->cfg->ops->lib->update_chain_flags)
+ priv->cfg->ops->lib->update_chain_flags(priv);
+
+ data->state = IWL_CHAIN_NOISE_DONE;
+ iwl_legacy_power_update_mode(priv, false);
+}
+
+void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
+{
+ int i;
+ memset(&(priv->sensitivity_data), 0,
+ sizeof(struct iwl_sensitivity_data));
+ memset(&(priv->chain_noise_data), 0,
+ sizeof(struct iwl_chain_noise_data));
+ for (i = 0; i < NUM_RX_CHAINS; i++)
+ priv->chain_noise_data.delta_gain_code[i] =
+ CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
+
+ /* Ask for statistics now; the uCode will send notifications
+ * periodically after association */
+ iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
new file mode 100644
index 000000000000..f46c80e6e005
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
@@ -0,0 +1,75 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#ifndef __iwl_4965_calib_h__
+#define __iwl_4965_calib_h__
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-commands.h"
+
+void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
+void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
+void iwl4965_init_sensitivity(struct iwl_priv *priv);
+void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
+void iwl4965_calib_free_results(struct iwl_priv *priv);
+
+#endif /* __iwl_4965_calib_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
new file mode 100644
index 000000000000..1c93665766e4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
@@ -0,0 +1,774 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+* Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+#include "iwl-4965.h"
+#include "iwl-4965-debugfs.h"
+
+static const char *fmt_value = " %-30s %10u\n";
+static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
+static const char *fmt_header =
+ "%-32s current cumulative delta max\n";
+
+static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+{
+ int p = 0;
+ u32 flag;
+
+ flag = le32_to_cpu(priv->_4965.statistics.flag);
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
+ if (flag & UCODE_STATISTICS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (flag & UCODE_STATISTICS_FREQUENCY_MSK)
+ ? "2.4 GHz" : "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
+ ? "enabled" : "disabled");
+
+ return p;
+}
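[Editorial note, not part of the patch] The debugfs read handlers that follow all build their output with the same bounded-append idiom: each scnprintf() returns the number of bytes actually written (never more than the remaining space), so the running "p"/"pos" offset cannot overrun the buffer. A userspace illustration of the same pattern, emulating scnprintf() with vsnprintf(); the helper name is hypothetical.

#include <stdarg.h>
#include <stdio.h>

/* Like the kernel's scnprintf(): returns bytes written, capped at size - 1. */
int example_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (size == 0)
		return 0;

	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (n < 0)
		return 0;
	return (size_t)n >= size ? (int)(size - 1) : n;
}

int main(void)
{
	char buf[64];
	int pos = 0;

	pos += example_scnprintf(buf + pos, sizeof(buf) - pos,
				 "Statistics Flag(0x%X):\n", 0x3);
	pos += example_scnprintf(buf + pos, sizeof(buf) - pos,
				 "\tOperational Frequency: %s\n", "2.4 GHz");
	fputs(buf, stdout);
	return 0;
}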
+
+ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct statistics_rx_phy) * 40 +
+ sizeof(struct statistics_rx_non_phy) * 40 +
+ sizeof(struct statistics_rx_ht_phy) * 40 + 400;
+ ssize_t ret;
+ struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct statistics_rx_non_phy *general, *accum_general;
+ struct statistics_rx_non_phy *delta_general, *max_general;
+ struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+ if (!iwl_legacy_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * the statistics shown here are based on the last
+ * statistics notification from uCode and might not
+ * reflect the current uCode activity
+ */
+ ofdm = &priv->_4965.statistics.rx.ofdm;
+ cck = &priv->_4965.statistics.rx.cck;
+ general = &priv->_4965.statistics.rx.general;
+ ht = &priv->_4965.statistics.rx.ofdm_ht;
+ accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
+ accum_cck = &priv->_4965.accum_statistics.rx.cck;
+ accum_general = &priv->_4965.accum_statistics.rx.general;
+ accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
+ delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
+ delta_cck = &priv->_4965.delta_statistics.rx.cck;
+ delta_general = &priv->_4965.delta_statistics.rx.general;
+ delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
+ max_ofdm = &priv->_4965.max_delta.rx.ofdm;
+ max_cck = &priv->_4965.max_delta.rx.cck;
+ max_general = &priv->_4965.max_delta.rx.general;
+ max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
+
+ pos += iwl4965_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_header, "Statistics_Rx - OFDM:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "ina_cnt:",
+ le32_to_cpu(ofdm->ina_cnt),
+ accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err),
+ accum_ofdm->overrun_err, delta_ofdm->overrun_err,
+ max_ofdm->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "crc32_good:",
+ le32_to_cpu(ofdm->crc32_good),
+ accum_ofdm->crc32_good, delta_ofdm->crc32_good,
+ max_ofdm->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt,
+ delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout),
+ accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
+ max_ofdm->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout),
+ accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
+ max_ofdm->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts,
+ delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt),
+ accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
+ max_ofdm->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt),
+ accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
+ max_ofdm->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sent_ba_rsp_cnt:",
+ le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+ accum_ofdm->sent_ba_rsp_cnt,
+ delta_ofdm->sent_ba_rsp_cnt,
+ max_ofdm->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "dsp_self_kill:",
+ le32_to_cpu(ofdm->dsp_self_kill),
+ accum_ofdm->dsp_self_kill,
+ delta_ofdm->dsp_self_kill,
+ max_ofdm->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "mh_format_err:",
+ le32_to_cpu(ofdm->mh_format_err),
+ accum_ofdm->mh_format_err,
+ delta_ofdm->mh_format_err,
+ max_ofdm->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "re_acq_main_rssi_sum:",
+ le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+ accum_ofdm->re_acq_main_rssi_sum,
+ delta_ofdm->re_acq_main_rssi_sum,
+ max_ofdm->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_header, "Statistics_Rx - CCK:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "overrun_err:",
+ le32_to_cpu(cck->overrun_err),
+ accum_cck->overrun_err, delta_cck->overrun_err,
+ max_cck->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err,
+ max_cck->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good, max_cck->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt,
+ delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt,
+ max_cck->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout),
+ accum_cck->sfd_timeout, delta_cck->sfd_timeout,
+ max_cck->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout),
+ accum_cck->fina_timeout, delta_cck->fina_timeout,
+ max_cck->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt),
+ accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
+ max_cck->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt),
+ accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
+ max_cck->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sent_ba_rsp_cnt:",
+ le32_to_cpu(cck->sent_ba_rsp_cnt),
+ accum_cck->sent_ba_rsp_cnt,
+ delta_cck->sent_ba_rsp_cnt,
+ max_cck->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "dsp_self_kill:",
+ le32_to_cpu(cck->dsp_self_kill),
+ accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
+ max_cck->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "mh_format_err:",
+ le32_to_cpu(cck->mh_format_err),
+ accum_cck->mh_format_err, delta_cck->mh_format_err,
+ max_cck->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "re_acq_main_rssi_sum:",
+ le32_to_cpu(cck->re_acq_main_rssi_sum),
+ accum_cck->re_acq_main_rssi_sum,
+ delta_cck->re_acq_main_rssi_sum,
+ max_cck->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_header, "Statistics_Rx - GENERAL:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "bogus_cts:",
+ le32_to_cpu(general->bogus_cts),
+ accum_general->bogus_cts, delta_general->bogus_cts,
+ max_general->bogus_cts);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "bogus_ack:",
+ le32_to_cpu(general->bogus_ack),
+ accum_general->bogus_ack, delta_general->bogus_ack,
+ max_general->bogus_ack);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "channel_beacons:",
+ le32_to_cpu(general->channel_beacons),
+ accum_general->channel_beacons,
+ delta_general->channel_beacons,
+ max_general->channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "num_missed_bcon:",
+ le32_to_cpu(general->num_missed_bcon),
+ accum_general->num_missed_bcon,
+ delta_general->num_missed_bcon,
+ max_general->num_missed_bcon);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "adc_rx_saturation_time:",
+ le32_to_cpu(general->adc_rx_saturation_time),
+ accum_general->adc_rx_saturation_time,
+ delta_general->adc_rx_saturation_time,
+ max_general->adc_rx_saturation_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "ina_detect_search_tm:",
+ le32_to_cpu(general->ina_detection_search_time),
+ accum_general->ina_detection_search_time,
+ delta_general->ina_detection_search_time,
+ max_general->ina_detection_search_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_silence_rssi_a:",
+ le32_to_cpu(general->beacon_silence_rssi_a),
+ accum_general->beacon_silence_rssi_a,
+ delta_general->beacon_silence_rssi_a,
+ max_general->beacon_silence_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_silence_rssi_b:",
+ le32_to_cpu(general->beacon_silence_rssi_b),
+ accum_general->beacon_silence_rssi_b,
+ delta_general->beacon_silence_rssi_b,
+ max_general->beacon_silence_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_silence_rssi_c:",
+ le32_to_cpu(general->beacon_silence_rssi_c),
+ accum_general->beacon_silence_rssi_c,
+ delta_general->beacon_silence_rssi_c,
+ max_general->beacon_silence_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "interference_data_flag:",
+ le32_to_cpu(general->interference_data_flag),
+ accum_general->interference_data_flag,
+ delta_general->interference_data_flag,
+ max_general->interference_data_flag);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "channel_load:",
+ le32_to_cpu(general->channel_load),
+ accum_general->channel_load,
+ delta_general->channel_load,
+ max_general->channel_load);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "dsp_false_alarms:",
+ le32_to_cpu(general->dsp_false_alarms),
+ accum_general->dsp_false_alarms,
+ delta_general->dsp_false_alarms,
+ max_general->dsp_false_alarms);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_rssi_a:",
+ le32_to_cpu(general->beacon_rssi_a),
+ accum_general->beacon_rssi_a,
+ delta_general->beacon_rssi_a,
+ max_general->beacon_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_rssi_b:",
+ le32_to_cpu(general->beacon_rssi_b),
+ accum_general->beacon_rssi_b,
+ delta_general->beacon_rssi_b,
+ max_general->beacon_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_rssi_c:",
+ le32_to_cpu(general->beacon_rssi_c),
+ accum_general->beacon_rssi_c,
+ delta_general->beacon_rssi_c,
+ max_general->beacon_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_energy_a:",
+ le32_to_cpu(general->beacon_energy_a),
+ accum_general->beacon_energy_a,
+ delta_general->beacon_energy_a,
+ max_general->beacon_energy_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_energy_b:",
+ le32_to_cpu(general->beacon_energy_b),
+ accum_general->beacon_energy_b,
+ delta_general->beacon_energy_b,
+ max_general->beacon_energy_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "beacon_energy_c:",
+ le32_to_cpu(general->beacon_energy_c),
+ accum_general->beacon_energy_c,
+ delta_general->beacon_energy_c,
+ max_general->beacon_energy_c);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_header, "Statistics_Rx - OFDM_HT:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "plcp_err:",
+ le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+ delta_ht->plcp_err, max_ht->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "overrun_err:",
+ le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+ delta_ht->overrun_err, max_ht->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "early_overrun_err:",
+ le32_to_cpu(ht->early_overrun_err),
+ accum_ht->early_overrun_err,
+ delta_ht->early_overrun_err,
+ max_ht->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "crc32_good:",
+ le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+ delta_ht->crc32_good, max_ht->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "crc32_err:",
+ le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+ delta_ht->crc32_err, max_ht->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "mh_format_err:",
+ le32_to_cpu(ht->mh_format_err),
+ accum_ht->mh_format_err,
+ delta_ht->mh_format_err, max_ht->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg_crc32_good:",
+ le32_to_cpu(ht->agg_crc32_good),
+ accum_ht->agg_crc32_good,
+ delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg_mpdu_cnt:",
+ le32_to_cpu(ht->agg_mpdu_cnt),
+ accum_ht->agg_mpdu_cnt,
+ delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg_cnt:",
+ le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+ delta_ht->agg_cnt, max_ht->agg_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "unsupport_mcs:",
+ le32_to_cpu(ht->unsupport_mcs),
+ accum_ht->unsupport_mcs,
+ delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
+ ssize_t ret;
+ struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!iwl_legacy_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+		IWL_ERR(priv, "Cannot allocate buffer\n");
+ return -ENOMEM;
+ }
+
+	/* The statistics information displayed here is based on
+	 * the last statistics notification from the uCode and
+	 * might not reflect the current uCode activity.
+ */
+ tx = &priv->_4965.statistics.tx;
+ accum_tx = &priv->_4965.accum_statistics.tx;
+ delta_tx = &priv->_4965.delta_statistics.tx;
+ max_tx = &priv->_4965.max_delta.tx;
+
+ pos += iwl4965_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_header, "Statistics_Tx:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "preamble:",
+ le32_to_cpu(tx->preamble_cnt),
+ accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt,
+ delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt,
+ delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt,
+ delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt),
+ accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout),
+ accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt,
+ delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt),
+ accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt,
+ max_tx->actual_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "dump_msdu_cnt:",
+ le32_to_cpu(tx->dump_msdu_cnt),
+ accum_tx->dump_msdu_cnt,
+ delta_tx->dump_msdu_cnt,
+ max_tx->dump_msdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "abort_nxt_frame_mismatch:",
+ le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+ accum_tx->burst_abort_next_frame_mismatch_cnt,
+ delta_tx->burst_abort_next_frame_mismatch_cnt,
+ max_tx->burst_abort_next_frame_mismatch_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "abort_missing_nxt_frame:",
+ le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+ accum_tx->burst_abort_missing_next_frame_cnt,
+ delta_tx->burst_abort_missing_next_frame_cnt,
+ max_tx->burst_abort_missing_next_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "cts_timeout_collision:",
+ le32_to_cpu(tx->cts_timeout_collision),
+ accum_tx->cts_timeout_collision,
+ delta_tx->cts_timeout_collision,
+ max_tx->cts_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "ack_ba_timeout_collision:",
+ le32_to_cpu(tx->ack_or_ba_timeout_collision),
+ accum_tx->ack_or_ba_timeout_collision,
+ delta_tx->ack_or_ba_timeout_collision,
+ max_tx->ack_or_ba_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg ba_timeout:",
+ le32_to_cpu(tx->agg.ba_timeout),
+ accum_tx->agg.ba_timeout,
+ delta_tx->agg.ba_timeout,
+ max_tx->agg.ba_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg ba_resched_frames:",
+ le32_to_cpu(tx->agg.ba_reschedule_frames),
+ accum_tx->agg.ba_reschedule_frames,
+ delta_tx->agg.ba_reschedule_frames,
+ max_tx->agg.ba_reschedule_frames);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg scd_query_agg_frame:",
+ le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+ accum_tx->agg.scd_query_agg_frame_cnt,
+ delta_tx->agg.scd_query_agg_frame_cnt,
+ max_tx->agg.scd_query_agg_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg scd_query_no_agg:",
+ le32_to_cpu(tx->agg.scd_query_no_agg),
+ accum_tx->agg.scd_query_no_agg,
+ delta_tx->agg.scd_query_no_agg,
+ max_tx->agg.scd_query_no_agg);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg scd_query_agg:",
+ le32_to_cpu(tx->agg.scd_query_agg),
+ accum_tx->agg.scd_query_agg,
+ delta_tx->agg.scd_query_agg,
+ max_tx->agg.scd_query_agg);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg scd_query_mismatch:",
+ le32_to_cpu(tx->agg.scd_query_mismatch),
+ accum_tx->agg.scd_query_mismatch,
+ delta_tx->agg.scd_query_mismatch,
+ max_tx->agg.scd_query_mismatch);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg frame_not_ready:",
+ le32_to_cpu(tx->agg.frame_not_ready),
+ accum_tx->agg.frame_not_ready,
+ delta_tx->agg.frame_not_ready,
+ max_tx->agg.frame_not_ready);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg underrun:",
+ le32_to_cpu(tx->agg.underrun),
+ accum_tx->agg.underrun,
+ delta_tx->agg.underrun, max_tx->agg.underrun);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg bt_prio_kill:",
+ le32_to_cpu(tx->agg.bt_prio_kill),
+ accum_tx->agg.bt_prio_kill,
+ delta_tx->agg.bt_prio_kill,
+ max_tx->agg.bt_prio_kill);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "agg rx_ba_rsp_cnt:",
+ le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+ accum_tx->agg.rx_ba_rsp_cnt,
+ delta_tx->agg.rx_ba_rsp_cnt,
+ max_tx->agg.rx_ba_rsp_cnt);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct statistics_general) * 10 + 300;
+ ssize_t ret;
+ struct statistics_general_common *general, *accum_general;
+ struct statistics_general_common *delta_general, *max_general;
+ struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct statistics_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!iwl_legacy_is_alive(priv))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+		IWL_ERR(priv, "Cannot allocate buffer\n");
+ return -ENOMEM;
+ }
+
+	/* The statistics information displayed here is based on
+	 * the last statistics notification from the uCode and
+	 * might not reflect the current uCode activity.
+ */
+ general = &priv->_4965.statistics.general.common;
+ dbg = &priv->_4965.statistics.general.common.dbg;
+ div = &priv->_4965.statistics.general.common.div;
+ accum_general = &priv->_4965.accum_statistics.general.common;
+ accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
+ accum_div = &priv->_4965.accum_statistics.general.common.div;
+ delta_general = &priv->_4965.delta_statistics.general.common;
+ max_general = &priv->_4965.max_delta.general.common;
+ delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
+ max_dbg = &priv->_4965.max_delta.general.common.dbg;
+ delta_div = &priv->_4965.delta_statistics.general.common.div;
+ max_div = &priv->_4965.max_delta.general.common.div;
+
+ pos += iwl4965_statistics_flag(priv, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_header, "Statistics_General:");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_value, "temperature:",
+ le32_to_cpu(general->temperature));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_value, "ttl_timestamp:",
+ le32_to_cpu(general->ttl_timestamp));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "burst_check:",
+ le32_to_cpu(dbg->burst_check),
+ accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "burst_count:",
+ le32_to_cpu(dbg->burst_count),
+ accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "wait_for_silence_timeout_count:",
+ le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
+ accum_dbg->wait_for_silence_timeout_cnt,
+ delta_dbg->wait_for_silence_timeout_cnt,
+ max_dbg->wait_for_silence_timeout_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time,
+ delta_general->sleep_time, max_general->sleep_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "slots_out:",
+ le32_to_cpu(general->slots_out),
+ accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle,
+ delta_general->slots_idle, max_general->slots_idle);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "rx_enable_counter:",
+ le32_to_cpu(general->rx_enable_counter),
+ accum_general->rx_enable_counter,
+ delta_general->rx_enable_counter,
+ max_general->rx_enable_counter);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ fmt_table, "num_of_sos_states:",
+ le32_to_cpu(general->num_of_sos_states),
+ accum_general->num_of_sos_states,
+ delta_general->num_of_sos_states,
+ max_general->num_of_sos_states);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
new file mode 100644
index 000000000000..6c8e35361a9e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
@@ -0,0 +1,59 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t iwl4965_ucode_general_stats_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos);
+#else
+static ssize_t
+iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t
+iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+static ssize_t
+iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
new file mode 100644
index 000000000000..cb9baab1ff7d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
@@ -0,0 +1,154 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-commands.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+#include "iwl-4965.h"
+#include "iwl-io.h"
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
+{
+ u16 count;
+ int ret;
+
+ for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+ /* Request semaphore */
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ EEPROM_SEM_TIMEOUT);
+ if (ret >= 0) {
+ IWL_DEBUG_IO(priv,
+ "Acquired semaphore after %d tries.\n",
+ count+1);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
+{
+ iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+}
+
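+/* Check that the EEPROM and calibration versions are recent enough for
+ * the versions required by this device configuration. */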
+int iwl4965_eeprom_check_version(struct iwl_priv *priv)
+{
+ u16 eeprom_ver;
+ u16 calib_ver;
+
+ eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
+ calib_ver = iwl_legacy_eeprom_query16(priv,
+ EEPROM_4965_CALIB_VERSION_OFFSET);
+
+ if (eeprom_ver < priv->cfg->eeprom_ver ||
+ calib_ver < priv->cfg->eeprom_calib_ver)
+ goto err;
+
+ IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+ eeprom_ver, calib_ver);
+
+ return 0;
+err:
+ IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
+ "CALIB=0x%x < 0x%x\n",
+ eeprom_ver, priv->cfg->eeprom_ver,
+ calib_ver, priv->cfg->eeprom_calib_ver);
+ return -EINVAL;
+
+}
+
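+/* Extract the MAC address from the EEPROM image */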
+void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
+{
+ const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
+ EEPROM_MAC_ADDRESS);
+ memcpy(mac, addr, ETH_ALEN);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
index 9166794eda0d..08b189c8472d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -789,4 +789,26 @@ struct iwl4965_scd_bc_tbl {
u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
} __packed;
+
+#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
+
+/* RSSI to dBm */
+#define IWL4965_RSSI_OFFSET 44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+#define IWL4965_DEFAULT_TX_RETRY 15
+
+/* Limit range of txpower output target to be between these values */
+#define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
+
+/* EEPROM */
+#define IWL4965_FIRST_AMPDU_QUEUE 10
+
+
#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
new file mode 100644
index 000000000000..26d324e30692
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
@@ -0,0 +1,74 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-commands.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-4965-led.h"
+
+/* Send led command */
+static int
+iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
+{
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_LEDS_CMD,
+ .len = sizeof(struct iwl_led_cmd),
+ .data = led_cmd,
+ .flags = CMD_ASYNC,
+ .callback = NULL,
+ };
+ u32 reg;
+
+ reg = iwl_read32(priv, CSR_LED_REG);
+ if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
+ iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+
+ return iwl_legacy_send_cmd(priv, &cmd);
+}
+
+/* Set led register on */
+void iwl4965_led_enable(struct iwl_priv *priv)
+{
+ iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+}
+
+const struct iwl_led_ops iwl4965_led_ops = {
+ .cmd = iwl4965_send_led_cmd,
+};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
new file mode 100644
index 000000000000..5ed3615fc338
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
@@ -0,0 +1,33 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_4965_led_h__
+#define __iwl_4965_led_h__
+
+extern const struct iwl_led_ops iwl4965_led_ops;
+void iwl4965_led_enable(struct iwl_priv *priv);
+
+#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
new file mode 100644
index 000000000000..5a8a3cce27bc
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
@@ -0,0 +1,1260 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
+#include "iwl-sta.h"
+
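+/* Schedule the tx_flush work when the uCode reports a frame was aborted
+ * by an RF-kill flush. */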
+void iwl4965_check_abort_status(struct iwl_priv *priv,
+ u8 frame_count, u32 status)
+{
+ if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+ IWL_ERR(priv, "Tx flush command to flush out all frames\n");
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
+ queue_work(priv->workqueue, &priv->tx_flush);
+ }
+}
+
+/*
+ * Module parameters
+ */
+struct iwl_mod_params iwl4965_mod_params = {
+ .amsdu_size_8K = 1,
+ .restart_fw = 1,
+ /* the rest are 0 by default */
+};
+
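+/* Return all Rx buffers to rx_used (unmapping/freeing any attached pages)
+ * and reset the queue read/write indexes. */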
+void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __iwl_legacy_free_pages(priv, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
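+/* Program the Rx DMA channel: RBD ring base, status write-back address,
+ * RB size, RB timeout and interrupt coalescing. */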
+int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+ u32 rb_timeout = 0;
+
+ if (priv->cfg->mod_params->amsdu_size_8K)
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write index */
+ iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->bd_dma >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+			   rb_size |
+			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+ return 0;
+}
+
+static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+ if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
+ iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+ iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
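+/* Basic NIC initialization: APM init/config, power source selection, and
+ * allocation (or reset) of the Rx and Tx/command queues. */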
+int iwl4965_hw_nic_init(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ int ret;
+
+ /* nic_init */
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->cfg->ops->lib->apm_ops.init(priv);
+
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwl4965_set_pwr_vmain(priv);
+
+ priv->cfg->ops->lib->apm_ops.config(priv);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (!rxq->bd) {
+ ret = iwl_legacy_rx_queue_alloc(priv);
+ if (ret) {
+ IWL_ERR(priv, "Unable to initialize Rx queue\n");
+ return -ENOMEM;
+ }
+ } else
+ iwl4965_rx_queue_reset(priv, rxq);
+
+ iwl4965_rx_replenish(priv);
+
+ iwl4965_rx_init(priv, rxq);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ rxq->need_update = 1;
+ iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (!priv->txq) {
+ ret = iwl4965_txq_ctx_alloc(priv);
+ if (ret)
+ return ret;
+ } else
+ iwl4965_txq_ctx_reset(priv);
+
+ set_bit(STATUS_INIT, &priv->status);
+
+ return 0;
+}
+
+/**
+ * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
+ dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32)(dma_addr >> 8));
+}
+
+/**
+ * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void iwl4965_rx_queue_restock(struct iwl_priv *priv)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
+ rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(priv->workqueue, &priv->rx_replenish);
+
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
+ }
+}
+
+/**
+ * iwl4965_rx_allocate - allocate pages and move buffers from rx_used to rx_free
+ *
+ * A page is allocated and DMA-mapped for each buffer moved to rx_free.
+ *
+ * The Rx queue itself is restocked by the callers via iwl4965_rx_queue_restock();
+ * this is called from the replenish paths (a scheduled work item, except
+ * during initialization).
+ */
+static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (priv->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+ "order: %d\n",
+ priv->hw_params.rx_page_order);
+
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(priv,
+ "Failed to alloc_pages with %s. "
+ "Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ?
+ "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, priv->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ /* Get physical address of the RB */
+ rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ priv->alloc_rxb_page++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
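+/* Allocate Rx buffers (may sleep) and restock the Rx queue */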
+void iwl4965_rx_replenish(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ iwl4965_rx_allocate(priv, GFP_KERNEL);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl4965_rx_queue_restock(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
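+/* Atomic variant: allocate with GFP_ATOMIC and restock immediately */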
+void iwl4965_rx_replenish_now(struct iwl_priv *priv)
+{
+ iwl4965_rx_allocate(priv, GFP_ATOMIC);
+
+ iwl4965_rx_queue_restock(priv);
+}
+
+/* Assumes that the page field of the buffers in 'pool' is kept accurate.
+ * If a page has been detached, the pool entry must have its page set to NULL.
+ * This free routine walks the list of pool entries and, if the page is
+ * non-NULL, unmaps and frees it.
+ */
+void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __iwl_legacy_free_pages(priv, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ }
+
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+
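+/* Stop the Rx DMA channel and wait for it to report idle */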
+int iwl4965_rxq_stop(struct iwl_priv *priv)
+{
+
+ /* stop Rx DMA */
+ iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+ return 0;
+}
+
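+/*
+ * Convert hardware rate_n_flags to a mac80211 rate index: the MCS number
+ * for HT rates, or the legacy rate table index (minus the band offset).
+ * Returns -1 if no matching legacy rate is found.
+ */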
+int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+{
+ int idx = 0;
+ int band_offset = 0;
+
+ /* HT rate format: mac80211 wants an MCS number, which is just LSB */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+ return idx;
+ /* Legacy rate format, search for match in table */
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ band_offset = IWL_FIRST_OFDM_RATE;
+ for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+ if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx - band_offset;
+ }
+
+ return -1;
+}
+
+static int iwl4965_calc_rssi(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp)
+{
+ /* data from PHY/DSP regarding signal strength, etc.,
+ * contents are always there, not configurable by host. */
+ struct iwl4965_rx_non_cfg_phy *ncphy =
+ (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+ u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
+ >> IWL49_AGC_DB_POS;
+
+ u32 valid_antennae =
+ (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
+ >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
+ u8 max_rssi = 0;
+ u32 i;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the digital signal processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's automatic gain control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info. */
+ for (i = 0; i < 3; i++)
+ if (valid_antennae & (1 << i))
+ max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
+
+ IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
+ max_rssi, agc);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
+ return max_rssi - agc - IWL4965_RSSI_OFFSET;
+}
+
+
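+/*
+ * Translate the per-MPDU decryption status bits into the RX_RES_STATUS_*
+ * form consumed by iwl_legacy_set_decrypted_flag().
+ */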
+static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
+{
+ u32 decrypt_out = 0;
+
+ if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+ RX_RES_STATUS_STATION_FOUND)
+ decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
+ RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+ decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+ /* packet was not encrypted */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_NONE)
+ return decrypt_out;
+
+ /* packet was encrypted with unknown alg */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_ERR)
+ return decrypt_out;
+
+ /* decryption was not done in HW */
+ if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+ RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+ return decrypt_out;
+
+ switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ /* alg is CCM: check MIC only */
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+ /* Bad MIC */
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+ /* Bad TTAK */
+ decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+ break;
+ }
+ /* fall through if TTAK OK */
+ default:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+ break;
+ }
+
+ IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
+ decrypt_in, decrypt_out);
+
+ return decrypt_out;
+}
+
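+/* Hand a received frame to mac80211, attaching the Rx page as an skb fragment */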
+static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ u16 len,
+ u32 ampdu_status,
+ struct iwl_rx_mem_buffer *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!priv->is_open)) {
+ IWL_DEBUG_DROP_LIMIT(priv,
+ "Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ /* In case of HW accelerated crypto and bad decryption, drop */
+ if (!priv->cfg->mod_params->sw_crypto &&
+ iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
+ return;
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IWL_ERR(priv, "dev_alloc_skb failed\n");
+ return;
+ }
+
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+ iwl_legacy_update_stats(priv, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(priv->hw, skb);
+ priv->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+/* Called for REPLY_RX (legacy ABG frames), or
+ * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
+void iwl4965_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_phy_res *phy_res;
+ __le32 rx_pkt_status;
+ struct iwl_rx_mpdu_res_start *amsdu;
+ u32 len;
+ u32 ampdu_status;
+ u32 rate_n_flags;
+
+ /**
+ * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
+ * REPLY_RX: physical layer info is in this buffer
+ * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
+ * command and cached in priv->last_phy_res
+ *
+ * Here we set up local variables depending on which command is
+ * received.
+ */
+ if (pkt->hdr.cmd == REPLY_RX) {
+ phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ + phy_res->cfg_phy_cnt);
+
+ len = le16_to_cpu(phy_res->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt + len);
+ ampdu_status = le32_to_cpu(rx_pkt_status);
+ } else {
+ if (!priv->_4965.last_phy_res_valid) {
+ IWL_ERR(priv, "MPDU frame without cached PHY data\n");
+ return;
+ }
+ phy_res = &priv->_4965.last_phy_res;
+ amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+ len = le16_to_cpu(amsdu->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
+ ampdu_status = iwl4965_translate_rx_status(priv,
+ le32_to_cpu(rx_pkt_status));
+ }
+
+	if (unlikely(phy_res->cfg_phy_cnt > 20)) {
+		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
+ phy_res->cfg_phy_cnt);
+ return;
+ }
+
+ if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
+ le32_to_cpu(rx_pkt_status));
+ return;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+	rx_status.freq =
+		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+					rx_status.band);
+ rx_status.rate_idx =
+ iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+ rx_status.flag = 0;
+
+ /* TSF isn't reliable. In order to allow smooth user experience,
+	 * this workaround doesn't propagate it to mac80211 */
+ /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+
+ priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+ /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+ rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
+
+ iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
+ IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
+ rx_status.signal, (unsigned long long)rx_status.mactime);
+
+ /*
+ * "antenna number"
+ *
+ * It seems that the antenna field in the phy flags value
+ * is actually a bit field. This is undefined by radiotap,
+ * it wants an actual antenna number but I always get "7"
+ * for most legacy frames I receive indicating that the
+ * same frame was received on all three RX chains.
+ *
+ * I think this field should be removed in favor of a
+ * new 802.11n radiotap field "RX chains" that is defined
+ * as a bitmask.
+ */
+ rx_status.antenna =
+ (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+ /* set the preamble flag if appropriate */
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ /* Set up the HT phy flags */
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ rx_status.flag |= RX_FLAG_HT;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ rx_status.flag |= RX_FLAG_40MHZ;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
+
+ iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+ rxb, &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
+ * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
+void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ priv->_4965.last_phy_res_valid = true;
+ memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
+ sizeof(struct iwl_rx_phy_res));
+}
+
+static int iwl4965_get_single_channel_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ struct iwl_scan_channel *scan_ch)
+{
+ const struct ieee80211_supported_band *sband;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added = 0;
+ u16 channel = 0;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband) {
+ IWL_ERR(priv, "invalid band\n");
+ return added;
+ }
+
+ active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
+ passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ channel = iwl_legacy_get_single_channel_number(priv, band);
+ if (channel) {
+ scan_ch->channel = cpu_to_le16(channel);
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+ added++;
+ } else
+ IWL_ERR(priv, "no valid channel found\n");
+ return added;
+}
+
+static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ u8 is_active, u8 n_probes,
+ struct iwl_scan_channel *scan_ch)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+ u16 channel;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
+ passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
+ chan = priv->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ channel = chan->hw_value;
+ scan_ch->channel = cpu_to_le16(channel);
+
+ ch_info = iwl_legacy_get_channel_info(priv, band, channel);
+ if (!iwl_legacy_is_channel_valid(ch_info)) {
+ IWL_DEBUG_SCAN(priv,
+ "Channel %d is INVALID for this band.\n",
+ channel);
+ continue;
+ }
+
+ if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ else
+ scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+ if (n_probes)
+ scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+ IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
+ channel, le32_to_cpu(scan_ch->type),
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ "ACTIVE" : "PASSIVE",
+ (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+ active_dwell : passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
+ return added;
+}
+
+int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+{
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_SCAN_CMD,
+ .len = sizeof(struct iwl_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+ struct iwl_scan_cmd *scan;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ u32 rate_flags = 0;
+ u16 cmd_len;
+ u16 rx_chain = 0;
+ enum ieee80211_band band;
+ u8 n_probes = 0;
+ u8 rx_ant = priv->hw_params.valid_rx_ant;
+ u8 rate;
+ bool is_active = false;
+ int chan_mod;
+ u8 active_chains;
+ u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (vif)
+ ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+ if (!priv->scan_cmd) {
+ priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
+ IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+ if (!priv->scan_cmd) {
+ IWL_DEBUG_SCAN(priv,
+ "fail to allocate memory for scan\n");
+ return -ENOMEM;
+ }
+ }
+ scan = priv->scan_cmd;
+ memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
+
+ if (iwl_legacy_is_any_associated(priv)) {
+ u16 interval = 0;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+
+ IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
+ if (priv->is_internal_short_scan)
+ interval = 0;
+ else
+ interval = vif->bss_conf.beacon_int;
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+
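+ /* Encode the suspend time for the uCode: the quotient (whole
+ * beacon intervals) goes into the bits above bit 22, and the
+ * remainder, scaled by 1024, into the lower bits. */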
+ extra = (suspend_time / interval) << 22;
+ scan_suspend_time = (extra |
+ ((suspend_time % interval) * 1024));
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ }
+
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ } else if (priv->scan_request->n_ssids) {
+ int i, p = 0;
+ IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+ for (i = 0; i < priv->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ switch (priv->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ chan_mod = le32_to_cpu(
+ priv->contexts[IWL_RXON_CTX_BSS].active.flags &
+ RXON_FLG_CHANNEL_MODE_MSK)
+ >> RXON_FLG_CHANNEL_MODE_POS;
+ if (chan_mod == CHANNEL_MODE_PURE_40) {
+ rate = IWL_RATE_6M_PLCP;
+ } else {
+ rate = IWL_RATE_1M_PLCP;
+ rate_flags = RATE_MCS_CCK_MSK;
+ }
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate = IWL_RATE_6M_PLCP;
+ break;
+ default:
+ IWL_WARN(priv, "Invalid scan band\n");
+ return -EIO;
+ }
+
+ /*
+ * If active scanning is requested but a certain channel is
+ * marked passive, we can do active scanning if we detect
+ * transmissions.
+ *
+ * There is an issue with some firmware versions that triggers
+ * a sysassert on a "good CRC threshold" of zero (== disabled),
+ * on a radar channel even though this means that we should NOT
+ * send probes.
+ *
+ * The "good CRC threshold" is the number of frames that we
+ * need to receive during our dwell time on a channel before
+ * sending out probes -- setting this to a huge value will
+ * mean we never reach it, but at the same time work around
+ * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
+ * here instead of IWL_GOOD_CRC_TH_DISABLED.
+ */
+ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+ IWL_GOOD_CRC_TH_NEVER;
+
+ band = priv->scan_band;
+
+ if (priv->cfg->scan_rx_antennas[band])
+ rx_ant = priv->cfg->scan_rx_antennas[band];
+
+ if (priv->cfg->scan_tx_antennas[band])
+ scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
+
+ priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
+ priv->scan_tx_ant[band],
+ scan_tx_antennas);
+ rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
+ scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
+
+ /* In power save mode use one chain, otherwise use all chains */
+ if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ /* rx_ant has been set to all valid chains previously */
+ active_chains = rx_ant &
+ ((u8)(priv->chain_noise_data.active_chains));
+ if (!active_chains)
+ active_chains = rx_ant;
+
+ IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
+ priv->chain_noise_data.active_chains);
+
+ rx_ant = iwl4965_first_antenna(active_chains);
+ }
+
+ /* MIMO is not used here, but value is required */
+ rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ scan->rx_chain = cpu_to_le16(rx_chain);
+ if (!priv->is_internal_short_scan) {
+ cmd_len = iwl_legacy_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ vif->addr,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ } else {
+ /* use bcast addr, will not be transmitted but must be valid */
+ cmd_len = iwl_legacy_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ iwlegacy_bcast_addr, NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+
+ }
+ scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+ scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
+ RXON_FILTER_BCON_AWARE_MSK);
+
+ if (priv->is_internal_short_scan) {
+ scan->channel_count =
+ iwl4965_get_single_channel_for_scan(priv, vif, band,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ } else {
+ scan->channel_count =
+ iwl4965_get_channels_for_scan(priv, vif, band,
+ is_active, n_probes,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ }
+ if (scan->channel_count == 0) {
+ IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
+ return -EIO;
+ }
+
+ cmd.len += le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct iwl_scan_channel);
+ cmd.data = scan;
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(STATUS_SCAN_HW, &priv->status);
+
+ ret = iwl_legacy_send_cmd_sync(priv, &cmd);
+ if (ret)
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+
+ return ret;
+}
+
+int iwl4965_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add)
+{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ if (add)
+ return iwl4965_add_bssid_station(priv, vif_priv->ctx,
+ vif->bss_conf.bssid,
+ &vif_priv->ibss_bssid_sta_id);
+ return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
+
+void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed)
+{
+ lockdep_assert_held(&priv->sta_lock);
+
+ if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+ priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ else {
+ IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
+ priv->stations[sta_id].tid[tid].tfds_in_queue,
+ freed);
+ priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+ }
+}
+
+#define IWL_TX_QUEUE_MSK 0xfffff
+
+static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
+{
+ return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
+ priv->current_ht_config.single_chain_sufficient;
+}
+
+#define IWL_NUM_RX_CHAINS_MULTIPLE 3
+#define IWL_NUM_RX_CHAINS_SINGLE 2
+#define IWL_NUM_IDLE_CHAINS_DUAL 2
+#define IWL_NUM_IDLE_CHAINS_SINGLE 1
+
+/*
+ * Determine how many receiver/antenna chains to use.
+ *
+ * More provides better reception via diversity. Fewer saves power
+ * at the expense of throughput, but only when not in powersave to
+ * start with.
+ *
+ * MIMO (dual stream) requires at least 2, but works better with 3.
+ * This does not determine *which* chains to use, just how many.
+ */
+static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
+{
+ /* # of Rx chains to use when expecting MIMO. */
+ if (iwl4965_is_single_rx_stream(priv))
+ return IWL_NUM_RX_CHAINS_SINGLE;
+ else
+ return IWL_NUM_RX_CHAINS_MULTIPLE;
+}
+
+/*
+ * When we are in power saving mode, unless the device supports spatial
+ * multiplexing power save, use the active count for rx chain count.
+ */
+static int
+iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
+{
+ /* # Rx chains when idling, depending on SMPS mode */
+ switch (priv->current_ht_config.smps) {
+ case IEEE80211_SMPS_STATIC:
+ case IEEE80211_SMPS_DYNAMIC:
+ return IWL_NUM_IDLE_CHAINS_SINGLE;
+ case IEEE80211_SMPS_OFF:
+ return active_cnt;
+ default:
+ WARN(1, "invalid SMPS mode %d",
+ priv->current_ht_config.smps);
+ return active_cnt;
+ }
+}
+
+/* up to 4 chains */
+static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
+{
+ u8 res;
+ res = (chain_bitmap & BIT(0)) >> 0;
+ res += (chain_bitmap & BIT(1)) >> 1;
+ res += (chain_bitmap & BIT(2)) >> 2;
+ res += (chain_bitmap & BIT(3)) >> 3;
+ return res;
+}
+
+/**
+ * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
+ *
+ * Selects how many and which Rx receivers/antennas/chains to use.
+ * This should not be used for the scan command; it puts data in the wrong place.
+ */
+void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ bool is_single = iwl4965_is_single_rx_stream(priv);
+ bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
+ u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
+ u32 active_chains;
+ u16 rx_chain;
+
+ /* Tell uCode which antennas are actually connected.
+ * Before first association, we assume all antennas are connected.
+ * Just after first association, iwl4965_chain_noise_calibration()
+ * checks which antennas actually *are* connected. */
+ if (priv->chain_noise_data.active_chains)
+ active_chains = priv->chain_noise_data.active_chains;
+ else
+ active_chains = priv->hw_params.valid_rx_ant;
+
+ rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
+
+ /* How many receivers should we use? */
+ active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
+ idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
+
+ /* Correct the rx chain count according to hw settings
+ * and chain noise calibration.
+ */
+ valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
+ if (valid_rx_cnt < active_rx_cnt)
+ active_rx_cnt = valid_rx_cnt;
+
+ if (valid_rx_cnt < idle_rx_cnt)
+ idle_rx_cnt = valid_rx_cnt;
+
+ rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
+ rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
+
+ ctx->staging.rx_chain = cpu_to_le16(rx_chain);
+
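+ /* Force MIMO decoding only when at least two RX chains are
+ * active and we are not in power save (PMI). */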
+ if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
+ ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+ else
+ ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+
+ IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
+ ctx->staging.rx_chain,
+ active_rx_cnt, idle_rx_cnt);
+
+ WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
+ active_rx_cnt < idle_rx_cnt);
+}
+
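+/*
+ * Return the next antenna after @ant that is present in the @valid
+ * bitmask, wrapping around; if no other valid antenna is found,
+ * return @ant unchanged.
+ */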
+u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
+{
+ int i;
+ u8 ind = ant;
+
+ for (i = 0; i < RATE_ANT_NUM - 1; i++) {
+ ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
+ if (valid & BIT(ind))
+ return ind;
+ }
+ return ant;
+}
+
+static const char *iwl4965_get_fh_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+ IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+ IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IWL_CMD(FH_TSSR_TX_STATUS_REG);
+ IWL_CMD(FH_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
+{
+ int i;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ int pos = 0;
+ size_t bufsz = 0;
+#endif
+ static const u32 fh_tbl[] = {
+ FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH_RSCSR_CHNL0_WPTR,
+ FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_MEM_RSSR_SHARED_CTRL_REG,
+ FH_MEM_RSSR_RX_STATUS_REG,
+ FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG
+ };
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (display) {
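+ /* Rough size budget: ~48 bytes per register line plus a header. */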
+ bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ iwl4965_get_fh_string(fh_tbl[i]),
+ iwl_legacy_read_direct32(priv, fh_tbl[i]));
+ }
+ return pos;
+ }
+#endif
+ IWL_ERR(priv, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ IWL_ERR(priv, " %34s: 0X%08x\n",
+ iwl4965_get_fh_string(fh_tbl[i]),
+ iwl_legacy_read_direct32(priv, fh_tbl[i]));
+ }
+ return 0;
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
new file mode 100644
index 000000000000..31ac672b64e1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
@@ -0,0 +1,2870 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "iwl-dev.h"
+#include "iwl-sta.h"
+#include "iwl-core.h"
+#include "iwl-4965.h"
+
+#define IWL4965_RS_NAME "iwl-4965-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IWL_NUMBER_TRY 1
+#define IWL_HT_NUMBER_TRY 3
+
+#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
+#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
+#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IWL_MISSED_RATE_MAX 15
+/* max time to accumulate history: 3 seconds */
+#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
+
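+/* Map a rate-scale index used in HT mode to the closest legacy
+ * (non-HT) rate index, used when falling back from HT to legacy. */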
+static u8 rs_ht_to_legacy[] = {
+ IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+ IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+ IWL_RATE_6M_INDEX,
+ IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
+ IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
+ IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
+ IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
+};
+
+static const u8 ant_toggle_lookup[] = {
+ /*ANT_NONE -> */ ANT_NONE,
+ /*ANT_A -> */ ANT_B,
+ /*ANT_B -> */ ANT_C,
+ /*ANT_AB -> */ ANT_BC,
+ /*ANT_C -> */ ANT_A,
+ /*ANT_AC -> */ ANT_AB,
+ /*ANT_BC -> */ ANT_AC,
+ /*ANT_ABC -> */ ANT_ABC,
+};
+
+#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
+ [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
+ IWL_RATE_SISO_##s##M_PLCP, \
+ IWL_RATE_MIMO2_##s##M_PLCP,\
+ IWL_RATE_##r##M_IEEE, \
+ IWL_RATE_##ip##M_INDEX, \
+ IWL_RATE_##in##M_INDEX, \
+ IWL_RATE_##rp##M_INDEX, \
+ IWL_RATE_##rn##M_INDEX, \
+ IWL_RATE_##pp##M_INDEX, \
+ IWL_RATE_##np##M_INDEX }
+
+/*
+ * Parameter order:
+ * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to IWL_RATE_INVALID
+ *
+ */
+const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
+ IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
+ IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
+ IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
+ IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
+ IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
+ IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
+ IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
+ IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
+ IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
+ IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
+ IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
+ IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+ IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+};
+
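+/*
+ * Map a uCode rate_n_flags value to an index in iwlegacy_rates[].
+ * For HT rates the low byte carries the PLCP/MCS value: MIMO2 PLCPs
+ * are folded onto the SISO range, and 9 Mbps is skipped since it has
+ * no HT equivalent. Legacy rates are matched by PLCP lookup.
+ * Returns -1 if no match is found.
+ */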
+static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+ int idx = 0;
+
+ /* HT rate format */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+
+ if (idx >= IWL_RATE_MIMO2_6M_PLCP)
+ idx = idx - IWL_RATE_MIMO2_6M_PLCP;
+
+ idx += IWL_FIRST_OFDM_RATE;
+ /* skip 9M, not supported in HT */
+ if (idx >= IWL_RATE_9M_INDEX)
+ idx += 1;
+ if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
+ return idx;
+
+ /* legacy rate format, search for match in table */
+ } else {
+ for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
+ if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx;
+ }
+
+ return -1;
+}
+
+static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
+ struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta);
+static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
+static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
+ bool force_search);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index);
+#else
+static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index)
+{}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in legacy table and will only be used in G
+ * (2.4 GHz) band.
+ */
+
+static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+ 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
+ {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
+ {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+ {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+ {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
+ {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
+ {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
+ {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
+ {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
+ {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
+ {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
+ {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
+};
+
+/* mbps, mcs */
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+ { "1", "BPSK DSSS"},
+ { "2", "QPSK DSSS"},
+ {"5.5", "BPSK CCK"},
+ { "11", "QPSK CCK"},
+ { "6", "BPSK 1/2"},
+ { "9", "BPSK 1/2"},
+ { "12", "QPSK 1/2"},
+ { "18", "QPSK 3/4"},
+ { "24", "16QAM 1/2"},
+ { "36", "16QAM 3/4"},
+ { "48", "64QAM 2/3"},
+ { "54", "64QAM 3/4"},
+ { "60", "64QAM 5/6"},
+};
+
+#define MCS_INDEX_PER_STREAM (8)
+
+static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
+{
+ return (u8)(rate_n_flags & 0xFF);
+}
+
+static void
+iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
+{
+ window->data = 0;
+ window->success_counter = 0;
+ window->success_ratio = IWL_INVALID_VALUE;
+ window->counter = 0;
+ window->average_tpt = IWL_INVALID_VALUE;
+ window->stamp = 0;
+}
+
+static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+ return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ * Remove old data from the statistics: all data older than
+ * TID_MAX_TIME_DIFF is deleted.
+ */
+static void
+iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
+{
+ /* The oldest age we want to keep */
+ u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+ while (tl->queue_count &&
+ (tl->time_stamp < oldest_time)) {
+ tl->total -= tl->packet_count[tl->head];
+ tl->packet_count[tl->head] = 0;
+ tl->time_stamp += TID_QUEUE_CELL_SPACING;
+ tl->queue_count--;
+ tl->head++;
+ if (tl->head >= TID_QUEUE_MAX_SIZE)
+ tl->head = 0;
+ }
+}
+
+/*
+ * Increment the traffic load value for this tid and also remove
+ * any old values that have passed the maximum time period.
+ */
+static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
+ struct ieee80211_hdr *hdr)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 index;
+ struct iwl_traffic_load *tl = NULL;
+ u8 tid;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ } else
+ return MAX_TID_COUNT;
+
+ if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+ return MAX_TID_COUNT;
+
+ tl = &lq_data->load[tid];
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ /* Happens only for the first packet. Initialize the data */
+ if (!(tl->queue_count)) {
+ tl->total = 1;
+ tl->time_stamp = curr_time;
+ tl->queue_count = 1;
+ tl->head = 0;
+ tl->packet_count[0] = 1;
+ return MAX_TID_COUNT;
+ }
+
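+ /* Convert the elapsed time into an index into the circular
+ * packet-count buffer. */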
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ index = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data that is older than */
+ /* TID_MAX_TIME_DIFF */
+ if (index >= TID_QUEUE_MAX_SIZE)
+ iwl4965_rs_tl_rm_old_stats(tl, curr_time);
+
+ index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
+ tl->packet_count[index] = tl->packet_count[index] + 1;
+ tl->total = tl->total + 1;
+
+ if ((index + 1) > tl->queue_count)
+ tl->queue_count = index + 1;
+
+ return tid;
+}
+
+/*
+ * Get the traffic load value for tid.
+ */
+static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 index;
+ struct iwl_traffic_load *tl = NULL;
+
+ if (tid >= TID_MAX_LOAD_COUNT)
+ return 0;
+
+ tl = &(lq_data->load[tid]);
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ if (!(tl->queue_count))
+ return 0;
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ index = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data that is older than */
+ /* TID_MAX_TIME_DIFF */
+ if (index >= TID_QUEUE_MAX_SIZE)
+ iwl4965_rs_tl_rm_old_stats(tl, curr_time);
+
+ return tl->total;
+}
+
+static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_data, u8 tid,
+ struct ieee80211_sta *sta)
+{
+ int ret = -EAGAIN;
+ u32 load;
+
+ load = iwl4965_rs_tl_get_load(lq_data, tid);
+
+ if (load > IWL_AGG_LOAD_THRESHOLD) {
+ IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
+ sta->addr, tid);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+ if (ret == -EAGAIN) {
+ /*
+ * The driver and mac80211 are out of sync; this might be
+ * caused by reloading the firmware. Stop the tx BA
+ * session here.
+ */
+ IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
+ tid);
+ ieee80211_stop_tx_ba_session(sta, tid);
+ }
+ } else {
+ IWL_ERR(priv, "Aggregation not enabled for tid %d "
+ "because load = %u\n", tid, load);
+ }
+ return ret;
+}
+
+static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
+ struct iwl_lq_sta *lq_data,
+ struct ieee80211_sta *sta)
+{
+ if (tid < TID_MAX_LOAD_COUNT)
+ iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
+ else
+ IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
+ tid, TID_MAX_LOAD_COUNT);
+}
+
+static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
+{
+ return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Get the expected throughput entry from an iwl_scale_tbl_info,
+ * guarding against a NULL expected_tpt table.
+ */
+static s32
+iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+ if (tbl->expected_tpt)
+ return tbl->expected_tpt[rs_index];
+ return 0;
+}
+
+/**
+ * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
+ *
+ * We keep a sliding window of the last 62 packets transmitted
+ * at this rate. window->data contains the bitmask of successful
+ * packets.
+ */
+static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+ int scale_index, int attempts, int successes)
+{
+ struct iwl_rate_scale_data *window = NULL;
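+ /* Bit marking the oldest entry in the IWL_RATE_MAX_WINDOW-deep history */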
+ static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
+ s32 fail_count, tpt;
+
+ if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
+ return -EINVAL;
+
+ /* Select window for current tx bit rate */
+ window = &(tbl->win[scale_index]);
+
+ /* Get expected throughput */
+ tpt = iwl4965_get_expected_tpt(tbl, scale_index);
+
+ /*
+ * Keep track of only the latest 62 tx frame attempts in this rate's
+ * history window; anything older isn't really relevant any more.
+ * If we have filled up the sliding window, drop the oldest attempt;
+ * if the oldest attempt (highest bit in bitmap) shows "success",
+ * subtract "1" from the success counter (this is the main reason
+ * we keep these bitmaps!).
+ */
+ while (attempts > 0) {
+ if (window->counter >= IWL_RATE_MAX_WINDOW) {
+
+ /* remove earliest */
+ window->counter = IWL_RATE_MAX_WINDOW - 1;
+
+ if (window->data & mask) {
+ window->data &= ~mask;
+ window->success_counter--;
+ }
+ }
+
+ /* Increment frames-attempted counter */
+ window->counter++;
+
+ /* Shift bitmap by one frame to throw away oldest history */
+ window->data <<= 1;
+
+ /* Mark the most recent #successes attempts as successful */
+ if (successes > 0) {
+ window->success_counter++;
+ window->data |= 0x1;
+ successes--;
+ }
+
+ attempts--;
+ }
+
+ /* Calculate current success ratio, avoid divide-by-0! */
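+ /* (the ratio is scaled by 128, so 12800 represents 100%) */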
+ if (window->counter > 0)
+ window->success_ratio = 128 * (100 * window->success_counter)
+ / window->counter;
+ else
+ window->success_ratio = IWL_INVALID_VALUE;
+
+ fail_count = window->counter - window->success_counter;
+
+ /* Calculate average throughput, if we have enough history. */
+ if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
+ (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
+ window->average_tpt = (window->success_ratio * tpt + 64) / 128;
+ else
+ window->average_tpt = IWL_INVALID_VALUE;
+
+ /* Tag this window as having been updated */
+ window->stamp = jiffies;
+
+ return 0;
+}
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
+ struct iwl_scale_tbl_info *tbl,
+ int index, u8 use_green)
+{
+ u32 rate_n_flags = 0;
+
+ if (is_legacy(tbl->lq_type)) {
+ rate_n_flags = iwlegacy_rates[index].plcp;
+ if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
+ rate_n_flags |= RATE_MCS_CCK_MSK;
+
+ } else if (is_Ht(tbl->lq_type)) {
+ if (index > IWL_LAST_OFDM_RATE) {
+ IWL_ERR(priv, "Invalid HT rate index %d\n", index);
+ index = IWL_LAST_OFDM_RATE;
+ }
+ rate_n_flags = RATE_MCS_HT_MSK;
+
+ if (is_siso(tbl->lq_type))
+ rate_n_flags |= iwlegacy_rates[index].plcp_siso;
+ else
+ rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
+ } else {
+ IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
+ }
+
+ rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+ RATE_MCS_ANT_ABC_MSK);
+
+ if (is_Ht(tbl->lq_type)) {
+ if (tbl->is_ht40) {
+ if (tbl->is_dup)
+ rate_n_flags |= RATE_MCS_DUP_MSK;
+ else
+ rate_n_flags |= RATE_MCS_HT40_MSK;
+ }
+ if (tbl->is_SGI)
+ rate_n_flags |= RATE_MCS_SGI_MSK;
+
+ if (use_green) {
+ rate_n_flags |= RATE_MCS_GF_MSK;
+ if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+ rate_n_flags &= ~RATE_MCS_SGI_MSK;
+ IWL_ERR(priv, "GF was set with SGI:SISO\n");
+ }
+ }
+ }
+ return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct iwl_scale_tbl_info *tbl,
+ int *rate_idx)
+{
+ u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+ u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
+ u8 mcs;
+
+ memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
+ *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
+
+ if (*rate_idx == IWL_RATE_INVALID) {
+ *rate_idx = -1;
+ return -EINVAL;
+ }
+ tbl->is_SGI = 0; /* default legacy setup */
+ tbl->is_ht40 = 0;
+ tbl->is_dup = 0;
+ tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+ tbl->lq_type = LQ_NONE;
+ tbl->max_search = IWL_MAX_SEARCH;
+
+ /* legacy rate format */
+ if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+ if (iwl4965_num_of_ant == 1) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+ }
+ /* HT rate format */
+ } else {
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ tbl->is_SGI = 1;
+
+ if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
+ (rate_n_flags & RATE_MCS_DUP_MSK))
+ tbl->is_ht40 = 1;
+
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ tbl->is_dup = 1;
+
+ mcs = iwl4965_rs_extract_rate(rate_n_flags);
+
+ /* SISO */
+ if (mcs <= IWL_RATE_SISO_60M_PLCP) {
+ if (iwl4965_num_of_ant == 1)
+ tbl->lq_type = LQ_SISO; /*else NONE*/
+ /* MIMO2 */
+ } else {
+ if (iwl4965_num_of_ant == 2)
+ tbl->lq_type = LQ_MIMO2;
+ }
+ }
+ return 0;
+}
+
+/* switch to another antenna/antennas and return 1 */
+/* if no other valid antenna found, return 0 */
+static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+ struct iwl_scale_tbl_info *tbl)
+{
+ u8 new_ant_type;
+
+ if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+ return 0;
+
+ if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
+ return 0;
+
+ new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+ while ((new_ant_type != tbl->ant_type) &&
+ !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
+ new_ant_type = ant_toggle_lookup[new_ant_type];
+
+ if (new_ant_type == tbl->ant_type)
+ return 0;
+
+ tbl->ant_type = new_ant_type;
+ *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+ *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+ return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
+{
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+ return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+ !(ctx->ht.non_gf_sta_present);
+}
+
+/**
+ * iwl4965_rs_get_supported_rates - get the available rates
+ *
+ * For management or broadcast frames, only the basic
+ * available rates are returned.
+ *
+ */
+static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
+ struct ieee80211_hdr *hdr,
+ enum iwl_table_type rate_type)
+{
+ if (is_legacy(rate_type)) {
+ return lq_sta->active_legacy_rate;
+ } else {
+ if (is_siso(rate_type))
+ return lq_sta->active_siso_rate;
+ else
+ return lq_sta->active_mimo2_rate;
+ }
+}
+
+static u16
+iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
+ int rate_type)
+{
+ u8 high = IWL_RATE_INVALID;
+ u8 low = IWL_RATE_INVALID;
+
+ /* For 802.11a or HT, walk to the literally adjacent rate in
+ * the rate table */
+ if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+ int i;
+ u32 mask;
+
+ /* Find the previous rate that is in the rate mask */
+ i = index - 1;
+ for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+ if (rate_mask & mask) {
+ low = i;
+ break;
+ }
+ }
+
+ /* Find the next rate that is in the rate mask */
+ i = index + 1;
+ for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
+ if (rate_mask & mask) {
+ high = i;
+ break;
+ }
+ }
+
+ return (high << 8) | low;
+ }
+
+ low = index;
+ while (low != IWL_RATE_INVALID) {
+ low = iwlegacy_rates[low].prev_rs;
+ if (low == IWL_RATE_INVALID)
+ break;
+ if (rate_mask & (1 << low))
+ break;
+ IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
+ }
+
+ high = index;
+ while (high != IWL_RATE_INVALID) {
+ high = iwlegacy_rates[high].next_rs;
+ if (high == IWL_RATE_INVALID)
+ break;
+ if (rate_mask & (1 << high))
+ break;
+ IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
+ }
+
+ return (high << 8) | low;
+}
+
+static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ u8 scale_index, u8 ht_possible)
+{
+ s32 low;
+ u16 rate_mask;
+ u16 high_low;
+ u8 switch_to_legacy = 0;
+ u8 is_green = lq_sta->is_green;
+ struct iwl_priv *priv = lq_sta->drv;
+
+ /* check if we need to switch from HT to legacy rates.
+ * assumption is that mandatory rates (1Mbps or 6Mbps)
+ * are always supported (spec demand) */
+ if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
+ switch_to_legacy = 1;
+ scale_index = rs_ht_to_legacy[scale_index];
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+
+ if (iwl4965_num_of_ant(tbl->ant_type) > 1)
+ tbl->ant_type =
+ iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+
+ tbl->is_ht40 = 0;
+ tbl->is_SGI = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
+ }
+
+ rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+ /* Mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ /* supp_rates has no CCK bits in A mode */
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate_mask = (u16)(rate_mask &
+ (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+ else
+ rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
+ }
+
+ /* If we switched from HT to legacy, check current rate */
+ if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
+ low = scale_index;
+ goto out;
+ }
+
+ high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
+ scale_index, rate_mask,
+ tbl->lq_type);
+ low = high_low & 0xff;
+
+ if (low == IWL_RATE_INVALID)
+ low = scale_index;
+
+out:
+ return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
+ struct iwl_scale_tbl_info *b)
+{
+ return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
+ (a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void
+iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct sk_buff *skb)
+{
+ int legacy_success;
+ int retries;
+ int rs_index, mac_index, i;
+ struct iwl_lq_sta *lq_sta = priv_sta;
+ struct iwl_link_quality_cmd *table;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_priv *priv = (struct iwl_priv *)priv_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ enum mac80211_rate_control_flags mac_flags;
+ u32 tx_rate;
+ struct iwl_scale_tbl_info tbl_type;
+ struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+ IWL_DEBUG_RATE_LIMIT(priv,
+ "get frame ack response, update rate scale window\n");
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!lq_sta) {
+ IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->drv) {
+ IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+ return;
+ }
+
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return;
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(info->flags & IEEE80211_TX_STAT_AMPDU))
+ return;
+
+ /*
+ * Ignore this Tx frame response if its initial rate doesn't match
+ * that of latest Link Quality command. There may be stragglers
+ * from a previous Link Quality command, but we're no longer interested
+ * in those; they're either from the "active" mode while we're trying
+ * to check "search" mode, or a prior "search" mode after we've moved
+ * to a new "search" mode (which might become the new "active" mode).
+ */
+ table = &lq_sta->lq;
+ tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+ iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
+ priv->band, &tbl_type, &rs_index);
+ if (priv->band == IEEE80211_BAND_5GHZ)
+ rs_index -= IWL_FIRST_OFDM_RATE;
+ mac_flags = info->status.rates[0].flags;
+ mac_index = info->status.rates[0].idx;
+ /* For HT packets, map MCS to PLCP */
+ if (mac_flags & IEEE80211_TX_RC_MCS) {
+ mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
+ if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
+ mac_index++;
+ /*
+ * mac80211 HT index is always zero-indexed; we need to move
+ * HT OFDM rates after CCK rates in 2.4 GHz band
+ */
+ if (priv->band == IEEE80211_BAND_2GHZ)
+ mac_index += IWL_FIRST_OFDM_RATE;
+ }
+ /* Here we actually compare this rate to the latest LQ command */
+ if ((mac_index < 0) ||
+ (tbl_type.is_SGI !=
+ !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
+ (tbl_type.is_ht40 !=
+ !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
+ (tbl_type.is_dup !=
+ !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
+ (tbl_type.ant_type != info->antenna_sel_tx) ||
+ (!!(tx_rate & RATE_MCS_HT_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+ (!!(tx_rate & RATE_MCS_GF_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
+ (rs_index != mac_index)) {
+ IWL_DEBUG_RATE(priv,
+ "initial rate %d does not match %d (0x%x)\n",
+ mac_index, rs_index, tx_rate);
+ /*
+ * Since rates mis-match, the last LQ command may have failed.
+ * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
+ * the driver.
+ */
+ lq_sta->missed_rate_counter++;
+ if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
+ lq_sta->missed_rate_counter = 0;
+ iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
+ CMD_ASYNC, false);
+ }
+ /* Regardless, ignore this status info for outdated rate */
+ return;
+ } else
+ /* Rate did match, so reset the missed_rate_counter */
+ lq_sta->missed_rate_counter = 0;
+
+ /* Figure out if rate scale algorithm is in active or search table */
+ if (iwl4965_table_type_matches(&tbl_type,
+ &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+ curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ } else if (iwl4965_table_type_matches(&tbl_type,
+ &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+ curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ } else {
+ IWL_DEBUG_RATE(priv,
+ "Neither active nor search matches tx rate\n");
+ tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
+ tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
+ tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
+ tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
+ IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
+ tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
+ /*
+ * No matching table found, so bypass the data collection
+ * and continue to perform rate scaling to find the rate table.
+ */
+ iwl4965_rs_stay_in_table(lq_sta, true);
+ goto done;
+ }
+
+ /*
+ * Updating the frame history depends on whether packets were
+ * aggregated.
+ *
+ * For aggregation, all packets were transmitted at the same rate, the
+ * first index into rate scale table.
+ */
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+ iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
+ &rs_index);
+ iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed += (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
+ }
+ } else {
+ /*
+ * For legacy, update the frame history for each Tx retry.
+ */
+ retries = info->status.rates[0].count - 1;
+ /* HW doesn't send more than 15 retries */
+ retries = min(retries, 15);
+
+ /* The last transmission may have been successful */
+ legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ /* Collect data for each rate used during failed TX attempts */
+ for (i = 0; i <= retries; ++i) {
+ tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
+ iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
+ &tbl_type, &rs_index);
+ /*
+ * Only collect stats if retried rate is in the same RS
+ * table as active/search.
+ */
+ if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
+ tmp_tbl = curr_tbl;
+ else if (iwl4965_table_type_matches(&tbl_type,
+ other_tbl))
+ tmp_tbl = other_tbl;
+ else
+ continue;
+ iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
+ i < retries ? 0 : legacy_success);
+ }
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += legacy_success;
+ lq_sta->total_failed += retries + (1 - legacy_success);
+ }
+ }
+ /* The last TX rate is cached in lq_sta; it's set in if/else above */
+ lq_sta->last_rate_n_flags = tx_rate;
+done:
+ /* See if there's a better rate or modulation mode to try. */
+ if (sta && sta->supp_rates[sband->band])
+ iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history statistics.
+ * These control how long we stay using same modulation mode before
+ * searching for a new mode.
+ */
+static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
+ struct iwl_lq_sta *lq_sta)
+{
+ IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
+ lq_sta->stay_in_tbl = 1; /* only place this gets set */
+ if (is_legacy) {
+ lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
+ lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
+ } else {
+ lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
+ lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
+ }
+ lq_sta->table_count = 0;
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = jiffies;
+ lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl)
+{
+ /* Used to choose among HT tables */
+ s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+
+ /* Check for invalid LQ type */
+ if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Legacy rates have only one table */
+ if (is_legacy(tbl->lq_type)) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Choose among many HT tables depending on number of streams
+ * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+ * status */
+ if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+ ht_tbl_pointer = expected_tpt_siso20MHz;
+ else if (is_siso(tbl->lq_type))
+ ht_tbl_pointer = expected_tpt_siso40MHz;
+ else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+ else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
+ ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+
+ if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
+ tbl->expected_tpt = ht_tbl_pointer[0];
+ else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
+ tbl->expected_tpt = ht_tbl_pointer[1];
+ else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
+ tbl->expected_tpt = ht_tbl_pointer[2];
+ else /* AGG+SGI */
+ tbl->expected_tpt = ht_tbl_pointer[3];
+}
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput. When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
+static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl, /* "search" */
+ u16 rate_mask, s8 index)
+{
+ /* "active" values */
+ struct iwl_scale_tbl_info *active_tbl =
+ &(lq_sta->lq_info[lq_sta->active_tbl]);
+ s32 active_sr = active_tbl->win[index].success_ratio;
+ s32 active_tpt = active_tbl->expected_tpt[index];
+
+ /* expected "search" throughput */
+ s32 *tpt_tbl = tbl->expected_tpt;
+
+ s32 new_rate, high, low, start_hi;
+ u16 high_low;
+ s8 rate = index;
+
+ new_rate = high = low = start_hi = IWL_RATE_INVALID;
+
+ for (; ;) {
+ high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
+ tbl->lq_type);
+
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /*
+ * Lower the "search" bit rate, to give new "search" mode
+ * approximately the same throughput as "active" if:
+ *
+ * 1) "Active" mode has been working modestly well (but not
+ * great), and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above the actual
+ * measured "active" throughput (but less than expected
+ * "active" throughput under perfect conditions).
+ * OR
+ * 2) "Active" mode has been working perfectly or very well
+ * and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above expected
+ * "active" throughput (under perfect conditions).
+ */
+ if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
+ ((active_sr > IWL_RATE_DECREASE_TH) &&
+ (active_sr <= IWL_RATE_HIGH_TH) &&
+ (tpt_tbl[rate] <= active_tpt))) ||
+ ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
+ (tpt_tbl[rate] > active_tpt))) {
+
+ /* (2nd or later pass)
+ * If we've already tried to raise the rate, and are
+ * now trying to lower it, use the higher rate. */
+ if (start_hi != IWL_RATE_INVALID) {
+ new_rate = start_hi;
+ break;
+ }
+
+ new_rate = rate;
+
+ /* Loop again with lower rate */
+ if (low != IWL_RATE_INVALID)
+ rate = low;
+
+ /* Lower rate not available, use the original */
+ else
+ break;
+
+ /* Else try to raise the "search" rate to match "active" */
+ } else {
+ /* (2nd or later pass)
+ * If we've already tried to lower the rate, and are
+ * now trying to raise it, use the lower rate. */
+ if (new_rate != IWL_RATE_INVALID)
+ break;
+
+ /* Loop again with higher rate */
+ else if (high != IWL_RATE_INVALID) {
+ start_hi = high;
+ rate = high;
+
+ /* Higher rate not available, use the original */
+ } else {
+ new_rate = rate;
+ break;
+ }
+ }
+ }
+
+ return new_rate;
+}
+
+/*
+ * Set up search table for MIMO2
+ */
+static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl, int index)
+{
+ u16 rate_mask;
+ s32 rate;
+ s8 is_green = lq_sta->is_green;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+ if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ return -1;
+
+ if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
+ == WLAN_HT_CAP_SM_PS_STATIC)
+ return -1;
+
+ /* Need both Tx chains/antennas to support MIMO */
+ if (priv->hw_params.tx_chains_num < 2)
+ return -1;
+
+ IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
+
+ tbl->lq_type = LQ_MIMO2;
+ tbl->is_dup = lq_sta->is_dup;
+ tbl->action = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
+ rate_mask = lq_sta->active_mimo2_rate;
+
+ if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
+
+ rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
+
+ IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
+ rate, rate_mask);
+ if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+ IWL_DEBUG_RATE(priv,
+ "Can't switch with index %d rate mask %x\n",
+ rate, rate_mask);
+ return -1;
+ }
+ tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
+ tbl, rate, is_green);
+
+ IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
+ tbl->current_rate, is_green);
+ return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl, int index)
+{
+ u16 rate_mask;
+ u8 is_green = lq_sta->is_green;
+ s32 rate;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+ if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ return -1;
+
+ IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
+
+ tbl->is_dup = lq_sta->is_dup;
+ tbl->lq_type = LQ_SISO;
+ tbl->action = 0;
+ tbl->max_search = IWL_MAX_SEARCH;
+ rate_mask = lq_sta->active_siso_rate;
+
+ if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ if (is_green)
+ tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
+
+ iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
+ rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
+
+ IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
+ if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+ IWL_DEBUG_RATE(priv,
+ "can not switch with index %d rate mask %x\n",
+ rate, rate_mask);
+ return -1;
+ }
+ tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
+ tbl, rate, is_green);
+ IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
+ tbl->current_rate, is_green);
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ int index)
+{
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
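+	/* Copy size for the search table: the table set-up fields only,
+	 * not the per-rate history windows */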
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ int ret = 0;
+ u8 update_search_tbl_counter = 0;
+
+ tbl->action = IWL_LEGACY_SWITCH_SISO;
+
+ start_action = tbl->action;
+	for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_LEGACY_SWITCH_ANTENNA1:
+ case IWL_LEGACY_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
+
+ if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ /* Don't change antenna if success has been great */
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ /* Set up search table to try other antenna */
+ memcpy(search_tbl, tbl, sz);
+
+ if (iwl4965_rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate, search_tbl)) {
+ update_search_tbl_counter = 1;
+ iwl4965_rs_set_expected_tpt_table(lq_sta,
+ search_tbl);
+ goto out;
+ }
+ break;
+ case IWL_LEGACY_SWITCH_SISO:
+ IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
+
+ /* Set up search table to try SISO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
+ search_tbl, index);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+
+ break;
+ case IWL_LEGACY_SWITCH_MIMO2_AB:
+ case IWL_LEGACY_SWITCH_MIMO2_AC:
+ case IWL_LEGACY_SWITCH_MIMO2_BC:
+ IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
+
+ /* Set up search table to try MIMO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
+ conf, sta,
+ search_tbl, index);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+ break;
+ }
+ tbl->action++;
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
+ tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
+ tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int index)
+{
+ u8 is_green = lq_sta->is_green;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_SISO_SWITCH_ANTENNA1:
+ case IWL_SISO_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
+ if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (iwl4965_rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate, search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IWL_SISO_SWITCH_MIMO2_AB:
+ case IWL_SISO_SWITCH_MIMO2_AC:
+ case IWL_SISO_SWITCH_MIMO2_BC:
+ IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
+ conf, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+ break;
+ case IWL_SISO_SWITCH_GI:
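+			/* Toggle guard interval only if the peer advertises
+			 * SGI support at the current channel width */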
+ if (!tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
+
+ memcpy(search_tbl, tbl, sz);
+ if (is_green) {
+ if (!tbl->is_SGI)
+ break;
+ else
+ IWL_ERR(priv,
+ "SGI was set in GF+SISO\n");
+ }
+ search_tbl->is_SGI = !tbl->is_SGI;
+ iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
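+			/* If SGI is already in use and measured throughput
+			 * meets what the toggled setup could deliver,
+			 * there is nothing to gain */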
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[index])
+ break;
+ }
+ search_tbl->current_rate =
+ iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
+ index, is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ tbl->action++;
+ if (tbl->action > IWL_SISO_SWITCH_GI)
+ tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+ out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_SISO_SWITCH_GI)
+ tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int index)
+{
+ s8 is_green = lq_sta->is_green;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct iwl_rate_scale_data *window = &(tbl->win[index]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+ (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 tx_chains_num = priv->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IWL_MIMO2_SWITCH_ANTENNA1:
+ case IWL_MIMO2_SWITCH_ANTENNA2:
+ IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
+
+ if (tx_chains_num <= 2)
+ break;
+
+ if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (iwl4965_rs_toggle_antenna(valid_tx_ant,
+ &search_tbl->current_rate, search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IWL_MIMO2_SWITCH_SISO_A:
+ case IWL_MIMO2_SWITCH_SISO_B:
+ case IWL_MIMO2_SWITCH_SISO_C:
+ IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
+
+ /* Set up new search table for SISO */
+ memcpy(search_tbl, tbl, sz);
+
+ if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
+ search_tbl->ant_type = ANT_A;
+ else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
+ search_tbl->ant_type = ANT_B;
+ else
+ search_tbl->ant_type = ANT_C;
+
+ if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
+ search_tbl->ant_type))
+ break;
+
+ ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
+ conf, sta,
+ search_tbl, index);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IWL_MIMO2_SWITCH_GI:
+ if (!tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 && !(ht_cap->cap &
+ IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
+
+ /* Set up new search table for MIMO2 */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = !tbl->is_SGI;
+ iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+ /*
+ * If active table already uses the fastest possible
+ * modulation (dual stream with short guard interval),
+ * and it's working well, there's no need to look
+ * for a better type of modulation!
+ */
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[index])
+ break;
+ }
+ search_tbl->current_rate =
+ iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
+ index, is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+
+ }
+ tbl->action++;
+ if (tbl->action > IWL_MIMO2_SWITCH_GI)
+ tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+ out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IWL_MIMO2_SWITCH_GI)
+ tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void
+iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
+{
+ struct iwl_scale_tbl_info *tbl;
+ int i;
+ int active_tbl;
+ int flush_interval_passed = 0;
+ struct iwl_priv *priv;
+
+ priv = lq_sta->drv;
+ active_tbl = lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* If we've been disallowing search, see if we should now allow it */
+ if (lq_sta->stay_in_tbl) {
+
+ /* Elapsed time using current modulation mode */
+ if (lq_sta->flush_timer)
+ flush_interval_passed =
+ time_after(jiffies,
+ (unsigned long)(lq_sta->flush_timer +
+ IWL_RATE_SCALE_FLUSH_INTVL));
+
+ /*
+ * Check if we should allow search for new modulation mode.
+ * If many frames have failed or succeeded, or we've used
+ * this same modulation for a long time, allow search, and
+ * reset history stats that keep track of whether we should
+ * allow a new search. Also (below) reset all bitmaps and
+ * stats in active history.
+ */
+ if (force_search ||
+ (lq_sta->total_failed > lq_sta->max_failure_limit) ||
+ (lq_sta->total_success > lq_sta->max_success_limit) ||
+ ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
+ && (flush_interval_passed))) {
+ IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
+ lq_sta->total_failed,
+ lq_sta->total_success,
+ flush_interval_passed);
+
+ /* Allow search for new mode */
+ lq_sta->stay_in_tbl = 0; /* only place reset */
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = 0;
+
+ /*
+ * Else if we've used this modulation mode enough repetitions
+ * (regardless of elapsed time or success/failure), reset
+ * history bitmaps and rate-specific stats for all rates in
+ * active table.
+ */
+ } else {
+ lq_sta->table_count++;
+ if (lq_sta->table_count >=
+ lq_sta->table_count_limit) {
+ lq_sta->table_count = 0;
+
+ IWL_DEBUG_RATE(priv,
+ "LQ: stay in table clear win\n");
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ iwl4965_rs_rate_scale_clear_window(
+ &(tbl->win[i]));
+ }
+ }
+
+ /* If transitioning to allow "search", reset all history
+ * bitmaps and stats in active table (this will become the new
+ * "search" table). */
+ if (!lq_sta->stay_in_tbl) {
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ iwl4965_rs_rate_scale_clear_window(
+ &(tbl->win[i]));
+ }
+ }
+}
+
+/*
+ * setup rate table in uCode
+ * return rate_n_flags as used in the table
+ */
+static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct iwl_lq_sta *lq_sta,
+ struct iwl_scale_tbl_info *tbl,
+ int index, u8 is_green)
+{
+ u32 rate;
+
+ /* Update uCode's rate table. */
+ rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
+ iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
+ iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+
+ return rate;
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
+ struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct ieee80211_hw *hw = priv->hw;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int low = IWL_RATE_INVALID;
+ int high = IWL_RATE_INVALID;
+ int index;
+ int i;
+ struct iwl_rate_scale_data *window = NULL;
+ int current_tpt = IWL_INVALID_VALUE;
+ int low_tpt = IWL_INVALID_VALUE;
+ int high_tpt = IWL_INVALID_VALUE;
+ u32 fail_count;
+ s8 scale_action = 0;
+ u16 rate_mask;
+ u8 update_lq = 0;
+ struct iwl_scale_tbl_info *tbl, *tbl1;
+ u16 rate_scale_index_msk = 0;
+ u32 rate;
+ u8 is_green = 0;
+ u8 active_tbl = 0;
+ u8 done_search = 0;
+ u16 high_low;
+ s32 sr;
+ u8 tid = MAX_TID_COUNT;
+ struct iwl_tid_data *tid_data;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+ IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ /* TODO: this could probably be improved.. */
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return;
+
+ if (!sta || !lq_sta)
+ return;
+
+ lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+ tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
+ if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
+ tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
+ if (tid_data->agg.state == IWL_AGG_OFF)
+ lq_sta->is_agg = 0;
+ else
+ lq_sta->is_agg = 1;
+ } else
+ lq_sta->is_agg = 0;
+
+ /*
+ * Select rate-scale / modulation-mode table to work with in
+ * the rest of this function: "search" if searching for better
+ * modulation mode, or "active" if doing rate scaling within a mode.
+ */
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+ if (is_legacy(tbl->lq_type))
+ lq_sta->is_green = 0;
+ else
+ lq_sta->is_green = iwl4965_rs_use_green(sta);
+ is_green = lq_sta->is_green;
+
+ /* current tx rate */
+ index = lq_sta->last_txrate_idx;
+
+ IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
+ tbl->lq_type);
+
+ /* rates available for this association, and for modulation mode */
+ rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+ IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
+
+ /* mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ /* supp_rates has no CCK bits in A mode */
+ rate_scale_index_msk = (u16) (rate_mask &
+ (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+ else
+ rate_scale_index_msk = (u16) (rate_mask &
+ lq_sta->supp_rates);
+
+ } else
+ rate_scale_index_msk = rate_mask;
+
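+	/* If the station restriction left no usable rates, fall back to
+	 * the full rate mask for this modulation mode */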
+ if (!rate_scale_index_msk)
+ rate_scale_index_msk = rate_mask;
+
+ if (!((1 << index) & rate_scale_index_msk)) {
+ IWL_ERR(priv, "Current Rate is not valid\n");
+ if (lq_sta->search_better_tbl) {
+ /* revert to active table if search table is not valid*/
+ tbl->lq_type = LQ_NONE;
+ lq_sta->search_better_tbl = 0;
+ tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ /* get "active" rate info */
+ index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
+ rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
+ tbl, index, is_green);
+ }
+ return;
+ }
+
+ /* Get expected throughput table and history window for current rate */
+ if (!tbl->expected_tpt) {
+ IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
+ return;
+ }
+
+ /* force user max rate if set by user */
+ if ((lq_sta->max_rate_idx != -1) &&
+ (lq_sta->max_rate_idx < index)) {
+ index = lq_sta->max_rate_idx;
+ update_lq = 1;
+ window = &(tbl->win[index]);
+ goto lq_update;
+ }
+
+ window = &(tbl->win[index]);
+
+ /*
+ * If there is not enough history to calculate actual average
+ * throughput, keep analyzing results of more tx frames, without
+ * changing rate or mode (bypass most of the rest of this function).
+ * Set up new rate table in uCode only if old rate is not supported
+ * in current association (use new rate found above).
+ */
+ fail_count = window->counter - window->success_counter;
+ if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
+ (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
+ IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
+ "for index %d\n",
+ window->success_counter, window->counter, index);
+
+ /* Can't calculate this yet; not enough history */
+ window->average_tpt = IWL_INVALID_VALUE;
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ iwl4965_rs_stay_in_table(lq_sta, false);
+
+ goto out;
+ }
+ /* Else we have enough samples; calculate estimate of
+ * actual average throughput */
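+	/* average_tpt weights expected_tpt by the success ratio (stored
+	 * scaled by 128); the +64 rounds the division to nearest */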
+ if (window->average_tpt != ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128)) {
+ IWL_ERR(priv,
+ "expected_tpt should have been calculated by now\n");
+ window->average_tpt = ((window->success_ratio *
+ tbl->expected_tpt[index] + 64) / 128);
+ }
+
+ /* If we are searching for better modulation mode, check success. */
+ if (lq_sta->search_better_tbl) {
+ /* If good success, continue using the "search" mode;
+ * no need to send new link quality command, since we're
+ * continuing to use the setup that we've been trying. */
+ if (window->average_tpt > lq_sta->last_tpt) {
+
+ IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
+ "suc=%d cur-tpt=%d old-tpt=%d\n",
+ window->success_ratio,
+ window->average_tpt,
+ lq_sta->last_tpt);
+
+ if (!is_legacy(tbl->lq_type))
+ lq_sta->enable_counter = 1;
+
+ /* Swap tables; "search" becomes "active" */
+ lq_sta->active_tbl = active_tbl;
+ current_tpt = window->average_tpt;
+
+ /* Else poor success; go back to mode in "active" table */
+ } else {
+
+ IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
+ "suc=%d cur-tpt=%d old-tpt=%d\n",
+ window->success_ratio,
+ window->average_tpt,
+ lq_sta->last_tpt);
+
+ /* Nullify "search" table */
+ tbl->lq_type = LQ_NONE;
+
+ /* Revert to "active" table */
+ active_tbl = lq_sta->active_tbl;
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* Revert to "active" rate and throughput info */
+ index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
+ current_tpt = lq_sta->last_tpt;
+
+ /* Need to set up a new rate table in uCode */
+ update_lq = 1;
+ }
+
+ /* Either way, we've made a decision; modulation mode
+ * search is done, allow rate adjustment next time. */
+ lq_sta->search_better_tbl = 0;
+ done_search = 1; /* Don't switch modes below! */
+ goto lq_update;
+ }
+
+ /* (Else) not in search of better modulation mode, try for better
+ * starting rate, while staying in this mode. */
+ high_low = iwl4965_rs_get_adjacent_rate(priv, index,
+ rate_scale_index_msk,
+ tbl->lq_type);
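+	/* adjacent rates are packed as (high << 8) | low */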
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+	/* If user set a max rate, don't allow rates above that constraint */
+ if ((lq_sta->max_rate_idx != -1) &&
+ (lq_sta->max_rate_idx < high))
+ high = IWL_RATE_INVALID;
+
+ sr = window->success_ratio;
+
+ /* Collect measured throughputs for current and adjacent rates */
+ current_tpt = window->average_tpt;
+ if (low != IWL_RATE_INVALID)
+ low_tpt = tbl->win[low].average_tpt;
+ if (high != IWL_RATE_INVALID)
+ high_tpt = tbl->win[high].average_tpt;
+
+ scale_action = 0;
+
+ /* Too many failures, decrease rate */
+ if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
+ IWL_DEBUG_RATE(priv,
+ "decrease rate because of low success_ratio\n");
+ scale_action = -1;
+
+ /* No throughput measured yet for adjacent rates; try increase. */
+ } else if ((low_tpt == IWL_INVALID_VALUE) &&
+ (high_tpt == IWL_INVALID_VALUE)) {
+
+ if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
+ scale_action = 1;
+ else if (low != IWL_RATE_INVALID)
+ scale_action = 0;
+ }
+
+ /* Both adjacent throughputs are measured, but neither one has better
+ * throughput; we're using the best rate, don't change it! */
+ else if ((low_tpt != IWL_INVALID_VALUE) &&
+ (high_tpt != IWL_INVALID_VALUE) &&
+ (low_tpt < current_tpt) &&
+ (high_tpt < current_tpt))
+ scale_action = 0;
+
+ /* At least one adjacent rate's throughput is measured,
+ * and may have better performance. */
+ else {
+ /* Higher adjacent rate's throughput is measured */
+ if (high_tpt != IWL_INVALID_VALUE) {
+ /* Higher rate has better throughput */
+ if (high_tpt > current_tpt &&
+ sr >= IWL_RATE_INCREASE_TH) {
+ scale_action = 1;
+ } else {
+ scale_action = 0;
+ }
+
+ /* Lower adjacent rate's throughput is measured */
+ } else if (low_tpt != IWL_INVALID_VALUE) {
+ /* Lower rate has better throughput */
+ if (low_tpt > current_tpt) {
+ IWL_DEBUG_RATE(priv,
+ "decrease rate because of low tpt\n");
+ scale_action = -1;
+ } else if (sr >= IWL_RATE_INCREASE_TH) {
+ scale_action = 1;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it. */
+ if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
+ ((sr > IWL_RATE_HIGH_TH) ||
+ (current_tpt > (100 * tbl->expected_tpt[low]))))
+ scale_action = 0;
+
+ switch (scale_action) {
+ case -1:
+ /* Decrease starting rate, update uCode's rate table */
+ if (low != IWL_RATE_INVALID) {
+ update_lq = 1;
+ index = low;
+ }
+
+ break;
+ case 1:
+ /* Increase starting rate, update uCode's rate table */
+ if (high != IWL_RATE_INVALID) {
+ update_lq = 1;
+ index = high;
+ }
+
+ break;
+ case 0:
+ /* No change */
+ default:
+ break;
+ }
+
+ IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
+ "high %d type %d\n",
+ index, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+ /* Replace uCode's rate table for the destination station. */
+ if (update_lq)
+ rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
+ tbl, index, is_green);
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ iwl4965_rs_stay_in_table(lq_sta, false);
+
+ /*
+ * Search for new modulation mode if we're:
+ * 1) Not changing rates right now
+ * 2) Not just finishing up a search
+ * 3) Allowing a new search
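+	 * 4) Have sent at least one frame at the current rate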
+ */
+ if (!update_lq && !done_search &&
+ !lq_sta->stay_in_tbl && window->counter) {
+ /* Save current throughput to compare with "search" throughput*/
+ lq_sta->last_tpt = current_tpt;
+
+ /* Select a new "search" modulation mode to try.
+ * If one is found, set up the new "search" table. */
+ if (is_legacy(tbl->lq_type))
+ iwl4965_rs_move_legacy_other(priv, lq_sta,
+ conf, sta, index);
+ else if (is_siso(tbl->lq_type))
+ iwl4965_rs_move_siso_to_other(priv, lq_sta,
+ conf, sta, index);
+ else /* (is_mimo2(tbl->lq_type)) */
+ iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
+ conf, sta, index);
+
+ /* If new "search" mode was selected, set up in uCode table */
+ if (lq_sta->search_better_tbl) {
+ /* Access the "search" table, clear its history. */
+ tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ iwl4965_rs_rate_scale_clear_window(
+ &(tbl->win[i]));
+
+ /* Use new "search" start rate */
+ index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
+
+ IWL_DEBUG_RATE(priv,
+ "Switch current mcs: %X index: %d\n",
+ tbl->current_rate, index);
+ iwl4965_rs_fill_link_cmd(priv, lq_sta,
+ tbl->current_rate);
+ iwl_legacy_send_lq_cmd(priv, ctx,
+ &lq_sta->lq, CMD_ASYNC, false);
+ } else
+ done_search = 1;
+ }
+
+ if (done_search && !lq_sta->stay_in_tbl) {
+ /* If the "active" (non-search) mode was legacy,
+ * and we've tried switching antennas,
+ * but we haven't been able to try HT modes (not available),
+ * stay with best antenna legacy modulation for a while
+ * before next round of mode comparisons. */
+ tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
+ lq_sta->action_counter > tbl1->max_search) {
+ IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
+ iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
+ }
+
+ /* If we're in an HT mode, and all 3 mode switch actions
+ * have been tried and compared, stay in this best modulation
+ * mode for a while before next round of mode comparisons. */
+ if (lq_sta->enable_counter &&
+ (lq_sta->action_counter >= tbl1->max_search)) {
+ if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
+ (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+ (tid != MAX_TID_COUNT)) {
+ tid_data =
+ &priv->stations[lq_sta->lq.sta_id].tid[tid];
+ if (tid_data->agg.state == IWL_AGG_OFF) {
+ IWL_DEBUG_RATE(priv,
+ "try to aggregate tid %d\n",
+ tid);
+ iwl4965_rs_tl_turn_on_agg(priv, tid,
+ lq_sta, sta);
+ }
+ }
+ iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
+ }
+ }
+
+out:
+ tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
+ index, is_green);
+ i = index;
+ lq_sta->last_txrate_idx = i;
+}
+
+/**
+ * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values. These will be replaced later
+ * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
+ * rc80211_simple.
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ * which requires station table entry to exist).
+ */
+static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta)
+{
+ struct iwl_scale_tbl_info *tbl;
+ int rate_idx;
+ int i;
+ u32 rate;
+ u8 use_green = iwl4965_rs_use_green(sta);
+ u8 active_tbl = 0;
+ u8 valid_tx_ant;
+ struct iwl_station_priv *sta_priv;
+ struct iwl_rxon_context *ctx;
+
+ if (!sta || !lq_sta)
+ return;
+
+ sta_priv = (void *)sta->drv_priv;
+ ctx = sta_priv->common.ctx;
+
+ i = lq_sta->last_txrate_idx;
+
+ valid_tx_ant = priv->hw_params.valid_tx_ant;
+
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
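+	/* Fall back to the lowest rate if the saved index is out of range */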
+ if ((i < 0) || (i >= IWL_RATE_COUNT))
+ i = 0;
+
+ rate = iwlegacy_rates[i].plcp;
+ tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
+ rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+ if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
+ rate |= RATE_MCS_CCK_MSK;
+
+ iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
+ if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+ iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+ rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
+ tbl->current_rate = rate;
+ iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
+ iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
+ priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+ iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
+}
+
+static void
+iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_supported_band *sband = txrc->sband;
+ struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_lq_sta *lq_sta = priv_sta;
+ int rate_idx;
+
+ IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
+
+ /* Get max rate if user set max rate */
+ if (lq_sta) {
+ lq_sta->max_rate_idx = txrc->max_rate_idx;
+ if ((sband->band == IEEE80211_BAND_5GHZ) &&
+ (lq_sta->max_rate_idx != -1))
+ lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
+ if ((lq_sta->max_rate_idx < 0) ||
+ (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
+ lq_sta->max_rate_idx = -1;
+ }
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (lq_sta && !lq_sta->drv) {
+ IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+ priv_sta = NULL;
+ }
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ if (rate_control_send_low(sta, priv_sta, txrc))
+ return;
+
+ rate_idx = lq_sta->last_txrate_idx;
+
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+ rate_idx -= IWL_FIRST_OFDM_RATE;
+ /* 6M and 9M shared same MCS index */
+ rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
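+		/* MIMO2 PLCP values map to the second block of MCS indices */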
+ if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ IWL_RATE_MIMO2_6M_PLCP)
+ rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
+ info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_DUP_DATA;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_GREEN_FIELD;
+ } else {
+ /* Check for invalid rates */
+ if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
+ ((sband->band == IEEE80211_BAND_5GHZ) &&
+ (rate_idx < IWL_FIRST_OFDM_RATE)))
+ rate_idx = rate_lowest_index(sband, sta);
+ /* On valid 5 GHz rate, adjust index */
+ else if (sband->band == IEEE80211_BAND_5GHZ)
+ rate_idx -= IWL_FIRST_OFDM_RATE;
+ info->control.rates[0].flags = 0;
+ }
+ info->control.rates[0].idx = rate_idx;
+}
+
+static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
+ gfp_t gfp)
+{
+ struct iwl_lq_sta *lq_sta;
+ struct iwl_station_priv *sta_priv =
+ (struct iwl_station_priv *) sta->drv_priv;
+ struct iwl_priv *priv;
+
+ priv = (struct iwl_priv *)priv_rate;
+ IWL_DEBUG_RATE(priv, "create station rate scale window\n");
+
+ lq_sta = &sta_priv->lq_sta;
+
+ return lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+iwl4965_rs_rate_init(struct iwl_priv *priv,
+ struct ieee80211_sta *sta,
+ u8 sta_id)
+{
+ int i, j;
+ struct ieee80211_hw *hw = priv->hw;
+ struct ieee80211_conf *conf = &priv->hw->conf;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct iwl_station_priv *sta_priv;
+ struct iwl_lq_sta *lq_sta;
+ struct ieee80211_supported_band *sband;
+
+ sta_priv = (struct iwl_station_priv *) sta->drv_priv;
+ lq_sta = &sta_priv->lq_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
+
+ lq_sta->lq.sta_id = sta_id;
+
+ for (j = 0; j < LQ_SIZE; j++)
+ for (i = 0; i < IWL_RATE_COUNT; i++)
+ iwl4965_rs_rate_scale_clear_window(
+ &lq_sta->lq_info[j].win[i]);
+
+ lq_sta->flush_timer = 0;
+ lq_sta->supp_rates = sta->supp_rates[sband->band];
+
+ IWL_DEBUG_RATE(priv, "LQ:"
+ "*** rate scale station global init for station %d ***\n",
+ sta_id);
+ /* TODO: what is a good starting rate for STA? About middle? Maybe not
+ * the lowest or the highest rate.. Could consider using RSSI from
+ * previous packets? Need to have IEEE 802.1X auth succeed immediately
+ * after assoc.. */
+
+ lq_sta->is_dup = 0;
+ lq_sta->max_rate_idx = -1;
+ lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
+ lq_sta->is_green = iwl4965_rs_use_green(sta);
+ lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
+ lq_sta->band = priv->band;
+ /*
+ * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+ * supp_rates[] does not; shift to convert format, force 9 MBits off.
+ */
+ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+ lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+ lq_sta->active_siso_rate &= ~((u16)0x2);
+ lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+
+ /* Same here */
+ lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+ lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+ lq_sta->active_mimo2_rate &= ~((u16)0x2);
+ lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+ /* These values will be overridden later */
+ lq_sta->lq.general_params.single_stream_ant_msk =
+ iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant &
+ ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+ if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+ lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant;
+ }
+
+ /* as default allow aggregation for all tids */
+ lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
+ lq_sta->drv = priv;
+
+ /* Set last_txrate_idx to lowest rate */
+ lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+ if (sband->band == IEEE80211_BAND_5GHZ)
+ lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
+ lq_sta->is_agg = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->dbg_fixed_rate = 0;
+#endif
+
+ iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
+}
+
+static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
+ struct iwl_lq_sta *lq_sta, u32 new_rate)
+{
+ struct iwl_scale_tbl_info tbl_type;
+ int index = 0;
+ int rate_idx;
+ int repeat_rate = 0;
+ u8 ant_toggle_cnt = 0;
+ u8 use_ht_possible = 1;
+ u8 valid_tx_ant = 0;
+ struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
+
+ /* Override starting rate (index 0) if needed for debug purposes */
+ iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+ /* Interpret new_rate (rate_n_flags) */
+ iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+ &tbl_type, &rate_idx);
+
+ /* How many times should we repeat the initial rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ ant_toggle_cnt = 1;
+ repeat_rate = IWL_NUMBER_TRY;
+ } else {
+ repeat_rate = IWL_HT_NUMBER_TRY;
+ }
+
+ lq_cmd->general_params.mimo_delimiter =
+ is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+ /* Fill 1st table entry (index 0) */
+ lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
+
+ if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
+ lq_cmd->general_params.single_stream_ant_msk =
+ tbl_type.ant_type;
+ } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
+ lq_cmd->general_params.dual_stream_ant_msk =
+ tbl_type.ant_type;
+ } /* otherwise we don't modify the existing value */
+
+ index++;
+ repeat_rate--;
+ if (priv)
+ valid_tx_ant = priv->hw_params.valid_tx_ant;
+
+ /* Fill rest of rate table */
+ while (index < LINK_QUAL_MAX_RETRY_NUM) {
+ /* Repeat initial/next rate.
+ * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
+ * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
+ while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (priv &&
+ iwl4965_rs_toggle_antenna(valid_tx_ant,
+ &new_rate, &tbl_type))
+ ant_toggle_cnt = 1;
+ }
+
+ /* Override next rate if needed for debug purposes */
+ iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[index].rate_n_flags =
+ cpu_to_le32(new_rate);
+ repeat_rate--;
+ index++;
+ }
+
+ iwl4965_rs_get_tbl_info_from_mcs(new_rate,
+ lq_sta->band, &tbl_type,
+ &rate_idx);
+
+ /* Indicate to uCode which entries might be MIMO.
+ * If initial rate was MIMO, this will finally end up
+ * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+ if (is_mimo(tbl_type.lq_type))
+ lq_cmd->general_params.mimo_delimiter = index;
+
+ /* Get next rate */
+ new_rate = iwl4965_rs_get_lower_rate(lq_sta,
+ &tbl_type, rate_idx,
+ use_ht_possible);
+
+ /* How many times should we repeat the next rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (priv &&
+ iwl4965_rs_toggle_antenna(valid_tx_ant,
+ &new_rate, &tbl_type))
+ ant_toggle_cnt = 1;
+
+ repeat_rate = IWL_NUMBER_TRY;
+ } else {
+ repeat_rate = IWL_HT_NUMBER_TRY;
+ }
+
+ /* Don't allow HT rates after next pass.
+ * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+ use_ht_possible = 0;
+
+ /* Override next rate if needed for debug purposes */
+ iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
+
+ index++;
+ repeat_rate--;
+ }
+
+ lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+ lq_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
+
+static void
+*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return hw->priv;
+}
+/* rate scale requires free function to be implemented */
+static void iwl4965_rs_free(void *priv_rate)
+{
+ return;
+}
+
+static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
+ void *priv_sta)
+{
+ struct iwl_priv *priv __maybe_unused = priv_r;
+
+ IWL_DEBUG_RATE(priv, "enter\n");
+ IWL_DEBUG_RATE(priv, "leave\n");
+}
+
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+ u32 *rate_n_flags, int index)
+{
+ struct iwl_priv *priv;
+ u8 valid_tx_ant;
+ u8 ant_sel_tx;
+
+ priv = lq_sta->drv;
+ valid_tx_ant = priv->hw_params.valid_tx_ant;
+ if (lq_sta->dbg_fixed_rate) {
+ ant_sel_tx =
+ ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
+ >> RATE_MCS_ANT_POS);
+ if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+ *rate_n_flags = lq_sta->dbg_fixed_rate;
+ IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
+ } else {
+ lq_sta->dbg_fixed_rate = 0;
+ IWL_ERR(priv,
+ "Invalid antenna selection 0x%X, Valid is 0x%X\n",
+ ant_sel_tx, valid_tx_ant);
+ IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
+ }
+ } else {
+ IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
+ }
+}
+
+static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_priv *priv;
+ char buf[64];
+ int buf_size;
+ u32 parsed_rate;
+ struct iwl_station_priv *sta_priv =
+ container_of(lq_sta, struct iwl_station_priv, lq_sta);
+ struct iwl_rxon_context *ctx = sta_priv->common.ctx;
+
+ priv = lq_sta->drv;
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x", &parsed_rate) == 1)
+ lq_sta->dbg_fixed_rate = parsed_rate;
+ else
+ lq_sta->dbg_fixed_rate = 0;
+
+ lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
+ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+
+ IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
+ lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
+
+ if (lq_sta->dbg_fixed_rate) {
+ iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+ iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
+ false);
+ }
+
+ return count;
+}
+
+static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i = 0;
+ int index = 0;
+ ssize_t ret;
+
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_priv *priv;
+ struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+ priv = lq_sta->drv;
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
+ desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
+ lq_sta->total_failed, lq_sta->total_success,
+ lq_sta->active_legacy_rate);
+ desc += sprintf(buff+desc, "fixed rate 0x%X\n",
+ lq_sta->dbg_fixed_rate);
+ desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
+ (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ desc += sprintf(buff+desc, "lq type %s\n",
+ (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+ if (is_Ht(tbl->lq_type)) {
+ desc += sprintf(buff+desc, " %s",
+ (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+ desc += sprintf(buff+desc, " %s",
+ (tbl->is_ht40) ? "40MHz" : "20MHz");
+ desc += sprintf(buff+desc, " %s %s %s\n",
+ (tbl->is_SGI) ? "SGI" : "",
+ (lq_sta->is_green) ? "GF enabled" : "",
+ (lq_sta->is_agg) ? "AGG on" : "");
+ }
+ desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+ lq_sta->last_rate_n_flags);
+ desc += sprintf(buff+desc, "general:"
+ "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
+ lq_sta->lq.general_params.flags,
+ lq_sta->lq.general_params.mimo_delimiter,
+ lq_sta->lq.general_params.single_stream_ant_msk,
+ lq_sta->lq.general_params.dual_stream_ant_msk);
+
+ desc += sprintf(buff+desc, "agg:"
+ "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+ le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
+ lq_sta->lq.agg_params.agg_dis_start_th,
+ lq_sta->lq.agg_params.agg_frame_cnt_limit);
+
+ desc += sprintf(buff+desc,
+ "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+ lq_sta->lq.general_params.start_rate_index[0],
+ lq_sta->lq.general_params.start_rate_index[1],
+ lq_sta->lq.general_params.start_rate_index[2],
+ lq_sta->lq.general_params.start_rate_index[3]);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ index = iwl4965_hwrate_to_plcp_idx(
+ le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
+ if (is_legacy(tbl->lq_type)) {
+ desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
+ i,
+ le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
+ iwl_rate_mcs[index].mbps);
+ } else {
+ desc += sprintf(buff+desc,
+ " rate[%d] 0x%X %smbps (%s)\n",
+ i,
+ le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
+ iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+ .write = iwl4965_rs_sta_dbgfs_scale_table_write,
+ .read = iwl4965_rs_sta_dbgfs_scale_table_read,
+ .open = iwl4965_open_file_generic,
+ .llseek = default_llseek,
+};
+static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i, j;
+ ssize_t ret;
+
+ struct iwl_lq_sta *lq_sta = file->private_data;
+
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ for (i = 0; i < LQ_SIZE; i++) {
+ desc += sprintf(buff+desc,
+ "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
+ "rate=0x%X\n",
+ lq_sta->active_tbl == i ? "*" : "x",
+ lq_sta->lq_info[i].lq_type,
+ lq_sta->lq_info[i].is_SGI,
+ lq_sta->lq_info[i].is_ht40,
+ lq_sta->lq_info[i].is_dup,
+ lq_sta->is_green,
+ lq_sta->lq_info[i].current_rate);
+ for (j = 0; j < IWL_RATE_COUNT; j++) {
+ desc += sprintf(buff+desc,
+ "counter=%d success=%d %%=%d\n",
+ lq_sta->lq_info[i].win[j].counter,
+ lq_sta->lq_info[i].win[j].success_counter,
+ lq_sta->lq_info[i].win[j].success_ratio);
+ }
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+ .read = iwl4965_rs_sta_dbgfs_stats_table_read,
+ .open = iwl4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buff[120];
+ int desc = 0;
+ ssize_t ret;
+
+ struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_priv *priv;
+ struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+
+ priv = lq_sta->drv;
+
+ if (is_Ht(tbl->lq_type))
+ desc += sprintf(buff+desc,
+ "Bit Rate= %d Mb/s\n",
+ tbl->expected_tpt[lq_sta->last_txrate_idx]);
+ else
+ desc += sprintf(buff+desc,
+ "Bit Rate= %d Mb/s\n",
+ iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+ .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
+ .open = iwl4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
+ struct dentry *dir)
+{
+ struct iwl_lq_sta *lq_sta = priv_sta;
+ lq_sta->rs_sta_dbgfs_scale_table_file =
+ debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+ lq_sta, &rs_sta_dbgfs_scale_table_ops);
+ lq_sta->rs_sta_dbgfs_stats_table_file =
+ debugfs_create_file("rate_stats_table", S_IRUSR, dir,
+ lq_sta, &rs_sta_dbgfs_stats_table_ops);
+ lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+ debugfs_create_file("rate_scale_data", S_IRUSR, dir,
+ lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
+ lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+ debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+ &lq_sta->tx_agg_tid_en);
+}
+
+static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
+{
+ struct iwl_lq_sta *lq_sta = priv_sta;
+ debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added, we ignore it.
+ */
+static void
+iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+}
+static struct rate_control_ops rs_4965_ops = {
+ .module = NULL,
+ .name = IWL4965_RS_NAME,
+ .tx_status = iwl4965_rs_tx_status,
+ .get_rate = iwl4965_rs_get_rate,
+ .rate_init = iwl4965_rs_rate_init_stub,
+ .alloc = iwl4965_rs_alloc,
+ .free = iwl4965_rs_free,
+ .alloc_sta = iwl4965_rs_alloc_sta,
+ .free_sta = iwl4965_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = iwl4965_rs_add_debugfs,
+ .remove_sta_debugfs = iwl4965_rs_remove_debugfs,
+#endif
+};
+
+int iwl4965_rate_control_register(void)
+{
+ pr_err("Registering 4965 rate control operations\n");
+ return ieee80211_rate_control_register(&rs_4965_ops);
+}
+
+void iwl4965_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rs_4965_ops);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
index bbd40b7dd597..b9fa2f6411a7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,14 +34,14 @@
#include "iwl-dev.h"
#include "iwl-core.h"
-#include "iwl-agn-calib.h"
+#include "iwl-4965-calib.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
-#include "iwl-agn-hw.h"
-#include "iwl-agn.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
-void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
+void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
@@ -58,14 +58,14 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
le32_to_cpu(missed_beacon->num_recvd_beacons),
le32_to_cpu(missed_beacon->num_expected_beacons));
if (!test_bit(STATUS_SCANNING, &priv->status))
- iwl_init_sensitivity(priv);
+ iwl4965_init_sensitivity(priv);
}
}
/* Calculate noise level, based on measurements during network silence just
* before arriving beacon. This measurement can be done only if we know
* exactly when to expect beacons, therefore only when we're associated. */
-static void iwl_rx_calc_noise(struct iwl_priv *priv)
+static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
{
struct statistics_rx_non_phy *rx_info;
int num_active_rx = 0;
@@ -73,11 +73,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
int bcn_silence_a, bcn_silence_b, bcn_silence_c;
int last_rx_noise;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics)
- rx_info = &(priv->_agn.statistics_bt.rx.general.common);
- else
- rx_info = &(priv->_agn.statistics.rx.general);
+ rx_info = &(priv->_4965.statistics.rx.general);
bcn_silence_a =
le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
bcn_silence_b =
@@ -109,13 +105,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
last_rx_noise);
}
-#ifdef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
/*
* based on the assumption of all statistics counter are in DWORD
* FIXME: This function is for debugging, do not deal with
* the case of counters roll-over.
*/
-static void iwl_accumulative_statistics(struct iwl_priv *priv,
+static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
__le32 *stats)
{
int i, size;
@@ -125,28 +121,16 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
struct statistics_general_common *general, *accum_general;
struct statistics_tx *tx, *accum_tx;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
- prev_stats = (__le32 *)&priv->_agn.statistics_bt;
- accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
- size = sizeof(struct iwl_bt_notif_statistics);
- general = &priv->_agn.statistics_bt.general.common;
- accum_general = &priv->_agn.accum_statistics_bt.general.common;
- tx = &priv->_agn.statistics_bt.tx;
- accum_tx = &priv->_agn.accum_statistics_bt.tx;
- delta = (u32 *)&priv->_agn.delta_statistics_bt;
- max_delta = (u32 *)&priv->_agn.max_delta_bt;
- } else {
- prev_stats = (__le32 *)&priv->_agn.statistics;
- accum_stats = (u32 *)&priv->_agn.accum_statistics;
- size = sizeof(struct iwl_notif_statistics);
- general = &priv->_agn.statistics.general.common;
- accum_general = &priv->_agn.accum_statistics.general.common;
- tx = &priv->_agn.statistics.tx;
- accum_tx = &priv->_agn.accum_statistics.tx;
- delta = (u32 *)&priv->_agn.delta_statistics;
- max_delta = (u32 *)&priv->_agn.max_delta;
- }
+ prev_stats = (__le32 *)&priv->_4965.statistics;
+ accum_stats = (u32 *)&priv->_4965.accum_statistics;
+ size = sizeof(struct iwl_notif_statistics);
+ general = &priv->_4965.statistics.general.common;
+ accum_general = &priv->_4965.accum_statistics.general.common;
+ tx = &priv->_4965.statistics.tx;
+ accum_tx = &priv->_4965.accum_statistics.tx;
+ delta = (u32 *)&priv->_4965.delta_statistics;
+ max_delta = (u32 *)&priv->_4965.max_delta;
+
for (i = sizeof(__le32); i < size;
i += sizeof(__le32), stats++, prev_stats++, delta++,
max_delta++, accum_stats++) {
@@ -161,23 +145,19 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
/* reset accumulative statistics for "no-counter" type statistics */
accum_general->temperature = general->temperature;
- accum_general->temperature_m = general->temperature_m;
accum_general->ttl_timestamp = general->ttl_timestamp;
- accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
- accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
- accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
}
#endif
#define REG_RECALIB_PERIOD (60)
/**
- * iwl_good_plcp_health - checks for plcp error.
+ * iwl4965_good_plcp_health - checks for plcp error.
*
* When the plcp error is exceeding the thresholds, reset the radio
* to improve the throughput.
*/
-bool iwl_good_plcp_health(struct iwl_priv *priv,
+bool iwl4965_good_plcp_health(struct iwl_priv *priv,
struct iwl_rx_packet *pkt)
{
bool rc = true;
@@ -207,28 +187,15 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
struct statistics_rx_phy *ofdm;
struct statistics_rx_ht_phy *ofdm_ht;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
- ofdm = &pkt->u.stats_bt.rx.ofdm;
- ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht;
- combined_plcp_delta =
- (le32_to_cpu(ofdm->plcp_err) -
- le32_to_cpu(priv->_agn.statistics_bt.
- rx.ofdm.plcp_err)) +
- (le32_to_cpu(ofdm_ht->plcp_err) -
- le32_to_cpu(priv->_agn.statistics_bt.
- rx.ofdm_ht.plcp_err));
- } else {
- ofdm = &pkt->u.stats.rx.ofdm;
- ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
- combined_plcp_delta =
- (le32_to_cpu(ofdm->plcp_err) -
- le32_to_cpu(priv->_agn.statistics.
- rx.ofdm.plcp_err)) +
- (le32_to_cpu(ofdm_ht->plcp_err) -
- le32_to_cpu(priv->_agn.statistics.
- rx.ofdm_ht.plcp_err));
- }
+ ofdm = &pkt->u.stats.rx.ofdm;
+ ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
+ combined_plcp_delta =
+ (le32_to_cpu(ofdm->plcp_err) -
+ le32_to_cpu(priv->_4965.statistics.
+ rx.ofdm.plcp_err)) +
+ (le32_to_cpu(ofdm_ht->plcp_err) -
+ le32_to_cpu(priv->_4965.statistics.
+ rx.ofdm_ht.plcp_err));
if ((combined_plcp_delta > 0) &&
((combined_plcp_delta * 100) / plcp_msec) >
@@ -259,58 +226,32 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
return rc;
}
-void iwl_rx_statistics(struct iwl_priv *priv,
+void iwl4965_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
int change;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
- IWL_DEBUG_RX(priv,
- "Statistics notification received (%d vs %d).\n",
- (int)sizeof(struct iwl_bt_notif_statistics),
- le32_to_cpu(pkt->len_n_flags) &
- FH_RSCSR_FRAME_SIZE_MSK);
-
- change = ((priv->_agn.statistics_bt.general.common.temperature !=
- pkt->u.stats_bt.general.common.temperature) ||
- ((priv->_agn.statistics_bt.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
- (pkt->u.stats_bt.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
+ IWL_DEBUG_RX(priv,
+ "Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct iwl_notif_statistics),
+ le32_to_cpu(pkt->len_n_flags) &
+ FH_RSCSR_FRAME_SIZE_MSK);
+
+ change = ((priv->_4965.statistics.general.common.temperature !=
+ pkt->u.stats.general.common.temperature) ||
+ ((priv->_4965.statistics.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
- } else {
- IWL_DEBUG_RX(priv,
- "Statistics notification received (%d vs %d).\n",
- (int)sizeof(struct iwl_notif_statistics),
- le32_to_cpu(pkt->len_n_flags) &
- FH_RSCSR_FRAME_SIZE_MSK);
+ iwl_legacy_recover_from_statistics(priv, pkt);
- change = ((priv->_agn.statistics.general.common.temperature !=
- pkt->u.stats.general.common.temperature) ||
- ((priv->_agn.statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
- (pkt->u.stats.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
-
- }
-
- iwl_recover_from_statistics(priv, pkt);
-
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics)
- memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
- sizeof(priv->_agn.statistics_bt));
- else
- memcpy(&priv->_agn.statistics, &pkt->u.stats,
- sizeof(priv->_agn.statistics));
+ memcpy(&priv->_4965.statistics, &pkt->u.stats,
+ sizeof(priv->_4965.statistics));
set_bit(STATUS_STATISTICS, &priv->status);
@@ -323,34 +264,28 @@ void iwl_rx_statistics(struct iwl_priv *priv,
if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
(pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
- iwl_rx_calc_noise(priv);
+ iwl4965_rx_calc_noise(priv);
queue_work(priv->workqueue, &priv->run_time_calib_work);
}
if (priv->cfg->ops->lib->temp_ops.temperature && change)
priv->cfg->ops->lib->temp_ops.temperature(priv);
}
-void iwl_reply_statistics(struct iwl_priv *priv,
+void iwl4965_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- memset(&priv->_agn.accum_statistics, 0,
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ memset(&priv->_4965.accum_statistics, 0,
sizeof(struct iwl_notif_statistics));
- memset(&priv->_agn.delta_statistics, 0,
+ memset(&priv->_4965.delta_statistics, 0,
sizeof(struct iwl_notif_statistics));
- memset(&priv->_agn.max_delta, 0,
+ memset(&priv->_4965.max_delta, 0,
sizeof(struct iwl_notif_statistics));
- memset(&priv->_agn.accum_statistics_bt, 0,
- sizeof(struct iwl_bt_notif_statistics));
- memset(&priv->_agn.delta_statistics_bt, 0,
- sizeof(struct iwl_bt_notif_statistics));
- memset(&priv->_agn.max_delta_bt, 0,
- sizeof(struct iwl_bt_notif_statistics));
#endif
IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
}
- iwl_rx_statistics(priv, rxb);
+ iwl4965_rx_statistics(priv, rxb);
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
new file mode 100644
index 000000000000..a262c23553d2
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
@@ -0,0 +1,721 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-4965.h"
+
+static struct iwl_link_quality_cmd *
+iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
+{
+ int i, r;
+ struct iwl_link_quality_cmd *link_cmd;
+ u32 rate_flags = 0;
+ __le32 rate_n_flags;
+
+ link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
+ if (!link_cmd) {
+ IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
+ return NULL;
+ }
+ /* Set up the rate scaling to start at selected rate, fall back
+ * all the way down to 1M in IEEE order, and then spin on 1M */
+ if (priv->band == IEEE80211_BAND_5GHZ)
+ r = IWL_RATE_6M_INDEX;
+ else
+ r = IWL_RATE_1M_INDEX;
+
+ if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
+ RATE_MCS_ANT_POS;
+ rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
+ rate_flags);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+ link_cmd->general_params.single_stream_ant_msk =
+ iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+
+ link_cmd->general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant &
+ ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
+ if (!link_cmd->general_params.dual_stream_ant_msk) {
+ link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ link_cmd->general_params.dual_stream_ant_msk =
+ priv->hw_params.valid_tx_ant;
+ }
+
+ link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ link_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+ link_cmd->sta_id = sta_id;
+
+ return link_cmd;
+}
+
+/*
+ * iwl4965_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int
+iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, u8 *sta_id_r)
+{
+ int ret;
+ u8 sta_id;
+ struct iwl_link_quality_cmd *link_cmd;
+ unsigned long flags;
+
+ if (sta_id_r)
+ *sta_id_r = IWL_INVALID_STATION;
+
+ ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].used |= IWL_STA_LOCAL;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ /* Set up default rate scaling table in device's station table */
+ link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv,
+ "Unable to initialize rate scaling for station %pM.\n",
+ addr);
+ return -ENOMEM;
+ }
+
+ ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
+ if (ret)
+ IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return 0;
+}
+
+static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ bool send_if_empty)
+{
+ int i, not_empty = 0;
+ u8 buff[sizeof(struct iwl_wep_cmd) +
+ sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
+ struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
+ size_t cmd_size = sizeof(struct iwl_wep_cmd);
+ struct iwl_host_cmd cmd = {
+ .id = ctx->wep_key_cmd,
+ .data = wep_cmd,
+ .flags = CMD_SYNC,
+ };
+
+ might_sleep();
+
+ memset(wep_cmd, 0, cmd_size +
+ (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
+
+ for (i = 0; i < WEP_KEYS_MAX ; i++) {
+ wep_cmd->key[i].key_index = i;
+ if (ctx->wep_keys[i].key_size) {
+ wep_cmd->key[i].key_offset = i;
+ not_empty = 1;
+ } else {
+ wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
+ }
+
+ wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
+ memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
+ ctx->wep_keys[i].key_size);
+ }
+
+ wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
+ wep_cmd->num_keys = WEP_KEYS_MAX;
+
+ cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
+
+ cmd.len = cmd_size;
+
+ if (not_empty || send_if_empty)
+ return iwl_legacy_send_cmd(priv, &cmd);
+ else
+ return 0;
+}
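/*
 * [Editor's illustration, not part of this patch] The static-WEP command
 * built above always carries WEP_KEYS_MAX key slots; empty slots get an
 * invalid offset, and the command is only sent when at least one key is
 * populated or the caller forces it via send_if_empty. A minimal standalone
 * sketch of that slot/offset decision, using stand-in DEMO_* names and sizes:
 */
#include <stdio.h>

#define DEMO_WEP_KEYS_MAX 4
#define DEMO_INVALID_OFFSET 255

int main(void)
{
	unsigned key_size[DEMO_WEP_KEYS_MAX] = { 0, 13, 0, 5 };	/* bytes per slot */
	int not_empty = 0;

	for (unsigned i = 0; i < DEMO_WEP_KEYS_MAX; i++) {
		unsigned off = key_size[i] ? i : DEMO_INVALID_OFFSET;

		if (key_size[i])
			not_empty = 1;
		printf("slot %u: size=%u offset=%u\n", i, key_size[i], off);
	}
	printf("%s\n", not_empty ? "send REPLY_WEPKEY" : "nothing to send");
	return 0;
}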
+
+int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ lockdep_assert_held(&priv->mutex);
+
+ return iwl4965_static_wepkey_cmd(priv, ctx, false);
+}
+
+int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
+ keyconf->keyidx);
+
+ memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
+ if (iwl_legacy_is_rfkill(priv)) {
+ IWL_DEBUG_WEP(priv,
+ "Not sending REPLY_WEPKEY command due to RFKILL.\n");
+ /* but keys in device are clear anyway so return success */
+ return 0;
+ }
+ ret = iwl4965_static_wepkey_cmd(priv, ctx, true);
+ IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
+ keyconf->keyidx, ret);
+
+ return ret;
+}
+
+int iwl4965_set_default_wep_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (keyconf->keylen != WEP_KEY_LEN_128 &&
+ keyconf->keylen != WEP_KEY_LEN_64) {
+ IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
+ return -EINVAL;
+ }
+
+ keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->hw_key_idx = HW_KEY_DEFAULT;
+ priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
+
+ ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
+ memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
+ keyconf->keylen);
+
+ ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
+ IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
+ keyconf->keylen, keyconf->keyidx, ret);
+
+ return ret;
+}
+
+static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (keyconf->keylen == WEP_KEY_LEN_128)
+ key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
+ priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+ priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
+
+ memcpy(priv->stations[sta_id].keyinfo.key,
+ keyconf->key, keyconf->keylen);
+
+ memcpy(&priv->stations[sta_id].sta.key.key[3],
+ keyconf->key, keyconf->keylen);
+
+ if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+ == STA_KEY_FLG_NO_ENC)
+ priv->stations[sta_id].sta.key.key_offset =
+ iwl_legacy_get_free_ucode_key_index(priv);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ priv->stations[sta_id].sta.key.key_flags = key_flags;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+
+ memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
+ keyconf->keylen);
+
+ memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
+ keyconf->keylen);
+
+ if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+ == STA_KEY_FLG_NO_ENC)
+ priv->stations[sta_id].sta.key.key_offset =
+ iwl_legacy_get_free_ucode_key_index(priv);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ priv->stations[sta_id].sta.key.key_flags = key_flags;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id)
+{
+ unsigned long flags;
+ int ret = 0;
+ __le16 key_flags = 0;
+
+ key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
+ priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ priv->stations[sta_id].keyinfo.keylen = 16;
+
+ if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
+ == STA_KEY_FLG_NO_ENC)
+ priv->stations[sta_id].sta.key.key_offset =
+ iwl_legacy_get_free_ucode_key_index(priv);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ priv->stations[sta_id].sta.key.key_flags = key_flags;
+
+
+ /* This copy is actually not needed: we get the key with each TX */
+ memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
+
+ memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return ret;
+}
+
+void iwl4965_update_tkip_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
+{
+ u8 sta_id;
+ unsigned long flags;
+ int i;
+
+ if (iwl_legacy_scan_cancel(priv)) {
+ /* cancel scan failed, just live w/ bad key and rely
+ briefly on SW decryption */
+ return;
+ }
+
+ sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
+ if (sta_id == IWL_INVALID_STATION)
+ return;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
+ priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
+
+ for (i = 0; i < 5; i++)
+ priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
+ cpu_to_le16(phase1key[i]);
+
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+}
+
+int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id)
+{
+ unsigned long flags;
+ u16 key_flags;
+ u8 keyidx;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ ctx->key_mapping_keys--;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
+ keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
+
+ IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
+ keyconf->keyidx, sta_id);
+
+ if (keyconf->keyidx != keyidx) {
+ /* We need to remove a key with an index different from the one
+ * in the uCode. This means that the key we need to remove has
+ * been replaced by another one with different index.
+ * Don't do anything and return ok
+ */
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return 0;
+ }
+
+ if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+ IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
+ keyconf->keyidx, key_flags);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return 0;
+ }
+
+ if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
+ &priv->ucode_key_table))
+ IWL_ERR(priv, "index %d not used in uCode key table.\n",
+ priv->stations[sta_id].sta.key.key_offset);
+ memset(&priv->stations[sta_id].keyinfo, 0,
+ sizeof(struct iwl_hw_key));
+ memset(&priv->stations[sta_id].sta.key, 0,
+ sizeof(struct iwl4965_keyinfo));
+ priv->stations[sta_id].sta.key.key_flags =
+ STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+ priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ if (iwl_legacy_is_rfkill(priv)) {
+ IWL_DEBUG_WEP(priv,
+ "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return 0;
+ }
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ ctx->key_mapping_keys++;
+ keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
+ keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
+ keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
+ keyconf, sta_id);
+ break;
+ default:
+ IWL_ERR(priv,
+ "Unknown alg: %s cipher = %x\n", __func__,
+ keyconf->cipher);
+ ret = -EINVAL;
+ }
+
+ IWL_DEBUG_WEP(priv,
+ "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx,
+ sta_id, ret);
+
+ return ret;
+}
+
+/**
+ * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
+ *
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
+ */
+int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ struct iwl_link_quality_cmd *link_cmd;
+ unsigned long flags;
+ u8 sta_id;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
+ false, NULL);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare broadcast station\n");
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return -EINVAL;
+ }
+
+ priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used |= IWL_STA_BCAST;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv,
+ "Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return 0;
+}
+
+/**
+ * iwl4965_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwl4965. Placed here to have all bcast station management
+ * code together.
+ */
+static int iwl4965_update_bcast_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ unsigned long flags;
+ struct iwl_link_quality_cmd *link_cmd;
+ u8 sta_id = ctx->bcast_sta_id;
+
+ link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
+ if (!link_cmd) {
+ IWL_ERR(priv,
+ "Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ if (priv->stations[sta_id].lq)
+ kfree(priv->stations[sta_id].lq);
+ else
+ IWL_DEBUG_INFO(priv,
+ "Bcast station rate scaling has not been initialized yet.\n");
+ priv->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return 0;
+}
+
+int iwl4965_update_bcast_stations(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx;
+ int ret = 0;
+
+ for_each_context(priv, ctx) {
+ ret = iwl4965_update_bcast_station(priv, ctx);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
+ */
+int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
+{
+ unsigned long flags;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ /* Remove "disable" flag, to enable Tx for this TID */
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid, u16 ssn)
+{
+ unsigned long flags;
+ int sta_id;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ sta_id = iwl_legacy_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION)
+ return -ENXIO;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].sta.station_flags_msk = 0;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
+ priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
+ priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid)
+{
+ unsigned long flags;
+ int sta_id;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&priv->mutex);
+
+ sta_id = iwl_legacy_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].sta.station_flags_msk = 0;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
+ priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+}
+
+void
+iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
+ priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+ priv->stations[sta_id].sta.sta.modify_mask =
+ STA_MODIFY_SLEEP_TX_COUNT_MSK;
+ priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ iwl_legacy_send_add_sta(priv,
+ &priv->stations[sta_id].sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
new file mode 100644
index 000000000000..5c40502f869a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
@@ -0,0 +1,1369 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ * VO 0
+ * VI 1
+ * BE 2
+ * BK 3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * iwl-4965.c), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
+static const u8 tid_to_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+};
+
+static inline int iwl4965_get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+static inline int
+iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return ctx->ac_to_fifo[tid_to_ac[tid]];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
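/*
 * [Editor's illustration, not part of this patch] A minimal standalone sketch
 * of the TID -> AC mapping encoded by tid_to_ac above, assuming mac80211's
 * usual AC numbering (VO=0, VI=1, BE=2, BK=3) quoted in the comment; the
 * demo_* names are stand-ins, not driver symbols.
 */
#include <stdio.h>

enum { DEMO_AC_VO = 0, DEMO_AC_VI = 1, DEMO_AC_BE = 2, DEMO_AC_BK = 3 };

static const int demo_tid_to_ac[8] = {
	DEMO_AC_BE, DEMO_AC_BK, DEMO_AC_BK, DEMO_AC_BE,
	DEMO_AC_VI, DEMO_AC_VI, DEMO_AC_VO, DEMO_AC_VO
};

static int demo_ac_from_tid(unsigned tid)
{
	/* TIDs 8-15 are rejected, mirroring the -EINVAL above */
	return tid < 8 ? demo_tid_to_ac[tid] : -1;
}

int main(void)
{
	for (unsigned tid = 0; tid < 10; tid++)
		printf("tid %u -> ac %d\n", tid, demo_ac_from_tid(tid));
	return 0;
}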
+
+/*
+ * handle build REPLY_TX command notification.
+ */
+static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
+ struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr,
+ u8 std_id)
+{
+ __le16 fc = hdr->frame_control;
+ __le32 tx_flags = tx_cmd->tx_flags;
+
+ tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ tx_flags |= TX_CMD_FLG_ACK_MSK;
+ if (ieee80211_is_mgmt(fc))
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (ieee80211_is_probe_resp(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ } else {
+ tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ if (ieee80211_is_back_req(fc))
+ tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+ tx_cmd->sta_id = std_id;
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+ } else {
+ tx_cmd->timeout.pm_frame_timeout = 0;
+ }
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = tx_flags;
+ tx_cmd->next_frame_len = 0;
+}
+
+#define RTS_DFAULT_RETRY_LIMIT 60
+
+static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
+ struct iwl_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ __le16 fc)
+{
+ u32 rate_flags;
+ int rate_idx;
+ u8 rts_retry_limit;
+ u8 data_retry_limit;
+ u8 rate_plcp;
+
+ /* Set retry limit on DATA packets and Probe Responses*/
+ if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+
+ /* Set retry limit on RTS packets */
+ rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
+ if (data_retry_limit < rts_retry_limit)
+ rts_retry_limit = data_retry_limit;
+ tx_cmd->rts_retry_limit = rts_retry_limit;
+
+ /* DATA packets will use the uCode station table for rate/antenna
+ * selection */
+ if (ieee80211_is_data(fc)) {
+ tx_cmd->initial_rate_index = 0;
+ tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+ return;
+ }
+
+ /**
+ * If the current TX rate stored in mac80211 has the MCS bit set, it's
+ * not really a TX rate. Thus, we use the lowest supported rate for
+ * this band. Also use the lowest supported rate if the stored rate
+ * index is invalid.
+ */
+ rate_idx = info->control.rates[0].idx;
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
+ (rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY))
+ rate_idx = rate_lowest_index(&priv->bands[info->band],
+ info->control.sta);
+ /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx += IWL_FIRST_OFDM_RATE;
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = iwlegacy_rates[rate_idx].plcp;
+ /* Zero out flags for this packet */
+ rate_flags = 0;
+
+ /* Set CCK flag as needed */
+ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ /* Set up antennas */
+ priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+ priv->hw_params.valid_tx_ant);
+
+ rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
+
+ /* Set the rate in the TX cmd */
+ tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
+}
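/*
 * [Editor's illustration, not part of this patch] A standalone sketch of the
 * 5 GHz rate-index remap performed above: mac80211's 5 GHz band table has no
 * CCK entries, so its index 0 (6 Mb/s) has to be shifted up to the driver's
 * first OFDM slot. The offset value (4) and the rate table below are
 * assumptions for illustration; the real values live in iwlegacy_rates[].
 */
#include <stdio.h>

#define DEMO_FIRST_OFDM_RATE 4	/* assumed: driver indices 0-3 are CCK */

static const char *demo_rates[] = {
	"1M", "2M", "5.5M", "11M",		/* CCK  */
	"6M", "9M", "12M", "18M", "24M",	/* OFDM */
};

int main(void)
{
	int mac80211_idx = 0;	/* lowest 5 GHz rate as seen by mac80211 */
	int band_is_5ghz = 1;
	int driver_idx = mac80211_idx + (band_is_5ghz ? DEMO_FIRST_OFDM_RATE : 0);

	printf("mac80211 idx %d -> driver idx %d (%s)\n",
	       mac80211_idx, driver_idx, demo_rates[driver_idx]);
	return 0;
}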
+
+static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd,
+ struct sk_buff *skb_frag,
+ int sta_id)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+ IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ ieee80211_get_tkip_key(keyconf, skb_frag,
+ IEEE80211_TKIP_P2_KEY, tx_cmd->key);
+ IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
+ (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+
+ memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+ IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
+ "with key %d\n", keyconf->keyidx);
+ break;
+
+ default:
+ IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
+ break;
+ }
+}
+
+/*
+ * start REPLY_TX command process
+ */
+int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->control.sta;
+ struct iwl_station_priv *sta_priv = NULL;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_tx_cmd *tx_cmd;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ int txq_id;
+ dma_addr_t phys_addr;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, firstlen, secondlen;
+ u16 seq_number = 0;
+ __le16 fc;
+ u8 hdr_len;
+ u8 sta_id;
+ u8 wait_write_ptr = 0;
+ u8 tid = 0;
+ u8 *qc = NULL;
+ unsigned long flags;
+ bool is_agg = false;
+
+ if (info->control.vif)
+ ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (iwl_legacy_is_rfkill(priv)) {
+ IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
+ goto drop_unlock;
+ }
+
+ fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (ieee80211_is_auth(fc))
+ IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
+ else if (ieee80211_is_assoc_req(fc))
+ IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
+ else if (ieee80211_is_reassoc_req(fc))
+ IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
+#endif
+
+ hdr_len = ieee80211_hdrlen(fc);
+
+ /* Find index into station table for destination station */
+ sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+ hdr->addr1);
+ goto drop_unlock;
+ }
+
+ IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
+
+ if (sta)
+ sta_priv = (void *)sta->drv_priv;
+
+ if (sta_priv && sta_priv->asleep &&
+ (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
+ /*
+ * This sends an asynchronous command to the device,
+ * but we can rely on it being processed before the
+ * next frame is processed -- and the next frame to
+ * this station is the one that will consume this
+ * counter.
+ * For now set the counter to just 1 since we do not
+ * support uAPSD yet.
+ */
+ iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
+ }
+
+ /*
+ * Send this frame after DTIM -- there's a special queue
+ * reserved for this for contexts that support AP mode.
+ */
+ if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+ txq_id = ctx->mcast_queue;
+ /*
+ * The microcode will clear the more data
+ * bit in the last frame it transmits.
+ */
+ hdr->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ } else
+ txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+
+ /* irqs already disabled/saved above when locking priv->lock */
+ spin_lock(&priv->sta_lock);
+
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+ spin_unlock(&priv->sta_lock);
+ goto drop_unlock;
+ }
+ seq_number = priv->stations[sta_id].tid[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl = hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
+ txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+ is_agg = true;
+ }
+ }
+
+ txq = &priv->txq[txq_id];
+ q = &txq->q;
+
+ if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
+ spin_unlock(&priv->sta_lock);
+ goto drop_unlock;
+ }
+
+ if (ieee80211_is_data_qos(fc)) {
+ priv->stations[sta_id].tid[tid].tfds_in_queue++;
+ if (!ieee80211_has_morefrags(fc))
+ priv->stations[sta_id].tid[tid].seq_number = seq_number;
+ }
+
+ spin_unlock(&priv->sta_lock);
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
+ txq->txb[q->write_ptr].skb = skb;
+ txq->txb[q->write_ptr].ctx = ctx;
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_cmd = txq->cmd[q->write_ptr];
+ out_meta = &txq->meta[q->write_ptr];
+ tx_cmd = &out_cmd->cmd.tx;
+ memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+ memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD index within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
+ out_cmd->hdr.cmd = REPLY_TX;
+ out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+
+ /* Total # bytes to be transmitted */
+ len = (u16)skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ if (info->control.hw_key)
+ iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
+ iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
+
+ iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+
+ iwl_legacy_update_stats(priv, true, fc, len);
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+ firstlen = (len + 3) & ~3;
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (firstlen != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys = pci_map_single(priv->pci_dev,
+ &out_cmd->hdr, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ txcmd_phys, firstlen, 1, 0);
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = skb->len - hdr_len;
+ if (secondlen > 0) {
+ phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
+ secondlen, PCI_DMA_TODEVICE);
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ phys_addr, secondlen,
+ 0, 0);
+ }
+
+ scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch);
+
+ /* take back ownership of DMA buffer to enable update */
+ pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
+ firstlen, PCI_DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
+
+ IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
+ le16_to_cpu(out_cmd->hdr.sequence));
+ IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
+ le16_to_cpu(tx_cmd->len));
+
+ pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
+ firstlen, PCI_DMA_BIDIRECTIONAL);
+
+ trace_iwlwifi_legacy_dev_tx(priv,
+ &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &out_cmd->hdr, firstlen,
+ skb->data + hdr_len, secondlen);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_legacy_txq_update_write_ptr(priv, txq);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually,
+ * regardless of the value of ret. "ret" only indicates
+ * whether or not we should update the write pointer.
+ */
+
+ /*
+ * Avoid atomic ops if it isn't an associated client.
+ * Also, if this is a packet for aggregation, don't
+ * increase the counter because the ucode will stop
+ * aggregation queues when their respective station
+ * goes to sleep.
+ */
+ if (sta_priv && sta_priv->client && !is_agg)
+ atomic_inc(&sta_priv->pending_frames);
+
+ if ((iwl_legacy_queue_space(q) < q->high_mark) &&
+ priv->mac80211_registered) {
+ if (wait_write_ptr) {
+ spin_lock_irqsave(&priv->lock, flags);
+ txq->need_update = 1;
+ iwl_legacy_txq_update_write_ptr(priv, txq);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ } else {
+ iwl_legacy_stop_queue(priv, txq);
+ }
+ }
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -1;
+}
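/*
 * [Editor's illustration, not part of this patch] A standalone sketch of the
 * first-buffer length computation in iwl4965_tx_skb() above: the Tx command,
 * command header and MAC header are rounded up to a dword boundary, and the
 * device is told about any padding via TX_CMD_FLG_MH_PAD_MSK. The struct
 * sizes here are stand-ins, not the real iwl_tx_cmd/iwl_cmd_header sizes.
 */
#include <stdio.h>

int main(void)
{
	unsigned demo_tx_cmd_size = 128;	/* stand-in for sizeof(struct iwl_tx_cmd) */
	unsigned demo_cmd_hdr_size = 4;		/* stand-in for sizeof(struct iwl_cmd_header) */
	unsigned hdr_len = 26;			/* QoS data MAC header length */
	unsigned len = demo_tx_cmd_size + demo_cmd_hdr_size + hdr_len;
	unsigned firstlen = (len + 3) & ~3u;	/* round up to dword boundary */

	printf("len=%u firstlen=%u pad=%u -> %s\n", len, firstlen, firstlen - len,
	       firstlen != len ? "set TX_CMD_FLG_MH_PAD_MSK" : "no padding needed");
	return 0;
}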
+
+static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
+ struct iwl_dma_ptr *ptr, size_t size)
+{
+ ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+ GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
+ struct iwl_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * iwl4965_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (priv->txq) {
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (txq_id == priv->cmd_queue)
+ iwl_legacy_cmd_queue_free(priv);
+ else
+ iwl_legacy_tx_queue_free(priv, txq_id);
+ }
+ iwl4965_free_dma_ptr(priv, &priv->kw);
+
+ iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+ /* free tx queue structure */
+ iwl_legacy_txq_mem(priv);
+}
+
+/**
+ * iwl4965_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
+ *
+ * @param priv
+ * @return error code
+ */
+int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
+{
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ /* Free all tx/cmd queues and keep-warm buffer */
+ iwl4965_hw_txq_ctx_free(priv);
+
+ ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
+ priv->hw_params.scd_bc_tbls_size);
+ if (ret) {
+ IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
+ goto error_bc_tbls;
+ }
+ /* Alloc keep-warm buffer */
+ ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
+ if (ret) {
+ IWL_ERR(priv, "Keep Warm allocation failed\n");
+ goto error_kw;
+ }
+
+ /* allocate tx queue structure */
+ ret = iwl_legacy_alloc_txq_mem(priv);
+ if (ret)
+ goto error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ iwl4965_txq_set_sched(priv, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = (txq_id == priv->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_legacy_tx_queue_init(priv,
+ &priv->txq[txq_id], slots_num,
+ txq_id);
+ if (ret) {
+ IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return ret;
+
+ error:
+ iwl4965_hw_txq_ctx_free(priv);
+ iwl4965_free_dma_ptr(priv, &priv->kw);
+ error_kw:
+ iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
+ error_bc_tbls:
+ return ret;
+}
+
+void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
+{
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ iwl4965_txq_set_sched(priv, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = txq_id == priv->cmd_queue ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
+ slots_num, txq_id);
+ }
+}
+
+/**
+ * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
+{
+ int ch, txq_id;
+ unsigned long flags;
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ iwl4965_txq_set_sched(priv, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
+ iwl_legacy_write_direct32(priv,
+ FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+ 1000))
+ IWL_ERR(priv, "Failing on timeout while stopping"
+ " DMA channel %d [0x%08x]", ch,
+ iwl_legacy_read_direct32(priv,
+ FH_TSSR_TX_STATUS_REG));
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (!priv->txq)
+ return;
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (txq_id == priv->cmd_queue)
+ iwl_legacy_cmd_queue_unmap(priv);
+ else
+ iwl_legacy_tx_queue_unmap(priv, txq_id);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
+{
+ int txq_id;
+
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
+ return txq_id;
+ return -1;
+}
+
+/**
+ * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
+ */
+static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
+ u16 txq_id)
+{
+ /* Simply stop the queue, but don't change any configuration;
+ * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+ iwl_legacy_write_prph(priv,
+ IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+ (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+/**
+ * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
+ */
+static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
+ u16 txq_id)
+{
+ u32 tbl_dw_addr;
+ u32 tbl_dw;
+ u16 scd_q2ratid;
+
+ scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+ tbl_dw_addr = priv->scd_base_addr +
+ IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+ tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
+
+ if (txq_id & 0x1)
+ tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+ else
+ tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+ iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
+
+ return 0;
+}
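/*
 * [Editor's illustration, not part of this patch] The translation table
 * written above packs two queues' RA/TID values into each 32-bit word:
 * even-numbered queues use the low 16 bits, odd-numbered queues the high
 * 16 bits, and the other half is preserved. A standalone sketch of that
 * read-modify-write; the RA/TID value itself (BUILD_RAxTID()) is only a
 * stand-in 16-bit number here.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t demo_set_q2ratid(uint32_t tbl_dw, unsigned txq_id, uint16_t ra_tid)
{
	if (txq_id & 0x1)
		return ((uint32_t)ra_tid << 16) | (tbl_dw & 0x0000FFFF);
	return ra_tid | (tbl_dw & 0xFFFF0000);
}

int main(void)
{
	uint32_t tbl_dw = 0xAAAA5555;	/* previous contents for a queue pair */

	tbl_dw = demo_set_q2ratid(tbl_dw, 10, 0x0123);	/* even queue: low half  */
	tbl_dw = demo_set_q2ratid(tbl_dw, 11, 0x4567);	/* odd queue: high half  */
	printf("translate table word: 0x%08X\n", (unsigned)tbl_dw);
	return 0;
}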
+
+/**
+ * iwl4965_txq_agg_enable - Set up & enable aggregation for selected queue
+ *
+ * NOTE: txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE,
+ * i.e. it must be one of the higher queues used for aggregation
+ */
+static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
+ int tx_fifo, int sta_id, int tid, u16 ssn_idx)
+{
+ unsigned long flags;
+ u16 ra_tid;
+ int ret;
+
+ if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IWL49_FIRST_AMPDU_QUEUE +
+ priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ IWL_WARN(priv,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWL49_FIRST_AMPDU_QUEUE,
+ IWL49_FIRST_AMPDU_QUEUE +
+ priv->cfg->base_params->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ ra_tid = BUILD_RAxTID(sta_id, tid);
+
+ /* Modify device's station table to Tx this TID */
+ ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Stop this Tx queue before configuring it */
+ iwl4965_tx_queue_stop_scheduler(priv, txq_id);
+
+ /* Map receiver-address / traffic-ID to this queue */
+ iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
+
+ /* Set this queue as a chain-building queue */
+ iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+ /* Place first TFD at index corresponding to start sequence number.
+ * Assumes that ssn_idx is valid (!= 0xFFF) */
+ priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+ /* Set up Tx window size and frame limit for this queue */
+ iwl_legacy_write_targ_mem(priv,
+ priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
+ (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+ IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+ iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+ IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+ (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
+ & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+ iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+ /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+ iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+
+int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ int sta_id;
+ int tx_fifo;
+ int txq_id;
+ int ret;
+ unsigned long flags;
+ struct iwl_tid_data *tid_data;
+
+ tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
+ if (unlikely(tx_fifo < 0))
+ return tx_fifo;
+
+ IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
+ __func__, sta->addr, tid);
+
+ sta_id = iwl_legacy_sta_id(sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Start AGG on invalid station\n");
+ return -ENXIO;
+ }
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
+ IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
+ return -ENXIO;
+ }
+
+ txq_id = iwl4965_txq_ctx_activate_free(priv);
+ if (txq_id == -1) {
+ IWL_ERR(priv, "No free aggregation queue available\n");
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ tid_data = &priv->stations[sta_id].tid[tid];
+ *ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.txq_id = txq_id;
+ iwl_legacy_set_swq_id(&priv->txq[txq_id],
+ iwl4965_get_ac_from_tid(tid), txq_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
+ sta_id, tid, *ssn);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ tid_data = &priv->stations[sta_id].tid[tid];
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(priv, "HW queue is empty\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ IWL_DEBUG_HT(priv,
+ "HW queue is NOT empty: %d packets in HW queue\n",
+ tid_data->tfds_in_queue);
+ tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return ret;
+}
+
+/**
+ * txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE
+ * priv->lock must be held by the caller
+ */
+static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+ u16 ssn_idx, u8 tx_fifo)
+{
+ if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IWL49_FIRST_AMPDU_QUEUE +
+ priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ IWL_WARN(priv,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWL49_FIRST_AMPDU_QUEUE,
+ IWL49_FIRST_AMPDU_QUEUE +
+ priv->cfg->base_params->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ iwl4965_tx_queue_stop_scheduler(priv, txq_id);
+
+ iwl_legacy_clear_bits_prph(priv,
+ IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+ priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ /* supposes that ssn_idx is valid (!= 0xFFF) */
+ iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
+
+ iwl_legacy_clear_bits_prph(priv,
+ IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_txq_ctx_deactivate(priv, txq_id);
+ iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
+
+ return 0;
+}
+
+int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ int tx_fifo_id, txq_id, sta_id, ssn;
+ struct iwl_tid_data *tid_data;
+ int write_ptr, read_ptr;
+ unsigned long flags;
+
+ tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
+ if (unlikely(tx_fifo_id < 0))
+ return tx_fifo_id;
+
+ sta_id = iwl_legacy_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
+ tid_data = &priv->stations[sta_id].tid[tid];
+ ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+ txq_id = tid_data->agg.txq_id;
+
+ switch (priv->stations[sta_id].tid[tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * This can happen if the peer stops aggregation
+ * again before we've had a chance to drain the
+ * queue we selected previously, i.e. before the
+ * session was really started completely.
+ */
+ IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+ goto turn_off;
+ case IWL_AGG_ON:
+ break;
+ default:
+ IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
+ }
+
+ write_ptr = priv->txq[txq_id].q.write_ptr;
+ read_ptr = priv->txq[txq_id].q.read_ptr;
+
+ /* The queue is not empty */
+ if (write_ptr != read_ptr) {
+ IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
+ priv->stations[sta_id].tid[tid].agg.state =
+ IWL_EMPTYING_HW_QUEUE_DELBA;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return 0;
+ }
+
+ IWL_DEBUG_HT(priv, "HW queue is empty\n");
+ turn_off:
+ priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+
+ /* do not restore/save irqs */
+ spin_unlock(&priv->sta_lock);
+ spin_lock(&priv->lock);
+
+ /*
+ * The only reason this call can fail is a queue number out of range,
+ * which can happen if the uCode is reloaded and all the station
+ * information is lost. If it is outside the range, there is no need
+ * to deactivate the uCode queue; just return "success" to allow
+ * mac80211 to clean up its own data.
+ */
+ iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ return 0;
+}
+
+int iwl4965_txq_check_empty(struct iwl_priv *priv,
+ int sta_id, u8 tid, int txq_id)
+{
+ struct iwl_queue *q = &priv->txq[txq_id].q;
+ u8 *addr = priv->stations[sta_id].sta.sta.addr;
+ struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
+ struct iwl_rxon_context *ctx;
+
+ ctx = &priv->contexts[priv->stations[sta_id].ctxid];
+
+ lockdep_assert_held(&priv->sta_lock);
+
+ switch (priv->stations[sta_id].tid[tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+ /* We are reclaiming the last packet of the */
+ /* aggregated HW queue */
+ if ((txq_id == tid_data->agg.txq_id) &&
+ (q->read_ptr == q->write_ptr)) {
+ u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+ int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
+ IWL_DEBUG_HT(priv,
+ "HW queue empty: continue DELBA flow\n");
+ iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
+ tid_data->agg.state = IWL_AGG_OFF;
+ ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ }
+ break;
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /* We are reclaiming the last packet of the queue */
+ if (tid_data->tfds_in_queue == 0) {
+ IWL_DEBUG_HT(priv,
+ "HW queue empty: continue ADDBA flow\n");
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ const u8 *addr1)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_station_priv *sta_priv;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(ctx->vif, addr1);
+ if (sta) {
+ sta_priv = (void *)sta->drv_priv;
+ /* avoid atomic ops if this isn't a client */
+ if (sta_priv->client &&
+ atomic_dec_return(&sta_priv->pending_frames) == 0)
+ ieee80211_sta_block_awake(priv->hw, sta, false);
+ }
+ rcu_read_unlock();
+}
+
+static void
+iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
+ bool is_agg)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
+
+ if (!is_agg)
+ iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
+
+ ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
+}
+
+int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ struct iwl_tx_info *tx_info;
+ int nfreed = 0;
+ struct ieee80211_hdr *hdr;
+
+ if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
+ IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id,
+ index, q->n_bd, q->write_ptr, q->read_ptr);
+ return 0;
+ }
+
+ for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
+ q->read_ptr != index;
+ q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ tx_info = &txq->txb[txq->q.read_ptr];
+ iwl4965_tx_status(priv, tx_info,
+ txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
+
+ hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+ nfreed++;
+ tx_info->skb = NULL;
+
+ priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+ }
+ return nfreed;
+}
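/*
 * [Editor's illustration, not part of this patch] A standalone sketch of the
 * reclaim walk above: read_ptr advances around the circular queue until it
 * reaches the entry just past the last completed index, releasing each TFD
 * on the way. The wrap helper is assumed to simply advance modulo the ring
 * size, like iwl_legacy_queue_inc_wrap(); the counting here releases every
 * entry, whereas the driver only counts QoS data frames in nfreed.
 */
#include <stdio.h>

static int demo_inc_wrap(int index, int n_bd)
{
	return ++index == n_bd ? 0 : index;
}

int main(void)
{
	int n_bd = 8, read_ptr = 6, completed_index = 2;
	int freed = 0;

	for (int idx = demo_inc_wrap(completed_index, n_bd); read_ptr != idx;
	     read_ptr = demo_inc_wrap(read_ptr, n_bd)) {
		printf("reclaim TFD at %d\n", read_ptr);
		freed++;
	}
	printf("released %d entries, read_ptr now %d\n", freed, read_ptr);
	return 0;
}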
+
+/**
+ * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
+ */
+static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_ht_agg *agg,
+ struct iwl_compressed_ba_resp *ba_resp)
+
+{
+ int i, sh, ack;
+ u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+ int successes = 0;
+ struct ieee80211_tx_info *info;
+ u64 bitmap, sent_bitmap;
+
+ if (unlikely(!agg->wait_for_ba)) {
+ if (unlikely(ba_resp->bitmap))
+ IWL_ERR(priv, "Received BA when not expected\n");
+ return -EINVAL;
+ }
+
+ /* Mark that the expected block-ack response arrived */
+ agg->wait_for_ba = 0;
+ IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
+ ba_resp->seq_ctl);
+
+ /* Calculate shift to align block-ack bits with our Tx window bits */
+ sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
+ if (sh < 0) /* something is wrong with the indices */
+ sh += 0x100;
+
+ if (agg->frame_count > (64 - sh)) {
+ IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size\n");
+ return -1;
+ }
+
+ /* shift block-ack bitmap to align with the start of our Tx window */
+ bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+ /* check for success or failure according to the
+ * transmitted bitmap and block-ack bitmap */
+ sent_bitmap = bitmap & agg->bitmap;
+
+ /* For each frame attempted in aggregation,
+ * update driver's record of tx frame's status. */
+ i = 0;
+ while (sent_bitmap) {
+ ack = sent_bitmap & 1ULL;
+ successes += ack;
+ IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
+ ack ? "ACK" : "NACK", i,
+ (agg->start_idx + i) & 0xff,
+ agg->start_idx + i);
+ sent_bitmap >>= 1;
+ ++i;
+ }
+
+ IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
+ (unsigned long long)bitmap);
+
+ info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = successes;
+ info->status.ampdu_len = agg->frame_count;
+ iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
+
+ return 0;
+}
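+
+/*
+ * Self-contained sketch of the accounting performed above (illustrative
+ * only, not part of the driver): shift the block-ack bitmap so it lines
+ * up with the start of our Tx window, mask it with the bitmap of frames
+ * we actually transmitted, and count the surviving bits as ACKed frames.
+ */
+static inline int iwl4965_example_count_ba_acks(u64 ba_bitmap, u64 sent,
+ int shift)
+{
+ u64 acked = (ba_bitmap >> shift) & sent;
+ int successes = 0;
+
+ while (acked) {
+ successes += acked & 1ULL;
+ acked >>= 1;
+ }
+ return successes;
+}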
+
+/**
+ * iwl4965_hwrate_to_tx_control - translate uCode response to mac80211 tx
+ * status control values
+ */
+void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+ info->antenna_sel_tx =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ r->flags |= IEEE80211_TX_RC_MCS;
+ if (rate_n_flags & RATE_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ r->flags |= IEEE80211_TX_RC_DUP_DATA;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+ struct iwl_tx_queue *txq = NULL;
+ struct iwl_ht_agg *agg;
+ int index;
+ int sta_id;
+ int tid;
+ unsigned long flags;
+
+ /* "flow" corresponds to Tx queue */
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+ /* "ssn" is start of block-ack Tx window, corresponds to index
+ * (in Tx queue's circular buffer) of first TFD/frame in window */
+ u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+ if (scd_flow >= priv->hw_params.max_txq_num) {
+ IWL_ERR(priv,
+ "BUG_ON scd_flow is bigger than number of queues\n");
+ return;
+ }
+
+ txq = &priv->txq[scd_flow];
+ sta_id = ba_resp->sta_id;
+ tid = ba_resp->tid;
+ agg = &priv->stations[sta_id].tid[tid].agg;
+ if (unlikely(agg->txq_id != scd_flow)) {
+ /*
+ * FIXME: this is a uCode bug which needs to be addressed;
+ * log the information and return for now.
+ * Since it can happen very often, and in order not to fill
+ * the syslog, don't enable this logging by default.
+ */
+ IWL_DEBUG_TX_REPLY(priv,
+ "BA scd_flow %d does not match txq_id %d\n",
+ scd_flow, agg->txq_id);
+ return;
+ }
+
+ /* Find index just before block-ack window */
+ index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
+ IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+ "sta_id = %d\n",
+ agg->wait_for_ba,
+ (u8 *) &ba_resp->sta_addr_lo32,
+ ba_resp->sta_id);
+ IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
+ "scd_flow = "
+ "%d, scd_ssn = %d\n",
+ ba_resp->tid,
+ ba_resp->seq_ctl,
+ (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+ ba_resp->scd_flow,
+ ba_resp->scd_ssn);
+ IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
+ agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ /* Update driver's record of ACK vs. not for each frame in window */
+ iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
+
+ /* Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+ /* calculate mac80211 ampdu sw queue to wake */
+ int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
+ iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+ if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
+ priv->mac80211_registered &&
+ (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
+ iwl_legacy_wake_queue(priv, txq);
+
+ iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
+ }
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+const char *iwl4965_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
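+ /*
+ * The helpers above expand, e.g., TX_STATUS_FAIL(DRAIN_FLOW) into
+ * "case TX_STATUS_FAIL_DRAIN_FLOW: return "DRAIN_FLOW";" -- the '#'
+ * operator stringifies the argument, so each status code maps to its
+ * name without a hand-written string table.
+ */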
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(FIFO_UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+ TX_STATUS_FAIL(PASSIVE_NO_RX);
+ TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+ }
+
+ return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
new file mode 100644
index 000000000000..001d148feb94
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
@@ -0,0 +1,166 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-4965-hw.h"
+#include "iwl-4965.h"
+#include "iwl-4965-calib.h"
+
+#define IWL_AC_UNSET -1
+
+/**
+ * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ * using sample data 100 bytes apart. If these sample points are good,
+ * it's a pretty good bet that everything between them is good, too.
+ */
+static int
+iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
+{
+ u32 val;
+ int ret = 0;
+ u32 errcnt = 0;
+ u32 i;
+
+ IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+ for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IWL_DL_IO is set */
+ iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ i + IWL4965_RTC_INST_LOWER_BOUND);
+ val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 3)
+ break;
+ }
+ }
+
+ return ret;
+}
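+
+/*
+ * Host-side model of the sampling strategy above (illustrative only; the
+ * real check reads the card through HBUS_TARG_MEM_RADDR/RDAT): compare
+ * one 32-bit word every 100 bytes and give up after a few mismatches.
+ */
+static inline int iwl4965_example_sparse_compare(const u32 *ref,
+ const u32 *img, u32 len)
+{
+ u32 i, errcnt = 0;
+
+ for (i = 0; i < len; i += 100) {
+ if (ref[i / sizeof(u32)] != img[i / sizeof(u32)]) {
+ if (++errcnt >= 3)
+ break;
+ }
+ }
+ return errcnt ? -EIO : 0;
+}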
+
+/**
+ * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
+ * looking at all data.
+ */
+static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
+ u32 len)
+{
+ u32 val;
+ u32 save_len = len;
+ int ret = 0;
+ u32 errcnt;
+
+ IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+
+ iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ IWL4965_RTC_INST_LOWER_BOUND);
+
+ errcnt = 0;
+ for (; len > 0; len -= sizeof(u32), image++) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IWL_DL_IO is set */
+ val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ IWL_ERR(priv, "uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n",
+ save_len - len, val, le32_to_cpu(*image));
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 20)
+ break;
+ }
+ }
+
+ if (!errcnt)
+ IWL_DEBUG_INFO(priv,
+ "ucode image in INSTRUCTION memory is good\n");
+
+ return ret;
+}
+
+/**
+ * iwl4965_verify_ucode - determine which instruction image is in SRAM,
+ * and verify its contents
+ */
+int iwl4965_verify_ucode(struct iwl_priv *priv)
+{
+ __le32 *image;
+ u32 len;
+ int ret;
+
+ /* Try bootstrap */
+ image = (__le32 *)priv->ucode_boot.v_addr;
+ len = priv->ucode_boot.len;
+ ret = iwl4965_verify_inst_sparse(priv, image, len);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try initialize */
+ image = (__le32 *)priv->ucode_init.v_addr;
+ len = priv->ucode_init.len;
+ ret = iwl4965_verify_inst_sparse(priv, image, len);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try runtime/protocol */
+ image = (__le32 *)priv->ucode_code.v_addr;
+ len = priv->ucode_code.len;
+ ret = iwl4965_verify_inst_sparse(priv, image, len);
+ if (!ret) {
+ IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+ /* Since nothing seems to match, show first several data entries in
+ * instruction SRAM, so maybe visual inspection will give a clue.
+ * Selection of bootstrap image (vs. other images) is arbitrary. */
+ image = (__le32 *)priv->ucode_boot.v_addr;
+ len = priv->ucode_boot.len;
+ ret = iwl4965_verify_inst_full(priv, image, len);
+
+ return ret;
+}
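+
+/*
+ * Only one of the three images can be resident in instruction SRAM at a
+ * time, so the first sparse check above that succeeds identifies which
+ * image the device is currently holding and the function returns early.
+ */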
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index b207e3e9299f..f5433c74b845 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -43,11 +43,11 @@
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
-#include "iwl-agn-calib.h"
+#include "iwl-4965-calib.h"
#include "iwl-sta.h"
-#include "iwl-agn-led.h"
-#include "iwl-agn.h"
-#include "iwl-agn-debugfs.h"
+#include "iwl-4965-led.h"
+#include "iwl-4965.h"
+#include "iwl-4965-debugfs.h"
static int iwl4965_send_tx_power(struct iwl_priv *priv);
static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -73,11 +73,11 @@ static int iwl4965_verify_bsm(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
/* verify BSM SRAM contents */
- val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
+ val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
for (reg = BSM_SRAM_LOWER_BOUND;
reg < BSM_SRAM_LOWER_BOUND + len;
reg += sizeof(u32), image++) {
- val = iwl_read_prph(priv, reg);
+ val = iwl_legacy_read_prph(priv, reg);
if (val != le32_to_cpu(*image)) {
IWL_ERR(priv, "BSM uCode verification failed at "
"addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
@@ -157,33 +157,34 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
inst_len = priv->ucode_init.len;
data_len = priv->ucode_init_data.len;
- iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
- iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
/* Fill BSM memory with bootstrap instructions */
for (reg_offset = BSM_SRAM_LOWER_BOUND;
reg_offset < BSM_SRAM_LOWER_BOUND + len;
reg_offset += sizeof(u32), image++)
- _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));
+ _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
ret = iwl4965_verify_bsm(priv);
if (ret)
return ret;
/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
- iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
- iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
- iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+ iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
+ iwl_legacy_write_prph(priv,
+ BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
+ iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
/* Load bootstrap code into instruction SRAM now,
* to prepare to load "initialize" uCode */
- iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+ iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
/* Wait for load of bootstrap uCode to finish */
for (i = 0; i < 100; i++) {
- done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
+ done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
if (!(done & BSM_WR_CTRL_REG_BIT_START))
break;
udelay(10);
@@ -197,7 +198,8 @@ static int iwl4965_load_bsm(struct iwl_priv *priv)
/* Enable future boot loads whenever power management unit triggers it
* (e.g. when powering back up after power-save shutdown) */
- iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+ iwl_legacy_write_prph(priv,
+ BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
return 0;
@@ -223,14 +225,14 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
pdata = priv->ucode_data_backup.p_addr >> 4;
/* Tell bootstrap uCode where to find image to load */
- iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
priv->ucode_data.len);
/* Inst byte count must be last to set up, bit 31 signals uCode
* that all new ptr/size info is in place */
- iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
priv->ucode_code.len | BSM_DRAM_INST_LOAD);
IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
@@ -250,18 +252,10 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
*/
static void iwl4965_init_alive_start(struct iwl_priv *priv)
{
- /* Check alive response for "valid" sign from uCode */
- if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
- goto restart;
- }
-
/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
* This is a paranoid check, because we would not have gotten the
* "initialize" alive if code weren't properly loaded. */
- if (iwl_verify_ucode(priv)) {
+ if (iwl4965_verify_ucode(priv)) {
/* Runtime instruction load was bad;
* take it all the way back down so we can try again */
IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
@@ -287,7 +281,7 @@ restart:
queue_work(priv->workqueue, &priv->restart);
}
-static bool is_ht40_channel(__le32 rxon_flags)
+static bool iw4965_is_ht40_channel(__le32 rxon_flags)
{
int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
>> RXON_FLG_CHANNEL_MODE_POS;
@@ -295,23 +289,6 @@ static bool is_ht40_channel(__le32 rxon_flags)
(chan_mod == CHANNEL_MODE_MIXED));
}
-/*
- * EEPROM handlers
- */
-static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv)
-{
- return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
-}
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under priv->lock and mac access
- */
-static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
- iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
-}
-
static void iwl4965_nic_config(struct iwl_priv *priv)
{
unsigned long flags;
@@ -319,22 +296,23 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+ radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
/* write radio config values to register */
if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
EEPROM_RF_CFG_DASH_MSK(radio_cfg));
/* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
priv->calib_info = (struct iwl_eeprom_calib_info *)
- iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);
+ iwl_legacy_eeprom_query_addr(priv,
+ EEPROM_4965_CALIB_TXPOWER_OFFSET);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -347,7 +325,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
- iwl_is_any_associated(priv)) {
+ iwl_legacy_is_any_associated(priv)) {
struct iwl_calib_diff_gain_cmd cmd;
/* clear data for chain noise calibration algorithm */
@@ -364,7 +342,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
cmd.diff_gain_a = 0;
cmd.diff_gain_b = 0;
cmd.diff_gain_c = 0;
- if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
+ if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
sizeof(cmd), &cmd))
IWL_ERR(priv,
"Could not send REPLY_PHY_CALIBRATION_CMD\n");
@@ -373,237 +351,6 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
}
}
-static void iwl4965_gain_computation(struct iwl_priv *priv,
- u32 *average_noise,
- u16 min_average_noise_antenna_i,
- u32 min_average_noise,
- u8 default_chain)
-{
- int i, ret;
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
-
- data->delta_gain_code[min_average_noise_antenna_i] = 0;
-
- for (i = default_chain; i < NUM_RX_CHAINS; i++) {
- s32 delta_g = 0;
-
- if (!(data->disconn_array[i]) &&
- (data->delta_gain_code[i] ==
- CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
- delta_g = average_noise[i] - min_average_noise;
- data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
- data->delta_gain_code[i] =
- min(data->delta_gain_code[i],
- (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
-
- data->delta_gain_code[i] =
- (data->delta_gain_code[i] | (1 << 2));
- } else {
- data->delta_gain_code[i] = 0;
- }
- }
- IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
- data->delta_gain_code[0],
- data->delta_gain_code[1],
- data->delta_gain_code[2]);
-
- /* Differential gain gets sent to uCode only once */
- if (!data->radio_write) {
- struct iwl_calib_diff_gain_cmd cmd;
- data->radio_write = 1;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
- cmd.diff_gain_a = data->delta_gain_code[0];
- cmd.diff_gain_b = data->delta_gain_code[1];
- cmd.diff_gain_c = data->delta_gain_code[2];
- ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_DEBUG_CALIB(priv, "fail sending cmd "
- "REPLY_PHY_CALIBRATION_CMD\n");
-
- /* TODO we might want recalculate
- * rx_chain in rxon cmd */
-
- /* Mark so we run this algo only once! */
- data->state = IWL_CHAIN_NOISE_CALIBRATED;
- }
-}
-
-static void iwl4965_bg_txpower_work(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- txpower_work);
-
- /* If a scan happened to start before we got here
- * then just return; the statistics notification will
- * kick off another scheduled work to compensate for
- * any temperature delta we missed here. */
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
-
- /* Regardless of if we are associated, we must reconfigure the
- * TX power since frames can be sent on non-radar channels while
- * not associated */
- iwl4965_send_tx_power(priv);
-
- /* Update last_temperature to keep is_calib_needed from running
- * when it isn't needed... */
- priv->last_temperature = priv->temperature;
-
- mutex_unlock(&priv->mutex);
-}
-
-/*
- * Acquire priv->lock before calling this function !
- */
-static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
-{
- iwl_write_direct32(priv, HBUS_TARG_WRPTR,
- (index & 0xff) | (txq_id << 8));
- iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-/**
- * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
- * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
- * @scd_retry: (1) Indicates queue will be used in aggregation mode
- *
- * NOTE: Acquire priv->lock before calling this function !
- */
-static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry)
-{
- int txq_id = txq->q.id;
-
- /* Find out whether to activate Tx queue */
- int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
- /* Set up and activate */
- iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
- (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
- (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
- (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
- IWL49_SCD_QUEUE_STTS_REG_MSK);
-
- txq->sched_retry = scd_retry;
-
- IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
- active ? "Activate" : "Deactivate",
- scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
-}
-
-static const s8 default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
- IWL49_CMD_FIFO_NUM,
- IWL_TX_FIFO_UNUSED,
- IWL_TX_FIFO_UNUSED,
-};
-
-static int iwl4965_alive_notify(struct iwl_priv *priv)
-{
- u32 a;
- unsigned long flags;
- int i, chan;
- u32 reg_val;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Clear 4965's internal Tx Scheduler data base */
- priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
- a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
- for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr +
- IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
- iwl_write_targ_mem(priv, a, 0);
-
- /* Tel 4965 where to find Tx byte count tables */
- iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
- priv->scd_bc_tbls.dma >> 10);
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- /* Disable chain mode for all queues */
- iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
-
- /* Initialize each Tx queue (including the command queue) */
- for (i = 0; i < priv->hw_params.max_txq_num; i++) {
-
- /* TFD circular buffer read/write indexes */
- iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
- iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
-
- /* Max Tx Window size for Scheduler-ACK mode */
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
- (SCD_WIN_SIZE <<
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
- /* Frame limit */
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- (SCD_FRAME_LIMIT <<
- IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
- }
- iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
- (1 << priv->hw_params.max_txq_num) - 1);
-
- /* Activate all Tx DMA/FIFO channels */
- priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));
-
- iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
-
- /* make sure all queue are not stopped */
- memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
- for (i = 0; i < 4; i++)
- atomic_set(&priv->queue_stop_count[i], 0);
-
- /* reset to 0 to enable all the queue first */
- priv->txq_ctx_active_msk = 0;
- /* Map each Tx/cmd queue to its corresponding fifo */
- BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
-
- for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
- int ac = default_queue_to_tx_fifo[i];
-
- iwl_txq_ctx_activate(priv, i);
-
- if (ac == IWL_TX_FIFO_UNUSED)
- continue;
-
- iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
.min_nrg_cck = 97,
.max_nrg_cck = 0, /* not used, set to 0 */
@@ -665,15 +412,15 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
- priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
+ priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
iwl4965_set_ct_threshold(priv);
priv->hw_params.sens = &iwl4965_sensitivity;
- priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+ priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
return 0;
}
@@ -1157,9 +904,9 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
is_ht40);
- ch_info = iwl_get_channel_info(priv, priv->band, channel);
+ ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
- if (!is_channel_valid(ch_info))
+ if (!iwl_legacy_is_channel_valid(ch_info))
return -EINVAL;
/* get txatten group, used to select 1) thermal txpower adjustment
@@ -1377,17 +1124,13 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
u8 ctrl_chan_high = 0;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- if (test_bit(STATUS_SCANNING, &priv->status)) {
- /* If this gets hit a lot, switch it to a BUG() and catch
- * the stack trace to find out who is calling this during
- * a scan. */
- IWL_WARN(priv, "TX Power requested while scanning!\n");
+ if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
+ "TX Power requested while scanning!\n"))
return -EAGAIN;
- }
band = priv->band == IEEE80211_BAND_2GHZ;
- is_ht40 = is_ht40_channel(ctx->active.flags);
+ is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
ctrl_chan_high = 1;
@@ -1401,7 +1144,8 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
if (ret)
goto out;
- ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
+ ret = iwl_legacy_send_cmd_pdu(priv,
+ REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
out:
return ret;
@@ -1412,8 +1156,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
{
int ret = 0;
struct iwl4965_rxon_assoc_cmd rxon_assoc;
- const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
- const struct iwl_rxon_cmd *rxon2 = &ctx->active;
+ const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
+ const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
if ((rxon1->flags == rxon2->flags) &&
(rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1439,7 +1183,7 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
ctx->staging.ofdm_ht_dual_stream_basic_rates;
rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
- ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
+ ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
sizeof(rxon_assoc), &rxon_assoc, NULL);
if (ret)
return ret;
@@ -1447,6 +1191,143 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
return ret;
}
+static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ /* cast away the const for active_rxon in this function */
+ struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
+ int ret;
+ bool new_assoc =
+ !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+
+ if (!iwl_legacy_is_alive(priv))
+ return -EBUSY;
+
+ if (!ctx->is_active)
+ return 0;
+
+ /* always get timestamp with Rx frame */
+ ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+
+ ret = iwl_legacy_check_rxon_cmd(priv, ctx);
+ if (ret) {
+ IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * receive commit_rxon request
+ * abort any previous channel switch if still in process
+ */
+ if (priv->switch_rxon.switch_in_progress &&
+ (priv->switch_rxon.channel != ctx->staging.channel)) {
+ IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
+ le16_to_cpu(priv->switch_rxon.channel));
+ iwl_legacy_chswitch_done(priv, false);
+ }
+
+ /* If we don't need to send a full RXON, we can use
+ * iwl_rxon_assoc_cmd which is used to reconfigure filter
+ * and other flags for the current radio configuration. */
+ if (!iwl_legacy_full_rxon_required(priv, ctx)) {
+ ret = iwl_legacy_send_rxon_assoc(priv, ctx);
+ if (ret) {
+ IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
+ return ret;
+ }
+
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ iwl_legacy_print_rx_config_cmd(priv, ctx);
+ return 0;
+ }
+
+ /* If we are currently associated and the new config requires
+ * an RXON_ASSOC and the new config wants the associated mask enabled,
+ * we must clear the associated from the active configuration
+ * before we apply the new config */
+ if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
+ IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+ ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
+ sizeof(struct iwl_legacy_rxon_cmd),
+ active_rxon);
+
+ /* If the mask clearing failed then we set
+ * active_rxon back to what it was previously */
+ if (ret) {
+ active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+ IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
+ return ret;
+ }
+ iwl_legacy_clear_ucode_stations(priv, ctx);
+ iwl_legacy_restore_stations(priv, ctx);
+ ret = iwl4965_restore_default_wep_keys(priv, ctx);
+ if (ret) {
+ IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ IWL_DEBUG_INFO(priv, "Sending RXON\n"
+ "* with%s RXON_FILTER_ASSOC_MSK\n"
+ "* channel = %d\n"
+ "* bssid = %pM\n",
+ (new_assoc ? "" : "out"),
+ le16_to_cpu(ctx->staging.channel),
+ ctx->staging.bssid_addr);
+
+ iwl_legacy_set_rxon_hwcrypto(priv, ctx,
+ !priv->cfg->mod_params->sw_crypto);
+
+ /* Apply the new configuration
+ * RXON unassoc clears the station table in uCode so restoration of
+ * stations is needed after it (the RXON command) completes
+ */
+ if (!new_assoc) {
+ ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
+ sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
+ if (ret) {
+ IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
+ return ret;
+ }
+ IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ iwl_legacy_clear_ucode_stations(priv, ctx);
+ iwl_legacy_restore_stations(priv, ctx);
+ ret = iwl4965_restore_default_wep_keys(priv, ctx);
+ if (ret) {
+ IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
+ }
+ if (new_assoc) {
+ priv->start_calib = 0;
+ /* Apply the new configuration.
+ * RXON assoc doesn't clear the station table in uCode.
+ */
+ ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
+ sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
+ if (ret) {
+ IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
+ return ret;
+ }
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ }
+ iwl_legacy_print_rx_config_cmd(priv, ctx);
+
+ iwl4965_init_sensitivity(priv);
+
+ /* If we issue a new RXON command which required a tune then we must
+ * send a new TXPOWER command or we won't be able to Tx any frames */
+ ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
+ if (ret) {
+ IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
struct ieee80211_channel_switch *ch_switch)
{
@@ -1465,7 +1346,7 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
struct ieee80211_vif *vif = ctx->vif;
band = priv->band == IEEE80211_BAND_2GHZ;
- is_ht40 = is_ht40_channel(ctx->staging.flags);
+ is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
if (is_ht40 &&
(ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
@@ -1496,19 +1377,19 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
else {
switch_time_in_usec =
vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
- ucode_switch_time = iwl_usecs_to_beacons(priv,
+ ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
switch_time_in_usec,
beacon_interval);
- cmd.switch_time = iwl_add_beacon_time(priv,
+ cmd.switch_time = iwl_legacy_add_beacon_time(priv,
priv->ucode_beacon_time,
ucode_switch_time,
beacon_interval);
}
IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
cmd.switch_time);
- ch_info = iwl_get_channel_info(priv, priv->band, ch);
+ ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
if (ch_info)
- cmd.expect_beacon = is_channel_radar(ch_info);
+ cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
else {
IWL_ERR(priv, "invalid channel switch from %u to %u\n",
ctx->active.channel, ch);
@@ -1525,7 +1406,8 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
priv->switch_rxon.channel = cmd.channel;
priv->switch_rxon.switch_in_progress = true;
- return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
+ return iwl_legacy_send_cmd_pdu(priv,
+ REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
}
/**
@@ -1554,22 +1436,6 @@ static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
}
/**
- * sign_extend - Sign extend a value using specified bit as sign-bit
- *
- * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
- * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
- *
- * @param oper value to sign extend
- * @param index 0 based bit index (0<=index<32) to sign bit
- */
-static s32 sign_extend(u32 oper, int index)
-{
- u8 shift = 31 - index;
-
- return (s32)(oper << shift) >> shift;
-}
-
-/**
* iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
* @statistics: Provides the temperature reading from the uCode
*
@@ -1583,7 +1449,7 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
u32 R4;
if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
- (priv->_agn.statistics.flag &
+ (priv->_4965.statistics.flag &
STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
@@ -1606,9 +1472,9 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
* "initialize" ALIVE response.
*/
if (!test_bit(STATUS_TEMPERATURE, &priv->status))
- vt = sign_extend(R4, 23);
+ vt = sign_extend32(R4, 23);
else
- vt = sign_extend(le32_to_cpu(priv->_agn.statistics.
+ vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
general.common.temperature), 23);
IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
@@ -1693,7 +1559,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
}
priv->temperature = temp;
- iwl_tt_handler(priv);
set_bit(STATUS_TEMPERATURE, &priv->status);
if (!priv->disable_tx_power_cal &&
@@ -1702,152 +1567,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
queue_work(priv->workqueue, &priv->txpower_work);
}
-/**
- * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
- */
-static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
- u16 txq_id)
-{
- /* Simply stop the queue, but don't change any configuration;
- * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_write_prph(priv,
- IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
- (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
- (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-/**
- * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
- * priv->lock must be held by the caller
- */
-static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
-{
- if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL49_FIRST_AMPDU_QUEUE,
- IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
- iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- /* supposes that ssn_idx is valid (!= 0xFFF) */
- iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(priv, txq_id);
- iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
- return 0;
-}
-
-/**
- * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
- */
-static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
- u16 txq_id)
-{
- u32 tbl_dw_addr;
- u32 tbl_dw;
- u16 scd_q2ratid;
-
- scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
- tbl_dw_addr = priv->scd_base_addr +
- IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
- tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
-
- if (txq_id & 0x1)
- tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
- else
- tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
- iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
- return 0;
-}
-
-
-/**
- * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
- *
- * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
- * i.e. it must be one of the higher queues used for aggregation
- */
-static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
- int tx_fifo, int sta_id, int tid, u16 ssn_idx)
-{
- unsigned long flags;
- u16 ra_tid;
- int ret;
-
- if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL49_FIRST_AMPDU_QUEUE,
- IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- ra_tid = BUILD_RAxTID(sta_id, tid);
-
- /* Modify device's station table to Tx this TID */
- ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Stop this Tx queue before configuring it */
- iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
- /* Map receiver-address / traffic-ID to this queue */
- iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
- /* Set this queue as a chain-building queue */
- iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
- /* Place first TFD at index corresponding to start sequence number.
- * Assumes that ssn_idx is valid (!= 0xFFF) */
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- /* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(priv,
- priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
- (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
- (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
- & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
- iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-
- /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-
static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
{
switch (cmd_id) {
@@ -1858,7 +1577,8 @@ static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
}
}
-static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
+static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
+ u8 *data)
{
struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
addsta->mode = cmd->mode;
@@ -1911,16 +1631,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
status = le16_to_cpu(frame_status[0].status);
idx = start_idx;
- /* FIXME: code repetition */
IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
agg->frame_count, agg->start_idx, idx);
info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- info->flags |= iwl_tx_status_to_mac80211(status);
- iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
- /* FIXME: code repetition end */
+ info->flags |= iwl4965_tx_status_to_mac80211(status);
+ iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
status & 0xff, tx_resp->failure_frame);
@@ -1947,7 +1665,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
agg->frame_count, txq_id, idx);
- hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
+ hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
if (!hdr) {
IWL_ERR(priv,
"BUG_ON idx doesn't point to valid skb"
@@ -1998,15 +1716,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
return 0;
}
-static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
+static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
{
int i;
int start = 0;
int ret = IWL_INVALID_STATION;
unsigned long flags;
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
- (priv->iw_mode == NL80211_IFTYPE_AP))
+ if ((priv->iw_mode == NL80211_IFTYPE_ADHOC))
start = IWL_STA_ID;
if (is_broadcast_ether_addr(addr))
@@ -2042,13 +1759,13 @@ static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
return ret;
}
-static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
+static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
{
if (priv->iw_mode == NL80211_IFTYPE_STATION) {
return IWL_AP_ID;
} else {
u8 *da = ieee80211_get_DA(hdr);
- return iwl_find_station(priv, da);
+ return iwl4965_find_station(priv, da);
}
}
@@ -2073,7 +1790,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
u8 *qc = NULL;
unsigned long flags;
- if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
+ if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
"is out of range [0-%d] %d %d\n", txq_id,
index, txq->q.n_bd, txq->q.write_ptr,
@@ -2081,16 +1798,17 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
return;
}
+ txq->time_stamp = jiffies;
info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
memset(&info->status, 0, sizeof(info->status));
- hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
+ hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
if (ieee80211_is_data_qos(hdr->frame_control)) {
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & 0xf;
}
- sta_id = iwl_get_ra_sta_id(priv, hdr);
+ sta_id = iwl4965_get_ra_sta_id(priv, hdr);
if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
IWL_ERR(priv, "Station not known\n");
return;
@@ -2107,118 +1825,89 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
/* check if BAR is needed */
- if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
+ if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
if (txq->q.read_ptr != (scd_ssn & 0xff)) {
- index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+ index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
+ txq->q.n_bd);
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
"%d index %d\n", scd_ssn , index);
- freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
+ freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
if (qc)
- iwl_free_tfds_in_queue(priv, sta_id,
+ iwl4965_free_tfds_in_queue(priv, sta_id,
tid, freed);
if (priv->mac80211_registered &&
- (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
- if (agg->state == IWL_AGG_OFF)
- iwl_wake_queue(priv, txq_id);
- else
- iwl_wake_queue(priv, txq->swq_id);
- }
+ (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
+ && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
+ iwl_legacy_wake_queue(priv, txq);
}
} else {
info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags |= iwl_tx_status_to_mac80211(status);
- iwlagn_hwrate_to_tx_control(priv,
+ info->flags |= iwl4965_tx_status_to_mac80211(status);
+ iwl4965_hwrate_to_tx_control(priv,
le32_to_cpu(tx_resp->rate_n_flags),
info);
IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
"rate_n_flags 0x%x retries %d\n",
txq_id,
- iwl_get_tx_fail_reason(status), status,
+ iwl4965_get_tx_fail_reason(status), status,
le32_to_cpu(tx_resp->rate_n_flags),
tx_resp->failure_frame);
- freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
+ freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+ iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
else if (sta_id == IWL_INVALID_STATION)
IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
if (priv->mac80211_registered &&
- (iwl_queue_space(&txq->q) > txq->q.low_mark))
- iwl_wake_queue(priv, txq_id);
+ (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
+ iwl_legacy_wake_queue(priv, txq);
}
if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
+ iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
- iwl_check_abort_status(priv, tx_resp->frame_count, status);
+ iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
-static int iwl4965_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
+static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
{
- /* data from PHY/DSP regarding signal strength, etc.,
- * contents are always there, not configurable by host. */
- struct iwl4965_rx_non_cfg_phy *ncphy =
- (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
- u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
- >> IWL49_AGC_DB_POS;
-
- u32 valid_antennae =
- (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
- >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
- u8 max_rssi = 0;
- u32 i;
-
- /* Find max rssi among 3 possible receivers.
- * These values are measured by the digital signal processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's automatic gain control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info. */
- for (i = 0; i < 3; i++)
- if (valid_antennae & (1 << i))
- max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
-
- IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
- ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
- max_rssi, agc);
-
- /* dBm = max_rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWLAGN_RSSI_OFFSET;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
+ u8 rate __maybe_unused =
+ iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+ IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
+ "tsf:0x%.8x%.8x rate:%d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf),
+ le32_to_cpu(beacon->low_tsf), rate);
+
+ priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
-
/* Set up 4965-specific Rx frame reply handlers */
static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
{
/* Legacy Rx frames */
- priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
+ priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
/* Tx response */
priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
-}
-
-static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
-{
- INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
-{
- cancel_work_sync(&priv->txpower_work);
+ priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
}
static struct iwl_hcmd_ops iwl4965_hcmd = {
.rxon_assoc = iwl4965_send_rxon_assoc,
- .commit_rxon = iwlagn_commit_rxon,
- .set_rxon_chain = iwlagn_set_rxon_chain,
- .send_bt_config = iwl_send_bt_config,
+ .commit_rxon = iwl4965_commit_rxon,
+ .set_rxon_chain = iwl4965_set_rxon_chain,
};
static void iwl4965_post_scan(struct iwl_priv *priv)
@@ -2230,42 +1919,166 @@ static void iwl4965_post_scan(struct iwl_priv *priv)
* performing the scan, fire one off if needed
*/
if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
- iwlcore_commit_rxon(priv, ctx);
+ iwl_legacy_commit_rxon(priv, ctx);
+}
+
+static void iwl4965_post_associate(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct ieee80211_vif *vif = ctx->vif;
+ struct ieee80211_conf *conf = NULL;
+ int ret = 0;
+
+ if (!vif || !priv->is_open)
+ return;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ iwl_legacy_scan_cancel_timeout(priv, 200);
+
+ conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
+
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwl_legacy_commit_rxon(priv, ctx);
+
+ ret = iwl_legacy_send_rxon_timing(priv, ctx);
+ if (ret)
+ IWL_WARN(priv, "RXON timing - "
+ "Attempting to continue.\n");
+
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+ iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+ ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+
+ IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
+ vif->bss_conf.aid, vif->bss_conf.beacon_int);
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+
+ iwl_legacy_commit_rxon(priv, ctx);
+
+ IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
+ vif->bss_conf.aid, ctx->active.bssid_addr);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ iwl4965_send_beacon_cmd(priv);
+ break;
+ default:
+ IWL_ERR(priv, "%s Should not be called in %d mode\n",
+ __func__, vif->type);
+ break;
+ }
+
+ /* The chain noise calibration will enable PM upon completion.
+ * If chain noise has already been run, then we need to enable
+ * power management here */
+ if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
+ iwl_legacy_power_update_mode(priv, false);
+
+ /* Enable Rx differential gain and sensitivity calibrations */
+ iwl4965_chain_noise_reset(priv);
+ priv->start_calib = 1;
+}
+
+static void iwl4965_config_ap(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct ieee80211_vif *vif = ctx->vif;
+ int ret = 0;
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ /* The following should be done only at AP bring up */
+ if (!iwl_legacy_is_associated_ctx(ctx)) {
+
+ /* RXON - unassoc (to set timing command) */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwl_legacy_commit_rxon(priv, ctx);
+
+ /* RXON Timing */
+ ret = iwl_legacy_send_rxon_timing(priv, ctx);
+ if (ret)
+ IWL_WARN(priv, "RXON timing failed - "
+ "Attempting to continue.\n");
+
+ /* AP has all antennas */
+ priv->chain_noise_data.active_chains =
+ priv->hw_params.valid_rx_ant;
+ iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+ ctx->staging.assoc_id = 0;
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |=
+ RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &=
+ ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |=
+ RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &=
+ ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+ /* need to send beacon cmd before committing assoc RXON! */
+ iwl4965_send_beacon_cmd(priv);
+ /* restore RXON assoc */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ iwl_legacy_commit_rxon(priv, ctx);
+ }
+ iwl4965_send_beacon_cmd(priv);
}
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
.get_hcmd_size = iwl4965_get_hcmd_size,
.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
- .chain_noise_reset = iwl4965_chain_noise_reset,
- .gain_computation = iwl4965_gain_computation,
- .tx_cmd_protection = iwlcore_tx_cmd_protection,
- .calc_rssi = iwl4965_calc_rssi,
- .request_scan = iwlagn_request_scan,
+ .request_scan = iwl4965_request_scan,
.post_scan = iwl4965_post_scan,
};
static struct iwl_lib_ops iwl4965_lib = {
.set_hw_params = iwl4965_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
- .txq_set_sched = iwl4965_txq_set_sched,
- .txq_agg_enable = iwl4965_txq_agg_enable,
- .txq_agg_disable = iwl4965_txq_agg_disable,
- .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
- .txq_free_tfd = iwl_hw_txq_free_tfd,
- .txq_init = iwl_hw_tx_queue_init,
+ .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = iwl4965_hw_txq_free_tfd,
+ .txq_init = iwl4965_hw_tx_queue_init,
.rx_handler_setup = iwl4965_rx_handler_setup,
- .setup_deferred_work = iwl4965_setup_deferred_work,
- .cancel_deferred_work = iwl4965_cancel_deferred_work,
.is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
- .alive_notify = iwl4965_alive_notify,
.init_alive_start = iwl4965_init_alive_start,
.load_ucode = iwl4965_load_bsm,
- .dump_nic_event_log = iwl_dump_nic_event_log,
- .dump_nic_error_log = iwl_dump_nic_error_log,
- .dump_fh = iwl_dump_fh,
+ .dump_nic_event_log = iwl4965_dump_nic_event_log,
+ .dump_nic_error_log = iwl4965_dump_nic_error_log,
+ .dump_fh = iwl4965_dump_fh,
.set_channel_switch = iwl4965_hw_channel_switch,
.apm_ops = {
- .init = iwl_apm_init,
+ .init = iwl_legacy_apm_init,
.config = iwl4965_nic_config,
},
.eeprom_ops = {
@@ -2278,37 +2091,58 @@ static struct iwl_lib_ops iwl4965_lib = {
EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
},
- .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
- .release_semaphore = iwlcore_eeprom_release_semaphore,
- .calib_version = iwl4965_eeprom_calib_version,
- .query_addr = iwlcore_eeprom_query_addr,
+ .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
+ .release_semaphore = iwl4965_eeprom_release_semaphore,
},
.send_tx_power = iwl4965_send_tx_power,
- .update_chain_flags = iwl_update_chain_flags,
- .post_associate = iwl_post_associate,
- .config_ap = iwl_config_ap,
- .isr = iwl_isr_legacy,
+ .update_chain_flags = iwl4965_update_chain_flags,
.temp_ops = {
.temperature = iwl4965_temperature_calib,
},
- .manage_ibss_station = iwlagn_manage_ibss_station,
- .update_bcast_stations = iwl_update_bcast_stations,
.debugfs_ops = {
- .rx_stats_read = iwl_ucode_rx_stats_read,
- .tx_stats_read = iwl_ucode_tx_stats_read,
- .general_stats_read = iwl_ucode_general_stats_read,
- .bt_stats_read = iwl_ucode_bt_stats_read,
- .reply_tx_error = iwl_reply_tx_error_read,
+ .rx_stats_read = iwl4965_ucode_rx_stats_read,
+ .tx_stats_read = iwl4965_ucode_tx_stats_read,
+ .general_stats_read = iwl4965_ucode_general_stats_read,
},
- .recover_from_tx_stall = iwl_bg_monitor_recover,
- .check_plcp_health = iwl_good_plcp_health,
+ .check_plcp_health = iwl4965_good_plcp_health,
+};
+
+static const struct iwl_legacy_ops iwl4965_legacy_ops = {
+ .post_associate = iwl4965_post_associate,
+ .config_ap = iwl4965_config_ap,
+ .manage_ibss_station = iwl4965_manage_ibss_station,
+ .update_bcast_stations = iwl4965_update_bcast_stations,
+};
+
+struct ieee80211_ops iwl4965_hw_ops = {
+ .tx = iwl4965_mac_tx,
+ .start = iwl4965_mac_start,
+ .stop = iwl4965_mac_stop,
+ .add_interface = iwl_legacy_mac_add_interface,
+ .remove_interface = iwl_legacy_mac_remove_interface,
+ .change_interface = iwl_legacy_mac_change_interface,
+ .config = iwl_legacy_mac_config,
+ .configure_filter = iwl4965_configure_filter,
+ .set_key = iwl4965_mac_set_key,
+ .update_tkip_key = iwl4965_mac_update_tkip_key,
+ .conf_tx = iwl_legacy_mac_conf_tx,
+ .reset_tsf = iwl_legacy_mac_reset_tsf,
+ .bss_info_changed = iwl_legacy_mac_bss_info_changed,
+ .ampdu_action = iwl4965_mac_ampdu_action,
+ .hw_scan = iwl_legacy_mac_hw_scan,
+ .sta_add = iwl4965_mac_sta_add,
+ .sta_remove = iwl_legacy_mac_sta_remove,
+ .channel_switch = iwl4965_mac_channel_switch,
+ .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
};
static const struct iwl_ops iwl4965_ops = {
.lib = &iwl4965_lib,
.hcmd = &iwl4965_hcmd,
.utils = &iwl4965_hcmd_utils,
- .led = &iwlagn_led_ops,
+ .led = &iwl4965_led_ops,
+ .legacy = &iwl4965_legacy_ops,
+ .ieee80211_ops = &iwl4965_hw_ops,
};
static struct iwl_base_params iwl4965_base_params = {
@@ -2318,21 +2152,18 @@ static struct iwl_base_params iwl4965_base_params = {
.pll_cfg_val = 0,
.set_l0s = true,
.use_bsm = true,
- .use_isr_legacy = true,
- .broken_powersave = true,
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .wd_timeout = IWL_DEF_WD_TIMEOUT,
.temperature_kelvin = true,
.max_event_log_size = 512,
- .tx_power_by_driver = true,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
};
-struct iwl_cfg iwl4965_agn_cfg = {
+struct iwl_cfg iwl4965_cfg = {
.name = "Intel(R) Wireless WiFi Link 4965AGN",
.fw_name_pre = IWL4965_FW_PRE,
.ucode_api_max = IWL4965_UCODE_API_MAX,
@@ -2343,8 +2174,9 @@ struct iwl_cfg iwl4965_agn_cfg = {
.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
.ops = &iwl4965_ops,
- .mod_params = &iwlagn_mod_params,
+ .mod_params = &iwl4965_mod_params,
.base_params = &iwl4965_base_params,
+ .led_mode = IWL_LED_BLINK,
/*
* Force use of chains B and C for scan RX on 5 GHz band
* because the device has off-channel reception on chain A.
@@ -2354,4 +2186,3 @@ struct iwl_cfg iwl4965_agn_cfg = {
/* Module firmware */
MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
-
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
new file mode 100644
index 000000000000..01f8163daf16
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.h
@@ -0,0 +1,282 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_4965_h__
+#define __iwl_4965_h__
+
+#include "iwl-dev.h"
+
+/* configuration for the _4965 devices */
+extern struct iwl_cfg iwl4965_cfg;
+
+extern struct iwl_mod_params iwl4965_mod_params;
+
+extern struct ieee80211_ops iwl4965_hw_ops;
+
+/* tx queue */
+void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed);
+
+/* RXON */
+void iwl4965_set_rxon_chain(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+
+/* uCode */
+int iwl4965_verify_ucode(struct iwl_priv *priv);
+
+/* lib */
+void iwl4965_check_abort_status(struct iwl_priv *priv,
+ u8 frame_count, u32 status);
+
+void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwl4965_hw_nic_init(struct iwl_priv *priv);
+int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
+
+/* rx */
+void iwl4965_rx_queue_restock(struct iwl_priv *priv);
+void iwl4965_rx_replenish(struct iwl_priv *priv);
+void iwl4965_rx_replenish_now(struct iwl_priv *priv);
+void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
+int iwl4965_rxq_stop(struct iwl_priv *priv);
+int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+void iwl4965_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwl4965_rx_handle(struct iwl_priv *priv);
+
+/* tx */
+void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad);
+int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
+void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+ struct ieee80211_tx_info *info);
+int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
+int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+int iwl4965_txq_check_empty(struct iwl_priv *priv,
+ int sta_id, u8 tid, int txq_id);
+void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
+void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
+int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
+void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
+void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
+void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
+
+/*
+ * Acquire priv->lock before calling this function!
+ */
+void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
+/**
+ * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
+ * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
+ * @scd_retry: (1) Indicates queue will be used in aggregation mode
+ *
+ * NOTE: Acquire priv->lock before calling this function !
+ */
+void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry);
+
+static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
+{
+ status &= TX_STATUS_MSK;
+
+ switch (status) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ return IEEE80211_TX_STAT_ACK;
+ case TX_STATUS_FAIL_DEST_PS:
+ return IEEE80211_TX_STAT_TX_FILTERED;
+ default:
+ return 0;
+ }
+}
+
+static inline bool iwl4965_is_tx_success(u32 status)
+{
+ status &= TX_STATUS_MSK;
+ return (status == TX_STATUS_SUCCESS) ||
+ (status == TX_STATUS_DIRECT_DONE);
+}
+
+u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
+
+/* rx */
+void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+bool iwl4965_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+void iwl4965_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwl4965_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+
+/* scan */
+int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int iwl4965_manage_ibss_station(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add);
+
+/* hcmd */
+int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+const char *iwl4965_get_tx_fail_reason(u32 status);
+#else
+static inline const char *
+iwl4965_get_tx_fail_reason(u32 status) { return ""; }
+#endif
+
+/* station management */
+int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+int iwl4965_add_bssid_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ const u8 *addr, u8 *sta_id_r);
+int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *key);
+int iwl4965_set_default_wep_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *key);
+int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+int iwl4965_set_dynamic_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *key, u8 sta_id);
+int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *key, u8 sta_id);
+void iwl4965_update_tkip_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
+int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
+ int sta_id, int tid);
+int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid, u16 ssn);
+int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+ int tid);
+void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
+ int sta_id, int cnt);
+int iwl4965_update_bcast_stations(struct iwl_priv *priv);
+
+/* rate */
+static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
+{
+ return BIT(ant_idx) << RATE_MCS_ANT_POS;
+}
+
+static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
+{
+ return le32_to_cpu(rate_n_flags) & 0xFF;
+}
+
+static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
+{
+ return cpu_to_le32(flags|(u32)rate);
+}
+
+/* eeprom */
+void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
+int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
+void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
+int iwl4965_eeprom_check_version(struct iwl_priv *priv);
+
+/* mac80211 handlers (for 4965) */
+void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int iwl4965_mac_start(struct ieee80211_hw *hw);
+void iwl4965_mac_stop(struct ieee80211_hw *hw);
+void iwl4965_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast);
+int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key);
+int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size);
+int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch);
+
+#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/iwl-commands.h
new file mode 100644
index 000000000000..17a1d504348e
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-commands.h
@@ -0,0 +1,3405 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-commands.h) only for uCode API definitions.
+ * Please use iwl-xxxx-hw.h for hardware-related definitions.
+ * Please use iwl-dev.h for driver implementation definitions.
+ */
+
+#ifndef __iwl_legacy_commands_h__
+#define __iwl_legacy_commands_h__
+
+struct iwl_priv;
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+
+
+/* Tx rates */
+#define IWL_CCK_RATES 4
+#define IWL_OFDM_RATES 8
+#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
+
+enum {
+ REPLY_ALIVE = 0x1,
+ REPLY_ERROR = 0x2,
+
+ /* RXON and QOS commands */
+ REPLY_RXON = 0x10,
+ REPLY_RXON_ASSOC = 0x11,
+ REPLY_QOS_PARAM = 0x13,
+ REPLY_RXON_TIMING = 0x14,
+
+ /* Multi-Station support */
+ REPLY_ADD_STA = 0x18,
+ REPLY_REMOVE_STA = 0x19,
+
+ /* Security */
+ REPLY_WEPKEY = 0x20,
+
+ /* RX, TX, LEDs */
+ REPLY_3945_RX = 0x1b, /* 3945 only */
+ REPLY_TX = 0x1c,
+ REPLY_RATE_SCALE = 0x47, /* 3945 only */
+ REPLY_LEDS_CMD = 0x48,
+ REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+
+ /* 802.11h related */
+ REPLY_CHANNEL_SWITCH = 0x72,
+ CHANNEL_SWITCH_NOTIFICATION = 0x73,
+ REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
+ SPECTRUM_MEASURE_NOTIFICATION = 0x75,
+
+ /* Power Management */
+ POWER_TABLE_CMD = 0x77,
+ PM_SLEEP_NOTIFICATION = 0x7A,
+ PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
+
+ /* Scan commands and notifications */
+ REPLY_SCAN_CMD = 0x80,
+ REPLY_SCAN_ABORT_CMD = 0x81,
+ SCAN_START_NOTIFICATION = 0x82,
+ SCAN_RESULTS_NOTIFICATION = 0x83,
+ SCAN_COMPLETE_NOTIFICATION = 0x84,
+
+ /* IBSS/AP commands */
+ BEACON_NOTIFICATION = 0x90,
+ REPLY_TX_BEACON = 0x91,
+
+ /* Miscellaneous commands */
+ REPLY_TX_PWR_TABLE_CMD = 0x97,
+
+ /* Bluetooth device coexistence config command */
+ REPLY_BT_CONFIG = 0x9b,
+
+ /* Statistics */
+ REPLY_STATISTICS_CMD = 0x9c,
+ STATISTICS_NOTIFICATION = 0x9d,
+
+ /* RF-KILL commands and notifications */
+ CARD_STATE_NOTIFICATION = 0xa1,
+
+ /* Missed beacons notification */
+ MISSED_BEACONS_NOTIFICATION = 0xa2,
+
+ REPLY_CT_KILL_CONFIG_CMD = 0xa4,
+ SENSITIVITY_CMD = 0xa8,
+ REPLY_PHY_CALIBRATION_CMD = 0xb0,
+ REPLY_RX_PHY_CMD = 0xc0,
+ REPLY_RX_MPDU_CMD = 0xc1,
+ REPLY_RX = 0xc3,
+ REPLY_COMPRESSED_BA = 0xc5,
+
+ REPLY_MAX = 0xff
+};
+
+/******************************************************************************
+ * (0)
+ * Commonly used structures and definitions:
+ * Command header, rate_n_flags, txpower
+ *
+ *****************************************************************************/
+
+/* iwl_cmd_header flags value */
+#define IWL_CMD_FAILED_MSK 0x40
+
+#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
+#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
+#define SEQ_TO_INDEX(s) ((s) & 0xff)
+#define INDEX_TO_SEQ(i) ((i) & 0xff)
+#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
+#define SEQ_RX_FRAME cpu_to_le16(0x8000)
+
+/**
+ * struct iwl_cmd_header
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ */
+struct iwl_cmd_header {
+ u8 cmd; /* Command ID: REPLY_RXON, etc. */
+ u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
+ /*
+ * The driver sets up the sequence number to values of its choosing.
+ * uCode does not use this value, but passes it back to the driver
+ * when sending the response to each driver-originated command, so
+ * the driver can match the response to the command. Since the values
+ * don't get used by uCode, the driver may set up an arbitrary format.
+ *
+ * There is one exception: uCode sets bit 15 when it originates
+ * the response/notification, i.e. when the response/notification
+ * is not a direct response to a command sent by the driver. For
+ * example, uCode issues REPLY_3945_RX when it sends a received frame
+ * to the driver; it is not a direct response to any driver command.
+ *
+ * The Linux driver uses the following format:
+ *
+ * 0:7 tfd index - position within TX queue
+ * 8:12 TX queue id
+ * 13 reserved
+ * 14 huge - driver sets this to indicate command is in the
+ * 'huge' storage at the end of the command buffers
+ * 15 unsolicited RX or uCode-originated notification
+ */
+ __le16 sequence;
+
+ /* command or response/notification data follows immediately */
+ u8 data[0];
+} __packed;
+
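+/*
+ * Illustrative sketch (editorial, not part of this patch): packing and
+ * unpacking the driver-chosen sequence field with the SEQ_* helpers above.
+ * The iwl_example_* names and the txq_id/index parameters are hypothetical.
+ */
+static inline __le16 iwl_example_build_seq(u8 txq_id, u8 index)
+{
+ /* TX queue id in bits 8:12, tfd index in bits 0:7 */
+ return cpu_to_le16(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(index));
+}
+
+static inline bool iwl_example_is_notification(__le16 sequence)
+{
+ /* uCode sets bit 15 (SEQ_RX_FRAME) on unsolicited notifications */
+ return !!(sequence & SEQ_RX_FRAME);
+}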
+
+/**
+ * struct iwl3945_tx_power
+ *
+ * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
+ *
+ * Each entry contains two values:
+ * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
+ * linear value that multiplies the output of the digital signal processor,
+ * before being sent to the analog radio.
+ * 2) Radio gain. This sets the analog gain of the radio Tx path.
+ * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
+ *
+ * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
+ */
+struct iwl3945_tx_power {
+ u8 tx_gain; /* gain for analog radio */
+ u8 dsp_atten; /* gain for DSP */
+} __packed;
+
+/**
+ * struct iwl3945_power_per_rate
+ *
+ * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ */
+struct iwl3945_power_per_rate {
+ u8 rate; /* plcp */
+ struct iwl3945_tx_power tpc;
+ u8 reserved;
+} __packed;
+
+/**
+ * iwl4965 rate_n_flags bit fields
+ *
+ * rate_n_flags format is used in following iwl4965 commands:
+ * REPLY_RX (response only)
+ * REPLY_RX_MPDU (response only)
+ * REPLY_TX (both command and response)
+ * REPLY_TX_LINK_QUALITY_CMD
+ *
+ * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
+ * 2-0: 0) 6 Mbps
+ * 1) 12 Mbps
+ * 2) 18 Mbps
+ * 3) 24 Mbps
+ * 4) 36 Mbps
+ * 5) 48 Mbps
+ * 6) 54 Mbps
+ * 7) 60 Mbps
+ *
+ * 4-3: 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ *
+ * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ *
+ * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
+ * 3-0: 0xD) 6 Mbps
+ * 0xF) 9 Mbps
+ * 0x5) 12 Mbps
+ * 0x7) 18 Mbps
+ * 0x9) 24 Mbps
+ * 0xB) 36 Mbps
+ * 0x1) 48 Mbps
+ * 0x3) 54 Mbps
+ *
+ * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
+ * 6-0: 10) 1 Mbps
+ * 20) 2 Mbps
+ * 55) 5.5 Mbps
+ * 110) 11 Mbps
+ */
+#define RATE_MCS_CODE_MSK 0x7
+#define RATE_MCS_SPATIAL_POS 3
+#define RATE_MCS_SPATIAL_MSK 0x18
+#define RATE_MCS_HT_DUP_POS 5
+#define RATE_MCS_HT_DUP_MSK 0x20
+
+/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
+#define RATE_MCS_FLAGS_POS 8
+#define RATE_MCS_HT_POS 8
+#define RATE_MCS_HT_MSK 0x100
+
+/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
+#define RATE_MCS_CCK_POS 9
+#define RATE_MCS_CCK_MSK 0x200
+
+/* Bit 10: (1) Use Green Field preamble */
+#define RATE_MCS_GF_POS 10
+#define RATE_MCS_GF_MSK 0x400
+
+/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
+#define RATE_MCS_HT40_POS 11
+#define RATE_MCS_HT40_MSK 0x800
+
+/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
+#define RATE_MCS_DUP_POS 12
+#define RATE_MCS_DUP_MSK 0x1000
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define RATE_MCS_SGI_POS 13
+#define RATE_MCS_SGI_MSK 0x2000
+
+/**
+ * rate_n_flags Tx antenna masks
+ * 4965 has 2 transmitters
+ * bit14:16
+ */
+#define RATE_MCS_ANT_POS 14
+#define RATE_MCS_ANT_A_MSK 0x04000
+#define RATE_MCS_ANT_B_MSK 0x08000
+#define RATE_MCS_ANT_C_MSK 0x10000
+#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
+#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
+#define RATE_ANT_NUM 3
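+
+/*
+ * Illustrative sketch (editorial, not part of this patch): decoding a
+ * host-order rate_n_flags word with the RATE_MCS_* masks above. The
+ * iwl_example_* names are hypothetical.
+ */
+static inline bool iwl_example_rate_is_mimo(u32 rate_n_flags)
+{
+ /* HT bit set and spatial-stream field (bits 4:3) non-zero => 2+ streams */
+ return (rate_n_flags & RATE_MCS_HT_MSK) &&
+        ((rate_n_flags & RATE_MCS_SPATIAL_MSK) >> RATE_MCS_SPATIAL_POS);
+}
+
+static inline u8 iwl_example_rate_ant_mask(u32 rate_n_flags)
+{
+ /* antenna A/B/C selection bitmap taken from bits 16:14 */
+ return (rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
+}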
+
+#define POWER_TABLE_NUM_ENTRIES 33
+#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
+#define POWER_TABLE_CCK_ENTRY 32
+
+#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
+#define IWL_PWR_CCK_ENTRIES 2
+
+/**
+ * union iwl4965_tx_power_dual_stream
+ *
+ * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Use __le32 version (struct tx_power_dual_stream) when building command.
+ *
+ * Driver provides radio gain and DSP attenuation settings to device in pairs,
+ * one value for each transmitter chain. The first value is for transmitter A,
+ * second for transmitter B.
+ *
+ * For SISO bit rates, both values in a pair should be identical.
+ * For MIMO rates, one value may be different from the other,
+ * in order to balance the Tx output between the two transmitters.
+ *
+ * See more details in doc for TXPOWER in iwl-4965-hw.h.
+ */
+union iwl4965_tx_power_dual_stream {
+ struct {
+ u8 radio_tx_gain[2];
+ u8 dsp_predis_atten[2];
+ } s;
+ u32 dw;
+};
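+
+/*
+ * Illustrative sketch (editorial, not part of this patch): converting the
+ * host-format dual-stream entry above into the __le32 on-wire form used by
+ * struct tx_power_dual_stream below. The iwl_example_* name is hypothetical.
+ */
+static inline __le32 iwl_example_pack_tx_power(
+   const union iwl4965_tx_power_dual_stream *p)
+{
+ return cpu_to_le32(p->dw);
+}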
+
+/**
+ * struct tx_power_dual_stream
+ *
+ * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ *
+ * Same format as union iwl4965_tx_power_dual_stream, but __le32
+ */
+struct tx_power_dual_stream {
+ __le32 dw;
+} __packed;
+
+/**
+ * struct iwl4965_tx_power_db
+ *
+ * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ */
+struct iwl4965_tx_power_db {
+ struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
+} __packed;
+
+/******************************************************************************
+ * (0a)
+ * Alive and Error Commands & Responses:
+ *
+ *****************************************************************************/
+
+#define UCODE_VALID_OK cpu_to_le32(0x1)
+#define INITIALIZE_SUBTYPE (9)
+
+/*
+ * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
+ *
+ * uCode issues this "initialize alive" notification once the initialization
+ * uCode image has completed its work, and is ready to load the runtime image.
+ * This is the *first* "alive" notification that the driver will receive after
+ * rebooting uCode; the "initialize" alive is indicated by subtype field == 9.
+ *
+ * See comments documenting "BSM" (bootstrap state machine).
+ *
+ * For 4965, this notification contains important calibration data for
+ * calculating txpower settings:
+ *
+ * 1) Power supply voltage indication. The voltage sensor outputs higher
+ * values for lower voltage, and vice versa.
+ *
+ * 2) Temperature measurement parameters, for each of two channel widths
+ * (20 MHz and 40 MHz) supported by the radios. Temperature sensing
+ * is done via one of the receiver chains, and channel width influences
+ * the results.
+ *
+ * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
+ * for each of 5 frequency ranges.
+ */
+struct iwl_init_alive_resp {
+ u8 ucode_minor;
+ u8 ucode_major;
+ __le16 reserved1;
+ u8 sw_rev[8];
+ u8 ver_type;
+ u8 ver_subtype; /* "9" for initialize alive */
+ __le16 reserved2;
+ __le32 log_event_table_ptr;
+ __le32 error_event_table_ptr;
+ __le32 timestamp;
+ __le32 is_valid;
+
+ /* calibration values from "initialize" uCode */
+ __le32 voltage; /* signed, higher value is lower voltage */
+ __le32 therm_r1[2]; /* signed, 1st for normal, 2nd for HT40 */
+ __le32 therm_r2[2]; /* signed */
+ __le32 therm_r3[2]; /* signed */
+ __le32 therm_r4[2]; /* signed */
+ __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups,
+ * 2 Tx chains */
+} __packed;
+
+
+/**
+ * REPLY_ALIVE = 0x1 (response only, not a command)
+ *
+ * uCode issues this "alive" notification once the runtime image is ready
+ * to receive commands from the driver. This is the *second* "alive"
+ * notification that the driver will receive after rebooting uCode;
+ * this "alive" is indicated by subtype field != 9.
+ *
+ * See comments documenting "BSM" (bootstrap state machine).
+ *
+ * This response includes two pointers to structures within the device's
+ * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
+ *
+ * 1) log_event_table_ptr indicates base of the event log. This traces
+ * a 256-entry history of uCode execution within a circular buffer.
+ * Its header format is:
+ *
+ * __le32 log_size; log capacity (in number of entries)
+ * __le32 type; (1) timestamp with each entry, (0) no timestamp
+ * __le32 wraps; # times uCode has wrapped to top of circular buffer
+ * __le32 write_index; next circular buffer entry that uCode would fill
+ *
+ * The header is followed by the circular buffer of log entries. Entries
+ * with timestamps have the following format:
+ *
+ * __le32 event_id; range 0 - 1500
+ * __le32 timestamp; low 32 bits of TSF (of network, if associated)
+ * __le32 data; event_id-specific data value
+ *
+ * Entries without timestamps contain only event_id and data.
+ *
+ *
+ * 2) error_event_table_ptr indicates base of the error log. This contains
+ * information about any uCode error that occurs. For 4965, the format
+ * of the error log is:
+ *
+ * __le32 valid; (nonzero) valid, (0) log is empty
+ * __le32 error_id; type of error
+ * __le32 pc; program counter
+ * __le32 blink1; branch link
+ * __le32 blink2; branch link
+ * __le32 ilink1; interrupt link
+ * __le32 ilink2; interrupt link
+ * __le32 data1; error-specific data
+ * __le32 data2; error-specific data
+ * __le32 line; source code line of error
+ * __le32 bcon_time; beacon timer
+ * __le32 tsf_low; network timestamp function timer
+ * __le32 tsf_hi; network timestamp function timer
+ * __le32 gp1; GP1 timer register
+ * __le32 gp2; GP2 timer register
+ * __le32 gp3; GP3 timer register
+ * __le32 ucode_ver; uCode version
+ * __le32 hw_ver; HW Silicon version
+ * __le32 brd_ver; HW board version
+ * __le32 log_pc; log program counter
+ * __le32 frame_ptr; frame pointer
+ * __le32 stack_ptr; stack pointer
+ * __le32 hcmd; last host command
+ * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
+ * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
+ * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
+ * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
+ * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
+ * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
+ * __le32 wait_event; wait event() caller address
+ * __le32 l2p_control; L2pControlField
+ * __le32 l2p_duration; L2pDurationField
+ * __le32 l2p_mhvalid; L2pMhValidBits
+ * __le32 l2p_addr_match; L2pAddrMatchStat
+ * __le32 lmpm_pmg_sel; indicates which clocks are turned on (LMPM_PMG_SEL)
+ * __le32 u_timestamp; indicates the date and time of the compilation
+ * __le32 reserved;
+ *
+ * The Linux driver can print both logs to the system log when a uCode error
+ * occurs.
+ */
+struct iwl_alive_resp {
+ u8 ucode_minor;
+ u8 ucode_major;
+ __le16 reserved1;
+ u8 sw_rev[8];
+ u8 ver_type;
+ u8 ver_subtype; /* not "9" for runtime alive */
+ __le16 reserved2;
+ __le32 log_event_table_ptr; /* SRAM address for event log */
+ __le32 error_event_table_ptr; /* SRAM address for error log */
+ __le32 timestamp;
+ __le32 is_valid;
+} __packed;
+
+/*
+ * REPLY_ERROR = 0x2 (response only, not a command)
+ */
+struct iwl_error_resp {
+ __le32 error_type;
+ u8 cmd_id;
+ u8 reserved1;
+ __le16 bad_cmd_seq_num;
+ __le32 error_info;
+ __le64 timestamp;
+} __packed;
+
+/******************************************************************************
+ * (1)
+ * RXON Commands & Responses:
+ *
+ *****************************************************************************/
+
+/*
+ * Rx config defines & structure
+ */
+/* rx_config device types */
+enum {
+ RXON_DEV_TYPE_AP = 1,
+ RXON_DEV_TYPE_ESS = 3,
+ RXON_DEV_TYPE_IBSS = 4,
+ RXON_DEV_TYPE_SNIFFER = 6,
+};
+
+
+#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
+#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
+#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
+#define RXON_RX_CHAIN_VALID_POS (1)
+#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4)
+#define RXON_RX_CHAIN_FORCE_SEL_POS (4)
+#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7)
+#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
+#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10)
+#define RXON_RX_CHAIN_CNT_POS (10)
+#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12)
+#define RXON_RX_CHAIN_MIMO_CNT_POS (12)
+#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14)
+#define RXON_RX_CHAIN_MIMO_FORCE_POS (14)
+
+/* rx_config flags */
+/* band & modulation selection */
+#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0)
+#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1)
+/* auto detection enable */
+#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2)
+/* TGg protection when tx */
+#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3)
+/* cck short slot & preamble */
+#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4)
+#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5)
+/* antenna selection */
+#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7)
+#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00)
+#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
+#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
+/* radar detection enable */
+#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12)
+#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13)
+/* rx response to host with 8-byte TSF
+* (according to ON_AIR deassertion) */
+#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
+
+
+/* HT flags */
+#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
+#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
+
+#define RXON_FLG_HT_OPERATING_MODE_POS (23)
+
+#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23)
+#define RXON_FLG_HT40_PROT_MSK cpu_to_le32(0x2 << 23)
+
+#define RXON_FLG_CHANNEL_MODE_POS (25)
+#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25)
+
+/* channel mode */
+enum {
+ CHANNEL_MODE_LEGACY = 0,
+ CHANNEL_MODE_PURE_40 = 1,
+ CHANNEL_MODE_MIXED = 2,
+ CHANNEL_MODE_RESERVED = 3,
+};
+#define RXON_FLG_CHANNEL_MODE_LEGACY \
+ cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_PURE_40 \
+ cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_MIXED \
+ cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)
+
+/* CTS to self (if spec allows) flag */
+#define RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30)
+
+/* rx_config filter flags */
+/* accept all data frames */
+#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0)
+/* pass control & management to host */
+#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1)
+/* accept multi-cast */
+#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2)
+/* don't decrypt uni-cast frames */
+#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3)
+/* don't decrypt multi-cast frames */
+#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
+/* STA is associated */
+#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5)
+/* transfer to host non bssid beacons in associated state */
+#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
+
+/**
+ * REPLY_RXON = 0x10 (command, has simple generic response)
+ *
+ * RXON tunes the radio tuner to a service channel, and sets up a number
+ * of parameters that are used primarily for Rx, but also for Tx operations.
+ *
+ * NOTE: When tuning to a new channel, driver must set the
+ * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent
+ * info within the device, including the station tables, tx retry
+ * rate tables, and txpower tables. Driver must build a new station
+ * table and txpower table before transmitting anything on the RXON
+ * channel.
+ *
+ * NOTE: All RXONs wipe clean the internal txpower table. Driver must
+ * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
+ * regardless of whether RXON_FILTER_ASSOC_MSK is set.
+ */
+
+struct iwl3945_rxon_cmd {
+ u8 node_addr[6];
+ __le16 reserved1;
+ u8 bssid_addr[6];
+ __le16 reserved2;
+ u8 wlap_bssid_addr[6];
+ __le16 reserved3;
+ u8 dev_type;
+ u8 air_propagation;
+ __le16 reserved4;
+ u8 ofdm_basic_rates;
+ u8 cck_basic_rates;
+ __le16 assoc_id;
+ __le32 flags;
+ __le32 filter_flags;
+ __le16 channel;
+ __le16 reserved5;
+} __packed;
+
+struct iwl4965_rxon_cmd {
+ u8 node_addr[6];
+ __le16 reserved1;
+ u8 bssid_addr[6];
+ __le16 reserved2;
+ u8 wlap_bssid_addr[6];
+ __le16 reserved3;
+ u8 dev_type;
+ u8 air_propagation;
+ __le16 rx_chain;
+ u8 ofdm_basic_rates;
+ u8 cck_basic_rates;
+ __le16 assoc_id;
+ __le32 flags;
+ __le32 filter_flags;
+ __le16 channel;
+ u8 ofdm_ht_single_stream_basic_rates;
+ u8 ofdm_ht_dual_stream_basic_rates;
+} __packed;
+
+/* Create a common rxon cmd which will be typecast into the 3945 or 4965
+ * specific rxon cmd, depending on where it is called from.
+ */
+struct iwl_legacy_rxon_cmd {
+ u8 node_addr[6];
+ __le16 reserved1;
+ u8 bssid_addr[6];
+ __le16 reserved2;
+ u8 wlap_bssid_addr[6];
+ __le16 reserved3;
+ u8 dev_type;
+ u8 air_propagation;
+ __le16 rx_chain;
+ u8 ofdm_basic_rates;
+ u8 cck_basic_rates;
+ __le16 assoc_id;
+ __le32 flags;
+ __le32 filter_flags;
+ __le16 channel;
+ u8 ofdm_ht_single_stream_basic_rates;
+ u8 ofdm_ht_dual_stream_basic_rates;
+ u8 reserved4;
+ u8 reserved5;
+} __packed;
+
+
+/*
+ * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
+ */
+struct iwl3945_rxon_assoc_cmd {
+ __le32 flags;
+ __le32 filter_flags;
+ u8 ofdm_basic_rates;
+ u8 cck_basic_rates;
+ __le16 reserved;
+} __packed;
+
+struct iwl4965_rxon_assoc_cmd {
+ __le32 flags;
+ __le32 filter_flags;
+ u8 ofdm_basic_rates;
+ u8 cck_basic_rates;
+ u8 ofdm_ht_single_stream_basic_rates;
+ u8 ofdm_ht_dual_stream_basic_rates;
+ __le16 rx_chain_select_flags;
+ __le16 reserved;
+} __packed;
+
+#define IWL_CONN_MAX_LISTEN_INTERVAL 10
+#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
+#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
+
+/*
+ * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
+ */
+struct iwl_rxon_time_cmd {
+ __le64 timestamp;
+ __le16 beacon_interval;
+ __le16 atim_window;
+ __le32 beacon_init_val;
+ __le16 listen_interval;
+ u8 dtim_period;
+ u8 delta_cp_bss_tbtts;
+} __packed;
+
+/*
+ * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
+ */
+struct iwl3945_channel_switch_cmd {
+ u8 band;
+ u8 expect_beacon;
+ __le16 channel;
+ __le32 rxon_flags;
+ __le32 rxon_filter_flags;
+ __le32 switch_time;
+ struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+} __packed;
+
+struct iwl4965_channel_switch_cmd {
+ u8 band;
+ u8 expect_beacon;
+ __le16 channel;
+ __le32 rxon_flags;
+ __le32 rxon_filter_flags;
+ __le32 switch_time;
+ struct iwl4965_tx_power_db tx_power;
+} __packed;
+
+/*
+ * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
+ */
+struct iwl_csa_notification {
+ __le16 band;
+ __le16 channel;
+ __le32 status; /* 0 - OK, 1 - fail */
+} __packed;
+
+/******************************************************************************
+ * (2)
+ * Quality-of-Service (QOS) Commands & Responses:
+ *
+ *****************************************************************************/
+
+/**
+ * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
+ * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
+ *
+ * @cw_min: Contention window, start value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x3f.
+ * @aifsn: Number of slots in Arbitration Interframe Space (before
+ * performing random backoff timing prior to Tx). Device default 1.
+ * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
+ *
+ * Device will automatically increase contention window by (2*CW) + 1 for each
+ * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
+ * value, to cap the CW value.
+ */
+struct iwl_ac_qos {
+ __le16 cw_min;
+ __le16 cw_max;
+ u8 aifsn;
+ u8 reserved1;
+ __le16 edca_txop;
+} __packed;
+
+/* QoS flags defines */
+#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
+#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02)
+#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10)
+
+/* Number of Access Categories (AC) (EDCA), queues 0..3 */
+#define AC_NUM 4
+
+/*
+ * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
+ *
+ * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
+ * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
+ */
+struct iwl_qosparam_cmd {
+ __le32 qos_flags;
+ struct iwl_ac_qos ac[AC_NUM];
+} __packed;
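+
+/*
+ * Illustrative sketch (editorial, not part of this patch): loading one EDCA
+ * access category of REPLY_QOS_PARAM with the device defaults quoted in the
+ * struct iwl_ac_qos kernel-doc above (cw_min 0x0f, cw_max 0x3f, aifsn 1,
+ * txop 0). The iwl_example_* name is hypothetical.
+ */
+static inline void iwl_example_fill_default_ac(struct iwl_qosparam_cmd *cmd,
+        int ac)
+{
+ cmd->ac[ac].cw_min = cpu_to_le16(0x0f);
+ cmd->ac[ac].cw_max = cpu_to_le16(0x3f);
+ cmd->ac[ac].aifsn = 1;
+ cmd->ac[ac].edca_txop = cpu_to_le16(0);
+ cmd->qos_flags |= QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+}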
+
+/******************************************************************************
+ * (3)
+ * Add/Modify Stations Commands & Responses:
+ *
+ *****************************************************************************/
+/*
+ * Multi station support
+ */
+
+/* Special, dedicated locations within device's station table */
+#define IWL_AP_ID 0
+#define IWL_STA_ID 2
+#define IWL3945_BROADCAST_ID 24
+#define IWL3945_STATION_COUNT 25
+#define IWL4965_BROADCAST_ID 31
+#define IWL4965_STATION_COUNT 32
+
+#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
+#define IWL_INVALID_STATION 255
+
+#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
+#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
+#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
+#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
+#define STA_FLG_MAX_AGG_SIZE_POS (19)
+#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19)
+#define STA_FLG_HT40_EN_MSK cpu_to_le32(1 << 21)
+#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22)
+#define STA_FLG_AGG_MPDU_DENSITY_POS (23)
+#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23)
+
+/* Use in mode field. 1: modify existing entry, 0: add new station entry */
+#define STA_CONTROL_MODIFY_MSK 0x01
+
+/* key flags __le16*/
+#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007)
+#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000)
+#define STA_KEY_FLG_WEP cpu_to_le16(0x0001)
+#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002)
+#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003)
+
+#define STA_KEY_FLG_KEYID_POS 8
+#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800)
+/* wep key is either from global key (0) or from station info array (1) */
+#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008)
+
+/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
+#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
+#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
+#define STA_KEY_MAX_NUM 8
+
+/* Flags indicate whether to modify vs. don't change various station params */
+#define STA_MODIFY_KEY_MASK 0x01
+#define STA_MODIFY_TID_DISABLE_TX 0x02
+#define STA_MODIFY_TX_RATE_MSK 0x04
+#define STA_MODIFY_ADDBA_TID_MSK 0x08
+#define STA_MODIFY_DELBA_TID_MSK 0x10
+#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
+
+/* Receiver address (actually, Rx station's index into station table),
+ * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
+#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
+
+struct iwl4965_keyinfo {
+ __le16 key_flags;
+ u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
+ u8 reserved1;
+ __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
+ u8 key_offset;
+ u8 reserved2;
+ u8 key[16]; /* 16-byte unicast decryption key */
+} __packed;
+
+/**
+ * struct sta_id_modify
+ * @addr[ETH_ALEN]: station's MAC address
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
+ *
+ * Driver selects unused table index when adding new station,
+ * or the index to a pre-existing station entry when modifying that station.
+ * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
+ *
+ * modify_mask flags select which parameters to modify vs. leave alone.
+ */
+struct sta_id_modify {
+ u8 addr[ETH_ALEN];
+ __le16 reserved1;
+ u8 sta_id;
+ u8 modify_mask;
+ __le16 reserved2;
+} __packed;
+
+/*
+ * REPLY_ADD_STA = 0x18 (command)
+ *
+ * The device contains an internal table of per-station information,
+ * with info on security keys, aggregation parameters, and Tx rates for
+ * initial Tx attempt and any retries (4965 devices use
+ * REPLY_TX_LINK_QUALITY_CMD,
+ * 3945 uses REPLY_RATE_SCALE to set up rate tables).
+ *
+ * REPLY_ADD_STA sets up the table entry for one station, either creating
+ * a new entry, or modifying a pre-existing one.
+ *
+ * NOTE: RXON command (without "associated" bit set) wipes the station table
+ * clean. Moving into RF_KILL state does this also. Driver must set up
+ * new station table before transmitting anything on the RXON channel
+ * (except active scans or active measurements; those commands carry
+ * their own txpower/rate setup data).
+ *
+ * When getting started on a new channel, driver must set up the
+ * IWL_BROADCAST_ID entry (last entry in the table). For a client
+ * station in a BSS, once an AP is selected, driver sets up the AP STA
+ * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
+ * are all that are needed for a BSS client station. If the device is
+ * used as AP, or in an IBSS network, driver must set up station table
+ * entries for all STAs in network, starting with index IWL_STA_ID.
+ */
+
+struct iwl3945_addsta_cmd {
+ u8 mode; /* 1: modify existing, 0: add new station */
+ u8 reserved[3];
+ struct sta_id_modify sta;
+ struct iwl4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
+ __le32 station_flags_msk; /* STA_FLG_* */
+
+ /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+ * corresponding to bit (e.g. bit 5 controls TID 5).
+ * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+ __le16 tid_disable_tx;
+
+ __le16 rate_n_flags;
+
+ /* TID for which to add block-ack support.
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+ u8 add_immediate_ba_tid;
+
+ /* TID for which to remove block-ack support.
+ * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+ u8 remove_immediate_ba_tid;
+
+ /* Starting Sequence Number for added block-ack support.
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+ __le16 add_immediate_ba_ssn;
+} __packed;
+
+struct iwl4965_addsta_cmd {
+ u8 mode; /* 1: modify existing, 0: add new station */
+ u8 reserved[3];
+ struct sta_id_modify sta;
+ struct iwl4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
+ __le32 station_flags_msk; /* STA_FLG_* */
+
+ /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+ * corresponding to bit (e.g. bit 5 controls TID 5).
+ * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+ __le16 tid_disable_tx;
+
+ __le16 reserved1;
+
+ /* TID for which to add block-ack support.
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+ u8 add_immediate_ba_tid;
+
+ /* TID for which to remove block-ack support.
+ * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+ u8 remove_immediate_ba_tid;
+
+ /* Starting Sequence Number for added block-ack support.
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+ __le16 add_immediate_ba_ssn;
+
+ /*
+ * Number of packets OK to transmit to station even though
+ * it is asleep -- used to synchronise PS-poll and u-APSD
+ * responses while ucode keeps track of STA sleep state.
+ */
+ __le16 sleep_tx_count;
+
+ __le16 reserved2;
+} __packed;
+
+/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
+struct iwl_legacy_addsta_cmd {
+ u8 mode; /* 1: modify existing, 0: add new station */
+ u8 reserved[3];
+ struct sta_id_modify sta;
+ struct iwl4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
+ __le32 station_flags_msk; /* STA_FLG_* */
+
+ /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+ * corresponding to bit (e.g. bit 5 controls TID 5).
+ * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+ __le16 tid_disable_tx;
+
+ __le16 rate_n_flags; /* 3945 only */
+
+ /* TID for which to add block-ack support.
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+ u8 add_immediate_ba_tid;
+
+ /* TID for which to remove block-ack support.
+ * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+ u8 remove_immediate_ba_tid;
+
+ /* Starting Sequence Number for added block-ack support.
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+ __le16 add_immediate_ba_ssn;
+
+ /*
+ * Number of packets OK to transmit to station even though
+ * it is asleep -- used to synchronise PS-poll and u-APSD
+ * responses while ucode keeps track of STA sleep state.
+ */
+ __le16 sleep_tx_count;
+
+ __le16 reserved2;
+} __packed;
+
+
+#define ADD_STA_SUCCESS_MSK 0x1
+#define ADD_STA_NO_ROOM_IN_TABLE 0x2
+#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
+#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
+/*
+ * REPLY_ADD_STA = 0x18 (response)
+ */
+struct iwl_add_sta_resp {
+ u8 status; /* ADD_STA_* */
+} __packed;
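+
+/*
+ * Illustrative sketch (editorial, not part of this patch): interpreting the
+ * status byte of a REPLY_ADD_STA response with the ADD_STA_* codes above.
+ * The iwl_example_* name is hypothetical.
+ */
+static inline bool iwl_example_add_sta_ok(const struct iwl_add_sta_resp *resp)
+{
+ return resp->status == ADD_STA_SUCCESS_MSK;
+}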
+
+#define REM_STA_SUCCESS_MSK 0x1
+/*
+ * REPLY_REMOVE_STA = 0x19 (response)
+ */
+struct iwl_rem_sta_resp {
+ u8 status;
+} __packed;
+
+/*
+ * REPLY_REMOVE_STA = 0x19 (command)
+ */
+struct iwl_rem_sta_cmd {
+ u8 num_sta; /* number of removed stations */
+ u8 reserved[3];
+ u8 addr[ETH_ALEN]; /* MAC addr of the first station */
+ u8 reserved2[2];
+} __packed;
+
+#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
+#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
+#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
+#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
+#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
+
+#define IWL_DROP_SINGLE 0
+#define IWL_DROP_SELECTED 1
+#define IWL_DROP_ALL 2
+
+/*
+ * REPLY_WEPKEY = 0x20
+ */
+struct iwl_wep_key {
+ u8 key_index;
+ u8 key_offset;
+ u8 reserved1[2];
+ u8 key_size;
+ u8 reserved2[3];
+ u8 key[16];
+} __packed;
+
+struct iwl_wep_cmd {
+ u8 num_keys;
+ u8 global_key_type;
+ u8 flags;
+ u8 reserved;
+ struct iwl_wep_key key[0];
+} __packed;
+
+#define WEP_KEY_WEP_TYPE 1
+#define WEP_KEYS_MAX 4
+#define WEP_INVALID_OFFSET 0xff
+#define WEP_KEY_LEN_64 5
+#define WEP_KEY_LEN_128 13
+
+/******************************************************************************
+ * (4)
+ * Rx Responses:
+ *
+ *****************************************************************************/
+
+#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0)
+#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1)
+
+#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0)
+#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
+#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
+#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0
+#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
+
+#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
+#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
+#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8)
+#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8)
+#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8)
+#define RX_RES_STATUS_SEC_TYPE_ERR (0x7 << 8)
+
+#define RX_RES_STATUS_STATION_FOUND (1<<6)
+#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7)
+
+#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11)
+#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11)
+#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11)
+#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11)
+#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11)
+
+#define RX_MPDU_RES_STATUS_ICV_OK (0x20)
+#define RX_MPDU_RES_STATUS_MIC_OK (0x40)
+#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
+#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
+
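+/*
+ * Illustrative sketch (editorial, not part of this patch): checking the
+ * decryption outcome encoded in an Rx status word with the masks above.
+ * The iwl_example_* name is hypothetical.
+ */
+static inline bool iwl_example_decrypt_ok(u32 rx_status)
+{
+ return (rx_status & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+         RX_RES_STATUS_DECRYPT_OK;
+}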
+
+struct iwl3945_rx_frame_stats {
+ u8 phy_count;
+ u8 id;
+ u8 rssi;
+ u8 agc;
+ __le16 sig_avg;
+ __le16 noise_diff;
+ u8 payload[0];
+} __packed;
+
+struct iwl3945_rx_frame_hdr {
+ __le16 channel;
+ __le16 phy_flags;
+ u8 reserved1;
+ u8 rate;
+ __le16 len;
+ u8 payload[0];
+} __packed;
+
+struct iwl3945_rx_frame_end {
+ __le32 status;
+ __le64 timestamp;
+ __le32 beacon_timestamp;
+} __packed;
+
+/*
+ * REPLY_3945_RX = 0x1b (response only, not a command)
+ *
+ * NOTE: DO NOT dereference from casts to this structure
+ * It is provided only for calculating minimum data set size.
+ * The actual offsets of the hdr and end are dynamic based on
+ * stats.phy_count
+ */
+struct iwl3945_rx_frame {
+ struct iwl3945_rx_frame_stats stats;
+ struct iwl3945_rx_frame_hdr hdr;
+ struct iwl3945_rx_frame_end end;
+} __packed;
+
+#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
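+
+/*
+ * Illustrative sketch (editorial, not part of this patch): because the hdr
+ * offset is dynamic (see the NOTE above), it is located by skipping
+ * stats.phy_count bytes of phy data rather than by casting the whole frame.
+ * The iwl_example_* name is hypothetical.
+ */
+static inline struct iwl3945_rx_frame_hdr *
+iwl_example_rx_frame_hdr(struct iwl3945_rx_frame_stats *stats)
+{
+ return (struct iwl3945_rx_frame_hdr *)&stats->payload[stats->phy_count];
+}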
+
+/* Fixed (non-configurable) rx data from phy */
+
+#define IWL49_RX_RES_PHY_CNT 14
+#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
+#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
+#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
+#define IWL49_AGC_DB_POS (7)
+struct iwl4965_rx_non_cfg_phy {
+ __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
+ __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
+ u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
+ u8 pad[0];
+} __packed;
+
+
+/*
+ * REPLY_RX = 0xc3 (response only, not a command)
+ * Used only for legacy (non 11n) frames.
+ */
+struct iwl_rx_phy_res {
+ u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
+ u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
+ u8 stat_id; /* configurable DSP phy data set ID */
+ u8 reserved1;
+ __le64 timestamp; /* TSF at on air rise */
+ __le32 beacon_time_stamp; /* beacon at on-air rise */
+ __le16 phy_flags; /* general phy flags: band, modulation, ... */
+ __le16 channel; /* channel number */
+ u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
+ __le32 rate_n_flags; /* RATE_MCS_* */
+ __le16 byte_count; /* frame's byte-count */
+ __le16 frame_time; /* frame's time on the air */
+} __packed;
+
+struct iwl_rx_mpdu_res_start {
+ __le16 byte_count;
+ __le16 reserved;
+} __packed;
+
+
+/******************************************************************************
+ * (5)
+ * Tx Commands & Responses:
+ *
+ * Driver must place each REPLY_TX command into one of the prioritized Tx
+ * queues in host DRAM, shared between driver and device (see comments for
+ * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
+ * are preparing to transmit, the device pulls the Tx command over the PCI
+ * bus via one of the device's Tx DMA channels, to fill an internal FIFO
+ * from which data will be transmitted.
+ *
+ * uCode handles all timing and protocol related to control frames
+ * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
+ * handle reception of block-acks; uCode updates the host driver via
+ * REPLY_COMPRESSED_BA.
+ *
+ * uCode handles retrying Tx when an ACK is expected but not received.
+ * This includes trying lower data rates than the one requested in the Tx
+ * command, as set up by the REPLY_RATE_SCALE (for 3945) or
+ * REPLY_TX_LINK_QUALITY_CMD (4965).
+ *
+ * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
+ * This command must be executed after every RXON command, before Tx can occur.
+ *****************************************************************************/
+
+/* REPLY_TX Tx flags field */
+
+/*
+ * 1: Use Request-To-Send protocol before this frame.
+ * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
+ */
+#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
+
+/*
+ * 1: Transmit Clear-To-Send to self before this frame.
+ * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
+ * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
+ */
+#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
+
+/* 1: Expect ACK from receiving station
+ * 0: Don't expect ACK (MAC header's duration field s/b 0)
+ * Set this for unicast frames, but not broadcast/multicast. */
+#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
+
+/* For 4965 devices:
+ * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
+ * Tx command's initial_rate_index indicates first rate to try;
+ * uCode walks through table for additional Tx attempts.
+ * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
+ * This rate will be used for all Tx attempts; it will not be scaled. */
+#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
+
+/* 1: Expect immediate block-ack.
+ * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
+#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
+
+/*
+ * 1: Frame requires full Tx-Op protection.
+ * Set this if either RTS or CTS Tx Flag gets set.
+ */
+#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
+
+/* Tx antenna selection field; used only for 3945, reserved (0) for 4965 devices.
+ * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
+#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
+#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
+#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
+
+/* 1: uCode overrides sequence control field in MAC header.
+ * 0: Driver provides sequence control field in MAC header.
+ * Set this for management frames, non-QOS data frames, non-unicast frames,
+ * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
+#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
+
+/* 1: This frame is non-last MPDU; more fragments are coming.
+ * 0: Last fragment, or not using fragmentation. */
+#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
+
+/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
+ * 0: No TSF required in outgoing frame.
+ * Set this for transmitting beacons and probe responses. */
+#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
+
+/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
+ * alignment of frame's payload data field.
+ * 0: No pad
+ * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
+ * field (but not both). Driver must align frame data (i.e. data following
+ * MAC header) to DWORD boundary. */
+#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
+
+/* accelerate aggregation support
+ * 0 - no CCMP encryption; 1 - CCMP encryption */
+#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)
+
+/* HCCA-AP - disable duration overwriting. */
+#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
+
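+/*
+ * Illustrative sketch (assumption, not driver code): composing the Tx flags
+ * word from the bits above. It encodes only the constraints documented here,
+ * i.e. ACK for unicast frames, RTS/CTS-to-self mutual exclusion, and the
+ * matching full-TXOP protection bit. The helper name is hypothetical.
+ */
+static inline __le32 iwl_build_tx_flags(bool unicast, bool use_rts,
+     bool cts_to_self)
+{
+ __le32 tx_flags = 0;
+
+ if (unicast)
+  tx_flags |= TX_CMD_FLG_ACK_MSK;
+
+ /* RTS and CTS-to-self are mutually exclusive */
+ if (use_rts)
+  tx_flags |= TX_CMD_FLG_RTS_MSK;
+ else if (cts_to_self)
+  tx_flags |= TX_CMD_FLG_CTS_MSK;
+
+ /* Either protection mechanism requires full Tx-Op protection */
+ if (tx_flags & (TX_CMD_FLG_RTS_MSK | TX_CMD_FLG_CTS_MSK))
+  tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+ return tx_flags;
+}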
+
+/*
+ * TX command security control
+ */
+#define TX_CMD_SEC_WEP 0x01
+#define TX_CMD_SEC_CCM 0x02
+#define TX_CMD_SEC_TKIP 0x03
+#define TX_CMD_SEC_MSK 0x03
+#define TX_CMD_SEC_SHIFT 6
+#define TX_CMD_SEC_KEY128 0x08
+
+/*
+ * security overhead sizes
+ */
+#define WEP_IV_LEN 4
+#define WEP_ICV_LEN 4
+#define CCMP_MIC_LEN 8
+#define TKIP_ICV_LEN 4
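+/*
+ * Illustrative sketch (assumption, not driver code): how the sec_ctl byte of
+ * a Tx command might be assembled from the fields above for WEP. The key
+ * index occupies the bits selected by TX_CMD_SEC_SHIFT; the key material
+ * itself is copied into the Tx command's key[] array separately. The helper
+ * name and the is_128bit_wep parameter are hypothetical.
+ */
+static inline u8 iwl_build_sec_ctl_wep(u8 key_idx, bool is_128bit_wep)
+{
+ u8 sec_ctl = TX_CMD_SEC_WEP |
+       ((key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+
+ if (is_128bit_wep)
+  sec_ctl |= TX_CMD_SEC_KEY128;
+
+ return sec_ctl;
+}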
+
+/*
+ * REPLY_TX = 0x1c (command)
+ */
+
+struct iwl3945_tx_cmd {
+ /*
+ * MPDU byte count:
+ * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range: 14-2342 bytes.
+ */
+ __le16 len;
+
+ /*
+ * MPDU or MSDU byte count for next frame.
+ * Used for fragmentation and bursting, but not 11n aggregation.
+ * Same as "len", but for next frame. Set to 0 if not applicable.
+ */
+ __le16 next_frame_len;
+
+ __le32 tx_flags; /* TX_CMD_FLG_* */
+
+ u8 rate;
+
+ /* Index of recipient station in uCode's station table */
+ u8 sta_id;
+ u8 tid_tspec;
+ u8 sec_ctl;
+ u8 key[16];
+ union {
+ u8 byte[8];
+ __le16 word[4];
+ __le32 dw[2];
+ } tkip_mic;
+ __le32 next_frame_info;
+ union {
+ __le32 life_time;
+ __le32 attempt;
+ } stop_time;
+ u8 supp_rates[2];
+ u8 rts_retry_limit; /*byte 50 */
+ u8 data_retry_limit; /*byte 51 */
+ union {
+ __le16 pm_frame_timeout;
+ __le16 attempt_duration;
+ } timeout;
+
+ /*
+ * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+ * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+ */
+ __le16 driver_txop;
+
+ /*
+ * MAC header goes here, followed by 2 bytes padding if MAC header
+ * length is 26 or 30 bytes, followed by payload data
+ */
+ u8 payload[0];
+ struct ieee80211_hdr hdr[0];
+} __packed;
+
+/*
+ * REPLY_TX = 0x1c (response)
+ */
+struct iwl3945_tx_resp {
+ u8 failure_rts;
+ u8 failure_frame;
+ u8 bt_kill_count;
+ u8 rate;
+ __le32 wireless_media_time;
+ __le32 status; /* TX status */
+} __packed;
+
+
+/*
+ * 4965 uCode updates these Tx attempt count values in host DRAM.
+ * Used for managing Tx retries when expecting block-acks.
+ * Driver should set these fields to 0.
+ */
+struct iwl_dram_scratch {
+ u8 try_cnt; /* Tx attempts */
+ u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
+ __le16 reserved;
+} __packed;
+
+struct iwl_tx_cmd {
+ /*
+ * MPDU byte count:
+ * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range: 14-2342 bytes.
+ */
+ __le16 len;
+
+ /*
+ * MPDU or MSDU byte count for next frame.
+ * Used for fragmentation and bursting, but not 11n aggregation.
+ * Same as "len", but for next frame. Set to 0 if not applicable.
+ */
+ __le16 next_frame_len;
+
+ __le32 tx_flags; /* TX_CMD_FLG_* */
+
+ /* uCode may modify this field of the Tx command (in host DRAM!).
+ * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
+ struct iwl_dram_scratch scratch;
+
+ /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
+ __le32 rate_n_flags; /* RATE_MCS_* */
+
+ /* Index of destination station in uCode's station table */
+ u8 sta_id;
+
+ /* Type of security encryption: CCM or TKIP */
+ u8 sec_ctl; /* TX_CMD_SEC_* */
+
+ /*
+ * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
+ * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
+ * data frames, this field may be used to selectively reduce initial
+ * rate (via non-0 value) for special frames (e.g. management), while
+ * still supporting rate scaling for all frames.
+ */
+ u8 initial_rate_index;
+ u8 reserved;
+ u8 key[16];
+ __le16 next_frame_flags;
+ __le16 reserved2;
+ union {
+ __le32 life_time;
+ __le32 attempt;
+ } stop_time;
+
+ /* Host DRAM physical address pointer to "scratch" in this command.
+ * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
+ __le32 dram_lsb_ptr;
+ u8 dram_msb_ptr;
+
+ u8 rts_retry_limit; /*byte 50 */
+ u8 data_retry_limit; /*byte 51 */
+ u8 tid_tspec;
+ union {
+ __le16 pm_frame_timeout;
+ __le16 attempt_duration;
+ } timeout;
+
+ /*
+ * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+ * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+ */
+ __le16 driver_txop;
+
+ /*
+ * MAC header goes here, followed by 2 bytes padding if MAC header
+ * length is 26 or 30 bytes, followed by payload data
+ */
+ u8 payload[0];
+ struct ieee80211_hdr hdr[0];
+} __packed;
+
+/* TX command response is sent after *3945* transmission attempts.
+ *
+ * NOTES:
+ *
+ * TX_STATUS_FAIL_NEXT_FRAG
+ *
+ * If the fragment flag in the MAC header for the frame being transmitted
+ * is set and there is insufficient time to transmit the next frame, the
+ * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
+ *
+ * TX_STATUS_FIFO_UNDERRUN
+ *
+ * Indicates the host did not provide bytes to the FIFO fast enough while
+ * a TX was in progress.
+ *
+ * TX_STATUS_FAIL_MGMNT_ABORT
+ *
+ * This status is only possible if the ABORT ON MGMT RX parameter was
+ * set to true with the TX command.
+ *
+ * If the MSB of the status parameter is set then an abort sequence is
+ * required. This sequence consists of the host activating the TX Abort
+ * control line, and then waiting for the TX Abort command response. This
+ * indicates that the device is no longer in a transmit state, and that the
+ * command FIFO has been cleared. The host must then deactivate the TX Abort
+ * control line. Receiving is still allowed in this case.
+ */
+enum {
+ TX_3945_STATUS_SUCCESS = 0x01,
+ TX_3945_STATUS_DIRECT_DONE = 0x02,
+ TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
+ TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
+ TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+ TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
+ TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
+ TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+ TX_3945_STATUS_FAIL_DEST_PS = 0x88,
+ TX_3945_STATUS_FAIL_ABORTED = 0x89,
+ TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
+ TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
+ TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+ TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
+ TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
+ TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+ TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
+ TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+/*
+ * TX command response is sent after *4965* transmission attempts.
+ *
+ * Both postpone and abort statuses are expected behavior from uCode; no
+ * special action is required from the driver, except for RFKILL_FLUSH,
+ * which requires a tx flush host command to flush all tx frames in the queues.
+ */
+enum {
+ TX_STATUS_SUCCESS = 0x01,
+ TX_STATUS_DIRECT_DONE = 0x02,
+ /* postpone TX */
+ TX_STATUS_POSTPONE_DELAY = 0x40,
+ TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+ TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+ TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+ /* abort TX */
+ TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
+ TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
+ TX_STATUS_FAIL_LONG_LIMIT = 0x83,
+ TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+ TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+ TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
+ TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+ TX_STATUS_FAIL_DEST_PS = 0x88,
+ TX_STATUS_FAIL_HOST_ABORTED = 0x89,
+ TX_STATUS_FAIL_BT_RETRY = 0x8a,
+ TX_STATUS_FAIL_STA_INVALID = 0x8b,
+ TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+ TX_STATUS_FAIL_TID_DISABLE = 0x8d,
+ TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
+ TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+ TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
+ TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+#define TX_PACKET_MODE_REGULAR 0x0000
+#define TX_PACKET_MODE_BURST_SEQ 0x0100
+#define TX_PACKET_MODE_BURST_FIRST 0x0200
+
+enum {
+ TX_POWER_PA_NOT_ACTIVE = 0x0,
+};
+
+enum {
+ TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
+ TX_STATUS_DELAY_MSK = 0x00000040,
+ TX_STATUS_ABORT_MSK = 0x00000080,
+ TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
+ TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
+ TX_RESERVED = 0x00780000, /* bits 19:22 */
+ TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
+ TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
+};
+
+/* *******************************
+ * TX aggregation status
+ ******************************* */
+
+enum {
+ AGG_TX_STATE_TRANSMITTED = 0x00,
+ AGG_TX_STATE_UNDERRUN_MSK = 0x01,
+ AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
+ AGG_TX_STATE_ABORT_MSK = 0x08,
+ AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
+ AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
+ AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
+ AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
+ AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
+ AGG_TX_STATE_DUMP_TX_MSK = 0x200,
+ AGG_TX_STATE_DELAY_TX_MSK = 0x400
+};
+
+#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */
+#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */
+
+#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
+ AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK)
+
+/* # tx attempts for first frame in aggregation */
+#define AGG_TX_STATE_TRY_CNT_POS 12
+#define AGG_TX_STATE_TRY_CNT_MSK 0xf000
+
+/* Command ID and sequence number of Tx command for this frame */
+#define AGG_TX_STATE_SEQ_NUM_POS 16
+#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
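+/*
+ * Illustrative sketch (assumption, not driver code): pulling the per-frame
+ * fields back out of a 32-bit aggregation status word using the masks and
+ * positions above. The helper names are hypothetical.
+ */
+static inline u16 iwl_agg_status_code(u32 status)
+{
+ return status & AGG_TX_STATUS_MSK; /* AGG_TX_STATE_* bits 0:11 */
+}
+
+static inline u8 iwl_agg_status_try_cnt(u32 status)
+{
+ return (status & AGG_TX_STATE_TRY_CNT_MSK) >> AGG_TX_STATE_TRY_CNT_POS;
+}
+
+static inline u16 iwl_agg_status_seq_num(u32 status)
+{
+ return (status & AGG_TX_STATE_SEQ_NUM_MSK) >> AGG_TX_STATE_SEQ_NUM_POS;
+}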
+
+/*
+ * REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1) No aggregation (frame_count == 1). This reports Tx results for
+ * a single frame. Multiple attempts, at various bit rates, may have
+ * been made for this frame.
+ *
+ * 2) Aggregation (frame_count > 1). This reports Tx results for
+ * 2 or more frames that used block-acknowledge. All frames were
+ * transmitted at same rate. Rate scaling may have been used if first
+ * frame in this new agg block failed in previous agg block(s).
+ *
+ * Note that, for aggregation, ACK (block-ack) status is not delivered here;
+ * block-ack has not been received by the time the 4965 device records
+ * this status.
+ * This status relates to reasons the tx might have been blocked or aborted
+ * within the sending station (this 4965 device), rather than whether it was
+ * received successfully by the destination station.
+ */
+struct agg_tx_status {
+ __le16 status;
+ __le16 sequence;
+} __packed;
+
+struct iwl4965_tx_resp {
+ u8 frame_count; /* 1 no aggregation, >1 aggregation */
+ u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
+ u8 failure_rts; /* # failures due to unsuccessful RTS */
+ u8 failure_frame; /* # failures due to no ACK (unused for agg) */
+
+ /* For non-agg: Rate at which frame was successful.
+ * For agg: Rate at which all frames were transmitted. */
+ __le32 rate_n_flags; /* RATE_MCS_* */
+
+ /* For non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * For agg: RTS + CTS + aggregation tx time + block-ack time. */
+ __le16 wireless_media_time; /* uSecs */
+
+ __le16 reserved;
+ __le32 pa_power1; /* RF power amplifier measurement (not used) */
+ __le32 pa_power2;
+
+ /*
+ * For non-agg: frame status TX_STATUS_*
+ * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status
+ * fields follow this one, up to frame_count.
+ * Bit fields:
+ * 11- 0: AGG_TX_STATE_* status code
+ * 15-12: Retry count for 1st frame in aggregation (retries
+ * occur if tx failed for this frame when it was a
+ * member of a previous aggregation block). If rate
+ * scaling is used, retry count indicates the rate
+ * table entry used for all frames in the new agg.
+ * 31-16: Sequence # for this frame's Tx cmd (not SSN!)
+ */
+ union {
+ __le32 status;
+ struct agg_tx_status agg_status[0]; /* for each agg frame */
+ } u;
+} __packed;
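+/*
+ * Illustrative sketch (assumption, not driver code): for the non-aggregation
+ * case (frame_count == 1), the union holds a single 32-bit status word whose
+ * low byte is one of the TX_STATUS_* codes above. The helper name is
+ * hypothetical.
+ */
+static inline bool iwl4965_tx_resp_success(struct iwl4965_tx_resp *resp)
+{
+ u32 status = le32_to_cpu(resp->u.status);
+
+ return (status & TX_STATUS_MSK) == TX_STATUS_SUCCESS ||
+        (status & TX_STATUS_MSK) == TX_STATUS_DIRECT_DONE;
+}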
+
+/*
+ * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
+ *
+ * Reports Block-Acknowledge from recipient station
+ */
+struct iwl_compressed_ba_resp {
+ __le32 sta_addr_lo32;
+ __le16 sta_addr_hi16;
+ __le16 reserved;
+
+ /* Index of recipient (BA-sending) station in uCode's station table */
+ u8 sta_id;
+ u8 tid;
+ __le16 seq_ctl;
+ __le64 bitmap;
+ __le16 scd_flow;
+ __le16 scd_ssn;
+} __packed;
+
+/*
+ * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
+ *
+ * See details under "TXPOWER" in iwl-4965-hw.h.
+ */
+
+struct iwl3945_txpowertable_cmd {
+ u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
+ u8 reserved;
+ __le16 channel;
+ struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+} __packed;
+
+struct iwl4965_txpowertable_cmd {
+ u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
+ u8 reserved;
+ __le16 channel;
+ struct iwl4965_tx_power_db tx_power;
+} __packed;
+
+
+/**
+ * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
+ *
+ * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
+ *
+ * NOTE: The table of rates passed to the uCode via the
+ * RATE_SCALE command sets up the corresponding order of
+ * rates used for all related commands, including rate
+ * masks, etc.
+ *
+ * For example, if you set 9MB (PLCP 0x0f) as the first
+ * rate in the rate table, the bit mask for that rate
+ * when passed through ofdm_basic_rates on the REPLY_RXON
+ * command would be bit 0 (1 << 0)
+ */
+struct iwl3945_rate_scaling_info {
+ __le16 rate_n_flags;
+ u8 try_cnt;
+ u8 next_rate_index;
+} __packed;
+
+struct iwl3945_rate_scaling_cmd {
+ u8 table_id;
+ u8 reserved[3];
+ struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
+} __packed;
+
+
+/* RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
+#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
+
+/* # of EDCA prioritized tx fifos */
+#define LINK_QUAL_AC_NUM AC_NUM
+
+/* # entries in rate scale table to support Tx retries */
+#define LINK_QUAL_MAX_RETRY_NUM 16
+
+/* Tx antenna selection values */
+#define LINK_QUAL_ANT_A_MSK (1 << 0)
+#define LINK_QUAL_ANT_B_MSK (1 << 1)
+#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
+
+
+/**
+ * struct iwl_link_qual_general_params
+ *
+ * Used in REPLY_TX_LINK_QUALITY_CMD
+ */
+struct iwl_link_qual_general_params {
+ u8 flags;
+
+ /* No entries at or above this (driver chosen) index contain MIMO */
+ u8 mimo_delimiter;
+
+ /* Best single antenna to use for single stream (legacy, SISO). */
+ u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
+
+ /* Best antennas to use for MIMO (unused for 4965, assumes both). */
+ u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
+
+ /*
+ * If driver needs to use different initial rates for different
+ * EDCA QOS access categories (as implemented by tx fifos 0-3),
+ * this table will set that up, by indicating the indexes in the
+ * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
+ * Otherwise, driver should set all entries to 0.
+ *
+ * Entry usage:
+ * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
+ * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
+ */
+ u8 start_rate_index[LINK_QUAL_AC_NUM];
+} __packed;
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
+#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
+
+#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
+#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
+#define LINK_QUAL_AGG_DISABLE_START_MIN (0)
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (31)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
+
+/**
+ * struct iwl_link_qual_agg_params
+ *
+ * Used in REPLY_TX_LINK_QUALITY_CMD
+ */
+struct iwl_link_qual_agg_params {
+
+ /*
+ * Maximum number of uSec in aggregation.
+ * Default set to 4000 (4 milliseconds) if not configured in .cfg.
+ */
+ __le16 agg_time_limit;
+
+ /*
+ * Number of Tx retries allowed for a frame, before that frame will
+ * no longer be considered for the start of an aggregation sequence
+ * (scheduler will then try to tx it as single frame).
+ * Driver should set this to 3.
+ */
+ u8 agg_dis_start_th;
+
+ /*
+ * Maximum number of frames in aggregation.
+ * 0 = no limit (default). 1 = no aggregation.
+ * Other values = max # frames in aggregation.
+ */
+ u8 agg_frame_cnt_limit;
+
+ __le32 reserved;
+} __packed;
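+/*
+ * Illustrative sketch (assumption, not driver code): populating the
+ * aggregation parameters with the *_DEF values defined above. The helper
+ * name is hypothetical.
+ */
+static inline void
+iwl_set_default_agg_params(struct iwl_link_qual_agg_params *agg)
+{
+ agg->agg_time_limit = cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+ agg->agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ agg->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ agg->reserved = 0;
+}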
+
+/*
+ * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
+ *
+ * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
+ *
+ * Each station in the 4965 device's internal station table has its own table
+ * of 16 Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying
+ * Tx when an ACK is not received. This command replaces the entire table for
+ * one station.
+ *
+ * NOTE: Station must already be in 4965 device's station table.
+ * Use REPLY_ADD_STA.
+ *
+ * The rate scaling procedures described below work well. Of course, other
+ * procedures are possible, and may work better for particular environments.
+ *
+ *
+ * FILLING THE RATE TABLE
+ *
+ * Given a particular initial rate and mode, as determined by the rate
+ * scaling algorithm described below, the Linux driver uses the following
+ * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
+ * Link Quality command:
+ *
+ *
+ * 1) If using High-throughput (HT) (SISO or MIMO) initial rate:
+ * a) Use this same initial rate for first 3 entries.
+ * b) Find next lower available rate using same mode (SISO or MIMO),
+ * use for next 3 entries. If no lower rate available, switch to
+ * legacy mode (no HT40 channel, no MIMO, no short guard interval).
+ * c) If using MIMO, set command's mimo_delimiter to number of entries
+ * using MIMO (3 or 6).
+ * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
+ * no MIMO, no short guard interval), at the next lower bit rate
+ * (e.g. if second HT bit rate was 54, try 48 legacy), and follow
+ * legacy procedure for remaining table entries.
+ *
+ * 2) If using legacy initial rate:
+ * a) Use the initial rate for only one entry.
+ * b) For each following entry, reduce the rate to next lower available
+ * rate, until reaching the lowest available rate.
+ * c) When reducing rate, also switch antenna selection.
+ * d) Once lowest available rate is reached, repeat this rate until
+ * rate table is filled (16 entries), switching antenna each entry.
+ *
+ *
+ * ACCUMULATING HISTORY
+ *
+ * The rate scaling algorithm for 4965 devices, as implemented in Linux driver,
+ * uses two sets of frame Tx success history: One for the current/active
+ * modulation mode, and one for a speculative/search mode that is being
+ * attempted. If the speculative mode turns out to be more effective (i.e.
+ * actual transfer rate is better), then the driver continues to use the
+ * speculative mode as the new current active mode.
+ *
+ * Each history set contains, separately for each possible rate, data for a
+ * sliding window of the 62 most recent tx attempts at that rate. The data
+ * includes a shifting bitmap of success(1)/failure(0), and sums of successful
+ * and attempted frames, from which the driver can additionally calculate a
+ * success ratio (success / attempted) and number of failures
+ * (attempted - success), and control the size of the window (attempted).
+ * The driver uses the bit map to remove successes from the success sum, as
+ * the oldest tx attempts fall out of the window.
+ *
+ * When the 4965 device makes multiple tx attempts for a given frame, each
+ * attempt might be at a different rate, and have different modulation
+ * characteristics (e.g. antenna, fat channel, short guard interval), as set
+ * up in the rate scaling table in the Link Quality command. The driver must
+ * determine which rate table entry was used for each tx attempt, to determine
+ * which rate-specific history to update, and record only those attempts that
+ * match the modulation characteristics of the history set.
+ *
+ * When using block-ack (aggregation), all frames are transmitted at the same
+ * rate, since there is no per-attempt acknowledgment from the destination
+ * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
+ * rate_n_flags field. After receiving a block-ack, the driver can update
+ * history for the entire block all at once.
+ *
+ *
+ * FINDING BEST STARTING RATE:
+ *
+ * When working with a selected initial modulation mode (see below), the
+ * driver attempts to find a best initial rate. The initial rate is the
+ * first entry in the Link Quality command's rate table.
+ *
+ * 1) Calculate actual throughput (success ratio * expected throughput, see
+ * table below) for current initial rate. Do this only if enough frames
+ * have been attempted to make the value meaningful: at least 6 failed
+ * tx attempts, or at least 8 successes. If not enough, don't try rate
+ * scaling yet.
+ *
+ * 2) Find available rates adjacent to current initial rate. Available means:
+ * a) supported by hardware &&
+ * b) supported by association &&
+ * c) within any constraints selected by user
+ *
+ * 3) Gather measured throughputs for adjacent rates. These might not have
+ * enough history to calculate a throughput. That's okay, we might try
+ * using one of them anyway!
+ *
+ * 4) Try decreasing rate if, for current rate:
+ * a) success ratio is < 15% ||
+ * b) lower adjacent rate has better measured throughput ||
+ * c) higher adjacent rate has worse throughput, and lower is unmeasured
+ *
+ * As a sanity check, if decrease was determined above, leave rate
+ * unchanged if:
+ * a) lower rate unavailable
+ * b) success ratio at current rate > 85% (very good)
+ * c) current measured throughput is better than expected throughput
+ * of lower rate (under perfect 100% tx conditions, see table below)
+ *
+ * 5) Try increasing rate if, for current rate:
+ * a) success ratio is < 15% ||
+ * b) both adjacent rates' throughputs are unmeasured (try it!) ||
+ * c) higher adjacent rate has better measured throughput ||
+ * d) lower adjacent rate has worse throughput, and higher is unmeasured
+ *
+ * As a sanity check, if increase was determined above, leave rate
+ * unchanged if:
+ * a) success ratio at current rate < 70%. This is not particularly
+ * good performance; higher rate is sure to have poorer success.
+ *
+ * 6) Re-evaluate the rate after each tx frame. If working with block-
+ * acknowledge, history and statistics may be calculated for the entire
+ * block (including prior history that fits within the history windows),
+ * before re-evaluation.
+ *
+ * FINDING BEST STARTING MODULATION MODE:
+ *
+ * After working with a modulation mode for a "while" (and doing rate scaling),
+ * the driver searches for a new initial mode in an attempt to improve
+ * throughput. The "while" is measured by numbers of attempted frames:
+ *
+ * For legacy mode, search for new mode after:
+ * 480 successful frames, or 160 failed frames
+ * For high-throughput modes (SISO or MIMO), search for new mode after:
+ * 4500 successful frames, or 400 failed frames
+ *
+ * Mode switch possibilities are (3 for each mode):
+ *
+ * For legacy:
+ * Change antenna, try SISO (if HT association), try MIMO (if HT association)
+ * For SISO:
+ * Change antenna, try MIMO, try shortened guard interval (SGI)
+ * For MIMO:
+ * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
+ *
+ * When trying a new mode, use the same bit rate as the old/current mode when
+ * trying antenna switches and shortened guard interval. When switching to
+ * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
+ * for which the expected throughput (under perfect conditions) is about the
+ * same or slightly better than the actual measured throughput delivered by
+ * the old/current mode.
+ *
+ * Actual throughput can be estimated by multiplying the expected throughput
+ * by the success ratio (successful / attempted tx frames). Frame size is
+ * not considered in this calculation; it assumes that frame size will average
+ * out to be fairly consistent over several samples. The following are
+ * metric values for expected throughput assuming 100% success ratio.
+ * Only G band has support for CCK rates:
+ *
+ * RATE:              1    2    5   11    6    9   12   18   24   36   48   54   60
+ *
+ * G:                 7   13   35   58   40   57   72   98  121  154  177  186  186
+ * A:                 0    0    0    0   40   57   72   98  121  154  177  186  186
+ * SISO 20MHz:        0    0    0    0   42   42   76  102  124  159  183  193  202
+ * SGI SISO 20MHz:    0    0    0    0   46   46   82  110  132  168  192  202  211
+ * MIMO 20MHz:        0    0    0    0   74   74  123  155  179  214  236  244  251
+ * SGI MIMO 20MHz:    0    0    0    0   81   81  131  164  188  222  243  251  257
+ * SISO 40MHz:        0    0    0    0   77   77  127  160  184  220  242  250  257
+ * SGI SISO 40MHz:    0    0    0    0   83   83  135  169  193  229  250  257  264
+ * MIMO 40MHz:        0    0    0    0  123  123  182  214  235  264  279  285  289
+ * SGI MIMO 40MHz:    0    0    0    0  131  131  191  222  242  270  284  289  293
+ *
+ * After the new mode has been tried for a short while (minimum of 6 failed
+ * frames or 8 successful frames), compare success ratio and actual throughput
+ * estimate of the new mode with the old. If either is better with the new
+ * mode, continue to use the new mode.
+ *
+ * Continue comparing modes until all 3 possibilities have been tried.
+ * If moving from legacy to HT, try all 3 possibilities from the new HT
+ * mode. After trying all 3, a best mode is found. Continue to use this mode
+ * for the longer "while" described above (e.g. 480 successful frames for
+ * legacy), and then repeat the search process.
+ *
+ */
+struct iwl_link_quality_cmd {
+
+ /* Index of destination/recipient station in uCode's station table */
+ u8 sta_id;
+ u8 reserved1;
+ __le16 control; /* not used */
+ struct iwl_link_qual_general_params general_params;
+ struct iwl_link_qual_agg_params agg_params;
+
+ /*
+ * Rate info; when using rate-scaling, Tx command's initial_rate_index
+ * specifies 1st Tx rate attempted, via index into this table.
+ * The 4965 device works its way through the table when retrying Tx.
+ */
+ struct {
+ __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
+ } rs_table[LINK_QUAL_MAX_RETRY_NUM];
+ __le32 reserved2;
+} __packed;
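+/*
+ * Illustrative sketch (assumption, not driver code): filling rs_table for a
+ * legacy initial rate, following the "FILLING THE RATE TABLE" rules above:
+ * use the initial rate for one entry, step down to the next lower available
+ * rate for each following entry, and repeat the lowest rate until all
+ * LINK_QUAL_MAX_RETRY_NUM entries are filled. Antenna switching between
+ * entries (rule 2c) is omitted for brevity. rates_high_to_low is a
+ * hypothetical table of prebuilt rate_n_flags values, sorted from highest
+ * to lowest rate; the helper name is hypothetical as well.
+ */
+static inline void
+iwl_fill_legacy_rs_table(struct iwl_link_quality_cmd *lq,
+    const __le32 *rates_high_to_low, int num_rates,
+    int initial_index)
+{
+ int i, idx = initial_index;
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+  lq->rs_table[i].rate_n_flags = rates_high_to_low[idx];
+  if (idx < num_rates - 1)
+   idx++; /* step down; stick at the lowest rate */
+ }
+}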
+
+/*
+ * BT configuration enable flags:
+ * bit 0 - 1: BT channel announcement enabled
+ * 0: disable
+ * bit 1 - 1: priority of BT device enabled
+ * 0: disable
+ */
+#define BT_COEX_DISABLE (0x0)
+#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
+#define BT_ENABLE_PRIORITY BIT(1)
+
+#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
+
+#define BT_LEAD_TIME_DEF (0x1E)
+
+#define BT_MAX_KILL_DEF (0x5)
+
+/*
+ * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
+ *
+ * 3945 and 4965 devices support hardware handshake with Bluetooth device on
+ * same platform. Bluetooth device alerts wireless device when it will Tx;
+ * wireless device can delay or kill its own Tx to accommodate.
+ */
+struct iwl_bt_cmd {
+ u8 flags;
+ u8 lead_time;
+ u8 max_kill;
+ u8 reserved;
+ __le32 kill_ack_mask;
+ __le32 kill_cts_mask;
+} __packed;
+
+
+/******************************************************************************
+ * (6)
+ * Spectrum Management (802.11h) Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+/*
+ * Spectrum Management
+ */
+#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \
+ RXON_FILTER_CTL2HOST_MSK | \
+ RXON_FILTER_ACCEPT_GRP_MSK | \
+ RXON_FILTER_DIS_DECRYPT_MSK | \
+ RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
+ RXON_FILTER_ASSOC_MSK | \
+ RXON_FILTER_BCON_AWARE_MSK)
+
+struct iwl_measure_channel {
+ __le32 duration; /* measurement duration in extended beacon
+ * format */
+ u8 channel; /* channel to measure */
+ u8 type; /* see enum iwl_measure_type */
+ __le16 reserved;
+} __packed;
+
+/*
+ * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
+ */
+struct iwl_spectrum_cmd {
+ __le16 len; /* number of bytes starting from token */
+ u8 token; /* token id */
+ u8 id; /* measurement id -- 0 or 1 */
+ u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */
+ u8 periodic; /* 1 = periodic */
+ __le16 path_loss_timeout;
+ __le32 start_time; /* start time in extended beacon format */
+ __le32 reserved2;
+ __le32 flags; /* rxon flags */
+ __le32 filter_flags; /* rxon filter flags */
+ __le16 channel_count; /* minimum 1, maximum 10 */
+ __le16 reserved3;
+ struct iwl_measure_channel channels[10];
+} __packed;
+
+/*
+ * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
+ */
+struct iwl_spectrum_resp {
+ u8 token;
+ u8 id; /* id of the prior command replaced, or 0xff */
+ __le16 status; /* 0 - command will be handled
+ * 1 - cannot handle (conflicts with another
+ * measurement) */
+} __packed;
+
+enum iwl_measurement_state {
+ IWL_MEASUREMENT_START = 0,
+ IWL_MEASUREMENT_STOP = 1,
+};
+
+enum iwl_measurement_status {
+ IWL_MEASUREMENT_OK = 0,
+ IWL_MEASUREMENT_CONCURRENT = 1,
+ IWL_MEASUREMENT_CSA_CONFLICT = 2,
+ IWL_MEASUREMENT_TGH_CONFLICT = 3,
+ /* 4-5 reserved */
+ IWL_MEASUREMENT_STOPPED = 6,
+ IWL_MEASUREMENT_TIMEOUT = 7,
+ IWL_MEASUREMENT_PERIODIC_FAILED = 8,
+};
+
+#define NUM_ELEMENTS_IN_HISTOGRAM 8
+
+struct iwl_measurement_histogram {
+ __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
+ __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
+} __packed;
+
+/* clear channel availability counters */
+struct iwl_measurement_cca_counters {
+ __le32 ofdm;
+ __le32 cck;
+} __packed;
+
+enum iwl_measure_type {
+ IWL_MEASURE_BASIC = (1 << 0),
+ IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
+ IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
+ IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
+ IWL_MEASURE_FRAME = (1 << 4),
+ /* bits 5:6 are reserved */
+ IWL_MEASURE_IDLE = (1 << 7),
+};
+
+/*
+ * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
+ */
+struct iwl_spectrum_notification {
+ u8 id; /* measurement id -- 0 or 1 */
+ u8 token;
+ u8 channel_index; /* index in measurement channel list */
+ u8 state; /* 0 - start, 1 - stop */
+ __le32 start_time; /* lower 32-bits of TSF */
+ u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
+ u8 channel;
+ u8 type; /* see enum iwl_measurement_type */
+ u8 reserved1;
+ /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
+ * valid if applicable for measurement type requested. */
+ __le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */
+ __le32 cca_cck; /* cca fraction time in 44Mhz clock periods */
+ __le32 cca_time; /* channel load time in usecs */
+ u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
+ * unidentified */
+ u8 reserved2[3];
+ struct iwl_measurement_histogram histogram;
+ __le32 stop_time; /* lower 32-bits of TSF */
+ __le32 status; /* see iwl_measurement_status */
+} __packed;
+
+/******************************************************************************
+ * (7)
+ * Power Management Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+/**
+ * struct iwl_powertable_cmd - Power Table Command
+ * @flags: See below:
+ *
+ * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * PM allow:
+ * bit 0 - '0' Driver does not allow power management
+ * '1' Driver allows PM (use rest of parameters)
+ *
+ * uCode sends sleep notifications:
+ * bit 1 - '0' Don't send sleep notification
+ * '1' Send sleep notification (SEND_PM_NOTIFICATION)
+ *
+ * Sleep over DTIM
+ * bit 2 - '0' PM has to wake up every DTIM
+ * '1' PM may sleep over DTIM until the listen interval.
+ *
+ * PCI power managed
+ * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
+ * '1' !(PCI_CFG_LINK_CTRL & 0x1)
+ *
+ * Fast PD
+ * bit 4 - '1' Put radio to sleep when receiving frame for others
+ *
+ * Force sleep Modes
+ * bit 31/30- '00' use both mac/xtal sleeps
+ * '01' force Mac sleep
+ * '10' force xtal sleep
+ * '11' Illegal set
+ *
+ * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
+ * uCode assumes sleep over DTIM is allowed and we don't need to wake up
+ * for every DTIM.
+ */
+#define IWL_POWER_VEC_SIZE 5
+
+#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
+#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0))
+#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1))
+#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
+#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
+#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4))
+#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5))
+#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6))
+#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7))
+
+struct iwl3945_powertable_cmd {
+ __le16 flags;
+ u8 reserved[2];
+ __le32 rx_data_timeout;
+ __le32 tx_data_timeout;
+ __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+} __packed;
+
+struct iwl_powertable_cmd {
+ __le16 flags;
+ u8 keep_alive_seconds; /* 3945 reserved */
+ u8 debug_flags; /* 3945 reserved */
+ __le32 rx_data_timeout;
+ __le32 tx_data_timeout;
+ __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+ __le32 keep_alive_beacons;
+} __packed;
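+/*
+ * Illustrative sketch (assumption, not driver code): assembling the flags
+ * word of the power table command from the bits above, for a driver that
+ * wants power save, optionally with sleep-over-DTIM and PCI power
+ * management. The helper name is hypothetical.
+ */
+static inline __le16 iwl_build_power_flags(bool allow_sleep,
+        bool sleep_over_dtim,
+        bool pci_pm)
+{
+ __le16 flags = 0;
+
+ if (allow_sleep)
+  flags |= IWL_POWER_DRIVER_ALLOW_SLEEP_MSK;
+ if (sleep_over_dtim)
+  flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
+ if (pci_pm)
+  flags |= IWL_POWER_PCI_PM_MSK;
+
+ return flags;
+}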
+
+/*
+ * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
+ * all devices identical.
+ */
+struct iwl_sleep_notification {
+ u8 pm_sleep_mode;
+ u8 pm_wakeup_src;
+ __le16 reserved;
+ __le32 sleep_time;
+ __le32 tsf_low;
+ __le32 bcon_timer;
+} __packed;
+
+/* Sleep states. all devices identical. */
+enum {
+ IWL_PM_NO_SLEEP = 0,
+ IWL_PM_SLP_MAC = 1,
+ IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
+ IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
+ IWL_PM_SLP_PHY = 4,
+ IWL_PM_SLP_REPENT = 5,
+ IWL_PM_WAKEUP_BY_TIMER = 6,
+ IWL_PM_WAKEUP_BY_DRIVER = 7,
+ IWL_PM_WAKEUP_BY_RFKILL = 8,
+ /* 3 reserved */
+ IWL_PM_NUM_OF_MODES = 12,
+};
+
+/*
+ * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
+ */
+struct iwl_card_state_notif {
+ __le32 flags;
+} __packed;
+
+#define HW_CARD_DISABLED 0x01
+#define SW_CARD_DISABLED 0x02
+#define CT_CARD_DISABLED 0x04
+#define RXON_CARD_DISABLED 0x10
+
+struct iwl_ct_kill_config {
+ __le32 reserved;
+ __le32 critical_temperature_M;
+ __le32 critical_temperature_R;
+} __packed;
+
+/******************************************************************************
+ * (8)
+ * Scan Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
+#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
+
+/**
+ * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ *
+ * One for each channel in the scan list.
+ * Each channel can independently select:
+ * 1) SSID for directed active scans
+ * 2) Txpower setting (for rate specified within Tx command)
+ * 3) How long to stay on-channel (behavior may be modified by quiet_time,
+ * quiet_plcp_th, good_CRC_th)
+ *
+ * To avoid uCode errors, make sure the following are true (see comments
+ * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * 1) If using passive_dwell (i.e. passive_dwell != 0):
+ * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
+ * 2) quiet_time <= active_dwell
+ * 3) If restricting off-channel time (i.e. max_out_time !=0):
+ * passive_dwell < max_out_time
+ * active_dwell < max_out_time
+ */
+struct iwl3945_scan_channel {
+ /*
+ * type is defined as:
+ * 0:0 1 = active, 0 = passive
+ * 1:4 SSID direct bit map; if a bit is set, then corresponding
+ * SSID IE is transmitted in probe request.
+ * 5:7 reserved
+ */
+ u8 type;
+ u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */
+ struct iwl3945_tx_power tpc;
+ __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
+ __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
+} __packed;
+
+/* set number of direct probes u8 type */
+#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
+
+struct iwl_scan_channel {
+ /*
+ * type is defined as:
+ * 0:0 1 = active, 0 = passive
+ * 1:20 SSID direct bit map; if a bit is set, then corresponding
+ * SSID IE is transmitted in probe request.
+ * 21:31 reserved
+ */
+ __le32 type;
+ __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
+ u8 tx_gain; /* gain for analog radio */
+ u8 dsp_atten; /* gain for DSP */
+ __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
+ __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
+} __packed;
+
+/* set number of direct probes __le32 type */
+#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
+
+/**
+ * struct iwl_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
+ * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 (4) entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ */
+struct iwl_ssid_ie {
+ u8 id;
+ u8 len;
+ u8 ssid[32];
+} __packed;
+
+#define PROBE_OPTION_MAX_3945 4
+#define PROBE_OPTION_MAX 20
+#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
+#define IWL_GOOD_CRC_TH_DISABLED 0
+#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
+#define IWL_MAX_SCAN_SIZE 1024
+#define IWL_MAX_CMD_SIZE 4096
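+/*
+ * Illustrative sketch (assumption, not driver code): building the per-channel
+ * "type" word for an active, directed scan. Bit 0 selects active scanning,
+ * and IWL_SCAN_PROBE_MASK(n) sets the SSID-direct bits 1..n so that the first
+ * n_direct_ssids entries of direct_scan[] are transmitted in the probe
+ * request. The helper name is hypothetical.
+ */
+static inline __le32 iwl_scan_channel_type(bool active, int n_direct_ssids)
+{
+ __le32 type = 0;
+
+ if (active)
+  type |= cpu_to_le32(1); /* bit 0: 1 = active, 0 = passive */
+ if (n_direct_ssids)
+  type |= IWL_SCAN_PROBE_MASK(n_direct_ssids);
+
+ return type;
+}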
+
+/*
+ * REPLY_SCAN_CMD = 0x80 (command)
+ *
+ * The hardware scan command is very powerful; the driver can set it up to
+ * maintain (relatively) normal network traffic while doing a scan in the
+ * background. The max_out_time and suspend_time control the ratio of how
+ * long the device stays on an associated network channel ("service channel")
+ * vs. how long it's away from the service channel, i.e. tuned to other channels
+ * for scanning.
+ *
+ * max_out_time is the max time off-channel (in usec), and suspend_time
+ * is how long (in "extended beacon" format) that the scan is "suspended"
+ * after returning to the service channel. That is, suspend_time is the
+ * time that we stay on the service channel, doing normal work, between
+ * scan segments. The driver may set these parameters differently to support
+ * scanning when associated vs. not associated, and light vs. heavy traffic
+ * loads when associated.
+ *
+ * After receiving this command, the device's scan engine does the following:
+ *
+ * 1) Sends SCAN_START notification to driver
+ * 2) Checks to see if it has time to do scan for one channel
+ * 3) Sends NULL packet, with power-save (PS) bit set to 1,
+ * to tell AP that we're going off-channel
+ * 4) Tunes to first channel in scan list, does active or passive scan
+ * 5) Sends SCAN_RESULT notification to driver
+ * 6) Checks to see if it has time to do scan on *next* channel in list
+ * 7) Repeats 4-6 until it no longer has time to scan the next channel
+ * before max_out_time expires
+ * 8) Returns to service channel
+ * 9) Sends NULL packet with PS=0 to tell AP that we're back
+ * 10) Stays on service channel until suspend_time expires
+ * 11) Repeats entire process 2-10 until list is complete
+ * 12) Sends SCAN_COMPLETE notification
+ *
+ * For fast, efficient scans, the scan command also has support for staying on
+ * a channel for just a short time, if doing active scanning and getting no
+ * responses to the transmitted probe request. This time is controlled by
+ * quiet_time, and the number of received packets below which a channel is
+ * considered "quiet" is controlled by quiet_plcp_threshold.
+ *
+ * For active scanning on channels that have regulatory restrictions against
+ * blindly transmitting, the scan can listen before transmitting, to make sure
+ * that there is already legitimate activity on the channel. If enough
+ * packets are cleanly received on the channel (controlled by good_CRC_th,
+ * typical value 1), the scan engine starts transmitting probe requests.
+ *
+ * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
+ *
+ * To avoid uCode errors, see timing restrictions described under
+ * struct iwl_scan_channel.
+ */
+
+struct iwl3945_scan_cmd {
+ __le16 len;
+ u8 reserved0;
+ u8 channel_count; /* # channels in channel list */
+ __le16 quiet_time; /* dwell only this # millisecs on quiet channel
+ * (only for active scan) */
+ __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
+ __le16 good_CRC_th; /* passive -> active promotion threshold */
+ __le16 reserved1;
+ __le32 max_out_time; /* max usec to be away from associated (service)
+ * channel */
+ __le32 suspend_time; /* pause scan this long (in "extended beacon
+ * format") when returning to service channel:
+ * 3945; 31:24 # beacons, 19:0 additional usec,
+ * 4965; 31:22 # beacons, 21:0 additional usec.
+ */
+ __le32 flags; /* RXON_FLG_* */
+ __le32 filter_flags; /* RXON_FILTER_* */
+
+ /* For active scans (set to all-0s for passive scans).
+ * Does not include payload. Must specify Tx rate; no rate scaling. */
+ struct iwl3945_tx_cmd tx_cmd;
+
+ /* For directed active scans (set to all-0s otherwise) */
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
+
+ /*
+ * Probe request frame, followed by channel list.
+ *
+ * Size of probe request frame is specified by byte count in tx_cmd.
+ * Channel list follows immediately after probe request frame.
+ * Number of channels in list is specified by channel_count.
+ * Each channel in list is of type:
+ *
+ * struct iwl3945_scan_channel channels[0];
+ *
+ * NOTE: Only one band of channels can be scanned per pass. You
+ * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+ * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * before requesting another scan.
+ */
+ u8 data[0];
+} __packed;
+
+struct iwl_scan_cmd {
+ __le16 len;
+ u8 reserved0;
+ u8 channel_count; /* # channels in channel list */
+ __le16 quiet_time; /* dwell only this # millisecs on quiet channel
+ * (only for active scan) */
+ __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */
+ __le16 good_CRC_th; /* passive -> active promotion threshold */
+ __le16 rx_chain; /* RXON_RX_CHAIN_* */
+ __le32 max_out_time; /* max usec to be away from associated (service)
+ * channel */
+ __le32 suspend_time; /* pause scan this long (in "extended beacon
+ * format") when returning to service chnl:
+ * 3945; 31:24 # beacons, 19:0 additional usec,
+ * 4965; 31:22 # beacons, 21:0 additional usec.
+ */
+ __le32 flags; /* RXON_FLG_* */
+ __le32 filter_flags; /* RXON_FILTER_* */
+
+ /* For active scans (set to all-0s for passive scans).
+ * Does not include payload. Must specify Tx rate; no rate scaling. */
+ struct iwl_tx_cmd tx_cmd;
+
+ /* For directed active scans (set to all-0s otherwise) */
+ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+
+ /*
+ * Probe request frame, followed by channel list.
+ *
+ * Size of probe request frame is specified by byte count in tx_cmd.
+ * Channel list follows immediately after probe request frame.
+ * Number of channels in list is specified by channel_count.
+ * Each channel in list is of type:
+ *
+ * struct iwl_scan_channel channels[0];
+ *
+ * NOTE: Only one band of channels can be scanned per pass. You
+ * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+ * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * before requesting another scan.
+ */
+ u8 data[0];
+} __packed;
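+/*
+ * Illustrative sketch (assumption, not driver code): packing suspend_time in
+ * the 4965 "extended beacon" format described above (bits 31:22 hold a whole
+ * number of beacon intervals, bits 21:0 hold the additional usec). The
+ * helper name is hypothetical; 3945 uses a 31:24 / 19:0 split instead.
+ */
+static inline __le32 iwl4965_extended_beacon_time(u32 usec,
+       u32 beacon_interval_usec)
+{
+ u32 beacons = usec / beacon_interval_usec;
+ u32 remainder = usec % beacon_interval_usec;
+
+ return cpu_to_le32((beacons << 22) | (remainder & 0x3FFFFF));
+}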
+
+/* A scan abort is reported via the scan complete notification with abort status. */
+#define CAN_ABORT_STATUS cpu_to_le32(0x1)
+/* complete notification statuses */
+#define ABORT_STATUS 0x2
+
+/*
+ * REPLY_SCAN_CMD = 0x80 (response)
+ */
+struct iwl_scanreq_notification {
+ __le32 status; /* 1: okay, 2: cannot fulfill request */
+} __packed;
+
+/*
+ * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
+ */
+struct iwl_scanstart_notification {
+ __le32 tsf_low;
+ __le32 tsf_high;
+ __le32 beacon_timer;
+ u8 channel;
+ u8 band;
+ u8 reserved[2];
+ __le32 status;
+} __packed;
+
+#define SCAN_OWNER_STATUS 0x1
+#define MEASURE_OWNER_STATUS 0x2
+
+#define IWL_PROBE_STATUS_OK 0
+#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
+/* error statuses combined with TX_FAILED */
+#define IWL_PROBE_STATUS_FAIL_TTL BIT(1)
+#define IWL_PROBE_STATUS_FAIL_BT BIT(2)
+
+#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
+/*
+ * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
+ */
+struct iwl_scanresults_notification {
+ u8 channel;
+ u8 band;
+ u8 probe_status;
+ u8 num_probe_not_sent; /* not enough time to send */
+ __le32 tsf_low;
+ __le32 tsf_high;
+ __le32 statistics[NUMBER_OF_STATISTICS];
+} __packed;
+
+/*
+ * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
+ */
+struct iwl_scancomplete_notification {
+ u8 scanned_channels;
+ u8 status;
+ u8 last_channel;
+ __le32 tsf_low;
+ __le32 tsf_high;
+} __packed;
+
+
+/******************************************************************************
+ * (9)
+ * IBSS/AP Commands and Notifications:
+ *
+ *****************************************************************************/
+
+enum iwl_ibss_manager {
+ IWL_NOT_IBSS_MANAGER = 0,
+ IWL_IBSS_MANAGER = 1,
+};
+
+/*
+ * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
+ */
+
+struct iwl3945_beacon_notif {
+ struct iwl3945_tx_resp beacon_notify_hdr;
+ __le32 low_tsf;
+ __le32 high_tsf;
+ __le32 ibss_mgr_status;
+} __packed;
+
+struct iwl4965_beacon_notif {
+ struct iwl4965_tx_resp beacon_notify_hdr;
+ __le32 low_tsf;
+ __le32 high_tsf;
+ __le32 ibss_mgr_status;
+} __packed;
+
+/*
+ * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
+ */
+
+struct iwl3945_tx_beacon_cmd {
+ struct iwl3945_tx_cmd tx;
+ __le16 tim_idx;
+ u8 tim_size;
+ u8 reserved1;
+ struct ieee80211_hdr frame[0]; /* beacon frame */
+} __packed;
+
+struct iwl_tx_beacon_cmd {
+ struct iwl_tx_cmd tx;
+ __le16 tim_idx;
+ u8 tim_size;
+ u8 reserved1;
+ struct ieee80211_hdr frame[0]; /* beacon frame */
+} __packed;
+
+/******************************************************************************
+ * (10)
+ * Statistics Commands and Notifications:
+ *
+ *****************************************************************************/
+
+#define IWL_TEMP_CONVERT 260
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
+
+/* Used to pass the driver the number of successes and failures per rate */
+struct rate_histogram {
+ union {
+ __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+ __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+ __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+ } success;
+ union {
+ __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+ __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+ __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+ } failed;
+} __packed;
+
+/* statistics command response */
+
+struct iwl39_statistics_rx_phy {
+ __le32 ina_cnt;
+ __le32 fina_cnt;
+ __le32 plcp_err;
+ __le32 crc32_err;
+ __le32 overrun_err;
+ __le32 early_overrun_err;
+ __le32 crc32_good;
+ __le32 false_alarm_cnt;
+ __le32 fina_sync_err_cnt;
+ __le32 sfd_timeout;
+ __le32 fina_timeout;
+ __le32 unresponded_rts;
+ __le32 rxe_frame_limit_overrun;
+ __le32 sent_ack_cnt;
+ __le32 sent_cts_cnt;
+} __packed;
+
+struct iwl39_statistics_rx_non_phy {
+ __le32 bogus_cts; /* CTS received when not expecting CTS */
+ __le32 bogus_ack; /* ACK received when not expecting ACK */
+ __le32 non_bssid_frames; /* number of frames with BSSID that
+ * doesn't belong to the STA BSSID */
+ __le32 filtered_frames; /* count frames that were dumped in the
+ * filtering process */
+ __le32 non_channel_beacons; /* beacons with our bss id but not on
+ * our serving channel */
+} __packed;
+
+struct iwl39_statistics_rx {
+ struct iwl39_statistics_rx_phy ofdm;
+ struct iwl39_statistics_rx_phy cck;
+ struct iwl39_statistics_rx_non_phy general;
+} __packed;
+
+struct iwl39_statistics_tx {
+ __le32 preamble_cnt;
+ __le32 rx_detected_cnt;
+ __le32 bt_prio_defer_cnt;
+ __le32 bt_prio_kill_cnt;
+ __le32 few_bytes_cnt;
+ __le32 cts_timeout;
+ __le32 ack_timeout;
+ __le32 expected_ack_cnt;
+ __le32 actual_ack_cnt;
+} __packed;
+
+struct statistics_dbg {
+ __le32 burst_check;
+ __le32 burst_count;
+ __le32 wait_for_silence_timeout_cnt;
+ __le32 reserved[3];
+} __packed;
+
+struct iwl39_statistics_div {
+ __le32 tx_on_a;
+ __le32 tx_on_b;
+ __le32 exec_time;
+ __le32 probe_time;
+} __packed;
+
+struct iwl39_statistics_general {
+ __le32 temperature;
+ struct statistics_dbg dbg;
+ __le32 sleep_time;
+ __le32 slots_out;
+ __le32 slots_idle;
+ __le32 ttl_timestamp;
+ struct iwl39_statistics_div div;
+} __packed;
+
+struct statistics_rx_phy {
+ __le32 ina_cnt;
+ __le32 fina_cnt;
+ __le32 plcp_err;
+ __le32 crc32_err;
+ __le32 overrun_err;
+ __le32 early_overrun_err;
+ __le32 crc32_good;
+ __le32 false_alarm_cnt;
+ __le32 fina_sync_err_cnt;
+ __le32 sfd_timeout;
+ __le32 fina_timeout;
+ __le32 unresponded_rts;
+ __le32 rxe_frame_limit_overrun;
+ __le32 sent_ack_cnt;
+ __le32 sent_cts_cnt;
+ __le32 sent_ba_rsp_cnt;
+ __le32 dsp_self_kill;
+ __le32 mh_format_err;
+ __le32 re_acq_main_rssi_sum;
+ __le32 reserved3;
+} __packed;
+
+struct statistics_rx_ht_phy {
+ __le32 plcp_err;
+ __le32 overrun_err;
+ __le32 early_overrun_err;
+ __le32 crc32_good;
+ __le32 crc32_err;
+ __le32 mh_format_err;
+ __le32 agg_crc32_good;
+ __le32 agg_mpdu_cnt;
+ __le32 agg_cnt;
+ __le32 unsupport_mcs;
+} __packed;
+
+#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
+
+struct statistics_rx_non_phy {
+ __le32 bogus_cts; /* CTS received when not expecting CTS */
+ __le32 bogus_ack; /* ACK received when not expecting ACK */
+ __le32 non_bssid_frames; /* number of frames with BSSID that
+ * doesn't belong to the STA BSSID */
+ __le32 filtered_frames; /* count frames that were dumped in the
+ * filtering process */
+ __le32 non_channel_beacons; /* beacons with our bss id but not on
+ * our serving channel */
+ __le32 channel_beacons; /* beacons with our bss id and in our
+ * serving channel */
+ __le32 num_missed_bcon; /* number of missed beacons */
+ __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
+ * ADC was in saturation */
+ __le32 ina_detection_search_time;/* total time (in 0.8us) searched
+ * for INA */
+ __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
+ __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
+ __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
+ __le32 interference_data_flag; /* flag for interference data
+ * availability. 1 when data is
+ * available. */
+ __le32 channel_load; /* counts RX Enable time in uSec */
+ __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
+ * and CCK) counter */
+ __le32 beacon_rssi_a;
+ __le32 beacon_rssi_b;
+ __le32 beacon_rssi_c;
+ __le32 beacon_energy_a;
+ __le32 beacon_energy_b;
+ __le32 beacon_energy_c;
+} __packed;
+
+struct statistics_rx {
+ struct statistics_rx_phy ofdm;
+ struct statistics_rx_phy cck;
+ struct statistics_rx_non_phy general;
+ struct statistics_rx_ht_phy ofdm_ht;
+} __packed;
+
+/**
+ * struct statistics_tx_power - current tx power
+ *
+ * @ant_a: current tx power on chain a in 1/2 dB step
+ * @ant_b: current tx power on chain b in 1/2 dB step
+ * @ant_c: current tx power on chain c in 1/2 dB step
+ */
+struct statistics_tx_power {
+ u8 ant_a;
+ u8 ant_b;
+ u8 ant_c;
+ u8 reserved;
+} __packed;
+
+struct statistics_tx_non_phy_agg {
+ __le32 ba_timeout;
+ __le32 ba_reschedule_frames;
+ __le32 scd_query_agg_frame_cnt;
+ __le32 scd_query_no_agg;
+ __le32 scd_query_agg;
+ __le32 scd_query_mismatch;
+ __le32 frame_not_ready;
+ __le32 underrun;
+ __le32 bt_prio_kill;
+ __le32 rx_ba_rsp_cnt;
+} __packed;
+
+struct statistics_tx {
+ __le32 preamble_cnt;
+ __le32 rx_detected_cnt;
+ __le32 bt_prio_defer_cnt;
+ __le32 bt_prio_kill_cnt;
+ __le32 few_bytes_cnt;
+ __le32 cts_timeout;
+ __le32 ack_timeout;
+ __le32 expected_ack_cnt;
+ __le32 actual_ack_cnt;
+ __le32 dump_msdu_cnt;
+ __le32 burst_abort_next_frame_mismatch_cnt;
+ __le32 burst_abort_missing_next_frame_cnt;
+ __le32 cts_timeout_collision;
+ __le32 ack_or_ba_timeout_collision;
+ struct statistics_tx_non_phy_agg agg;
+
+ __le32 reserved1;
+} __packed;
+
+
+struct statistics_div {
+ __le32 tx_on_a;
+ __le32 tx_on_b;
+ __le32 exec_time;
+ __le32 probe_time;
+ __le32 reserved1;
+ __le32 reserved2;
+} __packed;
+
+struct statistics_general_common {
+ __le32 temperature; /* radio temperature */
+ struct statistics_dbg dbg;
+ __le32 sleep_time;
+ __le32 slots_out;
+ __le32 slots_idle;
+ __le32 ttl_timestamp;
+ struct statistics_div div;
+ __le32 rx_enable_counter;
+ /*
+ * num_of_sos_states:
+ * count the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ __le32 num_of_sos_states;
+} __packed;
+
+struct statistics_general {
+ struct statistics_general_common common;
+ __le32 reserved2;
+ __le32 reserved3;
+} __packed;
+
+#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
+#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
+#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
+
+/*
+ * REPLY_STATISTICS_CMD = 0x9c,
+ * all devices identical.
+ *
+ * This command triggers an immediate response containing uCode statistics.
+ * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
+ *
+ * If the CLEAR_STATS configuration flag is set, uCode will clear its
+ * internal copy of the statistics (counters) after issuing the response.
+ * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
+ *
+ * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
+ * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
+ * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
+ */
+#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
+#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
+struct iwl_statistics_cmd {
+ __le32 configuration_flags; /* IWL_STATS_CONF_* */
+} __packed;
+
+/*
+ * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
+ * REPLY_STATISTICS_CMD 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans. uCode clears statistics
+ * appropriately so that each notification contains statistics for only the
+ * one channel that has just been scanned.
+ */
+#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
+#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
+
+struct iwl3945_notif_statistics {
+ __le32 flag;
+ struct iwl39_statistics_rx rx;
+ struct iwl39_statistics_tx tx;
+ struct iwl39_statistics_general general;
+} __packed;
+
+struct iwl_notif_statistics {
+ __le32 flag;
+ struct statistics_rx rx;
+ struct statistics_tx tx;
+ struct statistics_general general;
+} __packed;
+
+/*
+ * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ *
+ * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
+ * missed beacons, regardless of how many were missed. When the driver
+ * receives the notification, it can find all the beacon information inside
+ * the command: the number of total missed beacons, the number of consecutive
+ * missed beacons, the number of beacons received, and the number of beacons
+ * expected to be received.
+ *
+ * If uCode detects consecutive_missed_beacons > 5, it resets the radio to
+ * bring the radio/PHY back to a working state; this has no relation to when
+ * the driver performs sensitivity calibration.
+ *
+ * The driver should set its own missed_beacon_threshold to decide when to
+ * perform sensitivity calibration based on the number of consecutive missed
+ * beacons, in order to improve overall performance, especially in a noisy
+ * environment.
+ *
+ */
+
+#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
+#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
+#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
+
+struct iwl_missed_beacon_notif {
+ __le32 consecutive_missed_beacons;
+ __le32 total_missed_becons;
+ __le32 num_expected_beacons;
+ __le32 num_recvd_beacons;
+} __packed;
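+
+/*
+ * Illustrative sketch only (helper name made up, not part of the uCode
+ * interface): a driver might gate its sensitivity calibration on this
+ * notification roughly as follows, using one of the thresholds above.
+ */
+static inline bool
+iwl_legacy_too_many_missed_beacons(const struct iwl_missed_beacon_notif *notif,
+ u32 threshold)
+{
+ return le32_to_cpu(notif->consecutive_missed_beacons) > threshold;
+}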
+
+
+/******************************************************************************
+ * (11)
+ * Rx Calibration Commands:
+ *
+ * With the uCode used for open source drivers, most Tx calibration (except
+ * for Tx Power) and most Rx calibration is done by uCode during the
+ * "initialize" phase of uCode boot. Driver must calibrate only:
+ *
+ * 1) Tx power (depends on temperature), described elsewhere
+ * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas)
+ * 3) Receiver sensitivity (to optimize signal detection)
+ *
+ *****************************************************************************/
+
+/**
+ * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
+ *
+ * This command sets up the Rx signal detector for a sensitivity level that
+ * is high enough to lock onto all signals within the associated network,
+ * but low enough to ignore signals that are below a certain threshold, so as
+ * not to have too many "false alarms". False alarms are signals that the
+ * Rx DSP tries to lock onto, but then discards after determining that they
+ * are noise.
+ *
+ * The optimum number of false alarms is between 5 and 50 per 200 TUs
+ * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
+ * time listening, not transmitting). Driver must adjust sensitivity so that
+ * the ratio of actual false alarms to actual Rx time falls within this range.
+ *
+ * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
+ * received beacon. These provide information to the driver to analyze the
+ * sensitivity. Don't analyze statistics that come in from scanning, or any
+ * other non-associated-network source. Pertinent statistics include:
+ *
+ * From "general" statistics (struct statistics_rx_non_phy):
+ *
+ * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
+ * Measure of energy of desired signal. Used for establishing a level
+ * below which the device does not detect signals.
+ *
+ * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
+ * Measure of background noise in silent period after beacon.
+ *
+ * channel_load
+ * uSecs of actual Rx time during beacon period (varies according to
+ * how much time was spent transmitting).
+ *
+ * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
+ *
+ * false_alarm_cnt
+ * Signal locks abandoned early (before phy-level header).
+ *
+ * plcp_err
+ * Signal locks abandoned late (during phy-level header).
+ *
+ * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from
+ * beacon to beacon, i.e. each value is an accumulation of all errors
+ * before and including the latest beacon. Values will wrap around to 0
+ * after counting up to 2^32 - 1. Driver must differentiate vs.
+ * previous beacon's values to determine # false alarms in the current
+ * beacon period.
+ *
+ * Total number of false alarms = false_alarms + plcp_errs
+ *
+ * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
+ * (notice that the start points for OFDM are at or close to settings for
+ * maximum sensitivity):
+ *
+ * START / MIN / MAX
+ * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
+ * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
+ * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
+ * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
+ *
+ * If actual rate of OFDM false alarms (+ plcp_errors) is too high
+ * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
+ * by *adding* 1 to all 4 of the table entries above, up to the max for
+ * each entry. Conversely, if false alarm rate is too low (less than 5
+ * for each 204.8 msecs listening), *subtract* 1 from each entry to
+ * increase sensitivity.
+ *
+ * For CCK sensitivity, keep track of the following:
+ *
+ * 1). 20-beacon history of maximum background noise, indicated by
+ * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
+ * 3 receivers. For any given beacon, the "silence reference" is
+ * the maximum of last 60 samples (20 beacons * 3 receivers).
+ *
+ * 2). 10-beacon history of strongest signal level, as indicated
+ * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
+ * i.e. the strength of the signal through the best receiver at the
+ * moment. These measurements are "upside down", with lower values
+ * for stronger signals, so max energy will be *minimum* value.
+ *
+ * Then for any given beacon, the driver must determine the *weakest*
+ * of the strongest signals; this is the minimum level that needs to be
+ * successfully detected, when using the best receiver at the moment.
+ * "Max cck energy" is the maximum (higher value means lower energy!)
+ * of the last 10 minima. Once this is determined, driver must add
+ * a little margin by adding "6" to it.
+ *
+ * 3). Number of consecutive beacon periods with too few false alarms.
+ * Reset this to 0 at the first beacon period that falls within the
+ * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
+ *
+ * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
+ * (notice that the start points for CCK are at maximum sensitivity):
+ *
+ * START / MIN / MAX
+ * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
+ * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
+ * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
+ *
+ * If actual rate of CCK false alarms (+ plcp_errors) is too high
+ * (greater than 50 for each 204.8 msecs listening), method for reducing
+ * sensitivity is:
+ *
+ * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ * up to max 400.
+ *
+ * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
+ * sensitivity has been reduced a significant amount; bring it up to
+ * a moderate 161. Otherwise, *add* 3, up to max 200.
+ *
+ * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
+ * sensitivity has been reduced only a moderate or small amount;
+ * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
+ * down to min 0. Otherwise (if gain has been significantly reduced),
+ * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
+ *
+ * b) Save a snapshot of the "silence reference".
+ *
+ * If actual rate of CCK false alarms (+ plcp_errors) is too low
+ * (less than 5 for each 204.8 msecs listening), method for increasing
+ * sensitivity is used only if:
+ *
+ * 1a) Previous beacon did not have too many false alarms
+ * 1b) AND difference between previous "silence reference" and current
+ * "silence reference" (prev - current) is 2 or more,
+ * OR 2) 100 or more consecutive beacon periods have had rate of
+ * less than 5 false alarms per 204.8 milliseconds rx time.
+ *
+ * Method for increasing sensitivity:
+ *
+ * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
+ * down to min 125.
+ *
+ * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ * down to min 200.
+ *
+ * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
+ *
+ * If actual rate of CCK false alarms (+ plcp_errors) is within good range
+ * (between 5 and 50 for each 204.8 msecs listening):
+ *
+ * 1) Save a snapshot of the silence reference.
+ *
+ * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
+ * give some extra margin to energy threshold by *subtracting* 8
+ * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
+ *
+ * For all cases (too few, too many, good range), make sure that the CCK
+ * detection threshold (energy) is below the energy level for robust
+ * detection over the past 10 beacon periods, the "Max cck energy".
+ * Lower values mean higher energy; this means making sure that the value
+ * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
+ *
+ */
+
+/*
+ * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
+ */
+#define HD_TABLE_SIZE (11) /* number of entries */
+#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
+#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
+#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
+#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
+#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
+
+/* Control field in struct iwl_sensitivity_cmd */
+#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
+#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
+
+/**
+ * struct iwl_sensitivity_cmd
+ * @control: (1) updates working table, (0) updates default table
+ * @table: energy threshold values, use HD_* as index into table
+ *
+ * Always use "1" in "control" to update uCode's working table and DSP.
+ */
+struct iwl_sensitivity_cmd {
+ __le16 control; /* always use "1" */
+ __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
+} __packed;
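+
+/*
+ * Illustrative sketch only (the helper below is not part of the uCode
+ * interface; its name is made up): stepping one table entry by the
+ * documented amount (+/-1 for the OFDM entries, +/-3 for the CCK
+ * auto-corr entries), clamped to that entry's documented MIN/MAX.
+ * E.g. on too many OFDM false alarms:
+ * iwl_legacy_sens_step(&cmd, HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX, 1, 85, 120);
+ */
+static inline void iwl_legacy_sens_step(struct iwl_sensitivity_cmd *cmd,
+ int idx, int step, int min, int max)
+{
+ int val = le16_to_cpu(cmd->table[idx]) + step;
+
+ if (val < min)
+  val = min;
+ else if (val > max)
+  val = max;
+
+ cmd->table[idx] = cpu_to_le16(val);
+}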
+
+
+/**
+ * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
+ *
+ * This command sets the relative gains of the 4965 device's 3 radio
+ * receiver chains.
+ *
+ * After the first association, driver should accumulate signal and noise
+ * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
+ * beacons from the associated network (don't collect statistics that come
+ * in from scanning, or any other non-network source).
+ *
+ * DISCONNECTED ANTENNA:
+ *
+ * Driver should determine which antennas are actually connected, by comparing
+ * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
+ * following values over 20 beacons, one accumulator for each of the chains
+ * a/b/c, from struct statistics_rx_non_phy:
+ *
+ * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
+ *
+ * Find the strongest signal from among a/b/c. Compare the other two to the
+ * strongest. If any signal is more than 15 dB (times 20, unless you
+ * divide the accumulated values by 20) below the strongest, the driver
+ * considers that antenna to be disconnected, and should not try to use that
+ * antenna/chain for Rx or Tx. If both A and B seem to be disconnected,
+ * driver should declare the stronger one as connected, and attempt to use it
+ * (A and B are the only 2 Tx chains!).
+ *
+ *
+ * RX BALANCE:
+ *
+ * Driver should balance the 3 receivers (but just the ones that are connected
+ * to antennas, see above) for gain, by comparing the average signal levels
+ * detected during the silence after each beacon (background noise).
+ * Accumulate (add) the following values over 20 beacons, one accumulator for
+ * each of the chains a/b/c, from struct statistics_rx_non_phy:
+ *
+ * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
+ *
+ * Find the weakest background noise level from among a/b/c. This Rx chain
+ * will be the reference, with 0 gain adjustment. Attenuate other channels by
+ * finding noise difference:
+ *
+ * (accum_noise[i] - accum_noise[reference]) / 30
+ *
+ * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
+ * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
+ * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
+ * and set bit 2 to indicate "reduce gain". The value for the reference
+ * (weakest) chain should be "0".
+ *
+ * diff_gain_[abc] bit fields:
+ * 2: (1) reduce gain, (0) increase gain
+ * 1-0: amount of gain, units of 1.5 dB
+ */
+
+/* Phy calibration command for series */
+/* The default calibrate table size if not specified by firmware */
+#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
+enum {
+ IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
+ IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
+};
+
+#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
+
+struct iwl_calib_hdr {
+ u8 op_code;
+ u8 first_group;
+ u8 groups_num;
+ u8 data_valid;
+} __packed;
+
+/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
+struct iwl_calib_diff_gain_cmd {
+ struct iwl_calib_hdr hdr;
+ s8 diff_gain_a; /* see above */
+ s8 diff_gain_b;
+ s8 diff_gain_c;
+ u8 reserved1;
+} __packed;
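+
+/*
+ * Illustrative sketches only (these helpers are made up, not part of the
+ * uCode interface): possible driver-side arithmetic for the two Rx
+ * calibrations described above, operating on 20-beacon accumulators.
+ *
+ * RX BALANCE: encode one diff_gain_[abc] value. "accum_ref" is the chain
+ * with the weakest accumulated silence noise and gets 0; bit 2 means
+ * "reduce gain", bits 1-0 carry the amount in 1.5 dB units (0-3).
+ */
+static inline s8 iwl_legacy_calc_diff_gain(u32 accum_noise, u32 accum_ref)
+{
+ u32 delta = (accum_noise - accum_ref) / 30;
+
+ if (delta > 3)
+  delta = 3;
+ return delta ? (s8)(delta | (1 << 2)) : 0;
+}
+
+/*
+ * DISCONNECTED ANTENNA: a chain is treated as disconnected when its
+ * accumulated beacon RSSI is more than 15 dB (times the 20 samples)
+ * below the strongest chain's accumulator.
+ */
+static inline bool iwl_legacy_chain_disconnected(u32 accum_rssi,
+ u32 accum_strongest)
+{
+ return accum_strongest - accum_rssi > 15 * 20;
+}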
+
+/******************************************************************************
+ * (12)
+ * Miscellaneous Commands:
+ *
+ *****************************************************************************/
+
+/*
+ * LEDs Command & Response
+ * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
+ *
+ * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
+ * this command turns it on or off, or sets up a periodic blinking cycle.
+ */
+struct iwl_led_cmd {
+ __le32 interval; /* "interval" in uSec */
+ u8 id; /* 1: Activity, 2: Link, 3: Tech */
+ u8 off; /* # intervals off while blinking;
+ * "0", with >0 "on" value, turns LED on */
+ u8 on; /* # intervals on while blinking;
+ * "0", regardless of "off", turns LED off */
+ u8 reserved;
+} __packed;
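+
+/*
+ * Example (illustrative helper, not part of the interface): filling a
+ * REPLY_LEDS_CMD payload that turns one LED solid on, i.e. "on" non-zero
+ * with "off" zero, as described in the field comments above.
+ */
+static inline void iwl_legacy_led_fill_solid_on(struct iwl_led_cmd *cmd, u8 id)
+{
+ cmd->interval = cpu_to_le32(1000); /* uSec; value illustrative */
+ cmd->id = id;   /* 1: Activity, 2: Link, 3: Tech */
+ cmd->off = 0;   /* 0 with non-zero "on" => solid on */
+ cmd->on = 1;
+ cmd->reserved = 0;
+}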
+
+
+/******************************************************************************
+ * (13)
+ * Union of all expected notifications/responses:
+ *
+ *****************************************************************************/
+
+struct iwl_rx_packet {
+ /*
+ * The first 4 bytes of the RX frame header contain both the RX frame
+ * size and some flags.
+ * Bit fields:
+ * 31: flag flush RB request
+ * 30: flag ignore TC (terminal counter) request
+ * 29: flag fast IRQ request
+ * 28-14: Reserved
+ * 13-00: RX frame size
+ */
+ __le32 len_n_flags;
+ struct iwl_cmd_header hdr;
+ union {
+ struct iwl3945_rx_frame rx_frame;
+ struct iwl3945_tx_resp tx_resp;
+ struct iwl3945_beacon_notif beacon_status;
+
+ struct iwl_alive_resp alive_frame;
+ struct iwl_spectrum_notification spectrum_notif;
+ struct iwl_csa_notification csa_notif;
+ struct iwl_error_resp err_resp;
+ struct iwl_card_state_notif card_state_notif;
+ struct iwl_add_sta_resp add_sta;
+ struct iwl_rem_sta_resp rem_sta;
+ struct iwl_sleep_notification sleep_notif;
+ struct iwl_spectrum_resp spectrum;
+ struct iwl_notif_statistics stats;
+ struct iwl_compressed_ba_resp compressed_ba;
+ struct iwl_missed_beacon_notif missed_beacon;
+ __le32 status;
+ u8 raw[0];
+ } u;
+} __packed;
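+
+/*
+ * Illustrative sketch (helper name made up): per the bit layout documented
+ * above, bits 13-0 of len_n_flags carry the RX frame size, so it can be
+ * recovered with a simple mask.
+ */
+static inline u32 iwl_legacy_rx_packet_len(const struct iwl_rx_packet *pkt)
+{
+ return le32_to_cpu(pkt->len_n_flags) & 0x3fff; /* bits 13-00 */
+}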
+
+#endif /* __iwl_legacy_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
new file mode 100644
index 000000000000..d418b647be80
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -0,0 +1,2674 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-power.h"
+#include "iwl-sta.h"
+#include "iwl-helpers.h"
+
+
+MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
+MODULE_VERSION(IWLWIFI_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/*
+ * If bt_coex_active is true, uCode will do kill/defer every time the
+ * priority line is asserted (BT is sending signals on the priority line
+ * in the PCIx).
+ * If bt_coex_active is false, uCode will ignore BT activity and perform
+ * normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to a
+ * WiFi/BT co-existence problem. The possible symptoms are:
+ *   - able to scan and find all the available APs
+ *   - not able to associate with any AP
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
+
+u32 iwlegacy_debug_level;
+EXPORT_SYMBOL(iwlegacy_debug_level);
+
+const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+EXPORT_SYMBOL(iwlegacy_bcast_addr);
+
+
+/* This function both allocates and initializes hw and priv. */
+struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
+{
+ struct iwl_priv *priv;
+ /* mac80211 allocates memory for this device instance, including
+ * space for this driver's private structure */
+ struct ieee80211_hw *hw;
+
+ hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
+ cfg->ops->ieee80211_ops);
+ if (hw == NULL) {
+ pr_err("%s: Can not allocate network device\n",
+ cfg->name);
+ goto out;
+ }
+
+ priv = hw->priv;
+ priv->hw = hw;
+
+out:
+ return hw;
+}
+EXPORT_SYMBOL(iwl_legacy_alloc_all);
+
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band)
+{
+ u16 max_bit_rate = 0;
+ u8 rx_chains_num = priv->hw_params.rx_chains_num;
+ u8 tx_chains_num = priv->hw_params.tx_chains_num;
+
+ ht_info->cap = 0;
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+
+ ht_info->ht_supported = true;
+
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+ max_bit_rate = MAX_BIT_RATE_20_MHZ;
+ if (priv->hw_params.ht40_channel & BIT(band)) {
+ ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+ ht_info->mcs.rx_mask[4] = 0x01;
+ max_bit_rate = MAX_BIT_RATE_40_MHZ;
+ }
+
+ if (priv->cfg->mod_params->amsdu_size_8K)
+ ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
+ ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
+
+ ht_info->mcs.rx_mask[0] = 0xFF;
+ if (rx_chains_num >= 2)
+ ht_info->mcs.rx_mask[1] = 0xFF;
+ if (rx_chains_num >= 3)
+ ht_info->mcs.rx_mask[2] = 0xFF;
+
+ /* Highest supported Rx data rate */
+ max_bit_rate *= rx_chains_num;
+ WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+ ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+ /* Tx MCS capabilities */
+ ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (tx_chains_num != rx_chains_num) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+}
+
+/**
+ * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on eeprom
+ */
+int iwl_legacy_init_geos(struct iwl_priv *priv)
+{
+ struct iwl_channel_info *ch;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *channels;
+ struct ieee80211_channel *geo_ch;
+ struct ieee80211_rate *rates;
+ int i = 0;
+
+ if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
+ priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+ IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
+ set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+ return 0;
+ }
+
+ channels = kzalloc(sizeof(struct ieee80211_channel) *
+ priv->channel_count, GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
+ GFP_KERNEL);
+ if (!rates) {
+ kfree(channels);
+ return -ENOMEM;
+ }
+
+ /* 5.2GHz channels start after the 2.4GHz channels */
+ sband = &priv->bands[IEEE80211_BAND_5GHZ];
+ sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
+ /* just OFDM */
+ sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
+ sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
+
+ if (priv->cfg->sku & IWL_SKU_N)
+ iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
+ IEEE80211_BAND_5GHZ);
+
+ sband = &priv->bands[IEEE80211_BAND_2GHZ];
+ sband->channels = channels;
+ /* OFDM & CCK */
+ sband->bitrates = rates;
+ sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
+
+ if (priv->cfg->sku & IWL_SKU_N)
+ iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
+ IEEE80211_BAND_2GHZ);
+
+ priv->ieee_channels = channels;
+ priv->ieee_rates = rates;
+
+ for (i = 0; i < priv->channel_count; i++) {
+ ch = &priv->channel_info[i];
+
+ if (!iwl_legacy_is_channel_valid(ch))
+ continue;
+
+ if (iwl_legacy_is_channel_a_band(ch))
+ sband = &priv->bands[IEEE80211_BAND_5GHZ];
+ else
+ sband = &priv->bands[IEEE80211_BAND_2GHZ];
+
+ geo_ch = &sband->channels[sband->n_channels++];
+
+ geo_ch->center_freq =
+ ieee80211_channel_to_frequency(ch->channel, ch->band);
+ geo_ch->max_power = ch->max_power_avg;
+ geo_ch->max_antenna_gain = 0xff;
+ geo_ch->hw_value = ch->channel;
+
+ if (iwl_legacy_is_channel_valid(ch)) {
+ if (!(ch->flags & EEPROM_CHANNEL_IBSS))
+ geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+
+ if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
+ geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+ if (ch->flags & EEPROM_CHANNEL_RADAR)
+ geo_ch->flags |= IEEE80211_CHAN_RADAR;
+
+ geo_ch->flags |= ch->ht40_extension_channel;
+
+ if (ch->max_power_avg > priv->tx_power_device_lmt)
+ priv->tx_power_device_lmt = ch->max_power_avg;
+ } else {
+ geo_ch->flags |= IEEE80211_CHAN_DISABLED;
+ }
+
+ IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
+ ch->channel, geo_ch->center_freq,
+ iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
+ geo_ch->flags & IEEE80211_CHAN_DISABLED ?
+ "restricted" : "valid",
+ geo_ch->flags);
+ }
+
+ if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
+ priv->cfg->sku & IWL_SKU_A) {
+ IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
+ "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
+ priv->pci_dev->device,
+ priv->pci_dev->subsystem_device);
+ priv->cfg->sku &= ~IWL_SKU_A;
+ }
+
+ IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
+ priv->bands[IEEE80211_BAND_2GHZ].n_channels,
+ priv->bands[IEEE80211_BAND_5GHZ].n_channels);
+
+ set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_init_geos);
+
+/*
+ * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
+ */
+void iwl_legacy_free_geos(struct iwl_priv *priv)
+{
+ kfree(priv->ieee_channels);
+ kfree(priv->ieee_rates);
+ clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
+}
+EXPORT_SYMBOL(iwl_legacy_free_geos);
+
+static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ u16 channel, u8 extension_chan_offset)
+{
+ const struct iwl_channel_info *ch_info;
+
+ ch_info = iwl_legacy_get_channel_info(priv, band, channel);
+ if (!iwl_legacy_is_channel_valid(ch_info))
+ return false;
+
+ if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
+ return !(ch_info->ht40_extension_channel &
+ IEEE80211_CHAN_NO_HT40PLUS);
+ else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
+ return !(ch_info->ht40_extension_channel &
+ IEEE80211_CHAN_NO_HT40MINUS);
+
+ return false;
+}
+
+bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+ return false;
+
+ /*
+ * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
+ * the bit will not be set in the pure 40 MHz case
+ */
+ if (ht_cap && !ht_cap->ht_supported)
+ return false;
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ if (priv->disable_ht40)
+ return false;
+#endif
+
+ return iwl_legacy_is_channel_extension(priv, priv->band,
+ le16_to_cpu(ctx->staging.channel),
+ ctx->ht.extension_chan_offset);
+}
+EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
+
+static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+ u16 new_val;
+ u16 beacon_factor;
+
+ /*
+ * If mac80211 hasn't given us a beacon interval, program
+ * the default into the device.
+ */
+ if (!beacon_val)
+ return DEFAULT_BEACON_INTERVAL;
+
+ /*
+ * If the beacon interval we obtained from the peer
+ * is too large, we'll have to wake up more often
+ * (and in IBSS case, we'll beacon too much)
+ *
+ * For example, if max_beacon_val is 4096, and the
+ * requested beacon interval is 7000, we'll have to
+ * use 3500 to be able to wake up on the beacons.
+ *
+ * This could badly influence beacon detection stats.
+ */
+
+ beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+ new_val = beacon_val / beacon_factor;
+
+ if (!new_val)
+ new_val = max_beacon_val;
+
+ return new_val;
+}
+
+int
+iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ u64 tsf;
+ s32 interval_tm, rem;
+ struct ieee80211_conf *conf = NULL;
+ u16 beacon_int;
+ struct ieee80211_vif *vif = ctx->vif;
+
+ conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
+
+ lockdep_assert_held(&priv->mutex);
+
+ memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
+
+ ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
+ ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+ beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+ /*
+ * TODO: For IBSS we need to get atim_window from mac80211,
+ * for now just always use 0
+ */
+ ctx->timing.atim_window = 0;
+
+ beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
+ priv->hw_params.max_beacon_itrvl * TIME_UNIT);
+ ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+
+ tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
+ interval_tm = beacon_int * TIME_UNIT;
+ rem = do_div(tsf, interval_tm);
+ ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+ ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
+
+ IWL_DEBUG_ASSOC(priv,
+ "beacon interval %d beacon timer %d beacon tim %d\n",
+ le16_to_cpu(ctx->timing.beacon_interval),
+ le32_to_cpu(ctx->timing.beacon_init_val),
+ le16_to_cpu(ctx->timing.atim_window));
+
+ return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
+ sizeof(ctx->timing), &ctx->timing);
+}
+EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
+
+void
+iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ int hw_decrypt)
+{
+ struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+
+ if (hw_decrypt)
+ rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+ else
+ rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+
+}
+EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
+
+/* validate RXON structure is valid */
+int
+iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+ bool error = false;
+
+ if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+ if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+ IWL_WARN(priv, "check 2.4G: wrong narrow\n");
+ error = true;
+ }
+ if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+ IWL_WARN(priv, "check 2.4G: wrong radar\n");
+ error = true;
+ }
+ } else {
+ if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+ IWL_WARN(priv, "check 5.2G: not short slot!\n");
+ error = true;
+ }
+ if (rxon->flags & RXON_FLG_CCK_MSK) {
+ IWL_WARN(priv, "check 5.2G: CCK!\n");
+ error = true;
+ }
+ }
+ if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+ IWL_WARN(priv, "mac/bssid mcast!\n");
+ error = true;
+ }
+
+ /* make sure basic rates 6Mbps and 1Mbps are supported */
+ if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
+ (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
+ IWL_WARN(priv, "neither 1 nor 6 are basic\n");
+ error = true;
+ }
+
+ if (le16_to_cpu(rxon->assoc_id) > 2007) {
+ IWL_WARN(priv, "aid > 2007\n");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
+ == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+ IWL_WARN(priv, "CCK and short slot\n");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
+ == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+ IWL_WARN(priv, "CCK and auto detect");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
+ RXON_FLG_TGG_PROTECT_MSK)) ==
+ RXON_FLG_TGG_PROTECT_MSK) {
+ IWL_WARN(priv, "TGg but no auto-detect\n");
+ error = true;
+ }
+
+ if (error)
+ IWL_WARN(priv, "Tuning to channel %d\n",
+ le16_to_cpu(rxon->channel));
+
+ if (error) {
+ IWL_ERR(priv, "Invalid RXON\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
+
+/**
+ * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+ * @priv: staging_rxon is compared to active_rxon
+ *
+ * If the RXON structure is changing enough to require a new tune,
+ * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+ * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+ */
+int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
+ const struct iwl_legacy_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond) \
+ if ((cond)) { \
+ IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
+ return 1; \
+ }
+
+#define CHK_NEQ(c1, c2) \
+ if ((c1) != (c2)) { \
+ IWL_DEBUG_INFO(priv, "need full RXON - " \
+ #c1 " != " #c2 " - %d != %d\n", \
+ (c1), (c2)); \
+ return 1; \
+ }
+
+ /* These items are only settable from the full RXON command */
+ CHK(!iwl_legacy_is_associated_ctx(ctx));
+ CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
+ CHK(compare_ether_addr(staging->node_addr, active->node_addr));
+ CHK(compare_ether_addr(staging->wlap_bssid_addr,
+ active->wlap_bssid_addr));
+ CHK_NEQ(staging->dev_type, active->dev_type);
+ CHK_NEQ(staging->channel, active->channel);
+ CHK_NEQ(staging->air_propagation, active->air_propagation);
+ CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+ active->ofdm_ht_single_stream_basic_rates);
+ CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+ active->ofdm_ht_dual_stream_basic_rates);
+ CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+ /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+ * be updated with the RXON_ASSOC command -- however only some
+ * flag transitions are allowed using RXON_ASSOC */
+
+ /* Check if we are switching bands */
+ CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+ active->flags & RXON_FLG_BAND_24G_MSK);
+
+ /* Check if we are switching association toggle */
+ CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+ active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
+
+u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ /*
+ * Assign the lowest rate -- should really get this from
+ * the beacon skb from mac80211.
+ */
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
+ return IWL_RATE_1M_PLCP;
+ else
+ return IWL_RATE_6M_PLCP;
+}
+EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
+
+static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
+ struct iwl_ht_config *ht_conf,
+ struct iwl_rxon_context *ctx)
+{
+ struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+
+ if (!ctx->ht.enabled) {
+ rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
+ RXON_FLG_HT40_PROT_MSK |
+ RXON_FLG_HT_PROT_MSK);
+ return;
+ }
+
+ rxon->flags |= cpu_to_le32(ctx->ht.protection <<
+ RXON_FLG_HT_OPERATING_MODE_POS);
+
+ /* Set up channel bandwidth:
+ * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
+ /* clear the HT channel mode before setting the mode */
+ rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
+ /* pure ht40 */
+ if (ctx->ht.protection ==
+ IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+ /* Note: control channel is opposite of extension channel */
+ switch (ctx->ht.extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &=
+ ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |=
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ }
+ } else {
+ /* Note: control channel is opposite of extension channel */
+ switch (ctx->ht.extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &=
+ ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |=
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+ default:
+ /* channel location only valid if in Mixed mode */
+ IWL_ERR(priv,
+ "invalid extension channel offset\n");
+ break;
+ }
+ }
+ } else {
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
+ }
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+ IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
+ "extension channel offset 0x%x\n",
+ le32_to_cpu(rxon->flags), ctx->ht.protection,
+ ctx->ht.extension_chan_offset);
+}
+
+void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
+{
+ struct iwl_rxon_context *ctx;
+
+ for_each_context(priv, ctx)
+ _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
+}
+EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
+
+/* Return a valid, unused channel for a passive scan to reset the RF */
+u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
+ enum ieee80211_band band)
+{
+ const struct iwl_channel_info *ch_info;
+ int i;
+ u8 channel = 0;
+ u8 min, max;
+ struct iwl_rxon_context *ctx;
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ min = 14;
+ max = priv->channel_count;
+ } else {
+ min = 0;
+ max = 14;
+ }
+
+ for (i = min; i < max; i++) {
+ bool busy = false;
+
+ for_each_context(priv, ctx) {
+ busy = priv->channel_info[i].channel ==
+ le16_to_cpu(ctx->staging.channel);
+ if (busy)
+ break;
+ }
+
+ if (busy)
+ continue;
+
+ channel = priv->channel_info[i].channel;
+ ch_info = iwl_legacy_get_channel_info(priv, band, channel);
+ if (iwl_legacy_is_channel_valid(ch_info))
+ break;
+ }
+
+ return channel;
+}
+EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
+
+/**
+ * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
+ *
+ * NOTE: Does not commit to the hardware; it sets appropriate bit fields
+ * in the staging RXON flag structure based on the ch->band
+ */
+int
+iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+ struct iwl_rxon_context *ctx)
+{
+ enum ieee80211_band band = ch->band;
+ u16 channel = ch->hw_value;
+
+ if ((le16_to_cpu(ctx->staging.channel) == channel) &&
+ (priv->band == band))
+ return 0;
+
+ ctx->staging.channel = cpu_to_le16(channel);
+ if (band == IEEE80211_BAND_5GHZ)
+ ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+ else
+ ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+
+ priv->band = band;
+
+ IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
+
+void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif)
+{
+ if (band == IEEE80211_BAND_5GHZ) {
+ ctx->staging.flags &=
+ ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
+ | RXON_FLG_CCK_MSK);
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ } else {
+ /* Copied from iwl_post_associate() */
+ if (vif && vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+ ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+ ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+ ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
+
+/*
+ * initialize rxon structure with default values from eeprom
+ */
+void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ const struct iwl_channel_info *ch_info;
+
+ memset(&ctx->staging, 0, sizeof(ctx->staging));
+
+ if (!ctx->vif) {
+ ctx->staging.dev_type = ctx->unused_devtype;
+ } else
+ switch (ctx->vif->type) {
+
+ case NL80211_IFTYPE_STATION:
+ ctx->staging.dev_type = ctx->station_devtype;
+ ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+ break;
+
+ case NL80211_IFTYPE_ADHOC:
+ ctx->staging.dev_type = ctx->ibss_devtype;
+ ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+ ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
+ RXON_FILTER_ACCEPT_GRP_MSK;
+ break;
+
+ default:
+ IWL_ERR(priv, "Unsupported interface type %d\n",
+ ctx->vif->type);
+ break;
+ }
+
+#if 0
+ /* TODO: Figure out when short_preamble would be set and cache from
+ * that */
+ if (!hw_to_local(priv->hw)->short_preamble)
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+#endif
+
+ ch_info = iwl_legacy_get_channel_info(priv, priv->band,
+ le16_to_cpu(ctx->active.channel));
+
+ if (!ch_info)
+ ch_info = &priv->channel_info[0];
+
+ ctx->staging.channel = cpu_to_le16(ch_info->channel);
+ priv->band = ch_info->band;
+
+ iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
+
+ ctx->staging.ofdm_basic_rates =
+ (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+ ctx->staging.cck_basic_rates =
+ (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
+
+ /* clear both MIX and PURE40 mode flag */
+ ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
+ RXON_FLG_CHANNEL_MODE_PURE_40);
+ if (ctx->vif)
+ memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
+
+ ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+ ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+}
+EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
+
+void iwl_legacy_set_rate(struct iwl_priv *priv)
+{
+ const struct ieee80211_supported_band *hw = NULL;
+ struct ieee80211_rate *rate;
+ struct iwl_rxon_context *ctx;
+ int i;
+
+ hw = iwl_get_hw_mode(priv, priv->band);
+ if (!hw) {
+ IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
+ return;
+ }
+
+ priv->active_rate = 0;
+
+ for (i = 0; i < hw->n_bitrates; i++) {
+ rate = &(hw->bitrates[i]);
+ if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
+ priv->active_rate |= (1 << rate->hw_value);
+ }
+
+ IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
+
+ for_each_context(priv, ctx) {
+ ctx->staging.cck_basic_rates =
+ (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
+
+ ctx->staging.ofdm_basic_rates =
+ (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_set_rate);
+
+void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
+{
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (priv->switch_rxon.switch_in_progress) {
+ ieee80211_chswitch_done(ctx->vif, is_success);
+ mutex_lock(&priv->mutex);
+ priv->switch_rxon.switch_in_progress = false;
+ mutex_unlock(&priv->mutex);
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_chswitch_done);
+
+void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
+
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
+
+ if (priv->switch_rxon.switch_in_progress) {
+ if (!le32_to_cpu(csa->status) &&
+ (csa->channel == priv->switch_rxon.channel)) {
+ rxon->channel = csa->channel;
+ ctx->staging.channel = csa->channel;
+ IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+ le16_to_cpu(csa->channel));
+ iwl_legacy_chswitch_done(priv, true);
+ } else {
+ IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+ le16_to_cpu(csa->channel));
+ iwl_legacy_chswitch_done(priv, false);
+ }
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_rx_csa);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
+
+ IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
+ iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
+ IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
+ le16_to_cpu(rxon->channel));
+ IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
+ IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
+ le32_to_cpu(rxon->filter_flags));
+ IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
+ IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
+ rxon->ofdm_basic_rates);
+ IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
+ rxon->cck_basic_rates);
+ IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
+ IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
+ IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
+ le16_to_cpu(rxon->assoc_id));
+}
+EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
+#endif
+/**
+ * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
+ */
+void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
+{
+ /* Set the FW error flag -- cleared on iwl_down */
+ set_bit(STATUS_FW_ERROR, &priv->status);
+
+ /* Cancel currently queued command. */
+ clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+
+ IWL_ERR(priv, "Loaded firmware version: %s\n",
+ priv->hw->wiphy->fw_version);
+
+ priv->cfg->ops->lib->dump_nic_error_log(priv);
+ if (priv->cfg->ops->lib->dump_fh)
+ priv->cfg->ops->lib->dump_fh(priv, NULL, false);
+ priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
+ iwl_legacy_print_rx_config_cmd(priv,
+ &priv->contexts[IWL_RXON_CTX_BSS]);
+#endif
+
+ wake_up_interruptible(&priv->wait_command_queue);
+
+ /* Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit */
+ clear_bit(STATUS_READY, &priv->status);
+
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+ IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
+ "Restarting adapter due to uCode error.\n");
+
+ if (priv->cfg->mod_params->restart_fw)
+ queue_work(priv->workqueue, &priv->restart);
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
+
+static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ /* stop device's busmaster DMA activity */
+ iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ if (ret)
+ IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
+
+ IWL_DEBUG_INFO(priv, "stop master\n");
+
+ return ret;
+}
+
+void iwl_legacy_apm_stop(struct iwl_priv *priv)
+{
+ IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
+
+ /* Stop device's DMA activity */
+ iwl_legacy_apm_stop_master(priv);
+
+ /* Reset the entire device */
+ iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(iwl_legacy_apm_stop);
+
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+int iwl_legacy_apm_init(struct iwl_priv *priv)
+{
+ int ret = 0;
+ u16 lctl;
+
+ IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /* Disable L0S exit timer (platform NMI Work/Around) */
+ iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
+ CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ * NOTE: This is a no-op for 3945 (non-existent bit)
+ */
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ /*
+ * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
+ * Check if BIOS (or OS) enabled L1-ASPM on this device.
+ * If so (likely), disable L0S, so device moves directly L0->L1;
+ * costs negligible amount of power savings.
+ * If not (unlikely), enable L0S, so there is at least some
+ * power savings, even without L1.
+ */
+ if (priv->cfg->base_params->set_l0s) {
+ lctl = iwl_legacy_pcie_link_ctl(priv);
+ if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+ PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ /* L1-ASPM enabled; disable(!) L0S */
+ iwl_legacy_set_bit(priv, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
+ } else {
+ /* L1-ASPM disabled; enable(!) L0S */
+ iwl_legacy_clear_bit(priv, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
+ }
+ }
+
+ /* Configure analog phase-lock-loop before activating to D0A */
+ if (priv->cfg->base_params->pll_cfg_val)
+ iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
+ priv->cfg->base_params->pll_cfg_val);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwl_legacy_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ if (ret < 0) {
+ IWL_DEBUG_INFO(priv, "Failed to init the card\n");
+ goto out;
+ }
+
+ /*
+ * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+ * BSM (Bootstrap State Machine) is only in 3945 and 4965.
+ *
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+ * do not disable clocks. This preserves any hardware bits already
+ * set by default in "CLK_CTRL_REG" after reset.
+ */
+ if (priv->cfg->base_params->use_bsm)
+ iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+ else
+ iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
+
+ /* Disable L1-Active */
+ iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_apm_init);
+
+
+int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
+{
+ int ret;
+ s8 prev_tx_power;
+ bool defer;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (priv->tx_power_user_lmt == tx_power && !force)
+ return 0;
+
+ if (!priv->cfg->ops->lib->send_tx_power)
+ return -EOPNOTSUPP;
+
+ if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
+ IWL_WARN(priv,
+ "Requested user TXPOWER %d below lower limit %d.\n",
+ tx_power,
+ IWL4965_TX_POWER_TARGET_POWER_MIN);
+ return -EINVAL;
+ }
+
+ if (tx_power > priv->tx_power_device_lmt) {
+ IWL_WARN(priv,
+ "Requested user TXPOWER %d above upper limit %d.\n",
+ tx_power, priv->tx_power_device_lmt);
+ return -EINVAL;
+ }
+
+ if (!iwl_legacy_is_ready_rf(priv))
+ return -EIO;
+
+ /* scan complete and commit_rxon use the tx_power_next value;
+ * it always needs to be updated for the newest request */
+ priv->tx_power_next = tx_power;
+
+ /* do not set tx power when scanning or channel changing */
+ defer = test_bit(STATUS_SCANNING, &priv->status) ||
+ memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+ if (defer && !force) {
+ IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
+ return 0;
+ }
+
+ prev_tx_power = priv->tx_power_user_lmt;
+ priv->tx_power_user_lmt = tx_power;
+
+ ret = priv->cfg->ops->lib->send_tx_power(priv);
+
+ /* if setting tx_power fails, restore the original tx power */
+ if (ret) {
+ priv->tx_power_user_lmt = prev_tx_power;
+ priv->tx_power_next = prev_tx_power;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_set_tx_power);
+
+void iwl_legacy_send_bt_config(struct iwl_priv *priv)
+{
+ struct iwl_bt_cmd bt_cmd = {
+ .lead_time = BT_LEAD_TIME_DEF,
+ .max_kill = BT_MAX_KILL_DEF,
+ .kill_ack_mask = 0,
+ .kill_cts_mask = 0,
+ };
+
+ if (!bt_coex_active)
+ bt_cmd.flags = BT_COEX_DISABLE;
+ else
+ bt_cmd.flags = BT_COEX_ENABLE;
+
+ IWL_DEBUG_INFO(priv, "BT coex %s\n",
+ (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+ if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ sizeof(struct iwl_bt_cmd), &bt_cmd))
+ IWL_ERR(priv, "failed to send BT Coex Config\n");
+}
+EXPORT_SYMBOL(iwl_legacy_send_bt_config);
+
+int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
+{
+ struct iwl_statistics_cmd statistics_cmd = {
+ .configuration_flags =
+ clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
+ };
+
+ if (flags & CMD_ASYNC)
+ return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
+ sizeof(struct iwl_statistics_cmd),
+ &statistics_cmd, NULL);
+ else
+ return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
+ sizeof(struct iwl_statistics_cmd),
+ &statistics_cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
+
+void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
+ IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
+ sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
+
+void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
+ "notification for %s:\n", len,
+ iwl_legacy_get_cmd_string(pkt->hdr.cmd));
+ iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
+}
+EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
+
+void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
+ "seq 0x%04X ser 0x%08X\n",
+ le32_to_cpu(pkt->u.err_resp.error_type),
+ iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
+ pkt->u.err_resp.cmd_id,
+ le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+ le32_to_cpu(pkt->u.err_resp.error_info));
+}
+EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
+
+void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
+{
+ memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
+}
+
+int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx;
+ unsigned long flags;
+ int q;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!iwl_legacy_is_ready_rf(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+ return -EIO;
+ }
+
+ if (queue >= AC_NUM) {
+ IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
+ return 0;
+ }
+
+ q = AC_NUM - 1 - queue;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ for_each_context(priv, ctx) {
+ ctx->qos_data.def_qos_parm.ac[q].cw_min =
+ cpu_to_le16(params->cw_min);
+ ctx->qos_data.def_qos_parm.ac[q].cw_max =
+ cpu_to_le16(params->cw_max);
+ ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+ ctx->qos_data.def_qos_parm.ac[q].edca_txop =
+ cpu_to_le16((params->txop * 32));
+
+ ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
+
+int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ return priv->ibss_manager == IWL_IBSS_MANAGER;
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
+
+static int
+iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ iwl_legacy_connection_init_rx_config(priv, ctx);
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+ return iwl_legacy_commit_rxon(priv, ctx);
+}
+
+static int iwl_legacy_setup_interface(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ struct ieee80211_vif *vif = ctx->vif;
+ int err;
+
+ lockdep_assert_held(&priv->mutex);
+
+ /*
+ * This variable will be correct only when there's just
+ * a single context, but all code using it is for hardware
+ * that supports only one context.
+ */
+ priv->iw_mode = vif->type;
+
+ ctx->is_active = true;
+
+ err = iwl_legacy_set_mode(priv, ctx);
+ if (err) {
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ return err;
+ }
+
+ return 0;
+}
+
+int
+iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *tmp, *ctx = NULL;
+ int err;
+
+ IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
+ vif->type, vif->addr);
+
+ mutex_lock(&priv->mutex);
+
+ if (!iwl_legacy_is_ready_rf(priv)) {
+ IWL_WARN(priv, "Try to add interface when device not ready\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ for_each_context(priv, tmp) {
+ u32 possible_modes =
+ tmp->interface_modes | tmp->exclusive_interface_modes;
+
+ if (tmp->vif) {
+ /* check if this busy context is exclusive */
+ if (tmp->exclusive_interface_modes &
+ BIT(tmp->vif->type)) {
+ err = -EINVAL;
+ goto out;
+ }
+ continue;
+ }
+
+ if (!(possible_modes & BIT(vif->type)))
+ continue;
+
+ /* found a potentially usable context without an interface */
+ ctx = tmp;
+ break;
+ }
+
+ if (!ctx) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ vif_priv->ctx = ctx;
+ ctx->vif = vif;
+
+ err = iwl_legacy_setup_interface(priv, ctx);
+ if (!err)
+ goto out;
+
+ ctx->vif = NULL;
+ priv->iw_mode = NL80211_IFTYPE_STATION;
+ out:
+ mutex_unlock(&priv->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return err;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
+
+static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ bool mode_change)
+{
+ struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (priv->scan_vif == vif) {
+ iwl_legacy_scan_cancel_timeout(priv, 200);
+ iwl_legacy_force_scan_end(priv);
+ }
+
+ if (!mode_change) {
+ iwl_legacy_set_mode(priv, ctx);
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ }
+}
+
+void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ mutex_lock(&priv->mutex);
+
+ WARN_ON(ctx->vif != vif);
+ ctx->vif = NULL;
+
+ iwl_legacy_teardown_interface(priv, vif, false);
+
+ memset(priv->bssid, 0, ETH_ALEN);
+ mutex_unlock(&priv->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+}
+EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
+
+int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
+{
+ if (!priv->txq)
+ priv->txq = kzalloc(
+ sizeof(struct iwl_tx_queue) *
+ priv->cfg->base_params->num_of_queues,
+ GFP_KERNEL);
+ if (!priv->txq) {
+ IWL_ERR(priv, "Not enough memory for txq\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
+
+void iwl_legacy_txq_mem(struct iwl_priv *priv)
+{
+ kfree(priv->txq);
+ priv->txq = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_txq_mem);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+
+#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
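+
+/*
+ * The TX/RX traffic logs below are flat ring buffers of IWL_TRAFFIC_ENTRIES
+ * slots of IWL_TRAFFIC_ENTRY_SIZE bytes each; tx_traffic_idx/rx_traffic_idx
+ * point at the next slot to fill and wrap modulo IWL_TRAFFIC_ENTRIES.
+ */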
+
+void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
+{
+ priv->tx_traffic_idx = 0;
+ priv->rx_traffic_idx = 0;
+ if (priv->tx_traffic)
+ memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
+ if (priv->rx_traffic)
+ memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
+}
+
+int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
+{
+ u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
+
+ if (iwlegacy_debug_level & IWL_DL_TX) {
+ if (!priv->tx_traffic) {
+ priv->tx_traffic =
+ kzalloc(traffic_size, GFP_KERNEL);
+ if (!priv->tx_traffic)
+ return -ENOMEM;
+ }
+ }
+ if (iwlegacy_debug_level & IWL_DL_RX) {
+ if (!priv->rx_traffic) {
+ priv->rx_traffic =
+ kzalloc(traffic_size, GFP_KERNEL);
+ if (!priv->rx_traffic)
+ return -ENOMEM;
+ }
+ }
+ iwl_legacy_reset_traffic_log(priv);
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
+
+void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
+{
+ kfree(priv->tx_traffic);
+ priv->tx_traffic = NULL;
+
+ kfree(priv->rx_traffic);
+ priv->rx_traffic = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
+
+void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
+ u16 length, struct ieee80211_hdr *header)
+{
+ __le16 fc;
+ u16 len;
+
+ if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
+ return;
+
+ if (!priv->tx_traffic)
+ return;
+
+ fc = header->frame_control;
+ if (ieee80211_is_data(fc)) {
+ len = (length > IWL_TRAFFIC_ENTRY_SIZE)
+ ? IWL_TRAFFIC_ENTRY_SIZE : length;
+ memcpy((priv->tx_traffic +
+ (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
+ header, len);
+ priv->tx_traffic_idx =
+ (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
+
+void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
+ u16 length, struct ieee80211_hdr *header)
+{
+ __le16 fc;
+ u16 len;
+
+ if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
+ return;
+
+ if (!priv->rx_traffic)
+ return;
+
+ fc = header->frame_control;
+ if (ieee80211_is_data(fc)) {
+ len = (length > IWL_TRAFFIC_ENTRY_SIZE)
+ ? IWL_TRAFFIC_ENTRY_SIZE : length;
+ memcpy((priv->rx_traffic +
+ (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
+ header, len);
+ priv->rx_traffic_idx =
+ (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
+
+const char *iwl_legacy_get_mgmt_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(MANAGEMENT_ASSOC_REQ);
+ IWL_CMD(MANAGEMENT_ASSOC_RESP);
+ IWL_CMD(MANAGEMENT_REASSOC_REQ);
+ IWL_CMD(MANAGEMENT_REASSOC_RESP);
+ IWL_CMD(MANAGEMENT_PROBE_REQ);
+ IWL_CMD(MANAGEMENT_PROBE_RESP);
+ IWL_CMD(MANAGEMENT_BEACON);
+ IWL_CMD(MANAGEMENT_ATIM);
+ IWL_CMD(MANAGEMENT_DISASSOC);
+ IWL_CMD(MANAGEMENT_AUTH);
+ IWL_CMD(MANAGEMENT_DEAUTH);
+ IWL_CMD(MANAGEMENT_ACTION);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+const char *iwl_legacy_get_ctrl_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(CONTROL_BACK_REQ);
+ IWL_CMD(CONTROL_BACK);
+ IWL_CMD(CONTROL_PSPOLL);
+ IWL_CMD(CONTROL_RTS);
+ IWL_CMD(CONTROL_CTS);
+ IWL_CMD(CONTROL_ACK);
+ IWL_CMD(CONTROL_CFEND);
+ IWL_CMD(CONTROL_CFENDACK);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
+{
+ memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
+ memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
+}
+
+/*
+ * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, iwl_legacy_update_stats()
+ * records all MGMT, CTRL and DATA packets for both the TX and RX paths;
+ * use debugfs to display the tx/rx statistics.
+ * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT and CTRL
+ * information is recorded, but DATA packets are still counted because
+ * iwl_led.c needs them to control LED blinking based on the amount of
+ * TX and RX data.
+ */
+void
+iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
+{
+ struct traffic_stats *stats;
+
+ if (is_tx)
+ stats = &priv->tx_stats;
+ else
+ stats = &priv->rx_stats;
+
+ if (ieee80211_is_mgmt(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+ stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
+ stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+ stats->mgmt[MANAGEMENT_PROBE_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+ stats->mgmt[MANAGEMENT_PROBE_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BEACON):
+ stats->mgmt[MANAGEMENT_BEACON]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ATIM):
+ stats->mgmt[MANAGEMENT_ATIM]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+ stats->mgmt[MANAGEMENT_DISASSOC]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ stats->mgmt[MANAGEMENT_AUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ stats->mgmt[MANAGEMENT_DEAUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACTION):
+ stats->mgmt[MANAGEMENT_ACTION]++;
+ break;
+ }
+ } else if (ieee80211_is_ctl(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
+ stats->ctrl[CONTROL_BACK_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BACK):
+ stats->ctrl[CONTROL_BACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
+ stats->ctrl[CONTROL_PSPOLL]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_RTS):
+ stats->ctrl[CONTROL_RTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CTS):
+ stats->ctrl[CONTROL_CTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACK):
+ stats->ctrl[CONTROL_ACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFEND):
+ stats->ctrl[CONTROL_CFEND]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
+ stats->ctrl[CONTROL_CFENDACK]++;
+ break;
+ }
+ } else {
+ /* data */
+ stats->data_cnt++;
+ stats->data_bytes += len;
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_update_stats);
+#endif
+
+static void _iwl_legacy_force_rf_reset(struct iwl_priv *priv)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (!iwl_legacy_is_any_associated(priv)) {
+ IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
+ return;
+ }
+ /*
+ * There is no easy way to force a radio reset; the only known method
+ * is switching channels, which forces the radio to reset and retune.
+ * Use an internal short scan (single channel) operation to achieve
+ * this.
+ * The driver should reset the radio when a number of consecutive
+ * missed beacons, or any other uCode error condition, is detected.
+ */
+ IWL_DEBUG_INFO(priv, "perform radio reset.\n");
+ iwl_legacy_internal_short_hw_scan(priv);
+}
+
+
+int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external)
+{
+ struct iwl_force_reset *force_reset;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return -EINVAL;
+
+ if (mode >= IWL_MAX_FORCE_RESET) {
+ IWL_DEBUG_INFO(priv, "invalid reset request.\n");
+ return -EINVAL;
+ }
+ force_reset = &priv->force_reset[mode];
+ force_reset->reset_request_count++;
+ if (!external) {
+ if (force_reset->last_force_reset_jiffies &&
+ time_after(force_reset->last_force_reset_jiffies +
+ force_reset->reset_duration, jiffies)) {
+ IWL_DEBUG_INFO(priv, "force reset rejected\n");
+ force_reset->reset_reject_count++;
+ return -EAGAIN;
+ }
+ }
+ force_reset->reset_success_count++;
+ force_reset->last_force_reset_jiffies = jiffies;
+ IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
+ switch (mode) {
+ case IWL_RF_RESET:
+ _iwl_legacy_force_rf_reset(priv);
+ break;
+ case IWL_FW_RESET:
+ /*
+ * If the request comes from outside the driver (e.g. debugfs),
+ * always perform it, regardless of the module parameter setting.
+ * If the request is internal (uCode error or driver-detected
+ * failure), check the fw_restart module parameter before
+ * reloading the firmware.
+ */
+ if (!external && !priv->cfg->mod_params->restart_fw) {
+ IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
+ "module parameter setting\n");
+ break;
+ }
+ IWL_ERR(priv, "On demand firmware reload\n");
+ /* Set the FW error flag -- cleared on iwl_down */
+ set_bit(STATUS_FW_ERROR, &priv->status);
+ wake_up_interruptible(&priv->wait_command_queue);
+ /*
+ * Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit
+ */
+ clear_bit(STATUS_READY, &priv->status);
+ queue_work(priv->workqueue, &priv->restart);
+ break;
+ }
+ return 0;
+}
+
+int
+iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+ struct iwl_rxon_context *tmp;
+ u32 interface_modes;
+ int err;
+
+ newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+ mutex_lock(&priv->mutex);
+
+ interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+
+ if (!(interface_modes & BIT(newtype))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (ctx->exclusive_interface_modes & BIT(newtype)) {
+ for_each_context(priv, tmp) {
+ if (ctx == tmp)
+ continue;
+
+ if (!tmp->vif)
+ continue;
+
+ /*
+ * The current mode switch would be exclusive, but
+ * another context is active ... refuse the switch.
+ */
+ err = -EBUSY;
+ goto out;
+ }
+ }
+
+ /* success */
+ iwl_legacy_teardown_interface(priv, vif, true);
+ vif->type = newtype;
+ err = iwl_legacy_setup_interface(priv, ctx);
+ WARN_ON(err);
+ /*
+ * We've switched internally, but submitting to the
+ * device may have failed for some reason. Mask this
+ * error, because otherwise mac80211 will not switch
+ * (and set the interface type back) and we'll be
+ * out of sync with it.
+ */
+ err = 0;
+
+ out:
+ mutex_unlock(&priv->mutex);
+ return err;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
+
+/*
+ * On every watchdog tick we check the (latest) time stamp. If it does not
+ * change during the timeout period and the queue is not empty, we reset
+ * the firmware.
+ */
+static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
+{
+ struct iwl_tx_queue *txq = &priv->txq[cnt];
+ struct iwl_queue *q = &txq->q;
+ unsigned long timeout;
+ int ret;
+
+ if (q->read_ptr == q->write_ptr) {
+ txq->time_stamp = jiffies;
+ return 0;
+ }
+
+ timeout = txq->time_stamp +
+ msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
+
+ if (time_after(jiffies, timeout)) {
+ IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
+ q->id, priv->cfg->base_params->wd_timeout);
+ ret = iwl_legacy_force_reset(priv, IWL_FW_RESET, false);
+ return (ret == -EAGAIN) ? 0 : 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Making the watchdog tick a quarter of the timeout ensures that a hung
+ * queue is discovered between timeout and 1.25 * timeout.
+ */
+#define IWL_WD_TICK(timeout) ((timeout) / 4)
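+
+/*
+ * Illustrative example (values hypothetical): with wd_timeout = 2000 ms the
+ * watchdog fires every IWL_WD_TICK(2000) = 500 ms, so a queue whose time
+ * stamp stops advancing is detected after between 2000 and 2500 ms.
+ */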
+
+/*
+ * Watchdog timer callback: check each TX queue; if a queue is stuck,
+ * reset the firmware. If everything is fine, just rearm the timer.
+ */
+void iwl_legacy_bg_watchdog(unsigned long data)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)data;
+ int cnt;
+ unsigned long timeout;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ timeout = priv->cfg->base_params->wd_timeout;
+ if (timeout == 0)
+ return;
+
+ /* monitor and check for stuck cmd queue */
+ if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
+ return;
+
+ /* monitor and check for other stuck queues */
+ if (iwl_legacy_is_any_associated(priv)) {
+ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+ /* skip as we already checked the command queue */
+ if (cnt == priv->cmd_queue)
+ continue;
+ if (iwl_legacy_check_stuck_queue(priv, cnt))
+ return;
+ }
+ }
+
+ mod_timer(&priv->watchdog, jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+}
+EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
+
+void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
+{
+ unsigned int timeout = priv->cfg->base_params->wd_timeout;
+
+ if (timeout)
+ mod_timer(&priv->watchdog,
+ jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+}
+EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
+
+/*
+ * Extended beacon time format:
+ * a time in usec is converted into a 32-bit value in extended:internal
+ * format. The extended part is the beacon count; the internal part is
+ * the time in usec within one beacon interval.
+ */
+u32
+iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
+ u32 usec, u32 beacon_interval)
+{
+ u32 quot;
+ u32 rem;
+ u32 interval = beacon_interval * TIME_UNIT;
+
+ if (!interval || !usec)
+ return 0;
+
+ quot = (usec / interval) &
+ (iwl_legacy_beacon_time_mask_high(priv,
+ priv->hw_params.beacon_time_tsf_bits) >>
+ priv->hw_params.beacon_time_tsf_bits);
+ rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
+ priv->hw_params.beacon_time_tsf_bits);
+
+ return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
+}
+EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
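+
+/*
+ * Worked example (numbers illustrative only): with a 100 TU beacon interval
+ * the interval is 100 * TIME_UNIT = 102400 usec.  usec = 250000 then gives
+ * quot = 2 beacons and rem = 45200 usec, packed together as
+ * (2 << beacon_time_tsf_bits) + 45200.
+ */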
+
+/* base is usually what we get from the uCode with each received frame;
+ * it is the same as the HW timer counter counting down.
+ */
+__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
+ u32 addon, u32 beacon_interval)
+{
+ u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
+ priv->hw_params.beacon_time_tsf_bits);
+ u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
+ priv->hw_params.beacon_time_tsf_bits);
+ u32 interval = beacon_interval * TIME_UNIT;
+ u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
+ priv->hw_params.beacon_time_tsf_bits)) +
+ (addon & iwl_legacy_beacon_time_mask_high(priv,
+ priv->hw_params.beacon_time_tsf_bits));
+
+ if (base_low > addon_low)
+ res += base_low - addon_low;
+ else if (base_low < addon_low) {
+ res += interval + base_low - addon_low;
+ res += (1 << priv->hw_params.beacon_time_tsf_bits);
+ } else
+ res += (1 << priv->hw_params.beacon_time_tsf_bits);
+
+ return cpu_to_le32(res);
+}
+EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
+
+#ifdef CONFIG_PM
+
+int iwl_legacy_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
+
+ /*
+ * This function is called when the system goes into suspend state.
+ * mac80211 calls iwl_mac_stop() from its suspend function first, but
+ * since iwl_mac_stop() has no knowledge of who the caller is, it will
+ * not call apm_ops.stop() to stop the DMA operation.
+ * Call apm_ops.stop() here to make sure the DMA is stopped.
+ */
+ iwl_legacy_apm_stop(priv);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_pci_suspend);
+
+int iwl_legacy_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
+ bool hw_rfkill = false;
+
+ /*
+ * We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ iwl_legacy_enable_interrupts(priv);
+
+ if (!(iwl_read32(priv, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rfkill = true;
+
+ if (hw_rfkill)
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_pci_resume);
+
+const struct dev_pm_ops iwl_legacy_pm_ops = {
+ .suspend = iwl_legacy_pci_suspend,
+ .resume = iwl_legacy_pci_resume,
+ .freeze = iwl_legacy_pci_suspend,
+ .thaw = iwl_legacy_pci_resume,
+ .poweroff = iwl_legacy_pci_suspend,
+ .restore = iwl_legacy_pci_resume,
+};
+EXPORT_SYMBOL(iwl_legacy_pm_ops);
+
+#endif /* CONFIG_PM */
+
+static void
+iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (!ctx->is_active)
+ return;
+
+ ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+ if (ctx->qos_data.qos_active)
+ ctx->qos_data.def_qos_parm.qos_flags |=
+ QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+ if (ctx->ht.enabled)
+ ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+ IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+ ctx->qos_data.qos_active,
+ ctx->qos_data.def_qos_parm.qos_flags);
+
+ iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
+ sizeof(struct iwl_qosparam_cmd),
+ &ctx->qos_data.def_qos_parm, NULL);
+}
+
+/**
+ * iwl_legacy_mac_config - mac80211 config callback
+ */
+int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct iwl_priv *priv = hw->priv;
+ const struct iwl_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = conf->channel;
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+ struct iwl_rxon_context *ctx;
+ unsigned long flags = 0;
+ int ret = 0;
+ u16 ch;
+ int scan_active = 0;
+ bool ht_changed[NUM_IWL_RXON_CTX] = {};
+
+ if (WARN_ON(!priv->cfg->ops->legacy))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
+ channel->hw_value, changed);
+
+ if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
+ test_bit(STATUS_SCANNING, &priv->status))) {
+ scan_active = 1;
+ IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
+ }
+
+ if (changed & (IEEE80211_CONF_CHANGE_SMPS |
+ IEEE80211_CONF_CHANGE_CHANNEL)) {
+ /* mac80211 uses static for non-HT which is what we want */
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /*
+ * Recalculate chain counts.
+ *
+ * If monitor mode is enabled then mac80211 will
+ * set up the SM PS mode to OFF if an HT channel is
+ * configured.
+ */
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ for_each_context(priv, ctx)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ }
+
+ /* During scanning, mac80211 delays channel setting until the
+ * scan finishes, indicated by changed = 0.
+ */
+ if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+ if (scan_active)
+ goto set_ch_out;
+
+ ch = channel->hw_value;
+ ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
+ if (!iwl_legacy_is_channel_valid(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ for_each_context(priv, ctx) {
+ /* Configure HT40 channels */
+ if (ctx->ht.enabled != conf_is_ht(conf)) {
+ ctx->ht.enabled = conf_is_ht(conf);
+ ht_changed[ctx->ctxid] = true;
+ }
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+ } else
+ ctx->ht.is_40mhz = false;
+
+ /*
+ * Default to no protection. Protection mode will
+ * later be set from BSS config in iwl_ht_conf
+ */
+ ctx->ht.protection =
+ IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+ /* If we are switching from HT to 2.4 GHz, clear the flags of
+ * any HT-related info, since 2.4 GHz does not support HT. */
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ iwl_legacy_set_rxon_channel(priv, channel, ctx);
+ iwl_legacy_set_rxon_ht(priv, ht_conf);
+
+ iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
+ ctx->vif);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (priv->cfg->ops->legacy->update_bcast_stations)
+ ret =
+ priv->cfg->ops->legacy->update_bcast_stations(priv);
+
+ set_ch_out:
+ /* The list of supported rates and rate mask can be different
+ * for each band; since the band may have changed, reset
+ * the rate mask to what mac80211 lists */
+ iwl_legacy_set_rate(priv);
+ }
+
+ if (changed & (IEEE80211_CONF_CHANGE_PS |
+ IEEE80211_CONF_CHANGE_IDLE)) {
+ ret = iwl_legacy_power_update_mode(priv, false);
+ if (ret)
+ IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
+ priv->tx_power_user_lmt, conf->power_level);
+
+ iwl_legacy_set_tx_power(priv, conf->power_level, false);
+ }
+
+ if (!iwl_legacy_is_ready(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+ goto out;
+ }
+
+ if (scan_active)
+ goto out;
+
+ for_each_context(priv, ctx) {
+ if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+ iwl_legacy_commit_rxon(priv, ctx);
+ else
+ IWL_DEBUG_INFO(priv,
+ "Not re-sending same RXON configuration.\n");
+ if (ht_changed[ctx->ctxid])
+ iwl_legacy_update_qos(priv, ctx);
+ }
+
+out:
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_config);
+
+void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ unsigned long flags;
+ /* IBSS can only be the IWL_RXON_CTX_BSS context */
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+ if (WARN_ON(!priv->cfg->ops->legacy))
+ return;
+
+ mutex_lock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ spin_lock_irqsave(&priv->lock, flags);
+ memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* new association: get rid of the IBSS beacon skb */
+ if (priv->beacon_skb)
+ dev_kfree_skb(priv->beacon_skb);
+
+ priv->beacon_skb = NULL;
+
+ priv->timestamp = 0;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwl_legacy_scan_cancel_timeout(priv, 100);
+ if (!iwl_legacy_is_ready_rf(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ /* we are restarting the association process;
+ * clear the RXON_FILTER_ASSOC_MSK bit
+ */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwl_legacy_commit_rxon(priv, ctx);
+
+ iwl_legacy_set_rate(priv);
+
+ mutex_unlock(&priv->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
+
+static void iwl_legacy_ht_conf(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+ struct ieee80211_sta *sta;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+ IWL_DEBUG_ASSOC(priv, "enter:\n");
+
+ if (!ctx->ht.enabled)
+ return;
+
+ ctx->ht.protection =
+ bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ ctx->ht.non_gf_sta_present =
+ !!(bss_conf->ht_operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+ ht_conf->single_chain_sufficient = false;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (sta) {
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ int maxstreams;
+
+ maxstreams = (ht_cap->mcs.tx_params &
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+ >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ maxstreams += 1;
+
+ if ((ht_cap->mcs.rx_mask[1] == 0) &&
+ (ht_cap->mcs.rx_mask[2] == 0))
+ ht_conf->single_chain_sufficient = true;
+ if (maxstreams <= 1)
+ ht_conf->single_chain_sufficient = true;
+ } else {
+ /*
+ * If at all, this can only happen through a race
+ * when the AP disconnects us while we're still
+ * setting up the connection, in that case mac80211
+ * will soon tell us about that.
+ */
+ ht_conf->single_chain_sufficient = true;
+ }
+ rcu_read_unlock();
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ht_conf->single_chain_sufficient = true;
+ break;
+ default:
+ break;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "leave\n");
+}
+
+static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+
+ /*
+ * inform the ucode that there is no longer an
+ * association and that no more packets should be
+ * sent
+ */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ ctx->staging.assoc_id = 0;
+ iwl_legacy_commit_rxon(priv, ctx);
+}
+
+static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_priv *priv = hw->priv;
+ unsigned long flags;
+ __le64 timestamp;
+ struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+
+ if (!skb)
+ return;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (!priv->beacon_ctx) {
+ IWL_ERR(priv, "update beacon but no beacon context!\n");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (priv->beacon_skb)
+ dev_kfree_skb(priv->beacon_skb);
+
+ priv->beacon_skb = skb;
+
+ timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+ priv->timestamp = le64_to_cpu(timestamp);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (!iwl_legacy_is_ready_rf(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+ return;
+ }
+
+ priv->cfg->ops->legacy->post_associate(priv);
+}
+
+void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
+ int ret;
+
+ if (WARN_ON(!priv->cfg->ops->legacy))
+ return;
+
+ IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
+
+ if (!iwl_legacy_is_alive(priv))
+ return;
+
+ mutex_lock(&priv->mutex);
+
+ if (changes & BSS_CHANGED_QOS) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ctx->qos_data.qos_active = bss_conf->qos;
+ iwl_legacy_update_qos(priv, ctx);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ /*
+ * the add_interface code must make sure we only ever
+ * have a single interface that could be beaconing at
+ * any time.
+ */
+ if (vif->bss_conf.enable_beacon)
+ priv->beacon_ctx = ctx;
+ else
+ priv->beacon_ctx = NULL;
+ }
+
+ if (changes & BSS_CHANGED_BSSID) {
+ IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
+
+ /*
+ * If there is currently a HW scan going on in the
+ * background then we need to cancel it else the RXON
+ * below/in post_associate will fail.
+ */
+ if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
+ IWL_WARN(priv,
+ "Aborted scan still in progress after 100ms\n");
+ IWL_DEBUG_MAC80211(priv,
+ "leaving - scan abort failed.\n");
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ /* mac80211 only sets assoc when in STATION mode */
+ if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
+ memcpy(ctx->staging.bssid_addr,
+ bss_conf->bssid, ETH_ALEN);
+
+ /* currently needed in a few places */
+ memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+ } else {
+ ctx->staging.filter_flags &=
+ ~RXON_FILTER_ASSOC_MSK;
+ }
+
+ }
+
+ /*
+ * This needs to be after setting the BSSID in case
+ * mac80211 decides to do both changes at once because
+ * it will invoke post_associate.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
+ iwl_legacy_beacon_update(hw, vif);
+
+ if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+ IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
+ bss_conf->use_short_preamble);
+ if (bss_conf->use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ }
+
+ if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+ IWL_DEBUG_MAC80211(priv,
+ "ERP_CTS %d\n", bss_conf->use_cts_prot);
+ if (bss_conf->use_cts_prot &&
+ (priv->band != IEEE80211_BAND_5GHZ))
+ ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ if (bss_conf->use_cts_prot)
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+ }
+
+ if (changes & BSS_CHANGED_BASIC_RATES) {
+ /* XXX use this information
+ *
+ * To do that, remove code from iwl_legacy_set_rate() and put something
+ * like this here:
+ *
+ if (A-band)
+ ctx->staging.ofdm_basic_rates =
+ bss_conf->basic_rates;
+ else
+ ctx->staging.ofdm_basic_rates =
+ bss_conf->basic_rates >> 4;
+ ctx->staging.cck_basic_rates =
+ bss_conf->basic_rates & 0xF;
+ */
+ }
+
+ if (changes & BSS_CHANGED_HT) {
+ iwl_legacy_ht_conf(priv, vif);
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ }
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
+ if (bss_conf->assoc) {
+ priv->timestamp = bss_conf->timestamp;
+
+ if (!iwl_legacy_is_rfkill(priv))
+ priv->cfg->ops->legacy->post_associate(priv);
+ } else
+ iwl_legacy_set_no_assoc(priv, vif);
+ }
+
+ if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
+ IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
+ changes);
+ ret = iwl_legacy_send_rxon_assoc(priv, ctx);
+ if (!ret) {
+ /* Sync active_rxon with latest change. */
+ memcpy((void *)&ctx->active,
+ &ctx->staging,
+ sizeof(struct iwl_legacy_rxon_cmd));
+ }
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (vif->bss_conf.enable_beacon) {
+ memcpy(ctx->staging.bssid_addr,
+ bss_conf->bssid, ETH_ALEN);
+ memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+ priv->cfg->ops->legacy->config_ap(priv);
+ } else
+ iwl_legacy_set_no_assoc(priv, vif);
+ }
+
+ if (changes & BSS_CHANGED_IBSS) {
+ ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
+ bss_conf->ibss_joined);
+ if (ret)
+ IWL_ERR(priv, "failed to %s IBSS station %pM\n",
+ bss_conf->ibss_joined ? "add" : "remove",
+ bss_conf->bssid);
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
+
+irqreturn_t iwl_legacy_isr(int irq, void *data)
+{
+ struct iwl_priv *priv = data;
+ u32 inta, inta_mask;
+ u32 inta_fh;
+ unsigned long flags;
+ if (!priv)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(priv, CSR_INT);
+ inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!inta && !inta_fh) {
+ IWL_DEBUG_ISR(priv,
+ "Ignore interrupt, inta == 0, inta_fh == 0\n");
+ goto none;
+ }
+
+ if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
+ IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ goto unplugged;
+ }
+
+ IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
+ inta, inta_mask, inta_fh);
+
+ inta &= ~CSR_INT_BIT_SCD;
+
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta || inta_fh))
+ tasklet_schedule(&priv->irq_tasklet);
+
+unplugged:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+
+none:
+ /* Re-enable interrupts here since we don't have anything to service. */
+ /* only re-enable if disabled by irq */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status))
+ iwl_legacy_enable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_NONE;
+}
+EXPORT_SYMBOL(iwl_legacy_isr);
+
+/*
+ * iwl_legacy_tx_cmd_protection: Set RTS/CTS. This function is shared
+ * only by 3945 and 4965.
+ */
+void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags)
+{
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ *tx_flags |= TX_CMD_FLG_RTS_MSK;
+ *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+ if (!ieee80211_is_mgmt(fc))
+ return;
+
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
+ break;
+ }
+ } else if (info->control.rates[0].flags &
+ IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
new file mode 100644
index 000000000000..f03b463e4378
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -0,0 +1,646 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_core_h__
+#define __iwl_legacy_core_h__
+
+/************************
+ * forward declarations *
+ ************************/
+struct iwl_host_cmd;
+struct iwl_cmd;
+
+
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR "<ilw@linux.intel.com>"
+
+#define IWL_PCI_DEVICE(dev, subdev, cfg) \
+ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+ .driver_data = (kernel_ulong_t)&(cfg)
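+
+/*
+ * Illustrative use only (device ID and cfg name hypothetical):
+ *
+ *	{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl_example_cfg)},
+ *
+ * expands to a pci_device_id entry whose driver_data points at the
+ * matching struct iwl_cfg.
+ */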
+
+#define TIME_UNIT 1024
+
+#define IWL_SKU_G 0x1
+#define IWL_SKU_A 0x2
+#define IWL_SKU_N 0x8
+
+#define IWL_CMD(x) case x: return #x
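+
+/*
+ * IWL_CMD() stringifies its argument; for example, in a switch statement
+ * the use IWL_CMD(MANAGEMENT_AUTH); expands to:
+ *
+ *	case MANAGEMENT_AUTH: return "MANAGEMENT_AUTH";
+ */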
+
+struct iwl_hcmd_ops {
+ int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+ int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+ void (*set_rxon_chain)(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+};
+
+struct iwl_hcmd_utils_ops {
+ u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
+ u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
+ u8 *data);
+ int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
+ void (*post_scan)(struct iwl_priv *priv);
+};
+
+struct iwl_apm_ops {
+ int (*init)(struct iwl_priv *priv);
+ void (*config)(struct iwl_priv *priv);
+};
+
+struct iwl_debugfs_ops {
+ ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+};
+
+struct iwl_temp_ops {
+ void (*temperature)(struct iwl_priv *priv);
+};
+
+struct iwl_lib_ops {
+ /* set hw dependent parameters */
+ int (*set_hw_params)(struct iwl_priv *priv);
+ /* Handling TX */
+ void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt);
+ int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr,
+ u16 len, u8 reset, u8 pad);
+ void (*txq_free_tfd)(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
+ int (*txq_init)(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
+ /* setup Rx handler */
+ void (*rx_handler_setup)(struct iwl_priv *priv);
+ /* alive notification after init uCode load */
+ void (*init_alive_start)(struct iwl_priv *priv);
+ /* check validity of rtc data address */
+ int (*is_valid_rtc_data_addr)(u32 addr);
+ /* 1st ucode load */
+ int (*load_ucode)(struct iwl_priv *priv);
+ int (*dump_nic_event_log)(struct iwl_priv *priv,
+ bool full_log, char **buf, bool display);
+ void (*dump_nic_error_log)(struct iwl_priv *priv);
+ int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
+ int (*set_channel_switch)(struct iwl_priv *priv,
+ struct ieee80211_channel_switch *ch_switch);
+ /* power management */
+ struct iwl_apm_ops apm_ops;
+
+ /* power */
+ int (*send_tx_power) (struct iwl_priv *priv);
+ void (*update_chain_flags)(struct iwl_priv *priv);
+
+ /* eeprom operations (as defined in iwl-eeprom.h) */
+ struct iwl_eeprom_ops eeprom_ops;
+
+ /* temperature */
+ struct iwl_temp_ops temp_ops;
+ /* check for plcp health */
+ bool (*check_plcp_health)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+
+ struct iwl_debugfs_ops debugfs_ops;
+
+};
+
+struct iwl_led_ops {
+ int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
+};
+
+struct iwl_legacy_ops {
+ void (*post_associate)(struct iwl_priv *priv);
+ void (*config_ap)(struct iwl_priv *priv);
+ /* station management */
+ int (*update_bcast_stations)(struct iwl_priv *priv);
+ int (*manage_ibss_station)(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add);
+};
+
+struct iwl_ops {
+ const struct iwl_lib_ops *lib;
+ const struct iwl_hcmd_ops *hcmd;
+ const struct iwl_hcmd_utils_ops *utils;
+ const struct iwl_led_ops *led;
+ const struct iwl_nic_ops *nic;
+ const struct iwl_legacy_ops *legacy;
+ const struct ieee80211_ops *ieee80211_ops;
+};
+
+struct iwl_mod_params {
+ int sw_crypto; /* def: 0 = using hardware encryption */
+ int disable_hw_scan; /* def: 0 = use h/w scan */
+ int num_of_queues; /* def: HW dependent */
+ int disable_11n; /* def: 0 = 11n capabilities enabled */
+ int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
+ int antenna; /* def: 0 = both antennas (use diversity) */
+ int restart_fw; /* def: 1 = restart firmware */
+};
+
+/*
+ * @led_compensation: compensate the LED on/off time per HW according
+ * to the deviation, to achieve the desired LED frequency.
+ * The detailed algorithm is described in iwl-led.c
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @plcp_delta_threshold: PLCP error rate threshold used to trigger
+ * radio retuning when the receive PLCP error rate is high
+ * @wd_timeout: TX queues watchdog timeout
+ * @temperature_kelvin: temperature is reported by the uCode in kelvin
+ * @max_event_log_size: size of the event log buffer for uCode event logging
+ * @ucode_tracing: support uCode continuous tracing
+ * @sensitivity_calib_by_driver: driver is capable of performing the
+ * sensitivity calibration operation
+ * @chain_noise_calib_by_driver: driver is capable of performing the
+ * chain noise calibration operation
+ */
+struct iwl_base_params {
+ int eeprom_size;
+ int num_of_queues; /* def: HW dependent */
+ int num_of_ampdu_queues;/* def: HW dependent */
+ /* for iwl_legacy_apm_init() */
+ u32 pll_cfg_val;
+ bool set_l0s;
+ bool use_bsm;
+
+ u16 led_compensation;
+ int chain_noise_num_beacons;
+ u8 plcp_delta_threshold;
+ unsigned int wd_timeout;
+ bool temperature_kelvin;
+ u32 max_event_log_size;
+ const bool ucode_tracing;
+ const bool sensitivity_calib_by_driver;
+ const bool chain_noise_calib_by_driver;
+};
+
+/**
+ * struct iwl_cfg
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ * (.ucode) will be added to filename before loading from disk. The
+ * filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @scan_antennas: available antenna for scan operation
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ *
+ * We enable the driver to be backward compatible wrt API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version. The firmware's API version will be
+ * stored in @iwl_priv, enabling the driver to make runtime changes based
+ * on firmware version used.
+ *
+ * For example,
+ * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
+ * Driver interacts with Firmware API version >= 2.
+ * } else {
+ * Driver interacts with Firmware API version 1.
+ * }
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision. That is, through utilizing the
+ * iwl_hcmd_utils_ops etc. we accommodate different command structures
+ * and flows between hardware versions as well as their API
+ * versions.
+ *
+ */
+struct iwl_cfg {
+ /* params specific to an individual device within a device family */
+ const char *name;
+ const char *fw_name_pre;
+ const unsigned int ucode_api_max;
+ const unsigned int ucode_api_min;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ unsigned int sku;
+ u16 eeprom_ver;
+ u16 eeprom_calib_ver;
+ const struct iwl_ops *ops;
+ /* module based parameters which can be set from modprobe cmd */
+ const struct iwl_mod_params *mod_params;
+ /* params not likely to change within a device family */
+ struct iwl_base_params *base_params;
+ /* params likely to change within a device family */
+ u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+ u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
+ enum iwl_led_mode led_mode;
+};
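+
+/*
+ * A purely illustrative per-device entry (all identifiers other than the
+ * field names are hypothetical) might look like:
+ *
+ *	static struct iwl_cfg iwl_example_cfg = {
+ *		.name = "Example Wireless A/B/G",
+ *		.fw_name_pre = "iwlwifi-example-",
+ *		.ucode_api_max = 2,
+ *		.ucode_api_min = 1,
+ *		.sku = IWL_SKU_A | IWL_SKU_G,
+ *		.ops = &iwl_example_ops,
+ *		.mod_params = &iwl_example_mod_params,
+ *		.base_params = &iwl_example_base_params,
+ *	};
+ */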
+
+/***************************
+ * L i b *
+ ***************************/
+
+struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
+int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params);
+int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
+void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ int hw_decrypt);
+int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
+ struct ieee80211_channel *ch,
+ struct iwl_rxon_context *ctx);
+void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif);
+u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
+ enum ieee80211_band band);
+void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
+ struct iwl_ht_config *ht_conf);
+bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_sta_ht_cap *ht_cap);
+void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+void iwl_legacy_set_rate(struct iwl_priv *priv);
+int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ u32 decrypt_res,
+ struct ieee80211_rx_status *stats);
+void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
+int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p);
+int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
+void iwl_legacy_txq_mem(struct iwl_priv *priv);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
+void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
+void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
+void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
+ u16 length, struct ieee80211_hdr *header);
+void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
+ u16 length, struct ieee80211_hdr *header);
+const char *iwl_legacy_get_mgmt_string(int cmd);
+const char *iwl_legacy_get_ctrl_string(int cmd);
+void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
+void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
+ u16 len);
+#else
+static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
+{
+ return 0;
+}
+static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
+{
+}
+static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
+{
+}
+static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
+ u16 length, struct ieee80211_hdr *header)
+{
+}
+static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
+ u16 length, struct ieee80211_hdr *header)
+{
+}
+static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
+ __le16 fc, u16 len)
+{
+}
+#endif
+/*****************************************************
+ * RX handlers.
+ * **************************************************/
+void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+
+/*****************************************************
+* RX
+******************************************************/
+void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
+void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
+int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
+void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
+ struct iwl_rx_queue *q);
+int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
+void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+/* Handlers */
+void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt);
+void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
+void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
+
+/* TX helpers */
+
+/*****************************************************
+* TX
+******************************************************/
+void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
+int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id);
+void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id);
+void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
+void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
+void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
+/*****************************************************
+ * TX power
+ ****************************************************/
+int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
+
+/*******************************************************************************
+ * Rate
+ ******************************************************************************/
+
+u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+
+/*******************************************************************************
+ * Scanning
+ ******************************************************************************/
+void iwl_legacy_init_scan_params(struct iwl_priv *priv);
+int iwl_legacy_scan_cancel(struct iwl_priv *priv);
+int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
+void iwl_legacy_force_scan_end(struct iwl_priv *priv);
+int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
+int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external);
+u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
+ struct ieee80211_mgmt *frame,
+ const u8 *ta, const u8 *ie, int ie_len, int left);
+void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
+u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ u8 n_probes);
+u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif);
+void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
+void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
+
+/* For faster active scanning, scan will move to the next channel if fewer than
+ * PLCP_QUIET_THRESH packets are heard on this channel within
+ * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
+ * time if it's a quiet channel (nothing responded to our probe, and there's
+ * no other traffic).
+ * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
+#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
+#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
+
+#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
+
+/*****************************************************
+ * S e n d i n g H o s t C o m m a n d s *
+ *****************************************************/
+
+const char *iwl_legacy_get_cmd_string(u8 cmd);
+int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
+ struct iwl_host_cmd *cmd);
+int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
+ u16 len, const void *data);
+int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
+ const void *data,
+ void (*callback)(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt));
+
+int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+
+
+/*****************************************************
+ * PCI *
+ *****************************************************/
+
+static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
+{
+ int pos;
+ u16 pci_lnk_ctl;
+ pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP);
+ pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+ return pci_lnk_ctl;
+}
+
+void iwl_legacy_bg_watchdog(unsigned long data);
+u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
+ u32 usec, u32 beacon_interval);
+__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
+ u32 addon, u32 beacon_interval);
+
+#ifdef CONFIG_PM
+int iwl_legacy_pci_suspend(struct device *device);
+int iwl_legacy_pci_resume(struct device *device);
+extern const struct dev_pm_ops iwl_legacy_pm_ops;
+
+#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define IWL_LEGACY_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
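+
+/*
+ * Illustrative use only (driver name hypothetical): a PCI driver hooks
+ * these handlers up through the .driver.pm field of its pci_driver:
+ *
+ *	static struct pci_driver iwl_example_driver = {
+ *		...
+ *		.driver.pm = IWL_LEGACY_PM_OPS,
+ *		...
+ *	};
+ */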
+
+/*****************************************************
+* Error Handling Debugging
+******************************************************/
+void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
+int iwl4965_dump_nic_event_log(struct iwl_priv *priv,
+ bool full_log, char **buf, bool display);
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+#else
+static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+}
+#endif
+
+void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
+
+/*****************************************************
+* GEOS
+******************************************************/
+int iwl_legacy_init_geos(struct iwl_priv *priv);
+void iwl_legacy_free_geos(struct iwl_priv *priv);
+
+/*************** DRIVER STATUS FUNCTIONS *****/
+
+#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
+/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
+#define STATUS_INT_ENABLED 2
+#define STATUS_RF_KILL_HW 3
+#define STATUS_CT_KILL 4
+#define STATUS_INIT 5
+#define STATUS_ALIVE 6
+#define STATUS_READY 7
+#define STATUS_TEMPERATURE 8
+#define STATUS_GEO_CONFIGURED 9
+#define STATUS_EXIT_PENDING 10
+#define STATUS_STATISTICS 12
+#define STATUS_SCANNING 13
+#define STATUS_SCAN_ABORTING 14
+#define STATUS_SCAN_HW 15
+#define STATUS_POWER_PMI 16
+#define STATUS_FW_ERROR 17
+
+
+static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
+{
+ /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+ * set but EXIT_PENDING is not */
+ return test_bit(STATUS_READY, &priv->status) &&
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
+ !test_bit(STATUS_EXIT_PENDING, &priv->status);
+}
+
+static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_ALIVE, &priv->status);
+}
+
+static inline int iwl_legacy_is_init(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_INIT, &priv->status);
+}
+
+static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_RF_KILL_HW, &priv->status);
+}
+
+static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
+{
+ return iwl_legacy_is_rfkill_hw(priv);
+}
+
+static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
+{
+ return test_bit(STATUS_CT_KILL, &priv->status);
+}
+
+static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
+{
+
+ if (iwl_legacy_is_rfkill(priv))
+ return 0;
+
+ return iwl_legacy_is_ready(priv);
+}
+
+extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
+extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
+ u8 flags, bool clear);
+void iwl_legacy_apm_stop(struct iwl_priv *priv);
+int iwl_legacy_apm_init(struct iwl_priv *priv);
+
+int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
+}
+static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
+}
+static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
+ struct iwl_priv *priv, enum ieee80211_band band)
+{
+ return priv->hw->wiphy->bands[band];
+}
+
+/* mac80211 handlers */
+int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
+void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
+void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes);
+void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags);
+
+irqreturn_t iwl_legacy_isr(int irq, void *data);
+
+#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/iwl-csr.h
new file mode 100644
index 000000000000..668a9616c269
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-csr.h
@@ -0,0 +1,422 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_legacy_csr_h__
+#define __iwl_legacy_csr_h__
+/*
+ * CSR (control and status registers)
+ *
+ * CSR registers are mapped directly into PCI bus space, and are accessible
+ * whenever platform supplies power to device, even when device is in
+ * low power states due to driver-invoked device resets
+ * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
+ *
+ * Use iwl_write32() and iwl_read32() family to access these registers;
+ * these provide simple PCI bus access, without waking up the MAC.
+ * Do not use iwl_legacy_write_direct32() family for these registers;
+ * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
+ * The MAC (uCode processor, etc.) does not need to be powered up for accessing
+ * the CSR registers.
+ *
+ * NOTE: Device does need to be awake in order to read this memory
+ * via CSR_EEPROM register
+ */
+#define CSR_BASE (0x000)
+
+#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
+#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
+#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
+#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
+#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
+#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
+#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
+#define CSR_GP_CNTRL (CSR_BASE+0x024)
+
+/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
+#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
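+
+/*
+ * Usage sketch (illustrative only): a plain read-modify-write of a CSR
+ * register with the iwl_read32()/iwl_write32() helpers described above.
+ * Assumes those helpers (declared in iwl-io.h) are visible to the
+ * includer; no MAC_ACCESS_REQ handshake is needed for CSR space.
+ */
+struct iwl_priv;
+
+static inline void iwl_legacy_csr_set_bits_sketch(struct iwl_priv *priv,
+ u32 reg, u32 mask)
+{
+ iwl_write32(priv, reg, iwl_read32(priv, reg) | mask);
+}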
+
+/*
+ * Hardware revision info
+ * Bit fields:
+ * 31-8: Reserved
+ * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
+ * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
+ * 1-0: "Dash" (-) value, as in A-1, etc.
+ *
+ * NOTE: Revision step affects calculation of CCK txpower for 4965.
+ * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
+ */
+#define CSR_HW_REV (CSR_BASE+0x028)
+
+/*
+ * EEPROM memory reads
+ *
+ * NOTE: Device must be awake, initialized via apm_ops.init(),
+ * in order to read.
+ */
+#define CSR_EEPROM_REG (CSR_BASE+0x02c)
+#define CSR_EEPROM_GP (CSR_BASE+0x030)
+
+#define CSR_GIO_REG (CSR_BASE+0x03C)
+#define CSR_GP_UCODE_REG (CSR_BASE+0x048)
+#define CSR_GP_DRIVER_REG (CSR_BASE+0x050)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox registers.
+ * SET/CLR registers set/clear bit(s) if "1" is written.
+ */
+#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
+#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
+#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
+#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
+
+#define CSR_LED_REG (CSR_BASE+0x094)
+#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
+
+/* Analog phase-lock-loop configuration */
+#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
+
+/*
+ * CSR Hardware Revision Workaround Register. Indicates hardware rev;
+ * "step" determines CCK backoff for txpower calculation. Used for 4965 only.
+ * See also CSR_HW_REV register.
+ * Bit fields:
+ * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
+ * 1-0: "Dash" (-) value, as in C-1, etc.
+ */
+#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
+
+#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
+#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
+
+/* Bits for CSR_HW_IF_CONFIG_REG */
+#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
+#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
+#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
+#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
+
+#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100)
+#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200)
+#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400)
+#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800)
+#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
+#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
+
+#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
+#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+
+#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int */
+#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec */
+
+/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
+ * acknowledged (reset) by host writing "1" to flagged bits. */
+#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
+#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
+#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
+#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
+#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
+#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
+#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
+#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
+#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
+
+#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
+ CSR_INT_BIT_HW_ERR | \
+ CSR_INT_BIT_FH_TX | \
+ CSR_INT_BIT_SW_ERR | \
+ CSR_INT_BIT_RF_KILL | \
+ CSR_INT_BIT_SW_RX | \
+ CSR_INT_BIT_WAKEUP | \
+ CSR_INT_BIT_ALIVE)
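+
+/*
+ * Usage sketch (illustrative only): interrupts are acknowledged by writing
+ * "1" back to the flagged bits, as noted above.  Helper names follow the
+ * iwl_read32()/iwl_write32() convention from the top of this file.
+ */
+static inline void iwl_legacy_ack_ints_sketch(struct iwl_priv *priv)
+{
+ u32 inta = iwl_read32(priv, CSR_INT);
+ u32 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+
+ iwl_write32(priv, CSR_INT, inta); /* ack only the bits we just read */
+ iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
+}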
+
+/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
+#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
+#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
+#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
+#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
+#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
+#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
+#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
+#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
+
+#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
+ CSR39_FH_INT_BIT_RX_CHNL2 | \
+ CSR_FH_INT_BIT_RX_CHNL1 | \
+ CSR_FH_INT_BIT_RX_CHNL0)
+
+
+#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
+ CSR_FH_INT_BIT_TX_CHNL1 | \
+ CSR_FH_INT_BIT_TX_CHNL0)
+
+#define CSR49_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
+ CSR_FH_INT_BIT_RX_CHNL1 | \
+ CSR_FH_INT_BIT_RX_CHNL0)
+
+#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
+ CSR_FH_INT_BIT_TX_CHNL0)
+
+/* GPIO */
+#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
+#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
+#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200)
+
+/* RESET */
+#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
+#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
+#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
+#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
+#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
+#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
+
+/*
+ * GP (general purpose) CONTROL REGISTER
+ * Bit fields:
+ * 27: HW_RF_KILL_SW
+ * Indicates state of (platform's) hardware RF-Kill switch
+ * 26-24: POWER_SAVE_TYPE
+ * Indicates current power-saving mode:
+ * 000 -- No power saving
+ * 001 -- MAC power-down
+ * 010 -- PHY (radio) power-down
+ * 011 -- Error
+ * 9-6: SYS_CONFIG
+ * Indicates current system configuration, reflecting pins on chip
+ * as forced high/low by device circuit board.
+ * 4: GOING_TO_SLEEP
+ * Indicates MAC is entering a power-saving sleep power-down.
+ * Not a good time to access device-internal resources.
+ * 3: MAC_ACCESS_REQ
+ * Host sets this to request and maintain MAC wakeup, to allow host
+ * access to device-internal resources. Host must wait for
+ * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ * device registers.
+ * 2: INIT_DONE
+ * Host sets this to put device into fully operational D0 power mode.
+ * Host resets this after SW_RESET to put device into low power mode.
+ * 0: MAC_CLOCK_READY
+ * Indicates MAC (ucode processor, etc.) is powered up and can run.
+ * Internal resources are accessible.
+ * NOTE: This does not indicate that the processor is actually running.
+ * NOTE: This does not indicate that 4965 or 3945 has completed
+ * init or post-power-down restore of internal SRAM memory.
+ * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ * SRAM is restored and uCode is in normal operation mode.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ * NOTE: After device reset, this bit remains "0" until host sets
+ * INIT_DONE
+ */
+#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
+#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
+#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
+
+#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
+
+#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
+#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
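+
+/*
+ * Usage sketch (illustrative only): the MAC_ACCESS_REQ handshake described
+ * above -- request MAC wakeup, then poll until MAC_CLOCK_READY is set and
+ * GOING_TO_SLEEP is clear before touching non-CSR space.  The poll bound,
+ * udelay() step and error code are arbitrary illustration choices.
+ */
+static inline int iwl_legacy_grab_mac_access_sketch(struct iwl_priv *priv)
+{
+ int t;
+
+ iwl_write32(priv, CSR_GP_CNTRL, iwl_read32(priv, CSR_GP_CNTRL) |
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ for (t = 0; t < 5000; t++) {
+ u32 gp = iwl_read32(priv, CSR_GP_CNTRL);
+
+ if ((gp & CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) &&
+ !(gp & CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP))
+ return 0;
+ udelay(10);
+ }
+ return -ETIMEDOUT;
+}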
+
+
+/* EEPROM REG */
+#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
+#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
+#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC)
+#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
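+
+/*
+ * Usage sketch (illustrative only): one 16-bit EEPROM word is read by
+ * writing its byte offset into the CSR_EEPROM_REG address field, polling
+ * for READ_VALID, then taking the data from the upper half of the
+ * register.  Poll bound, udelay() step and error code are illustration
+ * choices; the device must already be awake, as noted above.
+ */
+static inline int iwl_legacy_eeprom_read16_sketch(struct iwl_priv *priv,
+ u16 addr, u16 *val)
+{
+ int t;
+
+ iwl_write32(priv, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+ for (t = 0; t < 100; t++) {
+ u32 r = iwl_read32(priv, CSR_EEPROM_REG);
+
+ if (r & CSR_EEPROM_REG_READ_VALID_MSK) {
+ *val = (u16)(r >> 16); /* CSR_EEPROM_REG_MSK_DATA */
+ return 0;
+ }
+ udelay(10);
+ }
+ return -ETIMEDOUT;
+}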
+
+/* EEPROM GP */
+#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
+#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
+
+/* GP REG */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
+#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
+#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
+#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
+#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
+
+
+/* CSR GIO */
+#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox register 1
+ * Host driver and uCode write and/or read this register to communicate with
+ * each other.
+ * Bit fields:
+ * 4: UCODE_DISABLE
+ * Host sets this to request permanent halt of uCode, same as
+ * sending CARD_STATE command with "halt" bit set.
+ * 3: CT_KILL_EXIT
+ * Host sets this to request exit from CT_KILL state, i.e. host thinks
+ * device temperature is low enough to continue normal operation.
+ * 2: CMD_BLOCKED
+ * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
+ * to release uCode to clear all Tx and command queues, enter
+ * unassociated mode, and power down.
+ * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
+ * 1: SW_BIT_RFKILL
+ * Host sets this when issuing CARD_STATE command to request
+ * device sleep.
+ * 0: MAC_SLEEP
+ * uCode sets this when preparing a power-saving power-down.
+ * uCode resets this when power-up is complete and SRAM is sane.
+ * NOTE: 3945/4965 saves internal SRAM data to host when powering down,
+ * and must restore this data after powering back up.
+ * MAC_SLEEP is the best indication that restore is complete.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ */
+#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
+#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
+#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
+#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
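+
+/*
+ * Usage sketch (illustrative only): the SET/CLR companions take a "1" per
+ * bit to change, e.g. blocking host commands during an RF-kill power-down
+ * as described above.
+ */
+static inline void iwl_legacy_block_hcmd_sketch(struct iwl_priv *priv)
+{
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+}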
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
+#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
+
+/* LED */
+#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
+#define CSR_LED_REG_TRUN_ON (0x78)
+#define CSR_LED_REG_TRUN_OFF (0x38)
+
+/* ANA_PLL */
+#define CSR39_ANA_PLL_CFG_VAL (0x01000000)
+
+/* HPET MEM debug */
+#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
+
+/* DRAM INT TABLE */
+#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
+#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
+
+/*
+ * HBUS (Host-side Bus)
+ *
+ * HBUS registers are mapped directly into PCI bus space, but are used
+ * to indirectly access device's internal memory or registers that
+ * may be powered-down.
+ *
+ * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
+ * for these registers;
+ * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
+ * to make sure the MAC (uCode processor, etc.) is powered up for accessing
+ * internal resources.
+ *
+ * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * these provide only simple PCI bus access, without waking up the MAC.
+ */
+#define HBUS_BASE (0x400)
+
+/*
+ * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
+ * structures, error log, event log, verifying uCode load).
+ * First write to address register, then read from or write to data register
+ * to complete the job. Once the address register is set up, accesses to
+ * data registers auto-increment the address by one dword.
+ * Bit usage for address registers (read or write):
+ * 0-31: memory address within device
+ */
+#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c)
+#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010)
+#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
+#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
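+
+/*
+ * Usage sketch (illustrative only): reading one dword of device SRAM via
+ * the address/data pair above, using the iwl_legacy_write_direct32()/
+ * iwl_legacy_read_direct32() family named in the HBUS comment.  Assumes
+ * the caller already holds MAC access (MAC_ACCESS_REQ).
+ */
+static inline u32 iwl_legacy_read_sram_word_sketch(struct iwl_priv *priv,
+ u32 addr)
+{
+ iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
+ return iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+}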
+
+/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
+#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
+#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
+
+/*
+ * Registers for accessing device's internal peripheral registers
+ * (e.g. SCD, BSM, etc.). First write to address register,
+ * then read from or write to data register to complete the job.
+ * Bit usage for address registers (read or write):
+ * 0-15: register address (offset) within device
+ * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
+ */
+#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044)
+#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048)
+#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
+#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
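+
+/*
+ * Usage sketch (illustrative only): a dword peripheral-register read.
+ * Bits 24-25 carry (# bytes - 1), so 3 selects a full dword as described
+ * above; MAC access is assumed to be held already.
+ */
+static inline u32 iwl_legacy_read_prph_sketch(struct iwl_priv *priv, u32 reg)
+{
+ iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+ return iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
+}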
+
+/*
+ * Per-Tx-queue write pointer (index, really!)
+ * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Bit usage:
+ * 0-7: queue write index
+ * 11-8: queue selector
+ */
+#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
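+
+/*
+ * Usage sketch (illustrative only): updating a queue's write pointer packs
+ * the index into bits 0-7 and the queue number into bits 8-11.
+ */
+static inline void iwl_legacy_txq_set_wrptr_sketch(struct iwl_priv *priv,
+ u16 txq_id, u16 write_idx)
+{
+ iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
+ (write_idx & 0xff) | (txq_id << 8));
+}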
+
+#endif /* !__iwl_legacy_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
new file mode 100644
index 000000000000..ae13112701bf
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debug.h
@@ -0,0 +1,198 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_debug_h__
+#define __iwl_legacy_debug_h__
+
+struct iwl_priv;
+extern u32 iwlegacy_debug_level;
+
+#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
+#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
+#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
+#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
+
+#define iwl_print_hex_error(priv, p, len) \
+do { \
+ print_hex_dump(KERN_ERR, "iwl data: ", \
+ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
+} while (0)
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+#define IWL_DEBUG(__priv, level, fmt, args...) \
+do { \
+ if (iwl_legacy_get_debug_level(__priv) & (level)) \
+ dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
+ "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
+ __func__ , ## args); \
+} while (0)
+
+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
+do { \
+ if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
+ dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
+ "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
+ __func__ , ## args); \
+} while (0)
+
+#define iwl_print_hex_dump(priv, level, p, len) \
+do { \
+ if (iwl_legacy_get_debug_level(priv) & level) \
+ print_hex_dump(KERN_DEBUG, "iwl data: ", \
+ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
+} while (0)
+
+#else
+#define IWL_DEBUG(__priv, level, fmt, args...)
+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
+static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
+ const void *p, u32 len)
+{}
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
+void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
+#else
+static inline int
+iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
+{
+ return 0;
+}
+static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
+{
+}
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of
+ *
+ * #define IWL_DL_xxxx VALUE
+ *
+ * where xxxx should be the name of the classification (for example, WEP).
+ *
+ * You then need to either add a IWL_xxxx_DEBUG() macro definition for your
+ * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * The active debug levels can be accessed via files
+ *
+ * /sys/module/iwl4965/parameters/debug
+ * /sys/module/iwl3945/parameters/debug
+ * /sys/class/net/wlan0/device/debug_level
+ *
+ * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
+ */
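+
+/*
+ * Illustrative sketch only (kept compiled out): what a new classification
+ * and its convenience macro would look like, following the description
+ * above.  "EXAMPLE" is a hypothetical name; every bit below is already
+ * assigned, so a real addition would have to reuse or extend the map.
+ */
+#if 0
+#define IWL_DL_EXAMPLE (1 << 31) /* hypothetical classification */
+#define IWL_DEBUG_EXAMPLE(p, f, a...) IWL_DEBUG(p, IWL_DL_EXAMPLE, f, ## a)
+#endif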
+
+/* 0x0000000F - 0x00000001 */
+#define IWL_DL_INFO (1 << 0)
+#define IWL_DL_MAC80211 (1 << 1)
+#define IWL_DL_HCMD (1 << 2)
+#define IWL_DL_STATE (1 << 3)
+/* 0x000000F0 - 0x00000010 */
+#define IWL_DL_MACDUMP (1 << 4)
+#define IWL_DL_HCMD_DUMP (1 << 5)
+#define IWL_DL_EEPROM (1 << 6)
+#define IWL_DL_RADIO (1 << 7)
+/* 0x00000F00 - 0x00000100 */
+#define IWL_DL_POWER (1 << 8)
+#define IWL_DL_TEMP (1 << 9)
+#define IWL_DL_NOTIF (1 << 10)
+#define IWL_DL_SCAN (1 << 11)
+/* 0x0000F000 - 0x00001000 */
+#define IWL_DL_ASSOC (1 << 12)
+#define IWL_DL_DROP (1 << 13)
+#define IWL_DL_TXPOWER (1 << 14)
+#define IWL_DL_AP (1 << 15)
+/* 0x000F0000 - 0x00010000 */
+#define IWL_DL_FW (1 << 16)
+#define IWL_DL_RF_KILL (1 << 17)
+#define IWL_DL_FW_ERRORS (1 << 18)
+#define IWL_DL_LED (1 << 19)
+/* 0x00F00000 - 0x00100000 */
+#define IWL_DL_RATE (1 << 20)
+#define IWL_DL_CALIB (1 << 21)
+#define IWL_DL_WEP (1 << 22)
+#define IWL_DL_TX (1 << 23)
+/* 0x0F000000 - 0x01000000 */
+#define IWL_DL_RX (1 << 24)
+#define IWL_DL_ISR (1 << 25)
+#define IWL_DL_HT (1 << 26)
+#define IWL_DL_IO (1 << 27)
+/* 0xF0000000 - 0x10000000 */
+#define IWL_DL_11H (1 << 28)
+#define IWL_DL_STATS (1 << 29)
+#define IWL_DL_TX_REPLY (1 << 30)
+#define IWL_DL_QOS (1 << 31)
+
+#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
+#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
+#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
+#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
+#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
+#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
+#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
+#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
+#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
+#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
+#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
+#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
+#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
+#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
+#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
+#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
+#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
+ IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
+#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
+#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
+#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
+#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
+#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
+ IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
+#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
+#define IWL_DEBUG_ASSOC(p, f, a...) \
+ IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
+ IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
+#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
+#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
+ IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
+#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
+#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
+ IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
+#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
+#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
+#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
+#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
+
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
new file mode 100644
index 000000000000..2d32438b4cb8
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
@@ -0,0 +1,1467 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <net/mac80211.h>
+
+
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+
+/* creation and removal of debugfs files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, priv, \
+ &iwl_legacy_dbgfs_##name##_ops)) \
+ goto err; \
+} while (0)
+
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
+
+#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
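+
+/*
+ * Usage sketch (illustrative only): the helpers above expect a local
+ * "priv" pointer and an "err" label in the calling function, e.g. in a
+ * registration routine (directory and field names here are hypothetical):
+ *
+ *	DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+ *	DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal);
+ */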
+
+/* file operations */
+#define DEBUGFS_READ_FUNC(name) \
+static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name) \
+static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+
+static int
+iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define DEBUGFS_READ_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
+ .read = iwl_legacy_dbgfs_##name##_read, \
+ .open = iwl_legacy_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name) \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
+ .write = iwl_legacy_dbgfs_##name##_write, \
+ .open = iwl_legacy_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
+ .write = iwl_legacy_dbgfs_##name##_write, \
+ .read = iwl_legacy_dbgfs_##name##_read, \
+ .open = iwl_legacy_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char *buf;
+ int pos = 0;
+
+ int cnt;
+ ssize_t ret;
+ const size_t bufsz = 100 +
+ sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+ for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\t%25s\t\t: %u\n",
+ iwl_legacy_get_mgmt_string(cnt),
+ priv->tx_stats.mgmt[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
+ for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\t%25s\t\t: %u\n",
+ iwl_legacy_get_ctrl_string(cnt),
+ priv->tx_stats.ctrl[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+ priv->tx_stats.data_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+ priv->tx_stats.data_bytes);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ u32 clear_flag;
+ char buf[8];
+ int buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &clear_flag) != 1)
+ return -EFAULT;
+ iwl_legacy_clear_traffic_stats(priv);
+
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ ssize_t ret;
+ const size_t bufsz = 100 +
+ sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+ for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\t%25s\t\t: %u\n",
+ iwl_legacy_get_mgmt_string(cnt),
+ priv->rx_stats.mgmt[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
+ for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\t%25s\t\t: %u\n",
+ iwl_legacy_get_ctrl_string(cnt),
+ priv->rx_stats.ctrl[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+ priv->rx_stats.data_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+ priv->rx_stats.data_bytes);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+#define BYTE1_MASK 0x000000ff
+#define BYTE2_MASK 0x0000ffff
+#define BYTE3_MASK 0x00ffffff
+static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ u32 val;
+ char *buf;
+ ssize_t ret;
+ int i;
+ int pos = 0;
+ struct iwl_priv *priv = file->private_data;
+ size_t bufsz;
+
+ /* default is to dump the entire data segment */
+ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+ priv->dbgfs_sram_offset = 0x800000;
+ if (priv->ucode_type == UCODE_INIT)
+ priv->dbgfs_sram_len = priv->ucode_init_data.len;
+ else
+ priv->dbgfs_sram_len = priv->ucode_data.len;
+ }
+ bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
+ priv->dbgfs_sram_len);
+ pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+ priv->dbgfs_sram_offset);
+ for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
+ val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
+ priv->dbgfs_sram_len - i);
+ if (i < 4) {
+ switch (i) {
+ case 1:
+ val &= BYTE1_MASK;
+ break;
+ case 2:
+ val &= BYTE2_MASK;
+ break;
+ case 3:
+ val &= BYTE3_MASK;
+ break;
+ }
+ }
+ if (!(i % 16))
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[64];
+ int buf_size;
+ u32 offset, len;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+ priv->dbgfs_sram_offset = offset;
+ priv->dbgfs_sram_len = len;
+ } else {
+ priv->dbgfs_sram_offset = 0;
+ priv->dbgfs_sram_len = 0;
+ }
+
+ return count;
+}
+
+static ssize_t
+iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ struct iwl_station_entry *station;
+ int max_sta = priv->hw_params.max_stations;
+ char *buf;
+ int i, j, pos = 0;
+ ssize_t ret;
+ /* Add 30 for initial string */
+ const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
+
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
+ priv->num_stations);
+
+ for (i = 0; i < max_sta; i++) {
+ station = &priv->stations[i];
+ if (!station->used)
+ continue;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "station %d - addr: %pM, flags: %#x\n",
+ i, station->sta.sta.addr,
+ station->sta.station_flags_msk);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "TID\tseq_num\ttxq_id\tframes\ttfds\t");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "start_idx\tbitmap\t\t\trate_n_flags\n");
+
+ for (j = 0; j < MAX_TID_COUNT; j++) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
+ j, station->tid[j].seq_number,
+ station->tid[j].agg.txq_id,
+ station->tid[j].agg.frame_count,
+ station->tid[j].tfds_in_queue,
+ station->tid[j].agg.start_idx,
+ station->tid[j].agg.bitmap,
+ station->tid[j].agg.rate_n_flags);
+
+ if (station->tid[j].agg.wait_for_ba)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " - waitforba");
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ ssize_t ret;
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0, ofs = 0, buf_size = 0;
+ const u8 *ptr;
+ char *buf;
+ u16 eeprom_ver;
+ size_t eeprom_len = priv->cfg->base_params->eeprom_size;
+ buf_size = 4 * eeprom_len + 256;
+
+ if (eeprom_len % 16) {
+ IWL_ERR(priv, "NVM size is not multiple of 16.\n");
+ return -ENODATA;
+ }
+
+ ptr = priv->eeprom;
+ if (!ptr) {
+ IWL_ERR(priv, "Invalid EEPROM memory\n");
+ return -ENOMEM;
+ }
+
+ /* 4 characters for byte 0xYY */
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+ eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
+ pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
+ "version: 0x%x\n", eeprom_ver);
+ for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
+ pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
+ hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
+ buf_size - pos, 0);
+ pos += strlen(buf + pos);
+ if (buf_size - pos > 0)
+ buf[pos++] = '\n';
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_log_event_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -ENOMEM;
+
+ ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
+ priv, true, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ }
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_log_event_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ u32 event_log_flag;
+ char buf[8];
+ int buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &event_log_flag) != 1)
+ return -EFAULT;
+ if (event_log_flag == 1)
+ priv->cfg->ops->lib->dump_nic_event_log(priv, true,
+ NULL, false);
+
+ return count;
+}
+
+
+
+static ssize_t
+iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ struct ieee80211_channel *channels = NULL;
+ const struct ieee80211_supported_band *supp_band = NULL;
+ int pos = 0, i, bufsz = PAGE_SIZE;
+ char *buf;
+ ssize_t ret;
+
+ if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
+ if (supp_band) {
+ channels = supp_band->channels;
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Displaying %d channels in 2.4GHz band 802.11bg):\n",
+ supp_band->n_channels);
+
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ channels[i].hw_value,
+ channels[i].max_power,
+ channels[i].flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+ || (channels[i].flags &
+ IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].flags &
+ IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
+ supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
+ if (supp_band) {
+ channels = supp_band->channels;
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Displaying %d channels in 5.2GHz band (802.11a)\n",
+ supp_band->n_channels);
+
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ channels[i].hw_value,
+ channels[i].max_power,
+ channels[i].flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
+ || (channels[i].flags &
+ IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].flags &
+ IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[512];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
+ test_bit(STATUS_HCMD_ACTIVE, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
+ test_bit(STATUS_INT_ENABLED, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
+ test_bit(STATUS_CT_KILL, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
+ test_bit(STATUS_INIT, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
+ test_bit(STATUS_ALIVE, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
+ test_bit(STATUS_READY, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
+ test_bit(STATUS_TEMPERATURE, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
+ test_bit(STATUS_EXIT_PENDING, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
+ test_bit(STATUS_STATISTICS, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
+ test_bit(STATUS_SCANNING, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
+ test_bit(STATUS_SCAN_ABORTING, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
+ test_bit(STATUS_SCAN_HW, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
+ test_bit(STATUS_POWER_PMI, &priv->status));
+ pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
+ test_bit(STATUS_FW_ERROR, &priv->status));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = 24 * 64; /* 24 items * 64 char per item */
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Interrupt Statistics Report:\n");
+
+ pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+ priv->isr_stats.hw);
+ pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+ priv->isr_stats.sw);
+ if (priv->isr_stats.sw || priv->isr_stats.hw) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tLast Restarting Code: 0x%X\n",
+ priv->isr_stats.err_code);
+ }
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+ priv->isr_stats.sch);
+ pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+ priv->isr_stats.alive);
+#endif
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "HW RF KILL switch toggled:\t %u\n",
+ priv->isr_stats.rfkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+ priv->isr_stats.ctkill);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+ priv->isr_stats.wakeup);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Rx command responses:\t\t %u\n",
+ priv->isr_stats.rx);
+ for (cnt = 0; cnt < REPLY_MAX; cnt++) {
+ if (priv->isr_stats.rx_handlers[cnt] > 0)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tRx handler[%36s]:\t\t %u\n",
+ iwl_legacy_get_cmd_string(cnt),
+ priv->isr_stats.rx_handlers[cnt]);
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+ priv->isr_stats.tx);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+ priv->isr_stats.unhandled);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ u32 reset_flag;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &reset_flag) != 1)
+ return -EFAULT;
+ if (reset_flag == 0)
+ iwl_legacy_clear_isr_stats(priv);
+
+ return count;
+}
+
+static ssize_t
+iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ struct iwl_rxon_context *ctx;
+ int pos = 0, i;
+ char buf[256 * NUM_IWL_RXON_CTX];
+ const size_t bufsz = sizeof(buf);
+
+ for_each_context(priv, ctx) {
+ pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
+ ctx->ctxid);
+ for (i = 0; i < AC_NUM; i++) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tcw_min\tcw_max\taifsn\ttxop\n");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "AC[%d]\t%u\t%u\t%u\t%u\n", i,
+ ctx->qos_data.def_qos_parm.ac[i].cw_min,
+ ctx->qos_data.def_qos_parm.ac[i].cw_max,
+ ctx->qos_data.def_qos_parm.ac[i].aifsn,
+ ctx->qos_data.def_qos_parm.ac[i].edca_txop);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int ht40;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &ht40) != 1)
+ return -EFAULT;
+ if (!iwl_legacy_is_any_associated(priv))
+ priv->disable_ht40 = ht40 ? true : false;
+ else {
+ IWL_ERR(priv, "Sta associated with AP - "
+ "Change to 40MHz channel support is not allowed\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[100];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "11n 40MHz Mode: %s\n",
+ priv->disable_ht40 ? "Disabled" : "Enabled");
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(sram);
+DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+DEBUGFS_READ_FILE_OPS(nvm);
+DEBUGFS_READ_FILE_OPS(stations);
+DEBUGFS_READ_FILE_OPS(channels);
+DEBUGFS_READ_FILE_OPS(status);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(qos);
+DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+
+static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0, ofs = 0;
+ int cnt = 0, entry;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ char *buf;
+ int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
+ (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
+ const u8 *ptr;
+ ssize_t ret;
+
+ if (!priv->txq) {
+ IWL_ERR(priv, "txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate buffer\n");
+ return -ENOMEM;
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
+ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+ txq = &priv->txq[cnt];
+ q = &txq->q;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "q[%d]: read_ptr: %u, write_ptr: %u\n",
+ cnt, q->read_ptr, q->write_ptr);
+ }
+ if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
+ ptr = priv->tx_traffic;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
+ for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
+ for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
+ entry++, ofs += 16) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "0x%.4x ", ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+ buf + pos, bufsz - pos, 0);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+ }
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "read: %u, write: %u\n",
+ rxq->read, rxq->write);
+
+ if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
+ ptr = priv->rx_traffic;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
+ for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
+ for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
+ entry++, ofs += 16) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "0x%.4x ", ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+ buf + pos, bufsz - pos, 0);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int traffic_log;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &traffic_log) != 1)
+ return -EFAULT;
+ if (traffic_log == 0)
+ iwl_legacy_reset_traffic_log(priv);
+
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ struct iwl_tx_queue *txq;
+ struct iwl_queue *q;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ int ret;
+ const size_t bufsz = sizeof(char) * 64 *
+ priv->cfg->base_params->num_of_queues;
+
+ if (!priv->txq) {
+ IWL_ERR(priv, "txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+ txq = &priv->txq[cnt];
+ q = &txq->q;
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "hwq %.2d: read=%u write=%u stop=%d"
+ " swq_id=%#.2x (ac %d/hwq %d)\n",
+ cnt, q->read_ptr, q->write_ptr,
+ !!test_bit(cnt, priv->queue_stopped),
+ txq->swq_id, txq->swq_id & 3,
+ (txq->swq_id >> 2) & 0x1f);
+ if (cnt >= 4)
+ continue;
+ /* for the ACs, display the stop count too */
+ pos += scnprintf(buf + pos, bufsz - pos,
+ " stop-count: %d\n",
+ atomic_read(&priv->queue_stop_count[cnt]));
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
+ rxq->read);
+ pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
+ rxq->write);
+ pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+ rxq->free_count);
+ if (rxq->rb_stts) {
+ pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+ le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "closed_rb_num: Not Allocated\n");
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
+ user_buf, count, ppos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
+ user_buf, count, ppos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
+ user_buf, count, ppos);
+}
+
+static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
+ ssize_t ret;
+ struct iwl_sensitivity_data *data;
+
+ data = &priv->sensitivity_data;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IWL_ERR(priv, "Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
+ data->auto_corr_ofdm);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "auto_corr_ofdm_mrc:\t\t %u\n",
+ data->auto_corr_ofdm_mrc);
+ pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
+ data->auto_corr_ofdm_x1);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "auto_corr_ofdm_mrc_x1:\t\t %u\n",
+ data->auto_corr_ofdm_mrc_x1);
+ pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
+ data->auto_corr_cck);
+ pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
+ data->auto_corr_cck_mrc);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "last_bad_plcp_cnt_ofdm:\t\t %u\n",
+ data->last_bad_plcp_cnt_ofdm);
+ pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
+ data->last_fa_cnt_ofdm);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "last_bad_plcp_cnt_cck:\t\t %u\n",
+ data->last_bad_plcp_cnt_cck);
+ pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
+ data->last_fa_cnt_cck);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
+ data->nrg_curr_state);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
+ data->nrg_prev_state);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
+ for (cnt = 0; cnt < 10; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos, " %u",
+ data->nrg_value[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
+ for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos, " %u",
+ data->nrg_silence_rssi[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
+ data->nrg_silence_ref);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
+ data->nrg_energy_idx);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
+ data->nrg_silence_idx);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
+ data->nrg_th_cck);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "nrg_auto_corr_silence_diff:\t %u\n",
+ data->nrg_auto_corr_silence_diff);
+ pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
+ data->num_in_cck_no_fa);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
+ data->nrg_th_ofdm);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+
+static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
+ ssize_t ret;
+ struct iwl_chain_noise_data *data;
+
+ data = &priv->chain_noise_data;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+		IWL_ERR(priv, "Cannot allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
+ data->active_chains);
+ pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
+ data->chain_noise_a);
+ pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
+ data->chain_noise_b);
+ pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
+ data->chain_noise_c);
+ pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
+ data->chain_signal_a);
+ pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
+ data->chain_signal_b);
+ pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
+ data->chain_signal_c);
+ pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
+ data->beacon_count);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
+ for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos, " %u",
+ data->disconn_array[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
+ for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+ pos += scnprintf(buf + pos, bufsz - pos, " %u",
+ data->delta_gain_code[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
+ data->radio_write);
+ pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
+ data->state);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[60];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ u32 pwrsave_status;
+
+ pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
+ CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+ pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
+ (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+ (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+ (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+ "error");
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int clear;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &clear) != 1)
+ return -EFAULT;
+
+	/*
+	 * the parsed value is only validated; any integer written here
+	 * makes a request to uCode to retrieve statistics information
+	 */
+ mutex_lock(&priv->mutex);
+ iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
+ mutex_unlock(&priv->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_tracing_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[128];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
+ priv->event_log.ucode_trace ? "On" : "Off");
+ pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
+ priv->event_log.non_wraps_count);
+ pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
+ priv->event_log.wraps_once_count);
+ pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
+ priv->event_log.wraps_more_count);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_ucode_tracing_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int trace;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &trace) != 1)
+ return -EFAULT;
+
+ if (trace) {
+ priv->event_log.ucode_trace = true;
+ /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
+ mod_timer(&priv->ucode_trace,
+ jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+ } else {
+ priv->event_log.ucode_trace = false;
+ del_timer_sync(&priv->ucode_trace);
+ }
+
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n",
+ le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n",
+ le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+	char *buf = NULL;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+ if (priv->cfg->ops->lib->dump_fh) {
+ ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf,
+ count, ppos, buf, pos);
+ kfree(buf);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
+ priv->missed_beacon_threshold);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int missed;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &missed) != 1)
+ return -EINVAL;
+
+ if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
+ missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
+ priv->missed_beacon_threshold =
+ IWL_MISSED_BEACON_THRESHOLD_DEF;
+ else
+ priv->missed_beacon_threshold = missed;
+
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_plcp_delta_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
+ priv->cfg->base_params->plcp_delta_threshold);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_plcp_delta_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int plcp;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &plcp) != 1)
+ return -EINVAL;
+ if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
+ (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
+ priv->cfg->base_params->plcp_delta_threshold =
+ IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
+ else
+ priv->cfg->base_params->plcp_delta_threshold = plcp;
+ return count;
+}
+
+static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int i, pos = 0;
+ char buf[300];
+ const size_t bufsz = sizeof(buf);
+ struct iwl_force_reset *force_reset;
+
+ for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
+ force_reset = &priv->force_reset[i];
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Force reset method %d\n", i);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request: %d\n",
+ force_reset->reset_request_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request success: %d\n",
+ force_reset->reset_success_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request reject: %d\n",
+ force_reset->reset_reject_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\treset duration: %lu\n",
+ force_reset->reset_duration);
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int reset, ret;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &reset) != 1)
+ return -EINVAL;
+ switch (reset) {
+ case IWL_RF_RESET:
+ case IWL_FW_RESET:
+ ret = iwl_legacy_force_reset(priv, reset, true);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret ? ret : count;
+}
+
+static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int timeout;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &timeout) != 1)
+ return -EINVAL;
+ if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
+ timeout = IWL_DEF_WD_TIMEOUT;
+
+ priv->cfg->base_params->wd_timeout = timeout;
+ iwl_legacy_setup_watchdog(priv);
+ return count;
+}
+
+DEBUGFS_READ_FILE_OPS(rx_statistics);
+DEBUGFS_READ_FILE_OPS(tx_statistics);
+DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_general_stats);
+DEBUGFS_READ_FILE_OPS(sensitivity);
+DEBUGFS_READ_FILE_OPS(chain_noise);
+DEBUGFS_READ_FILE_OPS(power_save_status);
+DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
+DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
+DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
+DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
+DEBUGFS_WRITE_FILE_OPS(wd_timeout);
+
+/*
+ * Create the debugfs files and directories
+ */
+int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
+{
+ struct dentry *phyd = priv->hw->wiphy->debugfsdir;
+ struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+
+ dir_drv = debugfs_create_dir(name, phyd);
+ if (!dir_drv)
+ return -ENOMEM;
+
+ priv->debugfs_dir = dir_drv;
+
+ dir_data = debugfs_create_dir("data", dir_drv);
+ if (!dir_data)
+ goto err;
+ dir_rf = debugfs_create_dir("rf", dir_drv);
+ if (!dir_rf)
+ goto err;
+ dir_debug = debugfs_create_dir("debug", dir_drv);
+ if (!dir_debug)
+ goto err;
+
+ DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+
+ if (priv->cfg->base_params->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+ if (priv->cfg->base_params->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+ if (priv->cfg->base_params->ucode_tracing)
+ DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
+ if (priv->cfg->base_params->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+ &priv->disable_sens_cal);
+ if (priv->cfg->base_params->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+ &priv->disable_chain_noise_cal);
+ DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
+ &priv->disable_tx_power_cal);
+ return 0;
+
+err:
+ IWL_ERR(priv, "Can't create the debugfs directory\n");
+ iwl_legacy_dbgfs_unregister(priv);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
+
+/*
+ * Remove the debugfs files and directories
+ */
+void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
+{
+ if (!priv->debugfs_dir)
+ return;
+
+ debugfs_remove_recursive(priv->debugfs_dir);
+ priv->debugfs_dir = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
new file mode 100644
index 000000000000..9ee849d669f3
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -0,0 +1,1426 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-dev.h) for driver implementation definitions.
+ * Please use iwl-commands.h for uCode API definitions.
+ * Please use iwl-4965-hw.h for hardware-related definitions.
+ */
+
+#ifndef __iwl_legacy_dev_h__
+#define __iwl_legacy_dev_h__
+
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/wait.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-fh.h"
+#include "iwl-debug.h"
+#include "iwl-4965-hw.h"
+#include "iwl-3945-hw.h"
+#include "iwl-led.h"
+#include "iwl-power.h"
+#include "iwl-legacy-rs.h"
+
+struct iwl_tx_queue;
+
+/* CT-KILL constants */
+#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
+
+/* Default noise level to report when noise measurement is not available.
+ * This may be because we're:
+ * 1) Not associated (4965, no beacon statistics being sent to driver)
+ * 2) Scanning (noise measurement does not apply to associated channel)
+ * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Also, -127 works better than 0 when averaging frames with/without
+ * noise info (e.g. averaging might be done in app); measured dBm values are
+ * always negative ... using a negative value as the default keeps all
+ * averages within an s8's (used in some apps) range of negative values. */
+#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ * a value of 0 means RTS on all data/management packets
+ * a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ * than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD 2347U
+#define MIN_RTS_THRESHOLD 0U
+#define MAX_RTS_THRESHOLD 2347U
+#define MAX_MSDU_SIZE 2304U
+#define MAX_MPDU_SIZE 2346U
+#define DEFAULT_BEACON_INTERVAL 100U
+#define DEFAULT_SHORT_RETRY_LIMIT 7U
+#define DEFAULT_LONG_RETRY_LIMIT 4U
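+
+/*
+ * Illustrative sketch only (hypothetical helper, not part of the driver):
+ * the RTS threshold rule described above, restated as code.
+ */
+static inline bool iwl_example_needs_rts(u32 rts_threshold, u32 mpdu_len)
+{
+	if (rts_threshold == 0)
+		return true;			/* RTS on all data/management */
+	if (rts_threshold > MAX_MSDU_SIZE)
+		return false;			/* no RTS at all */
+	return mpdu_len > rts_threshold;	/* RTS only for larger MPDUs */
+}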
+
+struct iwl_rx_mem_buffer {
+ dma_addr_t page_dma;
+ struct page *page;
+ struct list_head list;
+};
+
+#define rxb_addr(r) page_address(r->page)
+
+/* defined below */
+struct iwl_device_cmd;
+
+struct iwl_cmd_meta {
+ /* only for SYNC commands, iff the reply skb is wanted */
+ struct iwl_host_cmd *source;
+ /*
+ * only for ASYNC commands
+ * (which is somewhat stupid -- look at iwl-sta.c for instance
+ * which duplicates a bunch of code because the callback isn't
+ * invoked for SYNC commands, if it were and its result passed
+ * through it would be simpler...)
+ */
+ void (*callback)(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt);
+
+ /* The CMD_SIZE_HUGE flag bit indicates that the command
+ * structure is stored at the end of the shared queue memory. */
+ u32 flags;
+
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues
+ */
+struct iwl_queue {
+ int n_bd; /* number of BDs in this queue */
+ int write_ptr; /* 1-st empty entry (index) host_w*/
+ int read_ptr; /* last used entry (index) host_r*/
+ /* use for monitoring and recovering the stuck queue */
+ dma_addr_t dma_addr; /* physical addr for BD's */
+ int n_window; /* safe queue window */
+ u32 id;
+ int low_mark; /* low watermark, resume queue if free
+ * space more than this */
+ int high_mark; /* high watermark, stop queue if free
+ * space less than this */
+} __packed;
+
+/* One for each TFD */
+struct iwl_tx_info {
+ struct sk_buff *skb;
+ struct iwl_rxon_context *ctx;
+};
+
+/**
+ * struct iwl_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @tfds: base of circular buffer of TFDs (transmit frame descriptors)
+ * @cmd: array of command/TX buffer pointers
+ * @meta: array of meta data for each command/tx buffer
+ * @txb: array of per-TFD driver data
+ * @time_stamp: time (in jiffies) of last read_ptr change
+ * @need_update: indicates need to update read/write index
+ * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
+ * @active: queue is in use
+ * @swq_id: software queue id; the low two bits hold the AC, the bits
+ *	above hold the hardware queue number
+ *
+ * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ */
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct iwl_tx_queue {
+ struct iwl_queue q;
+ void *tfds;
+ struct iwl_device_cmd **cmd;
+ struct iwl_cmd_meta *meta;
+ struct iwl_tx_info *txb;
+ unsigned long time_stamp;
+ u8 need_update;
+ u8 sched_retry;
+ u8 active;
+ u8 swq_id;
+};
+
+#define IWL_NUM_SCAN_RATES (2)
+
+struct iwl4965_channel_tgd_info {
+ u8 type;
+ s8 max_power;
+};
+
+struct iwl4965_channel_tgh_info {
+ s64 last_radar_time;
+};
+
+#define IWL4965_MAX_RATE (33)
+
+struct iwl3945_clip_group {
+ /* maximum power level to prevent clipping for each rate, derived by
+ * us from this band's saturation power in EEPROM */
+ const s8 clip_powers[IWL_MAX_RATES];
+};
+
+/* current Tx power values to use, one for each rate for each channel.
+ * requested power is limited by:
+ * -- regulatory EEPROM limits for this channel
+ * -- hardware capabilities (clip-powers)
+ * -- spectrum management
+ * -- user preference (e.g. iwconfig)
+ * when requested power is set, base power index must also be set. */
+struct iwl3945_channel_power_info {
+ struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
+	s8 power_table_index;	/* actual (compensated) index into gain table */
+ s8 base_power_index; /* gain index for power at factory temp. */
+ s8 requested_power; /* power (dBm) requested for this chnl/rate */
+};
+
+/* current scan Tx power values to use, one for each scan rate for each
+ * channel. */
+struct iwl3945_scan_power_info {
+ struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
+	s8 power_table_index;	/* actual (compensated) index into gain table */
+ s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
+};
+
+/*
+ * One for each channel, holds all channel setup data
+ * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
+ * with one another!
+ */
+struct iwl_channel_info {
+ struct iwl4965_channel_tgd_info tgd;
+ struct iwl4965_channel_tgh_info tgh;
+ struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
+ struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
+ * HT40 channel */
+
+ u8 channel; /* channel number */
+ u8 flags; /* flags copied from EEPROM */
+ s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+ s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
+ s8 min_power; /* always 0 */
+ s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
+
+ u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
+ u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
+ enum ieee80211_band band;
+
+ /* HT40 channel info */
+ s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+ u8 ht40_flags; /* flags copied from EEPROM */
+ u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
+
+ /* Radio/DSP gain settings for each "normal" data Tx rate.
+ * These include, in addition to RF and DSP gain, a few fields for
+ * remembering/modifying gain settings (indexes). */
+ struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
+
+ /* Radio/DSP gain settings for each scan rate, for directed scans. */
+ struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
+};
+
+#define IWL_TX_FIFO_BK 0 /* shared */
+#define IWL_TX_FIFO_BE 1
+#define IWL_TX_FIFO_VI 2 /* shared */
+#define IWL_TX_FIFO_VO 3
+#define IWL_TX_FIFO_UNUSED -1
+
+/* Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command
+ * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
+#define IWL_MIN_NUM_QUEUES 10
+
+#define IWL_DEFAULT_CMD_QUEUE_NUM 4
+
+#define IEEE80211_DATA_LEN 2304
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct iwl_frame {
+ union {
+ struct ieee80211_hdr frame;
+ struct iwl_tx_beacon_cmd beacon;
+ u8 raw[IEEE80211_FRAME_LEN];
+ u8 cmd[360];
+ } u;
+ struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
+
+enum {
+ CMD_SYNC = 0,
+ CMD_SIZE_NORMAL = 0,
+ CMD_NO_SKB = 0,
+ CMD_SIZE_HUGE = (1 << 0),
+ CMD_ASYNC = (1 << 1),
+ CMD_WANT_SKB = (1 << 2),
+};
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct iwl_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for a scan command
+ * (which is relatively huge; space is allocated separately).
+ */
+struct iwl_device_cmd {
+ struct iwl_cmd_header hdr; /* uCode API */
+ union {
+ u32 flags;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ struct iwl_tx_cmd tx;
+ u8 payload[DEF_CMD_PAYLOAD_SIZE];
+ } __packed cmd;
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
+
+
+struct iwl_host_cmd {
+ const void *data;
+ unsigned long reply_page;
+ void (*callback)(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt);
+ u32 flags;
+ u16 len;
+ u8 id;
+};
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
+
+/**
+ * struct iwl_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @read: Shared index to newest available Rx buffer
+ * @write: Shared index to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ *
+ * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
+ */
+struct iwl_rx_queue {
+ __le32 *bd;
+ dma_addr_t bd_dma;
+ struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+ struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+ u32 read;
+ u32 write;
+ u32 free_count;
+ u32 write_actual;
+ struct list_head rx_free;
+ struct list_head rx_used;
+ int need_update;
+ struct iwl_rb_status *rb_stts;
+ dma_addr_t rb_stts_dma;
+ spinlock_t lock;
+};
+
+#define IWL_SUPPORTED_RATES_IE_LEN 8
+
+#define MAX_TID_COUNT 9
+
+#define IWL_INVALID_RATE 0xFF
+#define IWL_INVALID_VALUE -1
+
+/**
+ * struct iwl_ht_agg -- aggregation status while waiting for block-ack
+ * @txq_id: Tx queue used for Tx attempt
+ * @frame_count: # frames attempted by Tx command
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
+ * @bitmap: bitmap of frames pending ACK in Tx window, one bit per frame
+ * @rate_n_flags: Rate at which Tx was attempted
+ *
+ * If REPLY_TX indicates that aggregation was attempted, driver must wait
+ * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
+ * until block ack arrives.
+ */
+struct iwl_ht_agg {
+ u16 txq_id;
+ u16 frame_count;
+ u16 wait_for_ba;
+ u16 start_idx;
+ u64 bitmap;
+ u32 rate_n_flags;
+#define IWL_AGG_OFF 0
+#define IWL_AGG_ON 1
+#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
+#define IWL_EMPTYING_HW_QUEUE_DELBA 3
+ u8 state;
+};
+
+
+struct iwl_tid_data {
+ u16 seq_number; /* 4965 only */
+ u16 tfds_in_queue;
+ struct iwl_ht_agg agg;
+};
+
+struct iwl_hw_key {
+ u32 cipher;
+ int keylen;
+ u8 keyidx;
+ u8 key[32];
+};
+
+union iwl_ht_rate_supp {
+ u16 rates;
+ struct {
+ u8 siso_rate;
+ u8 mimo_rate;
+ };
+};
+
+#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
+#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
+#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
+#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
+#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
+
+/*
+ * Maximal MPDU density for TX aggregation
+ * 4 - 2us density
+ * 5 - 4us density
+ * 6 - 8us density
+ * 7 - 16us density
+ */
+#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
+#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
+#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
+#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
+#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
+#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
+#define CFG_HT_MPDU_DENSITY_MIN (0x1)
+
+struct iwl_ht_config {
+ bool single_chain_sufficient;
+ enum ieee80211_smps_mode smps; /* current smps mode */
+};
+
+/* QoS structures */
+struct iwl_qos_info {
+ int qos_active;
+ struct iwl_qosparam_cmd def_qos_parm;
+};
+
+/*
+ * Structure should be accessed with sta_lock held. When station addition
+ * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
+ * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
+ * sta_lock held.
+ */
+struct iwl_station_entry {
+ struct iwl_legacy_addsta_cmd sta;
+ struct iwl_tid_data tid[MAX_TID_COUNT];
+ u8 used, ctxid;
+ struct iwl_hw_key keyinfo;
+ struct iwl_link_quality_cmd *lq;
+};
+
+struct iwl_station_priv_common {
+ struct iwl_rxon_context *ctx;
+ u8 sta_id;
+};
+
+/*
+ * iwl_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in
+ * that space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct iwl_station_priv {
+ struct iwl_station_priv_common common;
+ struct iwl_lq_sta lq_sta;
+ atomic_t pending_frames;
+ bool client;
+ bool asleep;
+};
+
+/**
+ * struct iwl_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct iwl_vif_priv {
+ struct iwl_rxon_context *ctx;
+ u8 ibss_bssid_sta_id;
+};
+
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+ void *v_addr; /* access by driver */
+ dma_addr_t p_addr; /* access by card's busmaster DMA */
+ u32 len; /* bytes */
+};
+
+/* uCode file layout */
+struct iwl_ucode_header {
+ __le32 ver; /* major/minor/API/serial */
+ struct {
+ __le32 inst_size; /* bytes of runtime code */
+ __le32 data_size; /* bytes of runtime data */
+ __le32 init_size; /* bytes of init code */
+ __le32 init_data_size; /* bytes of init data */
+ __le32 boot_size; /* bytes of bootstrap code */
+ u8 data[0]; /* in same order as sizes */
+ } v1;
+};
+
+struct iwl4965_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num;
+ u16 frag_num;
+ unsigned long packet_time;
+ struct list_head list;
+};
+
+struct iwl_sensitivity_ranges {
+ u16 min_nrg_cck;
+ u16 max_nrg_cck;
+
+ u16 nrg_th_cck;
+ u16 nrg_th_ofdm;
+
+ u16 auto_corr_min_ofdm;
+ u16 auto_corr_min_ofdm_mrc;
+ u16 auto_corr_min_ofdm_x1;
+ u16 auto_corr_min_ofdm_mrc_x1;
+
+ u16 auto_corr_max_ofdm;
+ u16 auto_corr_max_ofdm_mrc;
+ u16 auto_corr_max_ofdm_x1;
+ u16 auto_corr_max_ofdm_mrc_x1;
+
+ u16 auto_corr_max_cck;
+ u16 auto_corr_max_cck_mrc;
+ u16 auto_corr_min_cck;
+ u16 auto_corr_min_cck_mrc;
+
+ u16 barker_corr_th_min;
+ u16 barker_corr_th_min_mrc;
+ u16 nrg_th_cca;
+};
+
+
+#define KELVIN_TO_CELSIUS(x) ((x)-273)
+#define CELSIUS_TO_KELVIN(x) ((x)+273)
+
+
+/**
+ * struct iwl_hw_params
+ * @max_txq_num: Max # Tx queues supported
+ * @dma_chnl_num: Number of Tx DMA/FIFO channels
+ * @scd_bc_tbls_size: size of scheduler byte count tables
+ * @tfd_size: TFD size
+ * @tx/rx_chains_num: Number of TX/RX chains
+ * @valid_tx/rx_ant: usable antennas
+ * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
+ * @max_rxq_log: Log-base-2 of max_rxq_size
+ * @rx_page_order: Rx buffer page order
+ * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
+ * @max_stations: max number of stations supported
+ * @ht40_channel: bitmask of bands where 40 MHz channels are possible:
+ * BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ)
+ * @max_xxx_size: for ucode uses
+ * @ct_kill_threshold: temperature threshold
+ * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
+ * @sens: range of sensitivity values
+ */
+struct iwl_hw_params {
+ u8 max_txq_num;
+ u8 dma_chnl_num;
+ u16 scd_bc_tbls_size;
+ u32 tfd_size;
+ u8 tx_chains_num;
+ u8 rx_chains_num;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ u16 max_rxq_size;
+ u16 max_rxq_log;
+ u32 rx_page_order;
+ u32 rx_wrt_ptr_reg;
+ u8 max_stations;
+ u8 ht40_channel;
+ u8 max_beacon_itrvl; /* in 1024 ms */
+ u32 max_inst_size;
+ u32 max_data_size;
+ u32 max_bsm_size;
+ u32 ct_kill_threshold; /* value in hw-dependent units */
+ u16 beacon_time_tsf_bits;
+ const struct iwl_sensitivity_ranges *sens;
+};
+
+
+/******************************************************************************
+ *
+ * Functions implemented in core module which are forward declared here
+ * for use by iwl-[4-5].c
+ *
+ * NOTE: The implementation of these functions are not hardware specific
+ * which is why they are in the core module files.
+ *
+ * Naming convention --
+ * iwl_ <-- Is part of iwlwifi
+ * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * iwl4965_bg_ <-- Called from work queue context
+ * iwl4965_mac_ <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
+extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
+extern int iwl_legacy_queue_space(const struct iwl_queue *q);
+static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
+{
+ return q->write_ptr >= q->read_ptr ?
+ (i >= q->read_ptr && i < q->write_ptr) :
+ !(i < q->read_ptr && i >= q->write_ptr);
+}
+
+
+static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
+ int is_huge)
+{
+ /*
+	 * Init calibration results and scan commands need a buffer larger
+	 * than TFD_MAX_PAYLOAD_SIZE; they use the big buffer at the end of
+	 * the command array.
+ */
+ if (is_huge)
+ return q->n_window; /* must be power of 2 */
+
+ /* Otherwise, use normal size buffers */
+ return index & (q->n_window - 1);
+}
+
+
+struct iwl_dma_ptr {
+ dma_addr_t dma;
+ void *addr;
+ size_t size;
+};
+
+#define IWL_OPERATION_MODE_AUTO 0
+#define IWL_OPERATION_MODE_HT_ONLY 1
+#define IWL_OPERATION_MODE_MIXED 2
+#define IWL_OPERATION_MODE_20MHZ 3
+
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
+
+/* Sensitivity and chain noise calibration */
+#define INITIALIZATION_VALUE 0xFFFF
+#define IWL4965_CAL_NUM_BEACONS 20
+#define IWL_CAL_NUM_BEACONS 16
+#define MAXIMUM_ALLOWED_PATHLOSS 15
+
+#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
+
+#define MAX_FA_OFDM 50
+#define MIN_FA_OFDM 5
+#define MAX_FA_CCK 50
+#define MIN_FA_CCK 5
+
+#define AUTO_CORR_STEP_OFDM 1
+
+#define AUTO_CORR_STEP_CCK 3
+#define AUTO_CORR_MAX_TH_CCK 160
+
+#define NRG_DIFF 2
+#define NRG_STEP_CCK 2
+#define NRG_MARGIN 8
+#define MAX_NUMBER_CCK_NO_FA 100
+
+#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
+
+#define CHAIN_A 0
+#define CHAIN_B 1
+#define CHAIN_C 2
+#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
+#define ALL_BAND_FILTER 0xFF00
+#define IN_BAND_FILTER 0xFF
+#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
+
+#define NRG_NUM_PREV_STAT_L 20
+#define NUM_RX_CHAINS 3
+
+enum iwl4965_false_alarm_state {
+ IWL_FA_TOO_MANY = 0,
+ IWL_FA_TOO_FEW = 1,
+ IWL_FA_GOOD_RANGE = 2,
+};
+
+enum iwl4965_chain_noise_state {
+ IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
+ IWL_CHAIN_NOISE_ACCUMULATE,
+ IWL_CHAIN_NOISE_CALIBRATED,
+ IWL_CHAIN_NOISE_DONE,
+};
+
+enum iwl4965_calib_enabled_state {
+ IWL_CALIB_DISABLED = 0, /* must be 0 */
+ IWL_CALIB_ENABLED = 1,
+};
+
+/*
+ * enum iwl_calib
+ * defines the order in which results of initial calibrations
+ * should be sent to the runtime uCode
+ */
+enum iwl_calib {
+ IWL_CALIB_MAX,
+};
+
+/* Opaque calibration results */
+struct iwl_calib_result {
+ void *buf;
+ size_t buf_len;
+};
+
+enum ucode_type {
+ UCODE_NONE = 0,
+ UCODE_INIT,
+ UCODE_RT
+};
+
+/* Sensitivity calib data */
+struct iwl_sensitivity_data {
+ u32 auto_corr_ofdm;
+ u32 auto_corr_ofdm_mrc;
+ u32 auto_corr_ofdm_x1;
+ u32 auto_corr_ofdm_mrc_x1;
+ u32 auto_corr_cck;
+ u32 auto_corr_cck_mrc;
+
+ u32 last_bad_plcp_cnt_ofdm;
+ u32 last_fa_cnt_ofdm;
+ u32 last_bad_plcp_cnt_cck;
+ u32 last_fa_cnt_cck;
+
+ u32 nrg_curr_state;
+ u32 nrg_prev_state;
+ u32 nrg_value[10];
+ u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
+ u32 nrg_silence_ref;
+ u32 nrg_energy_idx;
+ u32 nrg_silence_idx;
+ u32 nrg_th_cck;
+ s32 nrg_auto_corr_silence_diff;
+ u32 num_in_cck_no_fa;
+ u32 nrg_th_ofdm;
+
+ u16 barker_corr_th_min;
+ u16 barker_corr_th_min_mrc;
+ u16 nrg_th_cca;
+};
+
+/* Chain noise (differential Rx gain) calib data */
+struct iwl_chain_noise_data {
+ u32 active_chains;
+ u32 chain_noise_a;
+ u32 chain_noise_b;
+ u32 chain_noise_c;
+ u32 chain_signal_a;
+ u32 chain_signal_b;
+ u32 chain_signal_c;
+ u16 beacon_count;
+ u8 disconn_array[NUM_RX_CHAINS];
+ u8 delta_gain_code[NUM_RX_CHAINS];
+ u8 radio_write;
+ u8 state;
+};
+
+#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
+
+#define IWL_TRAFFIC_ENTRIES (256)
+#define IWL_TRAFFIC_ENTRY_SIZE (64)
+
+enum {
+ MEASUREMENT_READY = (1 << 0),
+ MEASUREMENT_ACTIVE = (1 << 1),
+};
+
+/* interrupt statistics */
+struct isr_statistics {
+ u32 hw;
+ u32 sw;
+ u32 err_code;
+ u32 sch;
+ u32 alive;
+ u32 rfkill;
+ u32 ctkill;
+ u32 wakeup;
+ u32 rx;
+ u32 rx_handlers[REPLY_MAX];
+ u32 tx;
+ u32 unhandled;
+};
+
+/* management statistics */
+enum iwl_mgmt_stats {
+ MANAGEMENT_ASSOC_REQ = 0,
+ MANAGEMENT_ASSOC_RESP,
+ MANAGEMENT_REASSOC_REQ,
+ MANAGEMENT_REASSOC_RESP,
+ MANAGEMENT_PROBE_REQ,
+ MANAGEMENT_PROBE_RESP,
+ MANAGEMENT_BEACON,
+ MANAGEMENT_ATIM,
+ MANAGEMENT_DISASSOC,
+ MANAGEMENT_AUTH,
+ MANAGEMENT_DEAUTH,
+ MANAGEMENT_ACTION,
+ MANAGEMENT_MAX,
+};
+/* control statistics */
+enum iwl_ctrl_stats {
+ CONTROL_BACK_REQ = 0,
+ CONTROL_BACK,
+ CONTROL_PSPOLL,
+ CONTROL_RTS,
+ CONTROL_CTS,
+ CONTROL_ACK,
+ CONTROL_CFEND,
+ CONTROL_CFENDACK,
+ CONTROL_MAX,
+};
+
+struct traffic_stats {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ u32 mgmt[MANAGEMENT_MAX];
+ u32 ctrl[CONTROL_MAX];
+ u32 data_cnt;
+ u64 data_bytes;
+#endif
+};
+
+/*
+ * iwl_switch_rxon: "channel switch" structure
+ *
+ * @switch_in_progress: channel switch in progress
+ * @channel: new channel
+ */
+struct iwl_switch_rxon {
+ bool switch_in_progress;
+ __le16 channel;
+};
+
+/*
+ * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
+ * to perform continuous uCode event logging operation if enabled
+ */
+#define UCODE_TRACE_PERIOD (100)
+
+/*
+ * iwl_event_log: current uCode event log position
+ *
+ * @ucode_trace: enable/disable ucode continuous trace timer
+ * @num_wraps: how many times the event buffer wraps
+ * @next_entry: the entry just before the next one that uCode would fill
+ * @non_wraps_count: count of event dumps where no wrap was detected
+ * @wraps_once_count: count of event dumps where exactly one wrap was detected
+ * @wraps_more_count: count of event dumps where more than one wrap was
+ *	detected
+ */
+struct iwl_event_log {
+ bool ucode_trace;
+ u32 num_wraps;
+ u32 next_entry;
+ int non_wraps_count;
+ int wraps_once_count;
+ int wraps_more_count;
+};
+
+/*
+ * host interrupt timeout value
+ * used with setting interrupt coalescing timer
+ * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
+#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
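+
+/*
+ * Illustrative sketch only (hypothetical helper, not part of the driver):
+ * CSR_INT_COALESCING counts in 32-usec units, so a desired timeout in
+ * microseconds maps to a register value as below (e.g. 2048 usecs -> 0x40).
+ */
+static inline u32 iwl_example_coalescing_units(u32 usecs)
+{
+	u32 units = usecs / 32;
+
+	return units > IWL_HOST_INT_TIMEOUT_MAX ? IWL_HOST_INT_TIMEOUT_MAX : units;
+}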
+
+/*
+ * This is the threshold value of plcp error rate per 100mSecs. It is
+ * used to set and check for the validity of plcp_delta.
+ */
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
+#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
+#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0)
+
+#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
+#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+
+/* TX queue watchdog timeouts in mSecs */
+#define IWL_DEF_WD_TIMEOUT (2000)
+#define IWL_LONG_WD_TIMEOUT (10000)
+#define IWL_MAX_WD_TIMEOUT (120000)
+
+enum iwl_reset {
+ IWL_RF_RESET = 0,
+ IWL_FW_RESET,
+ IWL_MAX_FORCE_RESET,
+};
+
+struct iwl_force_reset {
+ int reset_request_count;
+ int reset_success_count;
+ int reset_reject_count;
+ unsigned long reset_duration;
+ unsigned long last_force_reset_jiffies;
+};
+
+/* extend beacon time format bit shifting */
+/*
+ * for _3945 devices
+ * bits 31:24 - extended
+ * bits 23:0 - interval
+ */
+#define IWL3945_EXT_BEACON_TIME_POS 24
+/*
+ * for _4965 devices
+ * bits 31:22 - extended
+ * bits 21:0 - interval
+ */
+#define IWL4965_EXT_BEACON_TIME_POS 22
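+
+/*
+ * Illustrative sketch only (hypothetical helper, not part of the driver):
+ * packing a beacon time word from its "extended" and "interval" parts
+ * using one of the *_EXT_BEACON_TIME_POS values above.
+ */
+static inline u32 iwl_example_ext_beacon_time(u32 extended, u32 interval,
+					      int pos)
+{
+	/* the interval occupies bits [pos-1:0], the extended part the rest */
+	return (extended << pos) | (interval & ((1U << pos) - 1));
+}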
+
+enum iwl_rxon_context_id {
+ IWL_RXON_CTX_BSS,
+
+ NUM_IWL_RXON_CTX
+};
+
+struct iwl_rxon_context {
+ struct ieee80211_vif *vif;
+
+ const u8 *ac_to_fifo;
+ const u8 *ac_to_queue;
+ u8 mcast_queue;
+
+ /*
+ * We could use the vif to indicate active, but we
+ * also need it to be active during disabling when
+ * we already removed the vif for type setting.
+ */
+ bool always_active, is_active;
+
+ bool ht_need_multiple_chains;
+
+ enum iwl_rxon_context_id ctxid;
+
+ u32 interface_modes, exclusive_interface_modes;
+ u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
+
+ /*
+ * We declare this const so it can only be
+ * changed via explicit cast within the
+ * routines that actually update the physical
+ * hardware.
+ */
+ const struct iwl_legacy_rxon_cmd active;
+ struct iwl_legacy_rxon_cmd staging;
+
+ struct iwl_rxon_time_cmd timing;
+
+ struct iwl_qos_info qos_data;
+
+ u8 bcast_sta_id, ap_sta_id;
+
+ u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
+ u8 qos_cmd;
+ u8 wep_key_cmd;
+
+ struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
+ u8 key_mapping_keys;
+
+ __le32 station_flags;
+
+ struct {
+ bool non_gf_sta_present;
+ u8 protection;
+ bool enabled, is_40mhz;
+ u8 extension_chan_offset;
+ } ht;
+};
+
+struct iwl_priv {
+
+ /* ieee device used by generic ieee processing code */
+ struct ieee80211_hw *hw;
+ struct ieee80211_channel *ieee_channels;
+ struct ieee80211_rate *ieee_rates;
+ struct iwl_cfg *cfg;
+
+ /* temporary frame storage list */
+ struct list_head free_frames;
+ int frames_count;
+
+ enum ieee80211_band band;
+ int alloc_rxb_page;
+
+ void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
+
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+ /* spectrum measurement report caching */
+ struct iwl_spectrum_notification measure_report;
+ u8 measurement_status;
+
+ /* ucode beacon time */
+ u32 ucode_beacon_time;
+ int missed_beacon_threshold;
+
+ /* track IBSS manager (last beacon) status */
+ u32 ibss_manager;
+
+ /* storing the jiffies when the plcp error rate is received */
+ unsigned long plcp_jiffies;
+
+ /* force reset */
+ struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
+
+ /* we allocate array of iwl_channel_info for NIC's valid channels.
+ * Access via channel # using indirect index array */
+ struct iwl_channel_info *channel_info; /* channel info array */
+ u8 channel_count; /* # of channels */
+
+ /* thermal calibration */
+ s32 temperature; /* degrees Kelvin */
+ s32 last_temperature;
+
+ /* init calibration results */
+ struct iwl_calib_result calib_results[IWL_CALIB_MAX];
+
+ /* Scan related variables */
+ unsigned long scan_start;
+ unsigned long scan_start_tsf;
+ void *scan_cmd;
+ enum ieee80211_band scan_band;
+ struct cfg80211_scan_request *scan_request;
+ struct ieee80211_vif *scan_vif;
+ bool is_internal_short_scan;
+ u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+ u8 mgmt_tx_ant;
+
+ /* spinlock */
+ spinlock_t lock; /* protect general shared data */
+ spinlock_t hcmd_lock; /* protect hcmd */
+ spinlock_t reg_lock; /* protect hw register access */
+ struct mutex mutex;
+ struct mutex sync_cmd_mutex; /* enable serialization of sync commands */
+
+ /* basic pci-network driver stuff */
+ struct pci_dev *pci_dev;
+
+ /* pci hardware address support */
+ void __iomem *hw_base;
+ u32 hw_rev;
+ u32 hw_wa_rev;
+ u8 rev_id;
+
+ /* microcode/device supports multiple contexts */
+ u8 valid_contexts;
+
+ /* command queue number */
+ u8 cmd_queue;
+
+ /* max number of station keys */
+ u8 sta_key_max_num;
+
+ /* EEPROM MAC addresses */
+ struct mac_address addresses[1];
+
+ /* uCode images, save to reload in case of failure */
+ int fw_index; /* firmware we're trying to load */
+ u32 ucode_ver; /* version of ucode, copy of
+ iwl_ucode.ver */
+ struct fw_desc ucode_code; /* runtime inst */
+ struct fw_desc ucode_data; /* runtime data original */
+ struct fw_desc ucode_data_backup; /* runtime data save/restore */
+ struct fw_desc ucode_init; /* initialization inst */
+ struct fw_desc ucode_init_data; /* initialization data */
+ struct fw_desc ucode_boot; /* bootstrap inst */
+ enum ucode_type ucode_type;
+ u8 ucode_write_complete; /* the image write is complete */
+ char firmware_name[25];
+
+ struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
+
+ struct iwl_switch_rxon switch_rxon;
+
+ /* 1st responses from initialize and runtime uCode images.
+ * _4965's initialize alive response contains some calibration data. */
+ struct iwl_init_alive_resp card_alive_init;
+ struct iwl_alive_resp card_alive;
+
+ u16 active_rate;
+
+ u8 start_calib;
+ struct iwl_sensitivity_data sensitivity_data;
+ struct iwl_chain_noise_data chain_noise_data;
+ __le16 sensitivity_tbl[HD_TABLE_SIZE];
+
+ struct iwl_ht_config current_ht_config;
+
+ /* Rate scaling data */
+ u8 retry_rate;
+
+ wait_queue_head_t wait_command_queue;
+
+ int activity_timer_active;
+
+ /* Rx and Tx DMA processing queues */
+ struct iwl_rx_queue rxq;
+ struct iwl_tx_queue *txq;
+ unsigned long txq_ctx_active_msk;
+ struct iwl_dma_ptr kw; /* keep warm address */
+ struct iwl_dma_ptr scd_bc_tbls;
+
+ u32 scd_base_addr; /* scheduler sram base address */
+
+ unsigned long status;
+
+ /* counts mgmt, ctl, and data packets */
+ struct traffic_stats tx_stats;
+ struct traffic_stats rx_stats;
+
+ /* counts interrupts */
+ struct isr_statistics isr_stats;
+
+ struct iwl_power_mgr power_data;
+
+ /* context information */
+ u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
+
+ /* station table variables */
+
+ /* Note: if lock and sta_lock are needed, lock must be acquired first */
+ spinlock_t sta_lock;
+ int num_stations;
+ struct iwl_station_entry stations[IWL_STATION_COUNT];
+ unsigned long ucode_key_table;
+
+ /* queue refcounts */
+#define IWL_MAX_HW_QUEUES 32
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+ /* for each AC */
+ atomic_t queue_stop_count[4];
+
+ /* Indication if ieee80211_ops->open has been called */
+ u8 is_open;
+
+ u8 mac80211_registered;
+
+ /* eeprom -- this is in the card's little endian byte order */
+ u8 *eeprom;
+ struct iwl_eeprom_calib_info *calib_info;
+
+ enum nl80211_iftype iw_mode;
+
+ /* Last Rx'd beacon timestamp */
+ u64 timestamp;
+
+ union {
+#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+ struct {
+ void *shared_virt;
+ dma_addr_t shared_phys;
+
+ struct delayed_work thermal_periodic;
+ struct delayed_work rfkill_poll;
+
+ struct iwl3945_notif_statistics statistics;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ struct iwl3945_notif_statistics accum_statistics;
+ struct iwl3945_notif_statistics delta_statistics;
+ struct iwl3945_notif_statistics max_delta;
+#endif
+
+ u32 sta_supp_rates;
+ int last_rx_rssi; /* From Rx packet statistics */
+
+ /* Rx'd packet timing information */
+ u32 last_beacon_time;
+ u64 last_tsf;
+
+ /*
+ * each calibration channel group in the
+ * EEPROM has a derived clip setting for
+ * each rate.
+ */
+ const struct iwl3945_clip_group clip_groups[5];
+
+ } _3945;
+#endif
+#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+ struct {
+ /*
+			 * number of TIDs that have aggregation (AGG) enabled;
+			 * 0 means no aggregation
+ */
+ u8 agg_tids_count;
+
+ struct iwl_rx_phy_res last_phy_res;
+ bool last_phy_res_valid;
+
+ struct completion firmware_loading_complete;
+
+ /*
+ * chain noise reset and gain commands are the
+			 * two extra calibration commands that follow the standard
+ * phy calibration commands
+ */
+ u8 phy_calib_chain_noise_reset_cmd;
+ u8 phy_calib_chain_noise_gain_cmd;
+
+ struct iwl_notif_statistics statistics;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ struct iwl_notif_statistics accum_statistics;
+ struct iwl_notif_statistics delta_statistics;
+ struct iwl_notif_statistics max_delta;
+#endif
+
+ } _4965;
+#endif
+ };
+
+ struct iwl_hw_params hw_params;
+
+ u32 inta_mask;
+
+ struct workqueue_struct *workqueue;
+
+ struct work_struct restart;
+ struct work_struct scan_completed;
+ struct work_struct rx_replenish;
+ struct work_struct abort_scan;
+
+ struct iwl_rxon_context *beacon_ctx;
+ struct sk_buff *beacon_skb;
+
+ struct work_struct start_internal_scan;
+ struct work_struct tx_flush;
+
+ struct tasklet_struct irq_tasklet;
+
+ struct delayed_work init_alive_start;
+ struct delayed_work alive_start;
+ struct delayed_work scan_check;
+
+ /* TX Power */
+ s8 tx_power_user_lmt;
+ s8 tx_power_device_lmt;
+ s8 tx_power_next;
+
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ /* debugging info */
+ u32 debug_level; /* per device debugging will override global
+ iwlegacy_debug_level if set */
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
+ /* debugfs */
+ u16 tx_traffic_idx;
+ u16 rx_traffic_idx;
+ u8 *tx_traffic;
+ u8 *rx_traffic;
+ struct dentry *debugfs_dir;
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
+ bool disable_ht40;
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
+
+ struct work_struct txpower_work;
+ u32 disable_sens_cal;
+ u32 disable_chain_noise_cal;
+ u32 disable_tx_power_cal;
+ struct work_struct run_time_calib_work;
+ struct timer_list statistics_periodic;
+ struct timer_list ucode_trace;
+ struct timer_list watchdog;
+ bool hw_ready;
+
+ struct iwl_event_log event_log;
+
+ struct led_classdev led;
+ unsigned long blink_on, blink_off;
+ bool led_registered;
+}; /*iwl_priv */
+
+static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
+{
+ set_bit(txq_id, &priv->txq_ctx_active_msk);
+}
+
+static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
+{
+ clear_bit(txq_id, &priv->txq_ctx_active_msk);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+/*
+ * iwl_legacy_get_debug_level: Return active debug level for device
+ *
+ * Using sysfs it is possible to set per device debug level. This debug
+ * level will be used if set, otherwise the global debug level which can be
+ * set via module parameter is used.
+ */
+static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
+{
+ if (priv->debug_level)
+ return priv->debug_level;
+ else
+ return iwlegacy_debug_level;
+}
+#else
+static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
+{
+ return iwlegacy_debug_level;
+}
+#endif
+
+
+static inline struct ieee80211_hdr *
+iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
+ int txq_id, int idx)
+{
+ if (priv->txq[txq_id].txb[idx].skb)
+ return (struct ieee80211_hdr *)priv->txq[txq_id].
+ txb[idx].skb->data;
+ return NULL;
+}
+
+static inline struct iwl_rxon_context *
+iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ return vif_priv->ctx;
+}
+
+#define for_each_context(priv, ctx) \
+ for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
+ ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
+ if (priv->valid_contexts & BIT(ctx->ctxid))
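+
+/*
+ * Illustrative sketch only (hypothetical helper, not called anywhere):
+ * typical use of the for_each_context() iterator defined above.
+ */
+static inline int iwl_example_num_valid_contexts(struct iwl_priv *priv)
+{
+	struct iwl_rxon_context *ctx;
+	int num = 0;
+
+	for_each_context(priv, ctx)
+		num++;
+	return num;
+}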
+
+static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
+ enum iwl_rxon_context_id ctxid)
+{
+ return (priv->contexts[ctxid].active.filter_flags &
+ RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
+{
+ return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
+}
+
+static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
+{
+ return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
+{
+ if (ch_info == NULL)
+ return 0;
+ return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
+{
+ return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
+}
+
+static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
+{
+ return ch_info->band == IEEE80211_BAND_5GHZ;
+}
+
+static inline int
+iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
+{
+ return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
+}
+
+static inline void
+__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
+{
+ __free_pages(page, priv->hw_params.rx_page_order);
+ priv->alloc_rxb_page--;
+}
+
+static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
+{
+ free_pages(page, priv->hw_params.rx_page_order);
+ priv->alloc_rxb_page--;
+}
+#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
new file mode 100644
index 000000000000..080b852b33bd
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
@@ -0,0 +1,45 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+
+/* sparse doesn't like tracepoint macros */
+#ifndef __CHECKER__
+#include "iwl-dev.h"
+
+#define CREATE_TRACE_POINTS
+#include "iwl-devtrace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_cont_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_wrap_event);
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
new file mode 100644
index 000000000000..9612aa0f6ec4
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
@@ -0,0 +1,270 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_LEGACY_DEVICE_TRACE
+
+#include <linux/tracepoint.h>
+
+#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+
+#define PRIV_ENTRY __field(struct iwl_priv *, priv)
+#define PRIV_ASSIGN (__entry->priv = priv)
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_legacy_io
+
+TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
+ TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+ TP_ARGS(priv, offs, val),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
+ __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
+ TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
+ TP_ARGS(priv, offs, val),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(u32, offs)
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
+ __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
+ TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+ TP_ARGS(priv, offs, val),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
+ __entry->offs, __entry->val)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_legacy_ucode
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_cont_event,
+ TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_ARGS(priv, time, data, ev),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, time)
+ __field(u32, data)
+ __field(u32, ev)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->time = time;
+ __entry->data = data;
+ __entry->ev = ev;
+ ),
+ TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
+ __entry->priv, __entry->time, __entry->data, __entry->ev)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_wrap_event,
+ TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
+ TP_ARGS(priv, wraps, n_entry, p_entry),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, wraps)
+ __field(u32, n_entry)
+ __field(u32, p_entry)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->wraps = wraps;
+ __entry->n_entry = n_entry;
+ __entry->p_entry = p_entry;
+ ),
+ TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
+ __entry->priv, __entry->wraps, __entry->n_entry,
+ __entry->p_entry)
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi
+
+TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
+ TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
+ TP_ARGS(priv, hcmd, len, flags),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __dynamic_array(u8, hcmd, len)
+ __field(u32, flags)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ memcpy(__get_dynamic_array(hcmd), hcmd, len);
+ __entry->flags = flags;
+ ),
+ TP_printk("[%p] hcmd %#.2x (%ssync)",
+ __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
+ __entry->flags & CMD_ASYNC ? "a" : "")
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_rx,
+ TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
+ TP_ARGS(priv, rxbuf, len),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __dynamic_array(u8, rxbuf, len)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
+ ),
+ TP_printk("[%p] RX cmd %#.2x",
+ __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_tx,
+ TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
+ void *buf0, size_t buf0_len,
+ void *buf1, size_t buf1_len),
+ TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(size_t, framelen)
+ __dynamic_array(u8, tfd, tfdlen)
+
+ /*
+ * Do not insert between or below these items,
+ * we want to keep the frame together (except
+ * for the possible padding).
+ */
+ __dynamic_array(u8, buf0, buf0_len)
+ __dynamic_array(u8, buf1, buf1_len)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->framelen = buf0_len + buf1_len;
+ memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
+ memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
+ memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
+ ),
+ TP_printk("[%p] TX %.2x (%zu bytes)",
+ __entry->priv,
+ ((u8 *)__get_dynamic_array(buf0))[0],
+ __entry->framelen)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
+ TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
+ u32 data1, u32 data2, u32 line, u32 blink1,
+ u32 blink2, u32 ilink1, u32 ilink2),
+ TP_ARGS(priv, desc, time, data1, data2, line,
+ blink1, blink2, ilink1, ilink2),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(u32, desc)
+ __field(u32, time)
+ __field(u32, data1)
+ __field(u32, data2)
+ __field(u32, line)
+ __field(u32, blink1)
+ __field(u32, blink2)
+ __field(u32, ilink1)
+ __field(u32, ilink2)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->desc = desc;
+ __entry->time = time;
+ __entry->data1 = data1;
+ __entry->data2 = data2;
+ __entry->line = line;
+ __entry->blink1 = blink1;
+ __entry->blink2 = blink2;
+ __entry->ilink1 = ilink1;
+ __entry->ilink2 = ilink2;
+ ),
+ TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
+ "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
+ __entry->priv, __entry->desc, __entry->time, __entry->data1,
+ __entry->data2, __entry->line, __entry->blink1,
+ __entry->blink2, __entry->ilink1, __entry->ilink2)
+);
+
+TRACE_EVENT(iwlwifi_legacy_dev_ucode_event,
+ TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_ARGS(priv, time, data, ev),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, time)
+ __field(u32, data)
+ __field(u32, ev)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->time = time;
+ __entry->data = data;
+ __entry->ev = ev;
+ ),
+ TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
+ __entry->priv, __entry->time, __entry->data, __entry->ev)
+);
+#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
new file mode 100644
index 000000000000..04c5648027df
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
@@ -0,0 +1,561 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-commands.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-debug.h"
+#include "iwl-eeprom.h"
+#include "iwl-io.h"
+
+/************************** EEPROM BANDS ****************************
+ *
+ * The iwlegacy_eeprom_band definitions below provide the mapping from the
+ * EEPROM contents to the specific channel number supported for each
+ * band.
+ *
+ * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
+ * definition below maps to physical channel 42 in the 5.2GHz spectrum.
+ * The specific geography and calibration information for that channel
+ * is contained in the eeprom map itself.
+ *
+ * During init, we copy the eeprom information and channel map
+ * information into priv->channel_info_24/52 and priv->channel_map_24/52
+ *
+ * channel_map_24/52 provides the index in the channel_info array for a
+ * given channel. We have to have two separate maps as there is channel
+ * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
+ * band_2
+ *
+ * A value of 0xff stored in the channel_map indicates that the channel
+ * is not supported by the hardware at all.
+ *
+ * A value of 0xfe in the channel_map indicates that the channel is not
+ * valid for Tx with the current hardware. This means that
+ * while the system can tune and receive on a given channel, it may not
+ * be able to associate or transmit any frames on that
+ * channel. There is no corresponding channel information for that
+ * entry.
+ *
+ *********************************************************************/
+
+/* 2.4 GHz */
+const u8 iwlegacy_eeprom_band_1[14] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+/* 5.2 GHz bands */
+static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */
+ 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */
+ 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */
+ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */
+ 145, 149, 153, 157, 161, 165
+};
+
+static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */
+ 1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */
+ 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
+{
+ u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+ int ret = 0;
+
+ IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
+ switch (gp) {
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+ break;
+ default:
+ IWL_ERR(priv, "bad EEPROM signature,"
+ "EEPROM_GP=0x%08x\n", gp);
+ ret = -ENOENT;
+ break;
+ }
+ return ret;
+}
+
+const u8
+*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
+{
+ BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
+ return &priv->eeprom[offset];
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
+
+u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+{
+ if (!priv->eeprom)
+ return 0;
+ return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
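+
+/*
+ * Usage sketch (informational): EEPROM words are stored little-endian, so a
+ * 16-bit field such as the NVM version can be read as
+ *
+ *	u16 ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
+ */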
+
+/**
+ * iwl_legacy_eeprom_init - read EEPROM contents
+ *
+ * Load the EEPROM contents from adapter into priv->eeprom
+ *
+ * NOTE: This routine uses the non-debug IO access functions.
+ */
+int iwl_legacy_eeprom_init(struct iwl_priv *priv)
+{
+ __le16 *e;
+ u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
+ int sz;
+ int ret;
+ u16 addr;
+
+ /* allocate eeprom */
+ sz = priv->cfg->base_params->eeprom_size;
+ IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
+ priv->eeprom = kzalloc(sz, GFP_KERNEL);
+ if (!priv->eeprom) {
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+ e = (__le16 *)priv->eeprom;
+
+ priv->cfg->ops->lib->apm_ops.init(priv);
+
+ ret = iwl_legacy_eeprom_verify_signature(priv);
+ if (ret < 0) {
+ IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+ ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
+ if (ret < 0) {
+ IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* eeprom is an array of 16bit values */
+ for (addr = 0; addr < sz; addr += sizeof(u16)) {
+ u32 r;
+
+ _iwl_legacy_write32(priv, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+ ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IWL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
+ addr);
+ goto done;
+ }
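+ /* the 16-bit EEPROM word is returned in bits 31-16 of CSR_EEPROM_REG */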
+ r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
+ e[addr / 2] = cpu_to_le16(r >> 16);
+ }
+
+ IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
+ "EEPROM",
+ iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
+
+ ret = 0;
+done:
+ priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
+
+err:
+ if (ret)
+ iwl_legacy_eeprom_free(priv);
+ /* Reset chip to save power until we load uCode during "up". */
+ iwl_legacy_apm_stop(priv);
+alloc_err:
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_init);
+
+void iwl_legacy_eeprom_free(struct iwl_priv *priv)
+{
+ kfree(priv->eeprom);
+ priv->eeprom = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_free);
+
+static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
+ int eep_band, int *eeprom_ch_count,
+ const struct iwl_eeprom_channel **eeprom_ch_info,
+ const u8 **eeprom_ch_index)
+{
+ u32 offset = priv->cfg->ops->lib->
+ eeprom_ops.regulatory_bands[eep_band - 1];
+ switch (eep_band) {
+ case 1: /* 2.4GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_1;
+ break;
+ case 2: /* 4.9GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_2;
+ break;
+ case 3: /* 5.2GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_3;
+ break;
+ case 4: /* 5.5GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_4;
+ break;
+ case 5: /* 5.7GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_5;
+ break;
+ case 6: /* 2.4GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_6;
+ break;
+ case 7: /* 5 GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
+ *eeprom_ch_info = (struct iwl_eeprom_channel *)
+ iwl_legacy_eeprom_query_addr(priv, offset);
+ *eeprom_ch_index = iwlegacy_eeprom_band_7;
+ break;
+ default:
+ BUG();
+ return;
+ }
+}
+
+#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
+ ? # x " " : "")
+/**
+ * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
+ *
+ * Does not set up a command, or touch hardware.
+ */
+static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
+ enum ieee80211_band band, u16 channel,
+ const struct iwl_eeprom_channel *eeprom_ch,
+ u8 clear_ht40_extension_channel)
+{
+ struct iwl_channel_info *ch_info;
+
+ ch_info = (struct iwl_channel_info *)
+ iwl_legacy_get_channel_info(priv, band, channel);
+
+ if (!iwl_legacy_is_channel_valid(ch_info))
+ return -1;
+
+ IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
+ " Ad-Hoc %ssupported\n",
+ ch_info->channel,
+ iwl_legacy_is_channel_a_band(ch_info) ?
+ "5.2" : "2.4",
+ CHECK_AND_PRINT(IBSS),
+ CHECK_AND_PRINT(ACTIVE),
+ CHECK_AND_PRINT(RADAR),
+ CHECK_AND_PRINT(WIDE),
+ CHECK_AND_PRINT(DFS),
+ eeprom_ch->flags,
+ eeprom_ch->max_power_avg,
+ ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
+ && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
+ "" : "not ");
+
+ ch_info->ht40_eeprom = *eeprom_ch;
+ ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
+ ch_info->ht40_flags = eeprom_ch->flags;
+ if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+ ch_info->ht40_extension_channel &=
+ ~clear_ht40_extension_channel;
+
+ return 0;
+}
+
+#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
+ ? # x " " : "")
+
+/**
+ * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
+ */
+int iwl_legacy_init_channel_map(struct iwl_priv *priv)
+{
+ int eeprom_ch_count = 0;
+ const u8 *eeprom_ch_index = NULL;
+ const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
+ int band, ch;
+ struct iwl_channel_info *ch_info;
+
+ if (priv->channel_count) {
+ IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
+ return 0;
+ }
+
+ IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
+
+ priv->channel_count =
+ ARRAY_SIZE(iwlegacy_eeprom_band_1) +
+ ARRAY_SIZE(iwlegacy_eeprom_band_2) +
+ ARRAY_SIZE(iwlegacy_eeprom_band_3) +
+ ARRAY_SIZE(iwlegacy_eeprom_band_4) +
+ ARRAY_SIZE(iwlegacy_eeprom_band_5);
+
+ IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
+ priv->channel_count);
+
+ priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
+ priv->channel_count, GFP_KERNEL);
+ if (!priv->channel_info) {
+ IWL_ERR(priv, "Could not allocate channel_info\n");
+ priv->channel_count = 0;
+ return -ENOMEM;
+ }
+
+ ch_info = priv->channel_info;
+
+ /* Loop through the 5 EEPROM bands adding them in order to the
+ * channel map we maintain (which contains more information than just
+ * what is in the EEPROM) */
+ for (band = 1; band <= 5; band++) {
+
+ iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
+ &eeprom_ch_info, &eeprom_ch_index);
+
+ /* Loop through each band adding each of the channels */
+ for (ch = 0; ch < eeprom_ch_count; ch++) {
+ ch_info->channel = eeprom_ch_index[ch];
+ ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ;
+
+ /* permanently store EEPROM's channel regulatory flags
+ * and max power in channel info database. */
+ ch_info->eeprom = eeprom_ch_info[ch];
+
+ /* Copy the run-time flags so they are there even on
+ * invalid channels */
+ ch_info->flags = eeprom_ch_info[ch].flags;
+ /* First write that ht40 is not enabled, and then enable
+ * one by one */
+ ch_info->ht40_extension_channel =
+ IEEE80211_CHAN_NO_HT40;
+
+ if (!(iwl_legacy_is_channel_valid(ch_info))) {
+ IWL_DEBUG_EEPROM(priv,
+ "Ch. %d Flags %x [%sGHz] - "
+ "No traffic\n",
+ ch_info->channel,
+ ch_info->flags,
+ iwl_legacy_is_channel_a_band(ch_info) ?
+ "5.2" : "2.4");
+ ch_info++;
+ continue;
+ }
+
+ /* Initialize regulatory-based run-time data */
+ ch_info->max_power_avg = ch_info->curr_txpow =
+ eeprom_ch_info[ch].max_power_avg;
+ ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
+ ch_info->min_power = 0;
+
+ IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
+ "%s%s%s%s%s%s(0x%02x %ddBm):"
+ " Ad-Hoc %ssupported\n",
+ ch_info->channel,
+ iwl_legacy_is_channel_a_band(ch_info) ?
+ "5.2" : "2.4",
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(IBSS),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(DFS),
+ eeprom_ch_info[ch].flags,
+ eeprom_ch_info[ch].max_power_avg,
+ ((eeprom_ch_info[ch].
+ flags & EEPROM_CHANNEL_IBSS)
+ && !(eeprom_ch_info[ch].
+ flags & EEPROM_CHANNEL_RADAR))
+ ? "" : "not ");
+
+ /* Set the tx_power_user_lmt to the highest power
+ * supported by any channel */
+ if (eeprom_ch_info[ch].max_power_avg >
+ priv->tx_power_user_lmt)
+ priv->tx_power_user_lmt =
+ eeprom_ch_info[ch].max_power_avg;
+
+ ch_info++;
+ }
+ }
+
+ /* Check if we do have HT40 channels */
+ if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
+ EEPROM_REGULATORY_BAND_NO_HT40 &&
+ priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
+ EEPROM_REGULATORY_BAND_NO_HT40)
+ return 0;
+
+ /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+ for (band = 6; band <= 7; band++) {
+ enum ieee80211_band ieeeband;
+
+ iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
+ &eeprom_ch_info, &eeprom_ch_index);
+
+ /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+ ieeeband =
+ (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+ /* Loop through each band adding each of the channels */
+ for (ch = 0; ch < eeprom_ch_count; ch++) {
+ /* Set up driver's info for lower half */
+ iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
+ eeprom_ch_index[ch],
+ &eeprom_ch_info[ch],
+ IEEE80211_CHAN_NO_HT40PLUS);
+
+ /* Set up driver's info for upper half */
+ iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
+ eeprom_ch_index[ch] + 4,
+ &eeprom_ch_info[ch],
+ IEEE80211_CHAN_NO_HT40MINUS);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_init_channel_map);
+
+/*
+ * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
+ */
+void iwl_legacy_free_channel_map(struct iwl_priv *priv)
+{
+ kfree(priv->channel_info);
+ priv->channel_count = 0;
+}
+EXPORT_SYMBOL(iwl_legacy_free_channel_map);
+
+/**
+ * iwl_legacy_get_channel_info - Find driver's private channel info
+ *
+ * Based on band and channel number.
+ */
+const struct
+iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
+ enum ieee80211_band band, u16 channel)
+{
+ int i;
+
+ switch (band) {
+ case IEEE80211_BAND_5GHZ:
+ for (i = 14; i < priv->channel_count; i++) {
+ if (priv->channel_info[i].channel == channel)
+ return &priv->channel_info[i];
+ }
+ break;
+ case IEEE80211_BAND_2GHZ:
+ if (channel >= 1 && channel <= 14)
+ return &priv->channel_info[channel - 1];
+ break;
+ default:
+ BUG();
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_get_channel_info);
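+
+/*
+ * Usage sketch (informational): callers look a channel up by band and channel
+ * number and should validate the result, e.g.
+ *
+ *	const struct iwl_channel_info *ch_info =
+ *		iwl_legacy_get_channel_info(priv, IEEE80211_BAND_2GHZ, 6);
+ *	if (!iwl_legacy_is_channel_valid(ch_info))
+ *		return;
+ */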
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
new file mode 100644
index 000000000000..c59c81002022
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
@@ -0,0 +1,344 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_eeprom_h__
+#define __iwl_legacy_eeprom_h__
+
+#include <net/mac80211.h>
+
+struct iwl_priv;
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum of 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
+
+#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
+#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
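+
+/*
+ * Condensed read sequence for one 16-bit word (a sketch of what
+ * iwl_legacy_eeprom_init() in iwl-eeprom.c does; see that file for the full
+ * error handling):
+ *
+ *	_iwl_legacy_write32(priv, CSR_EEPROM_REG,
+ *			    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+ *	iwl_poll_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK,
+ *		     CSR_EEPROM_REG_READ_VALID_MSK, IWL_EEPROM_ACCESS_TIMEOUT);
+ *	val = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG) >> 16;
+ */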
+
+
+/*
+ * Regulatory channel usage flags in EEPROM struct iwl_eeprom_channel.flags.
+ *
+ * IBSS and/or AP operation is allowed *only* on those channels with
+ * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
+ * RADAR detection is not supported by the 4965 driver, but is a
+ * requirement for establishing a new network for legal operation on channels
+ * requiring RADAR detection or restricting ACTIVE scanning.
+ *
+ * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
+ * It only indicates that 20 MHz channel use is supported; HT40 channel
+ * usage is indicated by a separate set of regulatory flags for each
+ * HT40 channel pair.
+ *
+ * NOTE: Using a channel inappropriately will result in a uCode error!
+ */
+#define IWL_NUM_TX_CALIB_GROUPS 5
+enum {
+ EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
+ EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
+ /* Bit 2 Reserved */
+ EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
+ EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
+ EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
+ /* Bit 6 Reserved (was Narrow Channel) */
+ EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
+};
+
+/* SKU Capabilities */
+/* 3945 only */
+#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
+#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
+
+/* *regulatory* channel data format in eeprom, one for each channel.
+ * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
+struct iwl_eeprom_channel {
+ u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
+ s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
+} __packed;
+
+/* 3945 Specific */
+#define EEPROM_3945_EEPROM_VERSION (0x2f)
+
+/* 4965 has two radio transmitters (and 3 radio receivers) */
+#define EEPROM_TX_POWER_TX_CHAINS (2)
+
+/* 4965 has room for up to 8 sets of txpower calibration data */
+#define EEPROM_TX_POWER_BANDS (8)
+
+/* 4965 factory calibration measures txpower gain settings for
+ * each of 3 target output levels */
+#define EEPROM_TX_POWER_MEASUREMENTS (3)
+
+/* 4965 Specific */
+/* 4965 driver does not work with txpower calibration version < 5 */
+#define EEPROM_4965_TX_POWER_VERSION (5)
+#define EEPROM_4965_EEPROM_VERSION (0x2f)
+#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
+#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
+#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
+#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
+
+/* 2.4 GHz */
+extern const u8 iwlegacy_eeprom_band_1[14];
+
+/*
+ * factory calibration data for one txpower level, on one channel,
+ * measured on one of the 2 tx chains (radio transmitter and associated
+ * antenna). EEPROM contains:
+ *
+ * 1) Temperature (degrees Celsius) of device when measurement was made.
+ *
+ * 2) Gain table index used to achieve the target measurement power.
+ * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
+ *
+ * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
+ *
+ * 4) RF power amplifier detector level measurement (not used).
+ */
+struct iwl_eeprom_calib_measure {
+ u8 temperature; /* Device temperature (Celsius) */
+ u8 gain_idx; /* Index into gain table */
+ u8 actual_pow; /* Measured RF output power, half-dBm */
+ s8 pa_det; /* Power amp detector level (not used) */
+} __packed;
+
+
+/*
+ * measurement set for one channel. EEPROM contains:
+ *
+ * 1) Channel number measured
+ *
+ * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
+ * (a.k.a. "tx chains") (6 measurements altogether)
+ */
+struct iwl_eeprom_calib_ch_info {
+ u8 ch_num;
+ struct iwl_eeprom_calib_measure
+ measurements[EEPROM_TX_POWER_TX_CHAINS]
+ [EEPROM_TX_POWER_MEASUREMENTS];
+} __packed;
+
+/*
+ * txpower subband info.
+ *
+ * For each frequency subband, EEPROM contains the following:
+ *
+ * 1) First and last channels within range of the subband. "0" values
+ * indicate that this sample set is not being used.
+ *
+ * 2) Sample measurement sets for 2 channels close to the range endpoints.
+ */
+struct iwl_eeprom_calib_subband_info {
+ u8 ch_from; /* channel number of lowest channel in subband */
+ u8 ch_to; /* channel number of highest channel in subband */
+ struct iwl_eeprom_calib_ch_info ch1;
+ struct iwl_eeprom_calib_ch_info ch2;
+} __packed;
+
+
+/*
+ * txpower calibration info. EEPROM contains:
+ *
+ * 1) Factory-measured saturation power levels (maximum levels at which
+ * tx power amplifier can output a signal without too much distortion).
+ * There is one level for 2.4 GHz band and one for 5 GHz band. These
+ * values apply to all channels within each of the bands.
+ *
+ * 2) Factory-measured power supply voltage level. This is assumed to be
+ * constant (i.e. same value applies to all channels/bands) while the
+ * factory measurements are being made.
+ *
+ * 3) Up to 8 sets of factory-measured txpower calibration values.
+ * These are for different frequency ranges, since txpower gain
+ * characteristics of the analog radio circuitry vary with frequency.
+ *
+ * Not all sets need to be filled with data;
+ * struct iwl_eeprom_calib_subband_info contains range of channels
+ * (0 if unused) for each set of data.
+ */
+struct iwl_eeprom_calib_info {
+ u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
+ u8 saturation_power52; /* half-dBm */
+ __le16 voltage; /* signed */
+ struct iwl_eeprom_calib_subband_info
+ band_info[EEPROM_TX_POWER_BANDS];
+} __packed;
+
+
+/* General */
+#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
+#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
+#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
+#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
+#define EEPROM_VERSION (2*0x44) /* 2 bytes */
+#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
+#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
+#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
+#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
+#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
+
+/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
+#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
+#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
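+
+/*
+ * Decode sketch (informational): these masks operate on the 16-bit value read
+ * from the radio configuration word, e.g.
+ *
+ *	u16 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+ *	u8 rf_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);
+ *	u8 rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+ */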
+
+#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
+#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by iwl has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
+ * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
+#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
+
+/*
+ * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
+ *
+ * The channel listed is the center of the lower 20 MHz half of the channel.
+ * The overall center frequency is actually 2 channels (10 MHz) above that,
+ * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
+ * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
+ * and the overall HT40 channel width centers on channel 3.
+ *
+ * NOTE: The RXON command uses 20 MHz channel numbers to specify the
+ * control channel to which to tune. RXON also specifies whether the
+ * control channel is the upper or lower half of a HT40 channel.
+ *
+ * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
+ */
+#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
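+
+/*
+ * Example (informational): for HT40 channel "1 (5)" above, the lower 20 MHz
+ * half is channel 1 and the upper half is channel 1 + 4 = 5; this "+ 4" step
+ * is what iwl_legacy_init_channel_map() uses when it fills in the upper-half
+ * channel info.
+ */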
+
+/*
+ * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
+ * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
+ */
+#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
+
+#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
+
+struct iwl_eeprom_ops {
+ const u32 regulatory_bands[7];
+ int (*acquire_semaphore) (struct iwl_priv *priv);
+ void (*release_semaphore) (struct iwl_priv *priv);
+};
+
+
+int iwl_legacy_eeprom_init(struct iwl_priv *priv);
+void iwl_legacy_eeprom_free(struct iwl_priv *priv);
+const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
+ size_t offset);
+u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
+int iwl_legacy_init_channel_map(struct iwl_priv *priv);
+void iwl_legacy_free_channel_map(struct iwl_priv *priv);
+const struct iwl_channel_info *iwl_legacy_get_channel_info(
+ const struct iwl_priv *priv,
+ enum ieee80211_band band, u16 channel);
+
+#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
new file mode 100644
index 000000000000..4e20c7e5c883
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-fh.h
@@ -0,0 +1,513 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_legacy_fh_h__
+#define __iwl_legacy_fh_h__
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH_MEM_LOWER_BOUND (0x1000)
+#define FH_MEM_UPPER_BOUND (0x2000)
+
+/**
+ * Keep-Warm (KW) buffer base address.
+ *
+ * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
+ * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
+ * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
+ * from going into a power-savings mode that would cause higher DRAM latency,
+ * and possible data over/under-runs, before all Tx/Rx is complete.
+ *
+ * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
+ * of the buffer, which must be 4K aligned. Once this is set up, the 4965
+ * automatically invokes keep-warm accesses when normal accesses might not
+ * be sufficient to maintain fast DRAM response.
+ *
+ * Bit fields:
+ * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
+ */
+#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
+
+
+/**
+ * TFD Circular Buffers Base (CBBC) addresses
+ *
+ * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
+ * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
+ * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
+ * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
+ *
+ * Bit fields in each pointer register:
+ * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
+#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
+
+/* Find TFD CB base pointer for given queue (range 0-15). */
+#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
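+
+/*
+ * Example (informational): the base-pointer register for Tx queue 5 is
+ * FH_MEM_CBBC_QUEUE(5) = FH_MEM_CBBC_LOWER_BOUND + 0x14.
+ */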
+
+
+/**
+ * Rx SRAM Control and Status Registers (RSCSR)
+ *
+ * These registers provide handshake between driver and 4965 for the Rx queue
+ * (this queue handles *all* command responses, notifications, Rx data, etc.
+ * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
+ * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
+ * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
+ * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
+ * mapping between RBDs and RBs.
+ *
+ * Driver must allocate host DRAM memory for the following, and set the
+ * physical address of each into 4965 registers:
+ *
+ * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
+ * entries (although any power of 2, up to 4096, is selectable by driver).
+ * Each entry (1 dword) points to a receive buffer (RB) of consistent size
+ * (typically 4K, although 8K or 16K are also selectable by driver).
+ * Driver sets up RB size and number of RBDs in the CB via Rx config
+ * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
+ *
+ * Bit fields within one RBD:
+ * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
+ *
+ * Driver sets physical address [35:8] of base of RBD circular buffer
+ * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
+ *
+ * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
+ * (RBs) have been filled, via a "write pointer", actually the index of
+ * the RB's corresponding RBD within the circular buffer. Driver sets
+ * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
+ *
+ * Bit fields in lower dword of Rx status buffer (upper dword not used
+ * by driver; see struct iwl4965_shared, val0):
+ * 31-12: Not used by driver
+ * 11- 0: Index of last filled Rx buffer descriptor
+ * (4965 writes, driver reads this value)
+ *
+ * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
+ * enter pointers to these RBs into contiguous RBD circular buffer entries,
+ * and update the 4965's "write" index register,
+ * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
+ *
+ * This "write" index corresponds to the *next* RBD that the driver will make
+ * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
+ * the circular buffer. This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example), and must
+ * wrap back to 0 at the end of the circular buffer (but don't wrap before
+ * "read" index has advanced past 1! See below).
+ * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
+ *
+ * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
+ * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
+ * to tell the driver the index of the latest filled RBD. The driver must
+ * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
+ *
+ * The driver must also internally keep track of a third index, which is the
+ * next RBD to process. When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" index. For example, if "read" index becomes "1",
+ * driver may process the RB pointed to by RBD 0. Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read index == write index, 4965 thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256. To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" indexes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
+#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
+ */
+#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (index, really!).
+ * Bit fields:
+ * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ * NOTE: For 256-entry circular buffer, use only bits [7:0].
+ */
+#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
+#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
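+
+/*
+ * Restock sketch (informational; the driver's Rx path does the real work):
+ * after preparing new RBs, write the index rounded down to a multiple of 8,
+ * per the NOTE above:
+ *
+ *	_iwl_legacy_write32(priv, FH_RSCSR_CHNL0_WPTR, write_index & ~0x7);
+ *
+ * write_index here is hypothetical bookkeeping for the next RBD that the
+ * driver will make available.
+ */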
+
+
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
+ * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ * min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ * '10' 12K, '11' 16K.
+ * 15-14: reserved
+ * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
+ * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
+ * typical value 0x10 (about 1/2 msec)
+ * 3- 0: reserved
+ */
+#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
+#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
+
+#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
+
+#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
+#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
+#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
+#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
+#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
+
+#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
+#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
+#define RX_RB_TIMEOUT (0x10)
+
+#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
+#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
+#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
+
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
+
+#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
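+
+/*
+ * Illustrative channel-0 Rx config value (a sketch, not taken verbatim from
+ * the driver): 256 RBDs, 4K buffers, interrupt the host, default RB timeout:
+ *
+ *	FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ *	FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
+ *	(RX_QUEUE_SIZE_LOG << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |
+ *	FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ *	(RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)
+ *
+ * RX_QUEUE_SIZE_LOG and RX_RB_TIMEOUT are defined later in this file.
+ */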
+
+#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
+
+/**
+ * Rx Shared Status Registers (RSSR)
+ *
+ * After stopping Rx DMA channel (writing 0 to
+ * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
+ * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
+ *
+ * Bit fields:
+ * 24: 1 = Channel 0 is idle
+ *
+ * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
+ * contain default values that should not be altered by the driver.
+ */
+#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
+#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
+
+#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
+#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
+#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
+ (FH_MEM_RSSR_LOWER_BOUND + 0x008)
+
+#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
+
+#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
+
+/* TFDB Area - TFDs buffer table */
+#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
+#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
+#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
+#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
+#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
+
+/**
+ * Transmit DMA Channel Control/Status Registers (TCSR)
+ *
+ * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
+ * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
+ * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
+ *
+ * To use a Tx DMA channel, driver must initialize its
+ * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
+ *
+ * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
+ *
+ * All other bits should be 0.
+ *
+ * Bit fields:
+ * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29- 4: Reserved, set to "0"
+ * 3: Enable internal DMA requests (1, normal operation), disable (0)
+ * 2- 0: Reserved, set to "0"
+ */
+#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
+#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
+
+/* Find Control/Status reg for given Tx DMA/FIFO channel */
+#define FH49_TCSR_CHNL_NUM (7)
+#define FH50_TCSR_CHNL_NUM (8)
+
+/* TCSR: tx_config register values */
+#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
+ (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
+#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
+ (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
+ (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
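+
+/*
+ * Setup sketch (informational), following the TCSR description above; the
+ * driver's Tx init path may use its direct-write helpers instead:
+ *
+ *	_iwl_legacy_write32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
+ *			    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE |
+ *			    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
+ */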
+
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
+
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
+
+/**
+ * Tx Shared Status Registers (TSSR)
+ *
+ * After stopping Tx DMA channel (writing 0 to
+ * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
+ * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
+ * (channel's buffers empty | no pending requests).
+ *
+ * Bit fields:
+ * 31-24: 1 = Channel buffers empty (channel 7:0)
+ * 23-16: 1 = No pending requests (channel 7:0)
+ */
+#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
+#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
+
+#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
+
+/**
+ * Bit fields for TSSR(Tx Shared Status & Control) error status register:
+ * 31: Indicates an address error when accessing internal memory
+ * uCode/driver must write "1" in order to clear this flag
+ * 30: Indicates that Host did not send the expected number of dwords to FH
+ * uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ * command was received from the scheduler while the TRB was already full
+ * with previous command
+ * uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ * bit is set, it indicates that the FH has received a full indication
+ * from the RTC TxFIFO and the current value of the TxCredit counter was
+ * not equal to zero. This means that the credit mechanism was not
+ * synchronized to the TxFIFO status
+ * uCode/driver must write "1" in order to clear this flag
+ */
+#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
+
+#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
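+
+/*
+ * Sketch of the stop sequence described above, assuming iwl_poll_direct_bit()
+ * from iwl-io.h and a timeout in microseconds:
+ *
+ *	iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
+ *	iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+ *			    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl), 1000);
+ */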
+
+/* Tx service channels */
+#define FH_SRVC_CHNL (9)
+#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
+#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
+#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+ (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from the memory to TX-FIFO
+ */
+#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
+
+#define RX_QUEUE_SIZE 256
+#define RX_QUEUE_MASK 255
+#define RX_QUEUE_SIZE_LOG 8
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
+/* Size of one Rx buffer in host DRAM */
+#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
+#define IWL_RX_BUF_SIZE_4K (4 * 1024)
+#define IWL_RX_BUF_SIZE_8K (8 * 1024)
+
+/**
+ * struct iwl_rb_status - receive buffer status
+ * host memory mapped FH registers
+ * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the index of the current RB
+ * in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ * which was transferred
+ */
+struct iwl_rb_status {
+ __le16 closed_rb_num;
+ __le16 closed_fr_num;
+ __le16 finished_rb_num;
+ __le16 finished_fr_num;
+ __le32 __unused; /* 3945 only */
+} __packed;
+
+
+#define TFD_QUEUE_SIZE_MAX (256)
+#define TFD_QUEUE_SIZE_BC_DUP (64)
+#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
+#define IWL_NUM_OF_TBS 20
+
+static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
+{
+ return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
+/**
+ * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the dma address and length of a Tx buffer.
+ *
+ * @lo: low [31:0] portion of the dma address of the TX buffer;
+ *	every even entry is unaligned on a 16 bit boundary
+ * @hi_n_len: 0-3 [35:32] portion of the dma address
+ *	4-15 length of the tx buffer
+ */
+struct iwl_tfd_tb {
+ __le32 lo;
+ __le16 hi_n_len;
+} __packed;
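+
+/*
+ * Minimal sketch of how a TB could be filled in per the layout above
+ * (bits 0-3 of hi_n_len carry address[35:32], bits 4-15 the buffer length);
+ * this helper is illustrative only, not part of the uCode API:
+ *
+ * static inline void iwl_tfd_tb_fill(struct iwl_tfd_tb *tb,
+ *				      dma_addr_t addr, u16 len)
+ * {
+ *	tb->lo = cpu_to_le32(lower_32_bits(addr));
+ *	tb->hi_n_len = cpu_to_le16(iwl_legacy_get_dma_hi_addr(addr) |
+ *				   (len << 4));
+ * }
+ */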
+
+/**
+ * struct iwl_tfd
+ *
+ * Transmit Frame Descriptor (TFD)
+ *
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ * 5 reserved
+ * 6-7 padding (not used)
+ * @ tbs[20] transmit frame buffer descriptors
+ * @ __pad padding
+ *
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the FH_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 data buffers
+ * in host DRAM. These buffers collectively contain the (one) frame described
+ * by the TFD. Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM. Each buffer has max size
+ * of (4K - 4). The device concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+struct iwl_tfd {
+ u8 __reserved1[3];
+ u8 num_tbs;
+ struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
+ __le32 __pad;
+} __packed;
+
+/* Keep Warm Size */
+#define IWL_KW_SIZE 0x1000 /* 4k */
+
+#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
new file mode 100644
index 000000000000..9d721cbda5bb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
@@ -0,0 +1,271 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-eeprom.h"
+#include "iwl-core.h"
+
+
+const char *iwl_legacy_get_cmd_string(u8 cmd)
+{
+ switch (cmd) {
+ IWL_CMD(REPLY_ALIVE);
+ IWL_CMD(REPLY_ERROR);
+ IWL_CMD(REPLY_RXON);
+ IWL_CMD(REPLY_RXON_ASSOC);
+ IWL_CMD(REPLY_QOS_PARAM);
+ IWL_CMD(REPLY_RXON_TIMING);
+ IWL_CMD(REPLY_ADD_STA);
+ IWL_CMD(REPLY_REMOVE_STA);
+ IWL_CMD(REPLY_WEPKEY);
+ IWL_CMD(REPLY_3945_RX);
+ IWL_CMD(REPLY_TX);
+ IWL_CMD(REPLY_RATE_SCALE);
+ IWL_CMD(REPLY_LEDS_CMD);
+ IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
+ IWL_CMD(REPLY_CHANNEL_SWITCH);
+ IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
+ IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
+ IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
+ IWL_CMD(POWER_TABLE_CMD);
+ IWL_CMD(PM_SLEEP_NOTIFICATION);
+ IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
+ IWL_CMD(REPLY_SCAN_CMD);
+ IWL_CMD(REPLY_SCAN_ABORT_CMD);
+ IWL_CMD(SCAN_START_NOTIFICATION);
+ IWL_CMD(SCAN_RESULTS_NOTIFICATION);
+ IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
+ IWL_CMD(BEACON_NOTIFICATION);
+ IWL_CMD(REPLY_TX_BEACON);
+ IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
+ IWL_CMD(REPLY_BT_CONFIG);
+ IWL_CMD(REPLY_STATISTICS_CMD);
+ IWL_CMD(STATISTICS_NOTIFICATION);
+ IWL_CMD(CARD_STATE_NOTIFICATION);
+ IWL_CMD(MISSED_BEACONS_NOTIFICATION);
+ IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
+ IWL_CMD(SENSITIVITY_CMD);
+ IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
+ IWL_CMD(REPLY_RX_PHY_CMD);
+ IWL_CMD(REPLY_RX_MPDU_CMD);
+ IWL_CMD(REPLY_RX);
+ IWL_CMD(REPLY_COMPRESSED_BA);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
+
+#define HOST_COMPLETE_TIMEOUT (HZ / 2)
+
+static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt)
+{
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
+ iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ return;
+ }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ switch (cmd->hdr.cmd) {
+ case REPLY_TX_LINK_QUALITY_CMD:
+ case SENSITIVITY_CMD:
+ IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
+ iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ break;
+ default:
+ IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
+ iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ }
+#endif
+}
+
+static int
+iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+ int ret;
+
+ BUG_ON(!(cmd->flags & CMD_ASYNC));
+
+ /* An asynchronous command can not expect an SKB to be set. */
+ BUG_ON(cmd->flags & CMD_WANT_SKB);
+
+ /* Assign a generic callback if one is not provided */
+ if (!cmd->callback)
+ cmd->callback = iwl_legacy_generic_cmd_callback;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return -EBUSY;
+
+ ret = iwl_legacy_enqueue_hcmd(priv, cmd);
+ if (ret < 0) {
+ IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+ iwl_legacy_get_cmd_string(cmd->id), ret);
+ return ret;
+ }
+ return 0;
+}
+
+int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+ int cmd_idx;
+ int ret;
+
+ BUG_ON(cmd->flags & CMD_ASYNC);
+
+ /* A synchronous command can not have a callback set. */
+ BUG_ON(cmd->callback);
+
+ IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
+ iwl_legacy_get_cmd_string(cmd->id));
+ mutex_lock(&priv->sync_cmd_mutex);
+
+ set_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
+ iwl_legacy_get_cmd_string(cmd->id));
+
+ cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+ iwl_legacy_get_cmd_string(cmd->id), ret);
+ goto out;
+ }
+
+ ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
+ IWL_ERR(priv,
+ "Error sending %s: time out after %dms.\n",
+ iwl_legacy_get_cmd_string(cmd->id),
+ jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv,
+ "Clearing HCMD_ACTIVE for command %s\n",
+ iwl_legacy_get_cmd_string(cmd->id));
+ ret = -ETIMEDOUT;
+ goto cancel;
+ }
+ }
+
+ if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
+ IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
+ iwl_legacy_get_cmd_string(cmd->id));
+ ret = -ECANCELED;
+ goto fail;
+ }
+ if (test_bit(STATUS_FW_ERROR, &priv->status)) {
+ IWL_ERR(priv, "Command %s failed: FW Error\n",
+ iwl_legacy_get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto fail;
+ }
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+ IWL_ERR(priv, "Error: Response NULL in '%s'\n",
+ iwl_legacy_get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto cancel;
+ }
+
+ ret = 0;
+ goto out;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise in case the cmd comes
+ * in later, it will possibly set an invalid
+ * address (cmd->meta.source).
+ */
+ priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
+ ~CMD_WANT_SKB;
+ }
+fail:
+ if (cmd->reply_page) {
+ iwl_legacy_free_pages(priv, cmd->reply_page);
+ cmd->reply_page = 0;
+ }
+out:
+ mutex_unlock(&priv->sync_cmd_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
+
+int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+ if (cmd->flags & CMD_ASYNC)
+ return iwl_legacy_send_cmd_async(priv, cmd);
+
+ return iwl_legacy_send_cmd_sync(priv, cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd);
+
+int
+iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = len,
+ .data = data,
+ };
+
+ return iwl_legacy_send_cmd_sync(priv, &cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
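+
+/*
+ * Typical synchronous use, assuming a command payload already laid out in
+ * a local struct (command id and struct shown only as an example):
+ *
+ *	ret = iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ *				      sizeof(bt_cmd), &bt_cmd);
+ */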
+
+int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
+ u8 id, u16 len, const void *data,
+ void (*callback)(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt))
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = len,
+ .data = data,
+ };
+
+ cmd.flags |= CMD_ASYNC;
+ cmd.callback = callback;
+
+ return iwl_legacy_send_cmd_async(priv, &cmd);
+}
+EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
new file mode 100644
index 000000000000..02132e755831
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-helpers.h
@@ -0,0 +1,181 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_helpers_h__
+#define __iwl_legacy_helpers_h__
+
+#include <linux/ctype.h>
+#include <net/mac80211.h>
+
+#include "iwl-io.h"
+
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
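+/* e.g. IWL_MASK(3, 6) == 0x78: bits 3 through 6 inclusive are set */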
+
+
+static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
+ struct ieee80211_hw *hw)
+{
+ return &hw->conf;
+}
+
+/**
+ * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
+{
+ return ++index & (n_bd - 1);
+}
+
+/**
+ * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
+ * @index -- current index
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
+{
+ return --index & (n_bd - 1);
+}
+
+/* TODO: Move fw_desc functions to iwl-pci.ko */
+static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
+ struct fw_desc *desc)
+{
+ if (desc->v_addr)
+ dma_free_coherent(&pci_dev->dev, desc->len,
+ desc->v_addr, desc->p_addr);
+ desc->v_addr = NULL;
+ desc->len = 0;
+}
+
+static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
+ struct fw_desc *desc)
+{
+ if (!desc->len) {
+ desc->v_addr = NULL;
+ return -EINVAL;
+ }
+
+ desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
+ &desc->p_addr, GFP_KERNEL);
+ return (desc->v_addr != NULL) ? 0 : -ENOMEM;
+}
+
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW queue ID
+ * |
+ * +---------------------- unused
+ */
+static inline void
+iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
+{
+ BUG_ON(ac > 3); /* only have 2 bits */
+ BUG_ON(hwq > 31); /* only use 5 bits */
+
+ txq->swq_id = (hwq << 2) | ac;
+}
+
+static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+
+ if (test_and_clear_bit(hwq, priv->queue_stopped))
+ if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
+ ieee80211_wake_queue(priv->hw, ac);
+}
+
+static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+
+ if (!test_and_set_bit(hwq, priv->queue_stopped))
+ if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
+ ieee80211_stop_queue(priv->hw, ac);
+}
+
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
+static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
+{
+ clear_bit(STATUS_INT_ENABLED, &priv->status);
+
+ /* disable interrupts from uCode/NIC to host */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+ /* acknowledge/clear/reset any interrupts still pending
+ * from uCode or flow handler (Rx/Tx DMA) */
+ iwl_write32(priv, CSR_INT, 0xffffffff);
+ iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
+ IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
+}
+
+static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
+{
+ IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
+ set_bit(STATUS_INT_ENABLED, &priv->status);
+ iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
+}
+
+/**
+ * iwl_legacy_beacon_time_mask_low - mask of lower 32 bits of beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
+ u16 tsf_bits)
+{
+ return (1 << tsf_bits) - 1;
+}
+
+/**
+ * iwl_legacy_beacon_time_mask_high - mask of higher 32 bits of beacon time
+ * @priv -- pointer to iwl_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
+ u16 tsf_bits)
+{
+ return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
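+
+/*
+ * Worked example: with tsf_bits == 22 the low mask is (1 << 22) - 1 ==
+ * 0x003FFFFF and the high mask is 0xFFC00000; together the two masks
+ * partition a 32-bit beacon time value at bit 22.
+ */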
+
+#endif /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
new file mode 100644
index 000000000000..5cc5d342914f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-io.h
@@ -0,0 +1,545 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_io_h__
+#define __iwl_legacy_io_h__
+
+#include <linux/io.h>
+
+#include "iwl-dev.h"
+#include "iwl-debug.h"
+#include "iwl-devtrace.h"
+
+/*
+ * IO, register, and NIC memory access functions
+ *
+ * NOTE on naming convention and macro usage for these
+ *
+ * A single _ prefix before an access function means that no state
+ * check or debug information is printed when that function is called.
+ *
+ * A double __ prefix before an access function means that state is checked
+ * and the caller's file name and line number are printed in addition
+ * to any other debug output.
+ *
+ * The non-prefixed name is the #define that maps the caller into a
+ * #define that provides the caller's __FILE__ and __LINE__ to the double
+ * prefix version.
+ *
+ * If you wish to call the function without any debug or state checking,
+ * you should use the single _ prefix version (as is used by dependent IO
+ * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
+ * _iwl_legacy_read32.)
+ *
+ * These declarations are *extremely* useful in quickly isolating code deltas
+ * which result in misconfiguration of the hardware I/O. In combination with
+ * git-bisect and the IO debug level you can quickly determine the specific
+ * commit which breaks the IO sequence to the hardware.
+ *
+ */
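+
+/*
+ * For example, with CONFIG_IWLWIFI_LEGACY_DEBUG set, iwl_write32(priv, ofs,
+ * val) expands to __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val),
+ * which logs the access and then calls _iwl_legacy_write32(); without the
+ * debug option it maps straight to _iwl_legacy_write32(priv, ofs, val).
+ */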
+
+static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
+{
+ trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
+ iowrite8(val, priv->hw_base + ofs);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void
+__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
+ u32 ofs, u8 val)
+{
+ IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
+ _iwl_legacy_write8(priv, ofs, val);
+}
+#define iwl_write8(priv, ofs, val) \
+ __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
+#else
+#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
+#endif
+
+
+static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
+{
+ trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
+ iowrite32(val, priv->hw_base + ofs);
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void
+__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
+ u32 ofs, u32 val)
+{
+ IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
+ _iwl_legacy_write32(priv, ofs, val);
+}
+#define iwl_write32(priv, ofs, val) \
+ __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
+#else
+#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
+#endif
+
+static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
+{
+ u32 val = ioread32(priv->hw_base + ofs);
+ trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
+ return val;
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline u32
+__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
+{
+ IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
+ return _iwl_legacy_read32(priv, ofs);
+}
+#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
+#else
+#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
+#endif
+
+#define IWL_POLL_INTERVAL 10 /* microseconds */
+static inline int
+_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
+ u32 bits, u32 mask, int timeout)
+{
+ int t = 0;
+
+ do {
+ if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
+ return t;
+ udelay(IWL_POLL_INTERVAL);
+ t += IWL_POLL_INTERVAL;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
+ struct iwl_priv *priv, u32 addr,
+ u32 bits, u32 mask, int timeout)
+{
+ int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
+ IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
+ addr, bits, mask,
+ unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
+ return ret;
+}
+#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
+ __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
+ bits, mask, timeout)
+#else
+#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
+#endif
+
+static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+{
+ _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void __iwl_legacy_set_bit(const char *f, u32 l,
+ struct iwl_priv *priv, u32 reg, u32 mask)
+{
+ u32 val = _iwl_legacy_read32(priv, reg) | mask;
+ IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
+ mask, val);
+ _iwl_legacy_write32(priv, reg, val);
+}
+static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#else
+static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _iwl_legacy_set_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#endif
+
+static inline void
+_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
+{
+ _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void
+__iwl_legacy_clear_bit(const char *f, u32 l,
+ struct iwl_priv *priv, u32 reg, u32 mask)
+{
+ u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
+ IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
+ _iwl_legacy_write32(priv, reg, val);
+}
+static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#else
+static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _iwl_legacy_clear_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+#endif
+
+static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ /* this bit wakes up the NIC */
+ _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /*
+ * These bits say the device is running, and should keep running for
+ * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+ * but they do not indicate that embedded SRAM is restored yet;
+ * 3945 and 4965 have volatile SRAM, and must save/restore contents
+ * to/from host DRAM when sleeping/waking for power-saving.
+ * Each direction takes approximately 1/4 millisecond; with this
+ * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+ * series of register accesses are expected (e.g. reading Event Log),
+ * to keep device from sleeping.
+ *
+ * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+ * SRAM is okay/restored. We don't check that here because this call
+ * is just for hardware register access; but GP1 MAC_SLEEP check is a
+ * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+ *
+ */
+ ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+ if (ret < 0) {
+ val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
+ IWL_ERR(priv,
+ "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
+ _iwl_legacy_write32(priv, CSR_RESET,
+ CSR_RESET_REG_FLAG_FORCE_NMI);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
+ struct iwl_priv *priv)
+{
+ IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
+ return _iwl_legacy_grab_nic_access(priv);
+}
+#define iwl_grab_nic_access(priv) \
+ __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
+#else
+#define iwl_grab_nic_access(priv) \
+ _iwl_legacy_grab_nic_access(priv)
+#endif
+
+static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
+{
+ _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
+ struct iwl_priv *priv)
+{
+
+ IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
+ _iwl_legacy_release_nic_access(priv);
+}
+#define iwl_release_nic_access(priv) \
+ __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
+#else
+#define iwl_release_nic_access(priv) \
+ _iwl_legacy_release_nic_access(priv)
+#endif
+
+static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+ return _iwl_legacy_read32(priv, reg);
+}
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
+ struct iwl_priv *priv, u32 reg)
+{
+ u32 value = _iwl_legacy_read_direct32(priv, reg);
+ IWL_DEBUG_IO(priv,
+ "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
+ f, l);
+ return value;
+}
+static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+ u32 value;
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return value;
+}
+
+#else
+static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
+{
+ u32 value;
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ value = _iwl_legacy_read_direct32(priv, reg);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return value;
+
+}
+#endif
+
+static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
+ u32 reg, u32 value)
+{
+ _iwl_legacy_write32(priv, reg, value);
+}
+static inline void
+iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (!iwl_grab_nic_access(priv)) {
+ _iwl_legacy_write_direct32(priv, reg, value);
+ iwl_release_nic_access(priv);
+ }
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
+ u32 reg, u32 len, u32 *values)
+{
+ u32 count = sizeof(u32);
+
+ if ((priv != NULL) && (values != NULL)) {
+ for (; 0 < len; len -= count, reg += count, values++)
+ iwl_legacy_write_direct32(priv, reg, *values);
+ }
+}
+
+static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
+ u32 mask, int timeout)
+{
+ int t = 0;
+
+ do {
+ if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
+ return t;
+ udelay(IWL_POLL_INTERVAL);
+ t += IWL_POLL_INTERVAL;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
+ struct iwl_priv *priv,
+ u32 addr, u32 mask, int timeout)
+{
+ int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
+
+ if (unlikely(ret == -ETIMEDOUT))
+ IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
+ "timedout - %s %d\n", addr, mask, f, l);
+ else
+ IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
+ "- %s %d\n", addr, mask, ret, f, l);
+ return ret;
+}
+#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
+__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
+#else
+#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
+#endif
+
+static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
+{
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+ rmb();
+ return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
+}
+static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
+{
+ unsigned long reg_flags;
+ u32 val;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ val = _iwl_legacy_read_prph(priv, reg);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return val;
+}
+
+static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
+ u32 addr, u32 val)
+{
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
+ ((addr & 0x0000FFFF) | (3 << 24)));
+ wmb();
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
+}
+
+static inline void
+iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (!iwl_grab_nic_access(priv)) {
+ _iwl_legacy_write_prph(priv, addr, val);
+ iwl_release_nic_access(priv);
+ }
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
+_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
+
+static inline void
+iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ _iwl_legacy_set_bits_prph(priv, reg, mask);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
+_iwl_legacy_write_prph(priv, reg, \
+ ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
+
+static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
+ u32 bits, u32 mask)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline void iwl_legacy_clear_bits_prph(struct iwl_priv *priv,
+						u32 reg, u32 mask)
+{
+ unsigned long reg_flags;
+ u32 val;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+ val = _iwl_legacy_read_prph(priv, reg);
+ _iwl_legacy_write_prph(priv, reg, (val & ~mask));
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
+{
+ unsigned long reg_flags;
+ u32 value;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
+ rmb();
+ value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return value;
+}
+
+static inline void
+iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (!iwl_grab_nic_access(priv)) {
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
+ wmb();
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
+ iwl_release_nic_access(priv);
+ }
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static inline void
+iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
+ u32 len, u32 *values)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (!iwl_grab_nic_access(priv)) {
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
+ wmb();
+ for (; 0 < len; len -= sizeof(u32), values++)
+ _iwl_legacy_write_direct32(priv,
+ HBUS_TARG_MEM_WDAT, *values);
+
+ iwl_release_nic_access(priv);
+ }
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+#endif /* __iwl_legacy_io_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
new file mode 100644
index 000000000000..15eb8b707157
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.c
@@ -0,0 +1,188 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+
+/* default: IWL_LED_BLINK(0) using blinking index table */
+static int led_mode;
+module_param(led_mode, int, S_IRUGO);
+MODULE_PARM_DESC(led_mode, "0=system default, "
+ "1=On(RF On)/Off(RF Off), 2=blinking");
+
+static const struct ieee80211_tpt_blink iwl_blink[] = {
+ { .throughput = 0 * 1024 - 1, .blink_time = 334 },
+ { .throughput = 1 * 1024 - 1, .blink_time = 260 },
+ { .throughput = 5 * 1024 - 1, .blink_time = 220 },
+ { .throughput = 10 * 1024 - 1, .blink_time = 190 },
+ { .throughput = 20 * 1024 - 1, .blink_time = 170 },
+ { .throughput = 50 * 1024 - 1, .blink_time = 150 },
+ { .throughput = 70 * 1024 - 1, .blink_time = 130 },
+ { .throughput = 100 * 1024 - 1, .blink_time = 110 },
+ { .throughput = 200 * 1024 - 1, .blink_time = 80 },
+ { .throughput = 300 * 1024 - 1, .blink_time = 50 },
+};
+
+/*
+ * Adjust LED blink rate to compensate for the MAC clock difference on each HW.
+ * LED blink rate analysis showed an average deviation of 0% on 3945 and
+ * 5% on 4965 HW.
+ * The LED on/off time per HW must be compensated according to the deviation
+ * to achieve the desired LED frequency.
+ * The calculation is: (100-averageDeviation)/100 * blinkTime
+ * For code efficiency the calculation will be:
+ * compensation = (100 - averageDeviation) * 64 / 100
+ * NewBlinkTime = (compensation * BlinkTime) / 64
+ */
+static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
+ u8 time, u16 compensation)
+{
+ if (!compensation) {
+ IWL_ERR(priv, "undefined blink compensation: "
+ "use pre-defined blinking time\n");
+ return time;
+ }
+
+ return (u8)((time * compensation) >> 6);
+}
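+
+/*
+ * Worked example using the 4965 figure quoted above: a 5% deviation gives
+ * compensation = (100 - 5) * 64 / 100 = 60, so a nominal 110 ms blink time
+ * becomes (110 * 60) >> 6 = 103 ms.
+ */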
+
+/* Set led pattern command */
+static int iwl_legacy_led_cmd(struct iwl_priv *priv,
+ unsigned long on,
+ unsigned long off)
+{
+ struct iwl_led_cmd led_cmd = {
+ .id = IWL_LED_LINK,
+ .interval = IWL_DEF_LED_INTRVL
+ };
+ int ret;
+
+ if (!test_bit(STATUS_READY, &priv->status))
+ return -EBUSY;
+
+ if (priv->blink_on == on && priv->blink_off == off)
+ return 0;
+
+ IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
+ priv->cfg->base_params->led_compensation);
+ led_cmd.on = iwl_legacy_blink_compensation(priv, on,
+ priv->cfg->base_params->led_compensation);
+ led_cmd.off = iwl_legacy_blink_compensation(priv, off,
+ priv->cfg->base_params->led_compensation);
+
+ ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
+ if (!ret) {
+ priv->blink_on = on;
+ priv->blink_off = off;
+ }
+ return ret;
+}
+
+static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+ unsigned long on = 0;
+
+ if (brightness > 0)
+ on = IWL_LED_SOLID;
+
+ iwl_legacy_led_cmd(priv, on, 0);
+}
+
+static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+
+ return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
+}
+
+void iwl_legacy_leds_init(struct iwl_priv *priv)
+{
+ int mode = led_mode;
+ int ret;
+
+ if (mode == IWL_LED_DEFAULT)
+ mode = priv->cfg->led_mode;
+
+ priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
+ wiphy_name(priv->hw->wiphy));
+ priv->led.brightness_set = iwl_legacy_led_brightness_set;
+ priv->led.blink_set = iwl_legacy_led_blink_set;
+ priv->led.max_brightness = 1;
+
+ switch (mode) {
+ case IWL_LED_DEFAULT:
+ WARN_ON(1);
+ break;
+ case IWL_LED_BLINK:
+ priv->led.default_trigger =
+ ieee80211_create_tpt_led_trigger(priv->hw,
+ IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+ iwl_blink, ARRAY_SIZE(iwl_blink));
+ break;
+ case IWL_LED_RF_STATE:
+ priv->led.default_trigger =
+ ieee80211_get_radio_led_name(priv->hw);
+ break;
+ }
+
+ ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
+ if (ret) {
+ kfree(priv->led.name);
+ return;
+ }
+
+ priv->led_registered = true;
+}
+EXPORT_SYMBOL(iwl_legacy_leds_init);
+
+void iwl_legacy_leds_exit(struct iwl_priv *priv)
+{
+ if (!priv->led_registered)
+ return;
+
+ led_classdev_unregister(&priv->led);
+ kfree(priv->led.name);
+}
+EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
new file mode 100644
index 000000000000..f0791f70f79d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-led.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_leds_h__
+#define __iwl_legacy_leds_h__
+
+
+struct iwl_priv;
+
+#define IWL_LED_SOLID 11
+#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
+
+#define IWL_LED_ACTIVITY (0<<1)
+#define IWL_LED_LINK (1<<1)
+
+/*
+ * LED mode
+ * IWL_LED_DEFAULT: use device default
+ * IWL_LED_RF_STATE: turn LED on/off based on RF state
+ * LED ON = RF ON
+ * LED OFF = RF OFF
+ * IWL_LED_BLINK: adjust led blink rate based on blink table
+ */
+enum iwl_led_mode {
+ IWL_LED_DEFAULT,
+ IWL_LED_RF_STATE,
+ IWL_LED_BLINK,
+};
+
+void iwl_legacy_leds_init(struct iwl_priv *priv);
+void iwl_legacy_leds_exit(struct iwl_priv *priv);
+
+#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
new file mode 100644
index 000000000000..38647e481eb0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
@@ -0,0 +1,456 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_rs_h__
+#define __iwl_legacy_rs_h__
+
+struct iwl_rate_info {
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+};
+
+struct iwl3945_rate_info {
+ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+ u8 table_rs_index; /* index in rate scale table cmd */
+ u8 prev_table_rs; /* prev in rate table cmd */
+};
+
+
+/*
+ * These serve as indexes into
+ * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
+ */
+enum {
+ IWL_RATE_1M_INDEX = 0,
+ IWL_RATE_2M_INDEX,
+ IWL_RATE_5M_INDEX,
+ IWL_RATE_11M_INDEX,
+ IWL_RATE_6M_INDEX,
+ IWL_RATE_9M_INDEX,
+ IWL_RATE_12M_INDEX,
+ IWL_RATE_18M_INDEX,
+ IWL_RATE_24M_INDEX,
+ IWL_RATE_36M_INDEX,
+ IWL_RATE_48M_INDEX,
+ IWL_RATE_54M_INDEX,
+ IWL_RATE_60M_INDEX,
+ IWL_RATE_COUNT,
+ IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
+ IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
+ IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
+ IWL_RATE_INVALID = IWL_RATE_COUNT,
+};
+
+enum {
+ IWL_RATE_6M_INDEX_TABLE = 0,
+ IWL_RATE_9M_INDEX_TABLE,
+ IWL_RATE_12M_INDEX_TABLE,
+ IWL_RATE_18M_INDEX_TABLE,
+ IWL_RATE_24M_INDEX_TABLE,
+ IWL_RATE_36M_INDEX_TABLE,
+ IWL_RATE_48M_INDEX_TABLE,
+ IWL_RATE_54M_INDEX_TABLE,
+ IWL_RATE_1M_INDEX_TABLE,
+ IWL_RATE_2M_INDEX_TABLE,
+ IWL_RATE_5M_INDEX_TABLE,
+ IWL_RATE_11M_INDEX_TABLE,
+ IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
+};
+
+enum {
+ IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+ IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
+ IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
+ IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
+ IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
+#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
+#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
+#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
+#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
+#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
+#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
+#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
+#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
+#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
+#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
+#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
+#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
+
+/* uCode API values for legacy bit rates, both OFDM and CCK */
+enum {
+ IWL_RATE_6M_PLCP = 13,
+ IWL_RATE_9M_PLCP = 15,
+ IWL_RATE_12M_PLCP = 5,
+ IWL_RATE_18M_PLCP = 7,
+ IWL_RATE_24M_PLCP = 9,
+ IWL_RATE_36M_PLCP = 11,
+ IWL_RATE_48M_PLCP = 1,
+ IWL_RATE_54M_PLCP = 3,
+ IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
+ IWL_RATE_1M_PLCP = 10,
+ IWL_RATE_2M_PLCP = 20,
+ IWL_RATE_5M_PLCP = 55,
+ IWL_RATE_11M_PLCP = 110,
+ /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
+};
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+ IWL_RATE_SISO_6M_PLCP = 0,
+ IWL_RATE_SISO_12M_PLCP = 1,
+ IWL_RATE_SISO_18M_PLCP = 2,
+ IWL_RATE_SISO_24M_PLCP = 3,
+ IWL_RATE_SISO_36M_PLCP = 4,
+ IWL_RATE_SISO_48M_PLCP = 5,
+ IWL_RATE_SISO_54M_PLCP = 6,
+ IWL_RATE_SISO_60M_PLCP = 7,
+ IWL_RATE_MIMO2_6M_PLCP = 0x8,
+ IWL_RATE_MIMO2_12M_PLCP = 0x9,
+ IWL_RATE_MIMO2_18M_PLCP = 0xa,
+ IWL_RATE_MIMO2_24M_PLCP = 0xb,
+ IWL_RATE_MIMO2_36M_PLCP = 0xc,
+ IWL_RATE_MIMO2_48M_PLCP = 0xd,
+ IWL_RATE_MIMO2_54M_PLCP = 0xe,
+ IWL_RATE_MIMO2_60M_PLCP = 0xf,
+ IWL_RATE_SISO_INVM_PLCP,
+ IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+ IWL_RATE_6M_IEEE = 12,
+ IWL_RATE_9M_IEEE = 18,
+ IWL_RATE_12M_IEEE = 24,
+ IWL_RATE_18M_IEEE = 36,
+ IWL_RATE_24M_IEEE = 48,
+ IWL_RATE_36M_IEEE = 72,
+ IWL_RATE_48M_IEEE = 96,
+ IWL_RATE_54M_IEEE = 108,
+ IWL_RATE_60M_IEEE = 120,
+ IWL_RATE_1M_IEEE = 2,
+ IWL_RATE_2M_IEEE = 4,
+ IWL_RATE_5M_IEEE = 11,
+ IWL_RATE_11M_IEEE = 22,
+};
+
+#define IWL_CCK_BASIC_RATES_MASK \
+ (IWL_RATE_1M_MASK | \
+ IWL_RATE_2M_MASK)
+
+#define IWL_CCK_RATES_MASK \
+ (IWL_CCK_BASIC_RATES_MASK | \
+ IWL_RATE_5M_MASK | \
+ IWL_RATE_11M_MASK)
+
+#define IWL_OFDM_BASIC_RATES_MASK \
+ (IWL_RATE_6M_MASK | \
+ IWL_RATE_12M_MASK | \
+ IWL_RATE_24M_MASK)
+
+#define IWL_OFDM_RATES_MASK \
+ (IWL_OFDM_BASIC_RATES_MASK | \
+ IWL_RATE_9M_MASK | \
+ IWL_RATE_18M_MASK | \
+ IWL_RATE_36M_MASK | \
+ IWL_RATE_48M_MASK | \
+ IWL_RATE_54M_MASK)
+
+#define IWL_BASIC_RATES_MASK \
+ (IWL_OFDM_BASIC_RATES_MASK | \
+ IWL_CCK_BASIC_RATES_MASK)
+
+#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
+
+#define IWL_INVALID_VALUE -1
+
+#define IWL_MIN_RSSI_VAL -100
+#define IWL_MAX_RSSI_VAL 0
+
+/* These values specify how many Tx frame attempts before
+ * searching for a new modulation mode */
+#define IWL_LEGACY_FAILURE_LIMIT 160
+#define IWL_LEGACY_SUCCESS_LIMIT 480
+#define IWL_LEGACY_TABLE_COUNT 160
+
+#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
+#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
+#define IWL_NONE_LEGACY_TABLE_COUNT 1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IWL_RS_GOOD_RATIO 12800 /* 100% */
+#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
+#define IWL_RATE_HIGH_TH 10880 /* 85% */
+#define IWL_RATE_INCREASE_TH 6400 /* 50% */
+#define IWL_RATE_DECREASE_TH 1920 /* 15% */
+
+/* possible actions when in legacy mode */
+#define IWL_LEGACY_SWITCH_ANTENNA1 0
+#define IWL_LEGACY_SWITCH_ANTENNA2 1
+#define IWL_LEGACY_SWITCH_SISO 2
+#define IWL_LEGACY_SWITCH_MIMO2_AB 3
+#define IWL_LEGACY_SWITCH_MIMO2_AC 4
+#define IWL_LEGACY_SWITCH_MIMO2_BC 5
+
+/* possible actions when in siso mode */
+#define IWL_SISO_SWITCH_ANTENNA1 0
+#define IWL_SISO_SWITCH_ANTENNA2 1
+#define IWL_SISO_SWITCH_MIMO2_AB 2
+#define IWL_SISO_SWITCH_MIMO2_AC 3
+#define IWL_SISO_SWITCH_MIMO2_BC 4
+#define IWL_SISO_SWITCH_GI 5
+
+/* possible actions when in mimo mode */
+#define IWL_MIMO2_SWITCH_ANTENNA1 0
+#define IWL_MIMO2_SWITCH_ANTENNA2 1
+#define IWL_MIMO2_SWITCH_SISO_A 2
+#define IWL_MIMO2_SWITCH_SISO_B 3
+#define IWL_MIMO2_SWITCH_SISO_C 4
+#define IWL_MIMO2_SWITCH_GI 5
+
+#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
+
+#define IWL_ACTION_LIMIT 3 /* # possible actions */
+
+#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IWL_AGG_TPT_THREHOLD 0
+#define IWL_AGG_LOAD_THRESHOLD 10
+#define IWL_AGG_ALL_TID 0xff
+#define TID_QUEUE_CELL_SPACING 50 /*mS */
+#define TID_QUEUE_MAX_SIZE 20
+#define TID_ROUND_VALUE 5 /* mS */
+#define TID_MAX_LOAD_COUNT 8
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
+
+extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
+
+enum iwl_table_type {
+ LQ_NONE,
+ LQ_G, /* legacy types */
+ LQ_A,
+ LQ_SISO, /* high-throughput types */
+ LQ_MIMO2,
+ LQ_MAX,
+};
+
+#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define ANT_NONE 0x0
+#define ANT_A BIT(0)
+#define ANT_B BIT(1)
+#define ANT_AB (ANT_A | ANT_B)
+#define ANT_C BIT(2)
+#define ANT_AC (ANT_A | ANT_C)
+#define ANT_BC (ANT_B | ANT_C)
+#define ANT_ABC (ANT_AB | ANT_C)
+
+#define IWL_MAX_MCS_DISPLAY_SIZE 12
+
+struct iwl_rate_mcs_info {
+ char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
+ char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct iwl_rate_scale_data -- tx success history for one rate
+ */
+struct iwl_rate_scale_data {
+ u64 data; /* bitmap of successful frames */
+ s32 success_counter; /* number of frames successful */
+ s32 success_ratio; /* per-cent * 128 */
+ s32 counter; /* number of frames attempted */
+ s32 average_tpt; /* success ratio * expected throughput */
+ unsigned long stamp;
+};
+
+/**
+ * struct iwl_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct iwl_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct iwl_scale_tbl_info {
+ enum iwl_table_type lq_type;
+ u8 ant_type;
+ u8 is_SGI; /* 1 = short guard interval */
+ u8 is_ht40; /* 1 = 40 MHz channel width */
+ u8 is_dup; /* 1 = duplicated data streams */
+ u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
+ u8 max_search; /* maximum number of tables we can search */
+ s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ u32 current_rate; /* rate_n_flags, uCode API format */
+ struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+};
+
+struct iwl_traffic_load {
+ unsigned long time_stamp; /* age of the oldest statistics */
+ u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
+ * slice */
+ u32 total; /* total num of packets during the
+ * last TID_MAX_TIME_DIFF */
+ u8 queue_count; /* number of queues that have
+ * been used since the last cleanup */
+ u8 head; /* start of the circular buffer */
+};
+
+/**
+ * struct iwl_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct iwl_lq_sta {
+ u8 active_tbl; /* index of active table, range 0-1 */
+ u8 enable_counter; /* indicates HT mode */
+ u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
+ u8 search_better_tbl; /* 1: currently trying alternate mode */
+ s32 last_tpt;
+
+ /* The following determine when to search for a new mode */
+ u32 table_count_limit;
+ u32 max_failure_limit; /* # failed frames before new search */
+ u32 max_success_limit; /* # successful frames before new search */
+ u32 table_count;
+ u32 total_failed; /* total failed frames, any/all rates */
+ u32 total_success; /* total successful frames, any/all rates */
+ u64 flush_timer; /* time staying in mode before new search */
+
+ u8 action_counter; /* # mode-switch actions tried */
+ u8 is_green;
+ u8 is_dup;
+ enum ieee80211_band band;
+
+ /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
+ u32 supp_rates;
+ u16 active_legacy_rate;
+ u16 active_siso_rate;
+ u16 active_mimo2_rate;
+ s8 max_rate_idx; /* Max rate set by user */
+ u8 missed_rate_counter;
+
+ struct iwl_link_quality_cmd lq;
+ struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+ struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
+ u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *rs_sta_dbgfs_scale_table_file;
+ struct dentry *rs_sta_dbgfs_stats_table_file;
+ struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+ struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+ u32 dbg_fixed_rate;
+#endif
+ struct iwl_priv *drv;
+
+ /* used to be in sta_info */
+ int last_txrate_idx;
+ /* last tx rate_n_flags */
+ u32 last_rate_n_flags;
+ /* packets destined for this STA are aggregated */
+ u8 is_agg;
+};
+
+static inline u8 iwl4965_num_of_ant(u8 mask)
+{
+ return !!((mask) & ANT_A) +
+ !!((mask) & ANT_B) +
+ !!((mask) & ANT_C);
+}
+
+static inline u8 iwl4965_first_antenna(u8 mask)
+{
+ if (mask & ANT_A)
+ return ANT_A;
+ if (mask & ANT_B)
+ return ANT_B;
+ return ANT_C;
+}
+
+
+/**
+ * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
+ *
+ * The specific throughput table used is based on the type of network
+ * the station is associated with, including A, B, G, and G w/ TGG protection
+ */
+extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+
+/* Initialize station's rate scaling information after adding station */
+extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
+ struct ieee80211_sta *sta, u8 sta_id);
+extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
+ struct ieee80211_sta *sta, u8 sta_id);
+
+/**
+ * iwl_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand-alone module. The driver can call
+ * iwl_rate_control_register in order to register the rate control callbacks
+ * with the mac80211 subsystem. This should be performed prior to calling
+ * ieee80211_register_hw.
+ *
+ */
+extern int iwl4965_rate_control_register(void);
+extern int iwl3945_rate_control_register(void);
+
+/**
+ * iwl_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+extern void iwl4965_rate_control_unregister(void);
+extern void iwl3945_rate_control_unregister(void);
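+
+/*
+ * Ordering sketch (illustrative only): register the rate control callbacks
+ * before ieee80211_register_hw(), and unregister them only after
+ * ieee80211_unregister_hw().  The wrapper below is hypothetical, is not
+ * defined anywhere in the driver, and assumes <net/mac80211.h> is in scope.
+ */
+static inline int iwl4965_register_rc_then_hw(struct ieee80211_hw *hw)
+{
+	int ret = iwl4965_rate_control_register();
+
+	if (ret)
+		return ret;
+	return ieee80211_register_hw(hw);
+}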
+
+#endif /* __iwl_legacy_rs__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
new file mode 100644
index 000000000000..903ef0d6d6cb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.c
@@ -0,0 +1,165 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-commands.h"
+#include "iwl-debug.h"
+#include "iwl-power.h"
+
+/*
+ * Setting power level allows the card to go to sleep when not busy.
+ *
+ * We calculate a sleep command based on the required latency, which
+ * we get from mac80211. In order to handle thermal throttling, we can
+ * also use pre-defined power levels.
+ */
+
+/*
+ * This defines the old power levels. They are still used by default
+ * (level 1) and for thermal throttle (levels 3 through 5)
+ */
+
+struct iwl_power_vec_entry {
+ struct iwl_powertable_cmd cmd;
+ u8 no_dtim; /* number of DTIM periods to skip */
+};
+
+static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
+ struct iwl_powertable_cmd *cmd)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ if (priv->power_data.pci_pm)
+ cmd->flags |= IWL_POWER_PCI_PM_MSK;
+
+ IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
+}
+
+static int
+iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
+{
+ IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
+ IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
+ IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
+ le32_to_cpu(cmd->tx_data_timeout));
+ IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
+ le32_to_cpu(cmd->rx_data_timeout));
+ IWL_DEBUG_POWER(priv,
+ "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
+ le32_to_cpu(cmd->sleep_interval[0]),
+ le32_to_cpu(cmd->sleep_interval[1]),
+ le32_to_cpu(cmd->sleep_interval[2]),
+ le32_to_cpu(cmd->sleep_interval[3]),
+ le32_to_cpu(cmd->sleep_interval[4]));
+
+ return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
+ sizeof(struct iwl_powertable_cmd), cmd);
+}
+
+int
+iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+ bool force)
+{
+ int ret;
+ bool update_chains;
+
+ lockdep_assert_held(&priv->mutex);
+
+ /* Don't update the RX chain when chain noise calibration is running */
+ update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
+ priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
+
+ if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+ return 0;
+
+ if (!iwl_legacy_is_ready_rf(priv))
+ return -EIO;
+
+ /* scan completion uses sleep_cmd_next, so it needs to be kept updated */
+ memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+ if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
+ IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
+ return 0;
+ }
+
+ if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+ set_bit(STATUS_POWER_PMI, &priv->status);
+
+ ret = iwl_legacy_set_power(priv, cmd);
+ if (!ret) {
+ if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+ clear_bit(STATUS_POWER_PMI, &priv->status);
+
+ if (priv->cfg->ops->lib->update_chain_flags && update_chains)
+ priv->cfg->ops->lib->update_chain_flags(priv);
+ else if (priv->cfg->ops->lib->update_chain_flags)
+ IWL_DEBUG_POWER(priv,
+ "Cannot update the power, chain noise "
+ "calibration running: %d\n",
+ priv->chain_noise_data.state);
+
+ memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
+ } else
+ IWL_ERR(priv, "set power fail, ret = %d", ret);
+
+ return ret;
+}
+
+int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
+{
+ struct iwl_powertable_cmd cmd;
+
+ iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
+ return iwl_legacy_power_set_mode(priv, &cmd, force);
+}
+EXPORT_SYMBOL(iwl_legacy_power_update_mode);
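+
+/*
+ * Usage sketch (illustrative only; the caller below is hypothetical and not
+ * used anywhere): power mode updates must run under priv->mutex, matching
+ * the lockdep assertion in iwl_legacy_power_set_mode().
+ */
+static void __maybe_unused iwl_legacy_power_update_example(struct iwl_priv *priv)
+{
+	mutex_lock(&priv->mutex);
+	iwl_legacy_power_update_mode(priv, true);	/* force re-send */
+	mutex_unlock(&priv->mutex);
+}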
+
+/* initialize to default */
+void iwl_legacy_power_initialize(struct iwl_priv *priv)
+{
+ u16 lctl = iwl_legacy_pcie_link_ctl(priv);
+
+ priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+
+ priv->power_data.debug_sleep_level_override = -1;
+
+ memset(&priv->power_data.sleep_cmd, 0,
+ sizeof(priv->power_data.sleep_cmd));
+}
+EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
new file mode 100644
index 000000000000..d30b36acdc4a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-power.h
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#ifndef __iwl_legacy_power_setting_h__
+#define __iwl_legacy_power_setting_h__
+
+#include "iwl-commands.h"
+
+enum iwl_power_level {
+ IWL_POWER_INDEX_1,
+ IWL_POWER_INDEX_2,
+ IWL_POWER_INDEX_3,
+ IWL_POWER_INDEX_4,
+ IWL_POWER_INDEX_5,
+ IWL_POWER_NUM
+};
+
+struct iwl_power_mgr {
+ struct iwl_powertable_cmd sleep_cmd;
+ struct iwl_powertable_cmd sleep_cmd_next;
+ int debug_sleep_level_override;
+ bool pci_pm;
+};
+
+int
+iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+ bool force);
+int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
+void iwl_legacy_power_initialize(struct iwl_priv *priv);
+
+#endif /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/iwl-prph.h
new file mode 100644
index 000000000000..30a493003ab0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-prph.h
@@ -0,0 +1,523 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_prph_h__
+#define __iwl_legacy_prph_h__
+
+/*
+ * Registers in this file are internal, not PCI bus memory mapped.
+ * Driver accesses these via HBUS_TARG_PRPH_* registers.
+ */
+#define PRPH_BASE (0x00000)
+#define PRPH_END (0xFFFFF)
+
+/* APMG (power management) constants */
+#define APMG_BASE (PRPH_BASE + 0x3000)
+#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000)
+#define APMG_CLK_EN_REG (APMG_BASE + 0x0004)
+#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008)
+#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c)
+#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010)
+#define APMG_RFKILL_REG (APMG_BASE + 0x0014)
+#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c)
+#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020)
+#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058)
+#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C)
+
+#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001)
+#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
+#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
+
+#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
+#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
+#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
+#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
+#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
+#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
+
+#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
+
+/**
+ * BSM (Bootstrap State Machine)
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down when the embedded control
+ * processor is sleeping (e.g. for periodic power-saving shutdowns of radio).
+ *
+ * When powering back up after sleeps (or during initial uCode load), the BSM
+ * internally loads the short bootstrap program from the special SRAM into the
+ * embedded processor's instruction SRAM, and starts the processor so it runs
+ * the bootstrap program.
+ *
+ * This bootstrap program loads (via PCI busmaster DMA) instructions and data
+ * images for a uCode program from host DRAM locations. The host driver
+ * indicates DRAM locations and sizes for instruction and data images via the
+ * four BSM_DRAM_* registers. Once the bootstrap program loads the new program,
+ * the new program starts automatically.
+ *
+ * The uCode used for open-source drivers includes two programs:
+ *
+ * 1) Initialization -- performs hardware calibration and sets up some
+ * internal data, then notifies host via "initialize alive" notification
+ * (struct iwl_init_alive_resp) that it has completed all of its work.
+ * After signal from host, it then loads and starts the runtime program.
+ * The initialization program must be used when initially setting up the
+ * NIC after loading the driver.
+ *
+ * 2) Runtime/Protocol -- performs all normal runtime operations. This
+ * notifies host via "alive" notification (struct iwl_alive_resp) that it
+ * is ready to be used.
+ *
+ * When initializing the NIC, the host driver does the following procedure:
+ *
+ * 1) Load bootstrap program (instructions only, no data image for bootstrap)
+ * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND
+ *
+ * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction
+ * images in host DRAM.
+ *
+ * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked:
+ * BSM_WR_MEM_SRC_REG = 0
+ * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND
+ * BSM_WR_MEM_DWCOUNT_REG = # dwords in bootstrap instruction image
+ *
+ * 4) Load bootstrap into instruction SRAM:
+ * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START
+ *
+ * 5) Wait for load completion:
+ * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0
+ *
+ * 6) Enable future boot loads whenever NIC's power management triggers it:
+ * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN
+ *
+ * 7) Start the NIC by removing all reset bits:
+ * CSR_RESET = 0
+ *
+ * The bootstrap uCode (already in instruction SRAM) loads initialization
+ * uCode. Initialization uCode performs data initialization, sends
+ * "initialize alive" notification to host, and waits for a signal from
+ * host to load runtime code.
+ *
+ * 8) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
+ * images in host DRAM. The last register loaded must be the instruction
+ * byte count register ("1" in MSbit tells initialization uCode to load
+ * the runtime uCode):
+ * BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD
+ *
+ * 9) Wait for "alive" notification, then issue normal runtime commands.
+ *
+ * Data caching during power-downs:
+ *
+ * Just before the embedded controller powers down (e.g. for automatic
+ * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA)
+ * a current snapshot of the embedded processor's data SRAM into host DRAM.
+ * This caches the data while the embedded processor's memory is powered down.
+ * Location and size are controlled by BSM_DRAM_DATA_* registers.
+ *
+ * NOTE: Instruction SRAM does not need to be saved, since that doesn't
+ * change during operation; the original image (from uCode distribution
+ * file) can be used for reload.
+ *
+ * When powering back up, the BSM loads the bootstrap program. Bootstrap looks
+ * at the BSM_DRAM_* registers, which now point to the runtime instruction
+ * image and the cached (modified) runtime data (*not* the initialization
+ * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the
+ * uCode from where it left off before the power-down.
+ *
+ * NOTE: Initialization uCode does *not* run as part of the save/restore
+ * procedure.
+ *
+ * This save/restore method is mostly for autonomous power management during
+ * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and
+ * RFKILL should use complete restarts (with total re-initialization) of uCode,
+ * allowing total shutdown (including BSM memory).
+ *
+ * Note that, during normal operation, the host DRAM that held the initial
+ * startup data for the runtime code is now being used as a backup data cache
+ * for modified data! If you need to completely re-initialize the NIC, make
+ * sure that you use the runtime data image from the uCode distribution file,
+ * not the modified/saved runtime data. You may want to store a separate
+ * "clean" runtime data image in DRAM to avoid disk reads of distribution file.
+ */
+
+/* BSM bit fields */
+#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
+#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/
+#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
+
+/* BSM addresses */
+#define BSM_BASE (PRPH_BASE + 0x3400)
+#define BSM_END (PRPH_BASE + 0x3800)
+
+#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
+#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
+#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
+#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
+#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
+
+/*
+ * Pointers and size regs for bootstrap load and data SRAM save/restore.
+ * NOTE: 3945 pointers use bits 31:0 of DRAM address.
+ * 4965 pointers use bits 35:4 of DRAM address.
+ */
+#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090)
+#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094)
+#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098)
+#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C)
+
+/*
+ * BSM special memory, stays powered on during power-save sleeps.
+ * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
+ */
+#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
+#define BSM_SRAM_SIZE (1024) /* bytes */
+
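+/*
+ * Minimal sketch of load steps 3)-6) above (illustrative only).  It assumes
+ * the iwl_legacy_write_prph()/iwl_legacy_read_prph() helpers from iwl-io.h,
+ * an instruction-SRAM base named RTC_INST_LOWER_BOUND from the hardware
+ * headers, and a bootstrap image already written into BSM SRAM; poll limits
+ * and error handling are omitted.
+ */
+static inline void iwl_bsm_load_sketch(struct iwl_priv *priv, u32 inst_len)
+{
+	/* Source (BSM SRAM), destination (instruction SRAM), dword count */
+	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
+	iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
+	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, inst_len / sizeof(u32));
+
+	/* Start the copy, then poll until the START bit clears */
+	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+	while (iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG) &
+	       BSM_WR_CTRL_REG_BIT_START)
+		cpu_relax();
+
+	/* Re-arm the BSM for future power-up boot loads */
+	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
+			      BSM_WR_CTRL_REG_BIT_START_EN);
+}
+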
+
+/* 3945 Tx scheduler registers */
+#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
+#define ALM_SCD_MODE_REG (ALM_SCD_BASE + 0x000)
+#define ALM_SCD_ARASTAT_REG (ALM_SCD_BASE + 0x004)
+#define ALM_SCD_TXFACT_REG (ALM_SCD_BASE + 0x010)
+#define ALM_SCD_TXF4MF_REG (ALM_SCD_BASE + 0x014)
+#define ALM_SCD_TXF5MF_REG (ALM_SCD_BASE + 0x020)
+#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C)
+#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030)
+
+/**
+ * Tx Scheduler
+ *
+ * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
+ * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
+ * host DRAM. It steers each frame's Tx command (which contains the frame
+ * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
+ * device. A queue maps to only one (selectable by driver) Tx DMA channel,
+ * but one DMA channel may take input from several queues.
+ *
+ * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
+ * (cf. default_queue_to_tx_fifo in iwl-4965.c):
+ *
+ * 0 -- EDCA BK (background) frames, lowest priority
+ * 1 -- EDCA BE (best effort) frames, normal priority
+ * 2 -- EDCA VI (video) frames, higher priority
+ * 3 -- EDCA VO (voice) and management frames, highest priority
+ * 4 -- Commands (e.g. RXON, etc.)
+ * 5 -- unused (HCCA)
+ * 6 -- unused (HCCA)
+ * 7 -- not used by driver (device-internal only)
+ *
+ *
+ * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
+ * In addition, driver can map the remaining queues to Tx DMA/FIFO
+ * channels 0-3 to support 11n aggregation via EDCA DMA channels.
+ *
+ * The driver sets up each queue to work in one of two modes:
+ *
+ * 1) Scheduler-Ack, in which the scheduler automatically supports a
+ * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
+ * contains TFDs for a unique combination of Recipient Address (RA)
+ * and Traffic Identifier (TID), that is, traffic of a given
+ * Quality-Of-Service (QOS) priority, destined for a single station.
+ *
+ * In scheduler-ack mode, the scheduler keeps track of the Tx status of
+ * each frame within the BA window, including whether it's been transmitted,
+ * and whether it's been acknowledged by the receiving station. The device
+ * automatically processes block-acks received from the receiving STA,
+ * and reschedules un-acked frames to be retransmitted (successful
+ * Tx completion may end up being out-of-order).
+ *
+ * The driver must maintain the queue's Byte Count table in host DRAM
+ * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
+ * This mode does not support fragmentation.
+ *
+ * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
+ * The device may automatically retry Tx, but will retry only one frame
+ * at a time, until receiving ACK from receiving station, or reaching
+ * retry limit and giving up.
+ *
+ * The command queue (#4/#9) must use this mode!
+ * This mode does not require use of the Byte Count table in host DRAM.
+ *
+ * Driver controls scheduler operation via 3 means:
+ * 1) Scheduler registers
+ * 2) Shared scheduler data base in internal 4965 SRAM
+ * 3) Shared data in host DRAM
+ *
+ * Initialization:
+ *
+ * When loading, driver should allocate memory for:
+ * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs.
+ * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory
+ * (1024 bytes for each queue).
+ *
+ * After receiving "Alive" response from uCode, driver must initialize
+ * the scheduler (especially for queue #4/#9, the command queue, otherwise
+ * the driver can't issue commands!).
+ */
+
+/**
+ * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * can keep track of at one time when creating block-ack chains of frames.
+ * Note that "64" matches the number of ack bits in a block-ack packet.
+ * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
+ * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
+ */
+#define SCD_WIN_SIZE 64
+#define SCD_FRAME_LIMIT 64
+
+/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
+#define IWL49_SCD_START_OFFSET 0xa02c00
+
+/*
+ * 4965 tells driver SRAM address for internal scheduler structs via this reg.
+ * Value is valid only after "Alive" response from uCode.
+ */
+#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
+
+/*
+ * Driver may need to update queue-empty bits after changing queue's
+ * write and read pointers (indexes) during (re-)initialization (i.e. when
+ * scheduler is not tracking what's happening).
+ * Bit fields:
+ * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
+ * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
+ * NOTE: This register is not used by Linux driver.
+ */
+#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
+
+/*
+ * Physical base address of array of byte count (BC) circular buffers (CBs).
+ * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
+ * This register points to BC CB for queue 0, must be on 1024-byte boundary.
+ * Others are spaced by 1024 bytes.
+ * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
+ * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
+ * Bit fields:
+ * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
+ */
+#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
+
+/*
+ * Enables any/all Tx DMA/FIFO channels.
+ * Scheduler generates requests for only the active channels.
+ * Set this to 0xff to enable all 8 channels (normal usage).
+ * Bit fields:
+ * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
+ */
+#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
+/*
+ * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
+ * Initialized and updated by driver as new TFDs are added to queue.
+ * NOTE: If using Block Ack, index must correspond to frame's
+ * Start Sequence Number; index = (SSN & 0xff)
+ * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
+ */
+#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
+
+/*
+ * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
+ * For FIFO mode, index indicates next frame to transmit.
+ * For Scheduler-ACK mode, index indicates first frame in Tx window.
+ * Initialized by driver, updated by scheduler.
+ */
+#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
+
+/*
+ * Select which queues work in chain mode (1) vs. not (0).
+ * Use chain mode to build chains of aggregated frames.
+ * Bit fields:
+ * 31-16: Reserved
+ * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
+ * NOTE: If driver sets up a queue for chain mode, it should also set up
+ * Scheduler-ACK mode for that queue, via SCD_QUEUE_STATUS_BITS(x).
+ */
+#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
+
+/*
+ * Select which queues interrupt driver when scheduler increments
+ * a queue's read pointer (index).
+ * Bit fields:
+ * 31-16: Reserved
+ * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
+ * NOTE: This functionality is apparently a no-op; driver relies on interrupts
+ * from Rx queue to read Tx command responses and update Tx queues.
+ */
+#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
+
+/*
+ * Queue search status registers. One for each queue.
+ * Sets up queue mode and assigns queue to Tx DMA channel.
+ * Bit fields:
+ * 19-10: Write mask/enable bits for bits 0-9
+ * 9: Driver should init to "0"
+ * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
+ * Driver should init to "1" for aggregation mode, or "0" otherwise.
+ * 7-6: Driver should init to "0"
+ * 5: Window Size Left; indicates whether scheduler can request
+ * another TFD, based on window size, etc. Driver should init
+ * this bit to "1" for aggregation mode, or "0" for non-agg.
+ * 4-1: Tx FIFO to use (range 0-7).
+ * 0: Queue is active (1), not active (0).
+ * Other bits should be written as "0"
+ *
+ * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
+ * via SCD_QUEUECHAIN_SEL.
+ */
+#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
+ (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
+
+/* Bit field positions */
+#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
+#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
+#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
+#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
+
+/* Write masks */
+#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
+#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
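+
+/*
+ * Sketch of activating a queue in FIFO (non-aggregation) mode using the bit
+ * layout above (illustrative only; assumes the iwl_legacy_write_prph()
+ * helper from iwl-io.h, and leaves the WSL and Scheduler-ACK bits at 0):
+ */
+static inline void iwl49_scd_queue_activate_sketch(struct iwl_priv *priv,
+						   int txq_id, int tx_fifo)
+{
+	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
+			(1 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+			(tx_fifo << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+			IWL49_SCD_QUEUE_STTS_REG_MSK);
+}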
+
+/**
+ * 4965 internal SRAM structures for scheduler, shared with driver ...
+ *
+ * Driver should clear and initialize the following areas after receiving
+ * "Alive" response from 4965 uCode, i.e. after initial
+ * uCode load, or after a uCode load done for error recovery:
+ *
+ * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
+ * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
+ * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
+ *
+ * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
+ * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
+ * All OFFSET values must be added to this base address.
+ */
+
+/*
+ * Queue context. One 8-byte entry for each of 16 queues.
+ *
+ * Driver should clear this entire area (size 0x80) to 0 after receiving
+ * "Alive" notification from uCode. Additionally, driver should init
+ * each queue's entry as follows:
+ *
+ * LS Dword bit fields:
+ * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
+ *
+ * MS Dword bit fields:
+ * 16-22: Frame limit. Driver should init to 10 (0xa).
+ *
+ * Driver should init all other bits to 0.
+ *
+ * Init must be done after driver receives "Alive" response from 4965 uCode,
+ * and when setting up queue for aggregation.
+ */
+#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
+#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
+ (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+
+#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
+#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
+#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
+#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
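+
+/*
+ * Sketch of the per-queue context init described above (illustrative only;
+ * assumes the iwl_legacy_write_targ_mem() helper from iwl-io.h and a
+ * scd_base_addr previously read from IWL49_SCD_SRAM_BASE_ADDR):
+ */
+static inline void iwl49_scd_context_init_sketch(struct iwl_priv *priv,
+						 u32 scd_base_addr, int txq_id)
+{
+	u32 addr = scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id);
+
+	/* LS dword: max Tx window size for Scheduler-ACK */
+	iwl_legacy_write_targ_mem(priv, addr,
+		(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+		IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+	/* MS dword: frame limit */
+	iwl_legacy_write_targ_mem(priv, addr + sizeof(u32),
+		(SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+		IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+}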
+
+/*
+ * Tx Status Bitmap
+ *
+ * Driver should clear this entire area (size 0x100) to 0 after receiving
+ * "Alive" notification from uCode. Area is used only by device itself;
+ * no other support (besides clearing) is required from driver.
+ */
+#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
+
+/*
+ * RAxTID to queue translation mapping.
+ *
+ * When queue is in Scheduler-ACK mode, frames placed in that queue must be
+ * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
+ * one QOS priority level destined for one station (for this wireless link,
+ * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
+ * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
+ * mode, the device ignores the mapping value.
+ *
+ * Bit fields, for each 16-bit map:
+ * 15-9: Reserved, set to 0
+ * 8-4: Index into device's station table for recipient station
+ * 3-0: Traffic ID (tid), range 0-15
+ *
+ * Driver should clear this entire area (size 32 bytes) to 0 after receiving
+ * "Alive" notification from uCode. To update a 16-bit map value, driver
+ * must read a dword-aligned value from device SRAM, replace the 16-bit map
+ * value of interest, and write the dword value back into device SRAM.
+ */
+#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
+
+/* Find translation table dword to read/write for given queue */
+#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+ ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
+
+#define IWL_SCD_TXFIFO_POS_TID (0)
+#define IWL_SCD_TXFIFO_POS_RA (4)
+#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
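+
+/*
+ * Read-modify-write sketch for one 16-bit RAxTID map entry, per the note
+ * above (illustrative only; assumes iwl_legacy_read_targ_mem() and
+ * iwl_legacy_write_targ_mem() from iwl-io.h and a scd_base_addr read from
+ * IWL49_SCD_SRAM_BASE_ADDR):
+ */
+static inline void iwl49_scd_set_ra_tid_sketch(struct iwl_priv *priv,
+					       u32 scd_base_addr, int txq_id,
+					       u8 sta_id, u8 tid)
+{
+	u32 addr = scd_base_addr +
+		   IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+	u16 map = ((sta_id << IWL_SCD_TXFIFO_POS_RA) |
+		   (tid << IWL_SCD_TXFIFO_POS_TID)) &
+		  IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+	u32 val = iwl_legacy_read_targ_mem(priv, addr);
+
+	/* Each dword holds two maps; odd queues use the upper 16 bits */
+	if (txq_id & 0x1)
+		val = (val & 0x0000FFFF) | ((u32)map << 16);
+	else
+		val = (val & 0xFFFF0000) | map;
+
+	iwl_legacy_write_targ_mem(priv, addr, val);
+}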
+
+/*********************** END TX SCHEDULER *************************************/
+
+#endif /* __iwl_legacy_prph_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
new file mode 100644
index 000000000000..654cf233a384
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-rx.c
@@ -0,0 +1,302 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+#include <asm/unaligned.h>
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which point to Receive Buffers to be filled by the NIC. These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC. The driver and NIC manage the Rx buffers by means
+ * of indexes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1, wrapped).
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ index
+ * and fire the RX interrupt. The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ INDEX is updated (updating the
+ * 'processed' and 'read' driver indexes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the iwl->rxq. The driver 'processed' index is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
+ * were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * iwl_legacy_rx_queue_alloc() Allocates rx_free
+ * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * iwl_rx_queue_restock
+ * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE index. If insufficient rx_free buffers
+ * are available, schedules iwl_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
+ * READ INDEX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls iwl_rx_queue_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
+
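+/*
+ * The empty/full relations above, written out (illustrative helpers only,
+ * not used elsewhere in the driver; RX_QUEUE_SIZE is assumed to be a power
+ * of two so the READ - 1 wrap can be done with a mask):
+ */
+static inline bool iwl_rxq_is_empty_sketch(const struct iwl_rx_queue *q)
+{
+	return q->write == ((q->read - 1) & (RX_QUEUE_SIZE - 1));
+}
+
+static inline bool iwl_rxq_is_full_sketch(const struct iwl_rx_queue *q)
+{
+	return q->write == q->read;
+}
+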
+/**
+ * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
+ */
+int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
+{
+ int s = q->read - q->write;
+ if (s <= 0)
+ s += RX_QUEUE_SIZE;
+ /* keep some headroom so a full queue is never confused with an empty one */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
+
+/**
+ * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void
+iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
+ struct iwl_rx_queue *q)
+{
+ unsigned long flags;
+ u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
+ u32 reg;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ if (q->need_update == 0)
+ goto exit_unlock;
+
+ /* If power-saving is in use, make sure device is awake */
+ if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(priv,
+ "Rx queue requesting wakeup,"
+ " GP1 = 0x%x\n", reg);
+ iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ goto exit_unlock;
+ }
+
+ q->write_actual = (q->write & ~0x7);
+ iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
+ q->write_actual);
+
+ /* Else device is assumed to be awake */
+ } else {
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
+ q->write_actual);
+ }
+
+ q->need_update = 0;
+
+ exit_unlock:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
+
+int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct device *dev = &priv->pci_dev->dev;
+ int i;
+
+ spin_lock_init(&rxq->lock);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+
+ /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
+ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
+ GFP_KERNEL);
+ if (!rxq->bd)
+ goto err_bd;
+
+ rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
+ &rxq->rb_stts_dma, GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err_rb;
+
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ rxq->need_update = 0;
+ return 0;
+
+err_rb:
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+err_bd:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
+
+
+void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+ if (!report->state) {
+ IWL_DEBUG_11H(priv,
+ "Spectrum Measure Notification: Start\n");
+ return;
+ }
+
+ memcpy(&priv->measure_report, report, sizeof(*report));
+ priv->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
+
+void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+ if (iwl_legacy_is_any_associated(priv)) {
+ if (priv->cfg->ops->lib->check_plcp_health) {
+ if (!priv->cfg->ops->lib->check_plcp_health(
+ priv, pkt)) {
+ /*
+ * high PLCP error rate detected,
+ * reset the radio
+ */
+ iwl_legacy_force_reset(priv,
+ IWL_RF_RESET, false);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_recover_from_statistics);
+
+/*
+ * returns non-zero if packet should be dropped
+ */
+int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ u32 decrypt_res,
+ struct ieee80211_rx_status *stats)
+{
+ u16 fc = le16_to_cpu(hdr->frame_control);
+
+ /*
+ * All contexts have the same setting here due to it being
+ * a module parameter, so OK to check any context.
+ */
+ if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
+ RXON_FILTER_DIS_DECRYPT_MSK)
+ return 0;
+
+ if (!(fc & IEEE80211_FCTL_PROTECTED))
+ return 0;
+
+ IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
+ switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ /* The uCode got a bad phase 1 key and passed the packet up anyway;
+ * decryption will be done in SW. */
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_KEY_TTAK)
+ break;
+
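+ /* fall through - TTAK OK, check the ICV as for WEP */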
+ case RX_RES_STATUS_SEC_TYPE_WEP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_ICV_MIC) {
+ /* bad ICV, the packet is destroyed since the
+ * decryption is done in place, drop it */
+ IWL_DEBUG_RX(priv, "Packet destroyed\n");
+ return -1;
+ }
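+ /* fall through - ICV OK, check whether HW decryption succeeded */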
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_DECRYPT_OK) {
+ IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
+ stats->flag |= RX_FLAG_DECRYPTED;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
new file mode 100644
index 000000000000..60f597f796ca
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-scan.c
@@ -0,0 +1,625 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+
+/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
+ * sending probe req. This should be set long enough to hear probe responses
+ * from more than one AP. */
+#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
+#define IWL_ACTIVE_DWELL_TIME_52 (20)
+
+#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
+#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
+
+/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
+ * Must be set longer than active dwell time.
+ * For the most reliable scan, set > AP beacon interval (typically 100msec). */
+#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
+#define IWL_PASSIVE_DWELL_TIME_52 (10)
+#define IWL_PASSIVE_DWELL_BASE (100)
+#define IWL_CHANNEL_TUNE_TIME 5
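+
+/*
+ * Worked example (illustrative): an active scan on 2.4 GHz carrying two
+ * probe requests dwells IWL_ACTIVE_DWELL_TIME_24 +
+ * IWL_ACTIVE_DWELL_FACTOR_24GHZ * (2 + 1) = 30 + 9 = 39 msec per channel;
+ * see iwl_legacy_get_active_dwell_time() below.
+ */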
+
+static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
+{
+ int ret;
+ struct iwl_rx_packet *pkt;
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_SCAN_ABORT_CMD,
+ .flags = CMD_WANT_SKB,
+ };
+
+ /* Exit instantly with error when device is not ready
+ * to receive scan abort command or it does not perform
+ * hardware scan currently */
+ if (!test_bit(STATUS_READY, &priv->status) ||
+ !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
+ !test_bit(STATUS_SCAN_HW, &priv->status) ||
+ test_bit(STATUS_FW_ERROR, &priv->status) ||
+ test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return -EIO;
+
+ ret = iwl_legacy_send_cmd_sync(priv, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ if (pkt->u.status != CAN_ABORT_STATUS) {
+ /* The scan abort will return 1 for success or
+ * 2 for "failure". A failure condition can be
+ * due to simply not being in an active scan which
+ * can occur if we send the scan abort before the
+ * microcode has notified us that a scan is
+ * completed. */
+ IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
+ ret = -EIO;
+ }
+
+ iwl_legacy_free_pages(priv, cmd.reply_page);
+ return ret;
+}
+
+static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
+{
+ /* check if scan was requested from mac80211 */
+ if (priv->scan_request) {
+ IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
+ ieee80211_scan_completed(priv->hw, aborted);
+ }
+
+ priv->is_internal_short_scan = false;
+ priv->scan_vif = NULL;
+ priv->scan_request = NULL;
+}
+
+void iwl_legacy_force_scan_end(struct iwl_priv *priv)
+{
+ lockdep_assert_held(&priv->mutex);
+
+ if (!test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
+ return;
+ }
+
+ IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
+ clear_bit(STATUS_SCANNING, &priv->status);
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+ clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+ iwl_legacy_complete_scan(priv, true);
+}
+
+static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (!test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
+ return;
+ }
+
+ if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
+ return;
+ }
+
+ ret = iwl_legacy_send_scan_abort(priv);
+ if (ret) {
+ IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
+ iwl_legacy_force_scan_end(priv);
+ } else
+ IWL_DEBUG_SCAN(priv, "Successfully sent scan abort\n");
+}
+
+/**
+ * iwl_legacy_scan_cancel - Cancel any currently executing HW scan
+ */
+int iwl_legacy_scan_cancel(struct iwl_priv *priv)
+{
+ IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
+ queue_work(priv->workqueue, &priv->abort_scan);
+ return 0;
+}
+EXPORT_SYMBOL(iwl_legacy_scan_cancel);
+
+/**
+ * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
+ * @ms: amount of time to wait (in milliseconds) for scan to abort
+ *
+ */
+int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(ms);
+
+ lockdep_assert_held(&priv->mutex);
+
+ IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
+
+ iwl_legacy_do_scan_abort(priv);
+
+ while (time_before_eq(jiffies, timeout)) {
+ if (!test_bit(STATUS_SCAN_HW, &priv->status))
+ break;
+ msleep(20);
+ }
+
+ return test_bit(STATUS_SCAN_HW, &priv->status);
+}
+EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
+
+/* Service response to REPLY_SCAN_CMD (0x80) */
+static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scanreq_notification *notif =
+ (struct iwl_scanreq_notification *)pkt->u.raw;
+
+ IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
+#endif
+}
+
+/* Service SCAN_START_NOTIFICATION (0x82) */
+static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scanstart_notification *notif =
+ (struct iwl_scanstart_notification *)pkt->u.raw;
+ priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
+ IWL_DEBUG_SCAN(priv, "Scan start: "
+ "%d [802.11%s] "
+ "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
+ notif->channel,
+ notif->band ? "bg" : "a",
+ le32_to_cpu(notif->tsf_high),
+ le32_to_cpu(notif->tsf_low),
+ notif->status, notif->beacon_timer);
+}
+
+/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
+static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scanresults_notification *notif =
+ (struct iwl_scanresults_notification *)pkt->u.raw;
+
+ IWL_DEBUG_SCAN(priv, "Scan ch.res: "
+ "%d [802.11%s] "
+ "(TSF: 0x%08X:%08X) - %d "
+ "elapsed=%lu usec\n",
+ notif->channel,
+ notif->band ? "bg" : "a",
+ le32_to_cpu(notif->tsf_high),
+ le32_to_cpu(notif->tsf_low),
+ le32_to_cpu(notif->statistics[0]),
+ le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
+#endif
+}
+
+/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
+static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
+#endif
+
+ IWL_DEBUG_SCAN(priv,
+ "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
+ scan_notif->scanned_channels,
+ scan_notif->tsf_low,
+ scan_notif->tsf_high, scan_notif->status);
+
+ /* The HW is no longer scanning */
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+
+ IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
+ (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+ jiffies_to_msecs(jiffies - priv->scan_start));
+
+ queue_work(priv->workqueue, &priv->scan_completed);
+}
+
+void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
+{
+ /* scan handlers */
+ priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
+ priv->rx_handlers[SCAN_START_NOTIFICATION] =
+ iwl_legacy_rx_scan_start_notif;
+ priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
+ iwl_legacy_rx_scan_results_notif;
+ priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
+ iwl_legacy_rx_scan_complete_notif;
+}
+EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
+
+inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ u8 n_probes)
+{
+ if (band == IEEE80211_BAND_5GHZ)
+ return IWL_ACTIVE_DWELL_TIME_52 +
+ IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
+ else
+ return IWL_ACTIVE_DWELL_TIME_24 +
+ IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
+}
+EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
+
+u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_rxon_context *ctx;
+ u16 passive = (band == IEEE80211_BAND_2GHZ) ?
+ IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
+ IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
+
+ if (iwl_legacy_is_any_associated(priv)) {
+ /*
+ * If we're associated, we clamp the maximum passive
+ * dwell time to be 98% of the smallest beacon interval
+ * (minus 2 * channel tune time)
+ */
+ for_each_context(priv, ctx) {
+ u16 value;
+
+ if (!iwl_legacy_is_associated_ctx(ctx))
+ continue;
+ value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
+ if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
+ value = IWL_PASSIVE_DWELL_BASE;
+ value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+ passive = min(value, passive);
+ }
+ }
+
+ return passive;
+}
+EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
+
+void iwl_legacy_init_scan_params(struct iwl_priv *priv)
+{
+ u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
+ if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
+ priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
+ if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
+ priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+}
+EXPORT_SYMBOL(iwl_legacy_init_scan_params);
+
+static int __must_check iwl_legacy_scan_initiate(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ bool internal,
+ enum ieee80211_band band)
+{
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (WARN_ON(!priv->cfg->ops->utils->request_scan))
+ return -EOPNOTSUPP;
+
+ cancel_delayed_work(&priv->scan_check);
+
+ if (!iwl_legacy_is_ready_rf(priv)) {
+ IWL_WARN(priv, "Request scan called when driver not ready.\n");
+ return -EIO;
+ }
+
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ IWL_DEBUG_SCAN(priv,
+ "Multiple concurrent scan requests in parallel.\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
+ return -EBUSY;
+ }
+
+ IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
+ internal ? "internal short " : "");
+
+ set_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = internal;
+ priv->scan_start = jiffies;
+ priv->scan_band = band;
+
+ ret = priv->cfg->ops->utils->request_scan(priv, vif);
+ if (ret) {
+ clear_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = false;
+ return ret;
+ }
+
+ queue_delayed_work(priv->workqueue, &priv->scan_check,
+ IWL_SCAN_CHECK_WATCHDOG);
+
+ return 0;
+}
+
+int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (req->n_channels == 0)
+ return -EINVAL;
+
+ mutex_lock(&priv->mutex);
+
+ if (test_bit(STATUS_SCANNING, &priv->status) &&
+ !priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ /* mac80211 will only ask for one band at a time */
+ priv->scan_request = req;
+ priv->scan_vif = vif;
+
+ /*
+ * If an internal scan is in progress, just set
+ * up the scan_request as per above.
+ */
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
+ ret = 0;
+ } else
+ ret = iwl_legacy_scan_initiate(priv, vif, false,
+ req->channels[0]->band);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+out_unlock:
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
+
+/*
+ * Internal short scan; this function should only be called while associated.
+ * It will reset and tune the radio to prevent possible RF-related problems.
+ */
+void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv)
+{
+ queue_work(priv->workqueue, &priv->start_internal_scan);
+}
+
+static void iwl_legacy_bg_start_internal_scan(struct work_struct *work)
+{
+ struct iwl_priv *priv =
+ container_of(work, struct iwl_priv, start_internal_scan);
+
+ IWL_DEBUG_SCAN(priv, "Start internal scan\n");
+
+ mutex_lock(&priv->mutex);
+
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
+ goto unlock;
+ }
+
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+ goto unlock;
+ }
+
+ if (iwl_legacy_scan_initiate(priv, NULL, true, priv->band))
+ IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
+ unlock:
+ mutex_unlock(&priv->mutex);
+}
+
+static void iwl_legacy_bg_scan_check(struct work_struct *data)
+{
+ struct iwl_priv *priv =
+ container_of(data, struct iwl_priv, scan_check.work);
+
+ IWL_DEBUG_SCAN(priv, "Scan check work\n");
+
+ /* Since we got here, the firmware did not finish the scan and
+ * is most likely in bad shape, so we don't bother to send an
+ * abort command; just force scan complete to mac80211 */
+ mutex_lock(&priv->mutex);
+ iwl_legacy_force_scan_end(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+/**
+ * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
+ */
+
+u16
+iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
+ const u8 *ta, const u8 *ies, int ie_len, int left)
+{
+ int len = 0;
+ u8 *pos = NULL;
+
+ /* Make sure there is enough space for the probe request,
+ * two mandatory IEs and the data */
+ left -= 24;
+ if (left < 0)
+ return 0;
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
+ memcpy(frame->sa, ta, ETH_ALEN);
+ memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
+ frame->seq_ctrl = 0;
+
+ len += 24;
+
+ /* ...next IE... */
+ pos = &frame->u.probe_req.variable[0];
+
+	/* fill in our "indirect" SSID IE (zero length, i.e. wildcard) */
+ left -= 2;
+ if (left < 0)
+ return 0;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = 0;
+
+ len += 2;
+
+ if (WARN_ON(left < ie_len))
+ return len;
+
+ if (ies && ie_len) {
+ memcpy(pos, ies, ie_len);
+ len += ie_len;
+ }
+
+ return (u16)len;
+}
+EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
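
As a rough illustration of the length budgeting iwl_legacy_fill_probe_req performs above (24-byte management header, then a zero-length wildcard SSID IE, then any caller-supplied IEs that still fit), here is a minimal standalone sketch. It is not part of the patch; the buffer handling is simplified and the header bytes are left for the caller to fill.

#include <stdint.h>
#include <string.h>

/* Sketch only: same accounting as iwl_legacy_fill_probe_req, simplified. */
static size_t fill_probe_req_sketch(uint8_t *buf, size_t left,
				    const uint8_t *ies, size_t ie_len)
{
	size_t len = 0;

	if (left < 24)			/* room for the management header? */
		return 0;
	memset(buf, 0, 24);		/* addresses/frame_control set by caller */
	len += 24;
	left -= 24;

	if (left < 2)			/* room for the wildcard SSID IE? */
		return 0;
	buf[len++] = 0;			/* WLAN_EID_SSID */
	buf[len++] = 0;			/* zero length => wildcard SSID */
	left -= 2;

	if (ies && ie_len <= left) {	/* append caller IEs if they fit */
		memcpy(buf + len, ies, ie_len);
		len += ie_len;
	}
	return len;
}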
+
+static void iwl_legacy_bg_abort_scan(struct work_struct *work)
+{
+ struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
+
+ IWL_DEBUG_SCAN(priv, "Abort scan work\n");
+
+	/* We keep the scan_check work queued in case the firmware does not
+	 * report back the scan completed notification */
+ mutex_lock(&priv->mutex);
+ iwl_legacy_scan_cancel_timeout(priv, 200);
+ mutex_unlock(&priv->mutex);
+}
+
+static void iwl_legacy_bg_scan_completed(struct work_struct *work)
+{
+ struct iwl_priv *priv =
+ container_of(work, struct iwl_priv, scan_completed);
+ bool aborted;
+
+ IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
+ priv->is_internal_short_scan ? "internal short " : "");
+
+ cancel_delayed_work(&priv->scan_check);
+
+ mutex_lock(&priv->mutex);
+
+ aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+ if (aborted)
+ IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
+
+ if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
+ goto out_settings;
+ }
+
+ if (priv->is_internal_short_scan && !aborted) {
+ int err;
+
+ /* Check if mac80211 requested scan during our internal scan */
+ if (priv->scan_request == NULL)
+ goto out_complete;
+
+ /* If so request a new scan */
+ err = iwl_legacy_scan_initiate(priv, priv->scan_vif, false,
+ priv->scan_request->channels[0]->band);
+ if (err) {
+ IWL_DEBUG_SCAN(priv,
+ "failed to initiate pending scan: %d\n", err);
+ aborted = true;
+ goto out_complete;
+ }
+
+ goto out;
+ }
+
+out_complete:
+ iwl_legacy_complete_scan(priv, aborted);
+
+out_settings:
+ /* Can we still talk to firmware ? */
+ if (!iwl_legacy_is_ready_rf(priv))
+ goto out;
+
+ /*
+ * We do not commit power settings while scan is pending,
+ * do it now if the settings changed.
+ */
+ iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next,
+ false);
+ iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
+
+ priv->cfg->ops->utils->post_scan(priv);
+
+out:
+ mutex_unlock(&priv->mutex);
+}
+
+void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
+{
+ INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
+ INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
+ INIT_WORK(&priv->start_internal_scan,
+ iwl_legacy_bg_start_internal_scan);
+ INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
+}
+EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
+
+void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
+{
+ cancel_work_sync(&priv->start_internal_scan);
+ cancel_work_sync(&priv->abort_scan);
+ cancel_work_sync(&priv->scan_completed);
+
+ if (cancel_delayed_work_sync(&priv->scan_check)) {
+ mutex_lock(&priv->mutex);
+ iwl_legacy_force_scan_end(priv);
+ mutex_unlock(&priv->mutex);
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
new file mode 100644
index 000000000000..9f70a4723103
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -0,0 +1,92 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_legacy_spectrum_h__
+#define __iwl_legacy_spectrum_h__
+enum { /* ieee80211_basic_report.map */
+ IEEE80211_BASIC_MAP_BSS = (1 << 0),
+ IEEE80211_BASIC_MAP_OFDM = (1 << 1),
+ IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
+ IEEE80211_BASIC_MAP_RADAR = (1 << 3),
+ IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
+ /* Bits 5-7 are reserved */
+
+};
+struct ieee80211_basic_report {
+ u8 channel;
+ __le64 start_time;
+ __le16 duration;
+ u8 map;
+} __packed;
+
+enum { /* ieee80211_measurement_request.mode */
+ /* Bit 0 is reserved */
+ IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
+ IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
+ IEEE80211_MEASUREMENT_REPORT = (1 << 3),
+ /* Bits 4-7 are reserved */
+};
+
+enum {
+ IEEE80211_REPORT_BASIC = 0, /* required */
+ IEEE80211_REPORT_CCA = 1, /* optional */
+ IEEE80211_REPORT_RPI = 2, /* optional */
+ /* 3-255 reserved */
+};
+
+struct ieee80211_measurement_params {
+ u8 channel;
+ __le64 start_time;
+ __le16 duration;
+} __packed;
+
+struct ieee80211_info_element {
+ u8 id;
+ u8 len;
+ u8 data[0];
+} __packed;
+
+struct ieee80211_measurement_request {
+ struct ieee80211_info_element ie;
+ u8 token;
+ u8 mode;
+ u8 type;
+ struct ieee80211_measurement_params params[0];
+} __packed;
+
+struct ieee80211_measurement_report {
+ struct ieee80211_info_element ie;
+ u8 token;
+ u8 mode;
+ u8 type;
+ union {
+ struct ieee80211_basic_report basic[0];
+ } u;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
new file mode 100644
index 000000000000..47c9da3834ea
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.c
@@ -0,0 +1,816 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/lockdep.h>
+
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+
+/* priv->sta_lock must be held */
+static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
+{
+
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
+ IWL_ERR(priv,
+ "ACTIVATE a non DRIVER active station id %u addr %pM\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_ASSOC(priv,
+ "STA id %u addr %pM already present"
+ " in uCode (according to driver)\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ } else {
+ priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ }
+}
+
+static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
+ struct iwl_legacy_addsta_cmd *addsta,
+ struct iwl_rx_packet *pkt,
+ bool sync)
+{
+ u8 sta_id = addsta->sta.sta_id;
+ unsigned long flags;
+ int ret = -EIO;
+
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
+ pkt->hdr.flags);
+ return ret;
+ }
+
+ IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
+ sta_id);
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
+ switch (pkt->u.add_sta.status) {
+ case ADD_STA_SUCCESS_MSK:
+ IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
+ iwl_legacy_sta_ucode_activate(priv, sta_id);
+ ret = 0;
+ break;
+ case ADD_STA_NO_ROOM_IN_TABLE:
+ IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
+ sta_id);
+ break;
+ case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+ IWL_ERR(priv,
+ "Adding station %d failed, no block ack resource.\n",
+ sta_id);
+ break;
+ case ADD_STA_MODIFY_NON_EXIST_STA:
+ IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
+ sta_id);
+ break;
+ default:
+ IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+ pkt->u.add_sta.status);
+ break;
+ }
+
+ IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ /*
+ * XXX: The MAC address in the command buffer is often changed from
+ * the original sent to the device. That is, the MAC address
+	 * written to the command buffer often is not the same MAC address
+ * read from the command buffer when the command returns. This
+ * issue has not yet been resolved and this debugging is left to
+ * observe the problem.
+ */
+ IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ addsta->sta.addr);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return ret;
+}
+
+static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_legacy_addsta_cmd *addsta =
+ (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
+
+ iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
+
+}
+
+int iwl_legacy_send_add_sta(struct iwl_priv *priv,
+ struct iwl_legacy_addsta_cmd *sta, u8 flags)
+{
+ struct iwl_rx_packet *pkt = NULL;
+ int ret = 0;
+ u8 data[sizeof(*sta)];
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_ADD_STA,
+ .flags = flags,
+ .data = data,
+ };
+ u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+ IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
+ sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
+
+ if (flags & CMD_ASYNC)
+ cmd.callback = iwl_legacy_add_sta_callback;
+ else {
+ cmd.flags |= CMD_WANT_SKB;
+ might_sleep();
+ }
+
+ cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
+ ret = iwl_legacy_send_cmd(priv, &cmd);
+
+ if (ret || (flags & CMD_ASYNC))
+ return ret;
+
+ if (ret == 0) {
+ pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
+ }
+ iwl_legacy_free_pages(priv, cmd.reply_page);
+
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_send_add_sta);
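
The sync/async split above (CMD_ASYNC gets a completion callback, while a synchronous command sets CMD_WANT_SKB and may sleep waiting for the reply) is a recurring pattern in this driver. A minimal standalone sketch of that dispatch, using simplified names that are not part of the driver API:

#include <stdbool.h>
#include <stddef.h>

/* Sketch only: simplified stand-ins for the driver's host command flags. */
enum { CMDF_ASYNC = 1 << 0, CMDF_WANT_REPLY = 1 << 1 };

struct host_cmd_sketch {
	unsigned flags;
	void (*callback)(void *reply);	/* used only for async commands */
};

static void prepare_cmd(struct host_cmd_sketch *cmd, bool async,
			void (*cb)(void *reply))
{
	if (async) {
		cmd->flags |= CMDF_ASYNC;
		cmd->callback = cb;		/* reply handled from the RX path */
	} else {
		cmd->flags |= CMDF_WANT_REPLY;	/* caller blocks and parses the reply */
		cmd->callback = NULL;
	}
}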
+
+static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
+ struct ieee80211_sta *sta,
+ struct iwl_rxon_context *ctx)
+{
+ struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+ __le32 sta_flags;
+ u8 mimo_ps_mode;
+
+ if (!sta || !sta_ht_inf->ht_supported)
+ goto done;
+
+ mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+ IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
+ "static" :
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
+ "dynamic" : "disabled");
+
+ sta_flags = priv->stations[index].sta.station_flags;
+
+ sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+ switch (mimo_ps_mode) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ sta_flags |= STA_FLG_MIMO_DIS_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ break;
+ default:
+ IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ break;
+ }
+
+ sta_flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+ sta_flags |= cpu_to_le32(
+ (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+ if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+ sta_flags |= STA_FLG_HT40_EN_MSK;
+ else
+ sta_flags &= ~STA_FLG_HT40_EN_MSK;
+
+ priv->stations[index].sta.station_flags = sta_flags;
+ done:
+ return;
+}
+
+/**
+ * iwl_legacy_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
+ */
+u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+ struct iwl_station_entry *station;
+ int i;
+ u8 sta_id = IWL_INVALID_STATION;
+ u16 rate;
+
+ if (is_ap)
+ sta_id = ctx->ap_sta_id;
+ else if (is_broadcast_ether_addr(addr))
+ sta_id = ctx->bcast_sta_id;
+ else
+ for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
+ if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
+ addr)) {
+ sta_id = i;
+ break;
+ }
+
+ if (!priv->stations[i].used &&
+ sta_id == IWL_INVALID_STATION)
+ sta_id = i;
+ }
+
+ /*
+ * These two conditions have the same outcome, but keep them
+ * separate
+ */
+ if (unlikely(sta_id == IWL_INVALID_STATION))
+ return sta_id;
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+	 * station. Keep track of whether one is in progress so that we do
+	 * not send another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv,
+ "STA %d already in process of being added.\n",
+ sta_id);
+ return sta_id;
+ }
+
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
+ !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
+ IWL_DEBUG_ASSOC(priv,
+ "STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ return sta_id;
+ }
+
+ station = &priv->stations[sta_id];
+ station->used = IWL_STA_DRIVER_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
+ sta_id, addr);
+ priv->num_stations++;
+
+ /* Set up the REPLY_ADD_STA command to send to device */
+ memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
+ memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+ station->sta.mode = 0;
+ station->sta.sta.sta_id = sta_id;
+ station->sta.station_flags = ctx->station_flags;
+ station->ctxid = ctx->ctxid;
+
+ if (sta) {
+ struct iwl_station_priv_common *sta_priv;
+
+ sta_priv = (void *)sta->drv_priv;
+ sta_priv->ctx = ctx;
+ }
+
+ /*
+ * OK to call unconditionally, since local stations (IBSS BSSID
+ * STA and broadcast STA) pass in a NULL sta, and mac80211
+ * doesn't allow HT IBSS.
+ */
+ iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);
+
+ /* 3945 only */
+ rate = (priv->band == IEEE80211_BAND_5GHZ) ?
+ IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
+ /* Turn on both antennas for the station... */
+ station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+
+ return sta_id;
+
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
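
The slot selection in iwl_legacy_prep_station above (reuse an existing entry whose MAC address matches, otherwise take the first unused slot, otherwise report the table as full) can be summarized with this standalone sketch; the table layout and constants are simplified assumptions, not driver structures.

#include <stdbool.h>
#include <string.h>

#define SKETCH_MAX_STA	16
#define SKETCH_INVALID	0xff

struct sketch_sta {
	bool used;
	unsigned char addr[6];
};

/* Return the matching slot if the address is already known,
 * otherwise the first free slot, otherwise SKETCH_INVALID. */
static unsigned char sketch_find_slot(const struct sketch_sta *tbl,
				      const unsigned char *addr)
{
	unsigned char free_id = SKETCH_INVALID;
	int i;

	for (i = 0; i < SKETCH_MAX_STA; i++) {
		if (tbl[i].used && !memcmp(tbl[i].addr, addr, 6))
			return i;
		if (!tbl[i].used && free_id == SKETCH_INVALID)
			free_id = i;
	}
	return free_id;
}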
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * iwl_legacy_add_station_common - add a station to the driver's and the device's station tables
+ */
+int
+iwl_legacy_add_station_common(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r)
+{
+ unsigned long flags_spin;
+ int ret = 0;
+ u8 sta_id;
+ struct iwl_legacy_addsta_cmd sta_cmd;
+
+ *sta_id_r = 0;
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
+ addr);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+	 * station. Keep track of whether one is in progress so that we do
+	 * not send another.
+ */
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+ IWL_DEBUG_INFO(priv,
+ "STA %d already in process of being added.\n",
+ sta_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+ (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv,
+ "STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+ /* Add station to device's station table */
+ ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ priv->stations[sta_id].sta.sta.addr);
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
+ *sta_id_r = sta_id;
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_add_station_common);
+
+/**
+ * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * priv->sta_lock must be held
+ */
+static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
+{
+	/* The uCode entry must be active while the driver entry is inactive */
+ if ((priv->stations[sta_id].used &
+ (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
+ IWL_STA_UCODE_ACTIVE)
+ IWL_ERR(priv, "removed non active STA %u\n", sta_id);
+
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
+
+ memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
+ IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
+}
+
+static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
+ const u8 *addr, int sta_id,
+ bool temporary)
+{
+ struct iwl_rx_packet *pkt;
+ int ret;
+
+ unsigned long flags_spin;
+ struct iwl_rem_sta_cmd rm_sta_cmd;
+
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_REMOVE_STA,
+ .len = sizeof(struct iwl_rem_sta_cmd),
+ .flags = CMD_SYNC,
+ .data = &rm_sta_cmd,
+ };
+
+ memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+ rm_sta_cmd.num_sta = 1;
+ memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+ cmd.flags |= CMD_WANT_SKB;
+
+ ret = iwl_legacy_send_cmd(priv, &cmd);
+
+ if (ret)
+ return ret;
+
+ pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
+ pkt->hdr.flags);
+ ret = -EIO;
+ }
+
+ if (!ret) {
+ switch (pkt->u.rem_sta.status) {
+ case REM_STA_SUCCESS_MSK:
+ if (!temporary) {
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ iwl_legacy_sta_ucode_deactivate(priv, sta_id);
+ spin_unlock_irqrestore(&priv->sta_lock,
+ flags_spin);
+ }
+ IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+ break;
+ }
+ }
+ iwl_legacy_free_pages(priv, cmd.reply_page);
+
+ return ret;
+}
+
+/**
+ * iwl_legacy_remove_station - Remove driver's knowledge of station.
+ */
+int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
+ const u8 *addr)
+{
+ unsigned long flags;
+
+ if (!iwl_legacy_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv,
+ "Unable to remove station %pM, device not ready.\n",
+ addr);
+ /*
+ * It is typical for stations to be removed when we are
+ * going down. Return success since device will be down
+ * soon anyway
+ */
+ return 0;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
+ sta_id, addr);
+
+ if (WARN_ON(sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
+ addr);
+ goto out_err;
+ }
+
+ if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
+ addr);
+ goto out_err;
+ }
+
+ if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
+ kfree(priv->stations[sta_id].lq);
+ priv->stations[sta_id].lq = NULL;
+ }
+
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+
+ priv->num_stations--;
+
+ BUG_ON(priv->num_stations < 0);
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
+out_err:
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
+
+/**
+ * iwl_legacy_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ int i;
+ unsigned long flags_spin;
+ bool cleared = false;
+
+ IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
+
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if (ctx && ctx->ctxid != priv->stations[i].ctxid)
+ continue;
+
+ if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_INFO(priv,
+ "Clearing ucode active for station %d\n", i);
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ cleared = true;
+ }
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+ if (!cleared)
+ IWL_DEBUG_INFO(priv,
+ "No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
+
+/**
+ * iwl_legacy_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in the
+ * uCode, are restored.
+ *
+ * Function sleeps.
+ */
+void
+iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ struct iwl_legacy_addsta_cmd sta_cmd;
+ struct iwl_link_quality_cmd lq;
+ unsigned long flags_spin;
+ int i;
+ bool found = false;
+ int ret;
+ bool send_lq;
+
+ if (!iwl_legacy_is_ready(priv)) {
+ IWL_DEBUG_INFO(priv,
+ "Not ready yet, not restoring any stations.\n");
+ return;
+ }
+
+ IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if (ctx->ctxid != priv->stations[i].ctxid)
+ continue;
+ if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
+ !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
+ IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
+ priv->stations[i].sta.sta.addr);
+ priv->stations[i].sta.mode = 0;
+ priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
+ found = true;
+ }
+ }
+
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
+ memcpy(&sta_cmd, &priv->stations[i].sta,
+ sizeof(struct iwl_legacy_addsta_cmd));
+ send_lq = false;
+ if (priv->stations[i].lq) {
+ memcpy(&lq, priv->stations[i].lq,
+ sizeof(struct iwl_link_quality_cmd));
+ send_lq = true;
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ IWL_ERR(priv, "Adding station %pM failed.\n",
+ priv->stations[i].sta.sta.addr);
+ priv->stations[i].used &=
+ ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[i].used &=
+ ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock,
+ flags_spin);
+ }
+ /*
+ * Rate scaling has already been initialized, send
+ * current LQ command
+ */
+ if (send_lq)
+ iwl_legacy_send_lq_cmd(priv, ctx, &lq,
+ CMD_SYNC, true);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ if (!found)
+ IWL_DEBUG_INFO(priv, "Restoring all known stations"
+ " .... no stations to be restored.\n");
+ else
+ IWL_DEBUG_INFO(priv, "Restoring all known stations"
+ " .... complete.\n");
+}
+EXPORT_SYMBOL(iwl_legacy_restore_stations);
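
The locking in iwl_legacy_restore_stations above follows a pattern used throughout this file: snapshot the command under the spinlock, drop the lock around the sleeping synchronous command, then retake it to update the bookkeeping flags. Below is a userspace analogue, with a pthread mutex standing in for sta_lock, purely as an illustration of the pattern.

#include <pthread.h>
#include <string.h>

struct sketch_entry {
	int in_progress;
	char cmd[32];
};

static pthread_mutex_t sketch_lock = PTHREAD_MUTEX_INITIALIZER;

static int sketch_send_sync(const char *cmd)	/* may block, like CMD_SYNC */
{
	(void)cmd;
	return 0;
}

static void sketch_restore_one(struct sketch_entry *e)
{
	char snapshot[32];

	pthread_mutex_lock(&sketch_lock);
	e->in_progress = 1;
	memcpy(snapshot, e->cmd, sizeof(snapshot));	/* copy under the lock */
	pthread_mutex_unlock(&sketch_lock);

	sketch_send_sync(snapshot);			/* sleeping call, lock dropped */

	pthread_mutex_lock(&sketch_lock);
	e->in_progress = 0;				/* clear the flag afterwards */
	pthread_mutex_unlock(&sketch_lock);
}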
+
+int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->sta_key_max_num; i++)
+ if (!test_and_set_bit(i, &priv->ucode_key_table))
+ return i;
+
+ return WEP_INVALID_OFFSET;
+}
+EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
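
iwl_legacy_get_free_ucode_key_index above is a find-first-clear-bit allocation over the ucode_key_table bitmap (the kernel's test_and_set_bit does this atomically). A non-atomic standalone sketch of the same idea, with made-up constants:

#define SKETCH_KEY_SLOTS	16
#define SKETCH_NO_FREE_SLOT	(-1)

/* Claim and return the first clear bit, or SKETCH_NO_FREE_SLOT if the
 * bitmap is full. Unlike test_and_set_bit(), this is not atomic. */
static int sketch_alloc_key_slot(unsigned long *bitmap)
{
	int i;

	for (i = 0; i < SKETCH_KEY_SLOTS; i++) {
		if (!(*bitmap & (1UL << i))) {
			*bitmap |= 1UL << i;
			return i;
		}
	}
	return SKETCH_NO_FREE_SLOT;
}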
+
+void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ for (i = 0; i < priv->hw_params.max_stations; i++) {
+ if (!(priv->stations[i].used & IWL_STA_BCAST))
+ continue;
+
+ priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+ priv->num_stations--;
+ BUG_ON(priv->num_stations < 0);
+ kfree(priv->stations[i].lq);
+ priv->stations[i].lq = NULL;
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq)
+{
+ int i;
+ IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
+ IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
+ lq->general_params.single_stream_ant_msk,
+ lq->general_params.dual_stream_ant_msk);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
+ i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
+ struct iwl_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that, when an HT rate has been in use and we
+ * lose connectivity with the AP, mac80211 will first tell us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are no longer
+ * communicating over HT, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending an LQ command between
+ * the time the RXON flags are updated and the time the LQ command is updated.
+ */
+static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq)
+{
+ int i;
+
+ if (ctx->ht.enabled)
+ return true;
+
+ IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
+ ctx->active.channel);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
+ RATE_MCS_HT_MSK) {
+ IWL_DEBUG_INFO(priv,
+ "index %d of LQ expects HT channel\n",
+ i);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * iwl_legacy_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ * after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * This is the special case in which init is set: once the command has
+ * completed, we clear the state indicating that station creation is in
+ * progress.
+ */
+int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq, u8 flags, bool init)
+{
+ int ret = 0;
+ unsigned long flags_spin;
+
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_TX_LINK_QUALITY_CMD,
+ .len = sizeof(struct iwl_link_quality_cmd),
+ .flags = flags,
+ .data = lq,
+ };
+
+ if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+ return -EINVAL;
+
+
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
+ iwl_legacy_dump_lq_cmd(priv, lq);
+ BUG_ON(init && (cmd.flags & CMD_ASYNC));
+
+ if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
+ ret = iwl_legacy_send_cmd(priv, &cmd);
+ else
+ ret = -EINVAL;
+
+ if (cmd.flags & CMD_ASYNC)
+ return ret;
+
+ if (init) {
+ IWL_DEBUG_INFO(priv, "init LQ command complete,"
+ " clearing sta addition status for sta %d\n",
+ lq->sta_id);
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
+
+int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
+ int ret;
+
+ IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
+ sta->addr);
+ mutex_lock(&priv->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+ sta->addr);
+ ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
+ if (ret)
+ IWL_ERR(priv, "Error removing station %pM\n",
+ sta->addr);
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
new file mode 100644
index 000000000000..67bd75fe01a1
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.h
@@ -0,0 +1,148 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __iwl_legacy_sta_h__
+#define __iwl_legacy_sta_h__
+
+#include "iwl-dev.h"
+
+#define HW_KEY_DYNAMIC 0
+#define HW_KEY_DEFAULT 1
+
+#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
+#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
+ being activated */
+#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+ (this is for the IBSS BSSID stations) */
+#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
+
+void iwl_legacy_restore_stations(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
+void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
+int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
+int iwl_legacy_send_add_sta(struct iwl_priv *priv,
+ struct iwl_legacy_addsta_cmd *sta, u8 flags);
+int iwl_legacy_add_station_common(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r);
+int iwl_legacy_remove_station(struct iwl_priv *priv,
+ const u8 sta_id,
+ const u8 *addr);
+int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+u8 iwl_legacy_prep_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta);
+
+int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct iwl_link_quality_cmd *lq,
+ u8 flags, bool init);
+
+/**
+ * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
+ * @priv: iwl priv struct
+ *
+ * This is called during iwl_down() to make sure that, when we get
+ * there via a hardware restart, mac80211 will be able to reconfigure
+ * stations -- in the normal down flow the stations will already have
+ * been cleared.
+ */
+static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ struct iwl_rxon_context *ctx;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ memset(priv->stations, 0, sizeof(priv->stations));
+ priv->num_stations = 0;
+
+ priv->ucode_key_table = 0;
+
+ for_each_context(priv, ctx) {
+ /*
+ * Remove all key information that is not stored as part
+ * of station information since mac80211 may not have had
+ * a chance to remove all the keys. When device is
+ * reconfigured by mac80211 after an error all keys will
+ * be reconfigured.
+ */
+ memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+ ctx->key_mapping_keys = 0;
+ }
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
+{
+ if (WARN_ON(!sta))
+ return IWL_INVALID_STATION;
+
+ return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
+}
+
+/**
+ * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @priv: iwl priv
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
+ struct iwl_rxon_context *context,
+ struct ieee80211_sta *sta)
+{
+ int sta_id;
+
+ if (!sta)
+ return context->bcast_sta_id;
+
+ sta_id = iwl_legacy_sta_id(sta);
+
+ /*
+ * mac80211 should not be passing a partially
+ * initialised station!
+ */
+ WARN_ON(sta_id == IWL_INVALID_STATION);
+
+ return sta_id;
+}
+#endif /* __iwl_legacy_sta_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
new file mode 100644
index 000000000000..a227773cb384
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
@@ -0,0 +1,660 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-sta.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+
+/**
+ * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
+ */
+void
+iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+ u32 reg = 0;
+ int txq_id = txq->q.id;
+
+ if (txq->need_update == 0)
+ return;
+
+ /* if we're trying to save power */
+ if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ /* wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part. */
+ reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(priv,
+ "Tx queue %d requesting wakeup,"
+ " GP1 = 0x%x\n", txq_id, reg);
+ iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ return;
+ }
+
+ iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
+ txq->q.write_ptr | (txq_id << 8));
+
+ /*
+ * else not in power-save mode,
+ * uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ } else
+ iwl_write32(priv, HBUS_TARG_WRPTR,
+ txq->q.write_ptr | (txq_id << 8));
+ txq->need_update = 0;
+}
+EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
+
+/**
+ * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->write_ptr != q->read_ptr) {
+ priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+ q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
+
+/**
+ * iwl_legacy_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct device *dev = &priv->pci_dev->dev;
+ int i;
+
+ iwl_legacy_tx_queue_unmap(priv, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd)
+ dma_free_coherent(dev, priv->hw_params.tfd_size *
+ txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+
+ /* De-alloc array of per-TFD driver data */
+ kfree(txq->txb);
+ txq->txb = NULL;
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
+
+/**
+ * iwl_legacy_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
+ */
+void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
+{
+ struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+ struct iwl_queue *q = &txq->q;
+ bool huge = false;
+ int i;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->read_ptr != q->write_ptr) {
+ /* we have no way to tell if it is a huge cmd ATM */
+ i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
+
+ if (txq->meta[i].flags & CMD_SIZE_HUGE)
+ huge = true;
+ else
+ pci_unmap_single(priv->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+
+ q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+
+ if (huge) {
+ i = q->n_window;
+ pci_unmap_single(priv->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ }
+}
+EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
+
+/**
+ * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
+{
+ struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+ struct device *dev = &priv->pci_dev->dev;
+ int i;
+
+ iwl_legacy_cmd_queue_unmap(priv);
+
+ /* De-alloc array of command/tx buffers */
+ for (i = 0; i <= TFD_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd)
+ dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
+ txq->tfds, txq->q.dma_addr);
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill. Driver and device exchange status of each
+ * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
+ *
+ * For the Tx queue, there are low mark and high mark limits. If, after
+ * queuing the packet for Tx, free space becomes < low mark, the Tx queue
+ * is stopped. When reclaiming packets (on the 'tx done' IRQ), if free
+ * space becomes > high mark, the Tx queue is resumed.
+ *
+ * See more detailed info in iwl-4965-hw.h.
+ ***************************************************/
+
+int iwl_legacy_queue_space(const struct iwl_queue *q)
+{
+ int s = q->read_ptr - q->write_ptr;
+
+ if (q->read_ptr > q->write_ptr)
+ s -= q->n_bd;
+
+ if (s <= 0)
+ s += q->n_window;
+ /* keep some reserve to not confuse empty and full situations */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+EXPORT_SYMBOL(iwl_legacy_queue_space);
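
To make the "Theory of operation" arithmetic above concrete: with, say, n_bd = 256 descriptors, n_window = 64 usable slots and read_ptr == write_ptr (an empty queue), iwl_legacy_queue_space reports 62 free slots because of the 2-entry reserve that keeps "empty" distinguishable from "full". A standalone restatement of that computation, for illustration only:

/* Free slots in a ring of n_bd descriptors with n_window usable slots,
 * keeping a 2-entry reserve so that an empty ring never looks full. */
static int sketch_ring_space(int read_ptr, int write_ptr,
			     int n_bd, int n_window)
{
	int s = read_ptr - write_ptr;

	if (read_ptr > write_ptr)
		s -= n_bd;		/* unwrap across the ring boundary */
	if (s <= 0)
		s += n_window;
	s -= 2;				/* reserve */
	return s < 0 ? 0 : s;
}

/* e.g. sketch_ring_space(10, 10, 256, 64) == 62 */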
+
+
+/**
+ * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
+ int count, int slots_num, u32 id)
+{
+ q->n_bd = count;
+ q->n_window = slots_num;
+ q->id = id;
+
+ /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
+ * and iwl_legacy_queue_dec_wrap are broken. */
+ BUG_ON(!is_power_of_2(count));
+
+ /* slots_num must be power-of-two size, otherwise
+ * iwl_legacy_get_cmd_index is broken. */
+ BUG_ON(!is_power_of_2(slots_num));
+
+ q->low_mark = q->n_window / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_window / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = q->read_ptr = 0;
+
+ return 0;
+}
+
+/**
+ * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
+ */
+static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq, u32 id)
+{
+ struct device *dev = &priv->pci_dev->dev;
+ size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+
+ /* Driver private data, only for Tx (not command) queues,
+ * not shared with device. */
+ if (id != priv->cmd_queue) {
+ txq->txb = kzalloc(sizeof(txq->txb[0]) *
+ TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
+ if (!txq->txb) {
+ IWL_ERR(priv, "kmalloc for auxiliary BD "
+ "structures failed\n");
+ goto error;
+ }
+ } else {
+ txq->txb = NULL;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
+ GFP_KERNEL);
+ if (!txq->tfds) {
+ IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
+ goto error;
+ }
+ txq->q.id = id;
+
+ return 0;
+
+ error:
+ kfree(txq->txb);
+ txq->txb = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
+ */
+int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id)
+{
+ int i, len;
+ int ret;
+ int actual_slots = slots_num;
+
+ /*
+ * Alloc buffer array for commands (Tx or other types of commands).
+	 * For the command queue (#4/#9), allocate command space + one big
+	 * command for scan, since the scan command is very large; the system
+	 * will not have two scans at the same time, so only one is needed.
+ * For normal Tx queues (all other queues), no super-size command
+ * space is needed.
+ */
+ if (txq_id == priv->cmd_queue)
+ actual_slots++;
+
+ txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
+ GFP_KERNEL);
+ txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
+ GFP_KERNEL);
+
+ if (!txq->meta || !txq->cmd)
+ goto out_free_arrays;
+
+ len = sizeof(struct iwl_device_cmd);
+ for (i = 0; i < actual_slots; i++) {
+ /* only happens for cmd queue */
+ if (i == slots_num)
+ len = IWL_MAX_CMD_SIZE;
+
+ txq->cmd[i] = kmalloc(len, GFP_KERNEL);
+ if (!txq->cmd[i])
+ goto err;
+ }
+
+ /* Alloc driver data array and TFD circular buffer */
+ ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
+ if (ret)
+ goto err;
+
+ txq->need_update = 0;
+
+ /*
+ * For the default queues 0-3, set up the swq_id
+ * already -- all others need to get one later
+ * (if they need one at all).
+ */
+ if (txq_id < 4)
+ iwl_legacy_set_swq_id(txq, txq_id, txq_id);
+
+ /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ iwl_legacy_queue_init(priv, &txq->q,
+ TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ priv->cfg->ops->lib->txq_init(priv, txq);
+
+ return 0;
+err:
+ for (i = 0; i < actual_slots; i++)
+ kfree(txq->cmd[i]);
+out_free_arrays:
+ kfree(txq->meta);
+ kfree(txq->cmd);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
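
The "actual_slots = slots_num + 1" handling above gives the command queue one oversized slot, reused for the single huge scan command. Elsewhere the driver maps a ring position to a slot with iwl_legacy_get_cmd_index(); a standalone sketch of that mapping (the slot count is assumed to be a power of two, as enforced by iwl_legacy_queue_init):

/* Map a ring position to a command slot. Regular commands use the
 * position modulo n_window (a power of two); a huge command always
 * uses the extra slot at index n_window. */
static int sketch_cmd_index(int ring_pos, int n_window, int is_huge)
{
	return is_huge ? n_window : (ring_pos & (n_window - 1));
}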
+
+void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id)
+{
+ int actual_slots = slots_num;
+
+ if (txq_id == priv->cmd_queue)
+ actual_slots++;
+
+ memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+
+ txq->need_update = 0;
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ iwl_legacy_queue_init(priv, &txq->q,
+ TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ priv->cfg->ops->lib->txq_init(priv, txq);
+}
+EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/**
+ * iwl_legacy_enqueue_hcmd - enqueue a uCode command
+ * @priv: device private data pointer
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns < 0 values to indicate that the operation
+ * failed. On success, it returns the index of the command in the
+ * command queue.
+ */
+int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+ struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+ struct iwl_queue *q = &txq->q;
+ struct iwl_device_cmd *out_cmd;
+ struct iwl_cmd_meta *out_meta;
+ dma_addr_t phys_addr;
+ unsigned long flags;
+ int len;
+ u32 idx;
+ u16 fix_size;
+
+ cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
+ fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
+
+	/* If any of the command structures end up being larger than
+	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command, then
+	 * we will need to increase the size of the TFD entries.
+	 * Also check that the command buffer does not exceed the size
+	 * of device_cmd and max_cmd_size. */
+ BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
+ !(cmd->flags & CMD_SIZE_HUGE));
+ BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
+
+ if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
+ IWL_WARN(priv, "Not sending command - %s KILL\n",
+ iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
+ return -EIO;
+ }
+
+ if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ IWL_ERR(priv, "No space in command queue\n");
+ IWL_ERR(priv, "Restarting adapter due to queue full\n");
+ queue_work(priv->workqueue, &priv->restart);
+ return -ENOSPC;
+ }
+
+ spin_lock_irqsave(&priv->hcmd_lock, flags);
+
+	/* If this is a huge cmd, also set the huge flag on the meta.flags
+	 * of the _original_ cmd slot. This is used for DMA mapping cleanup.
+	 */
+ if (cmd->flags & CMD_SIZE_HUGE) {
+ idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
+ txq->meta[idx].flags = CMD_SIZE_HUGE;
+ }
+
+ idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+ out_cmd = txq->cmd[idx];
+ out_meta = &txq->meta[idx];
+
+ memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+ out_meta->flags = cmd->flags;
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+ if (cmd->flags & CMD_ASYNC)
+ out_meta->callback = cmd->callback;
+
+ out_cmd->hdr.cmd = cmd->id;
+ memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+
+ /* At this point, the out_cmd now has all of the incoming cmd
+ * information */
+
+ out_cmd->hdr.flags = 0;
+ out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
+ INDEX_TO_SEQ(q->write_ptr));
+ if (cmd->flags & CMD_SIZE_HUGE)
+ out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
+ len = sizeof(struct iwl_device_cmd);
+ if (idx == TFD_CMD_SLOTS)
+ len = IWL_MAX_CMD_SIZE;
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ switch (out_cmd->hdr.cmd) {
+ case REPLY_TX_LINK_QUALITY_CMD:
+ case SENSITIVITY_CMD:
+ IWL_DEBUG_HC_DUMP(priv,
+ "Sending command %s (#%x), seq: 0x%04X, "
+ "%d bytes at %d[%d]:%d\n",
+ iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
+ out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+ q->write_ptr, idx, priv->cmd_queue);
+ break;
+ default:
+ IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
+ "%d bytes at %d[%d]:%d\n",
+ iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
+ out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+ q->write_ptr, idx, priv->cmd_queue);
+ }
+#endif
+ txq->need_update = 1;
+
+ if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
+ /* Set up entry in queue's byte count circular buffer */
+ priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
+
+ phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
+ fix_size, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_addr_set(out_meta, mapping, phys_addr);
+ dma_unmap_len_set(out_meta, len, fix_size);
+
+ trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
+ fix_size, cmd->flags);
+
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ phys_addr, fix_size, 1,
+ U32_PAD(cmd->len));
+
+ /* Increment and update queue's write index */
+ q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_legacy_txq_update_write_ptr(priv, txq);
+
+ spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+ return idx;
+}
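
The sequence field written above with QUEUE_TO_SEQ() | INDEX_TO_SEQ() is what iwl_legacy_tx_cmd_complete() below unpacks with SEQ_TO_QUEUE()/SEQ_TO_INDEX() to find the originating queue and slot. A standalone sketch of that round trip; the exact bit layout used here (queue id in bits 8..12, index in bits 0..7) is an assumption for illustration only.

#include <stdint.h>

/* Sketch only: pack/unpack a command sequence number. */
static uint16_t sketch_seq(unsigned queue, unsigned index)
{
	return (uint16_t)(((queue & 0x1f) << 8) | (index & 0xff));
}

static unsigned sketch_seq_to_queue(uint16_t seq)
{
	return (seq >> 8) & 0x1f;
}

static unsigned sketch_seq_to_index(uint16_t seq)
{
	return seq & 0xff;
}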
+
+/**
+ * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When the FW advances the 'R' index, all entries between the old and new
+ * 'R' index need to be reclaimed. As a result, some free space forms. If
+ * there is enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
+ int idx, int cmd_idx)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ int nfreed = 0;
+
+ if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
+ IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id,
+ idx, q->n_bd, q->write_ptr, q->read_ptr);
+ return;
+ }
+
+ for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ if (nfreed++ > 0) {
+ IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
+ q->write_ptr, q->read_ptr);
+ queue_work(priv->workqueue, &priv->restart);
+ }
+
+ }
+}
+
+/**
+ * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ *
+ * If an Rx buffer has an async callback associated with it, the callback
+ * will be executed. The attached skb (if present) will only be freed
+ * if the callback returns 1.
+ */
+void
+iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int index = SEQ_TO_INDEX(sequence);
+ int cmd_index;
+ bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
+ struct iwl_device_cmd *cmd;
+ struct iwl_cmd_meta *meta;
+ struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+
+	/* If a Tx command is being handled and it isn't in the actual
+	 * command queue, then a command routing bug has been introduced
+	 * in the queue management code. */
+ if (WARN(txq_id != priv->cmd_queue,
+ "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+ txq_id, priv->cmd_queue, sequence,
+ priv->txq[priv->cmd_queue].q.read_ptr,
+ priv->txq[priv->cmd_queue].q.write_ptr)) {
+ iwl_print_hex_error(priv, pkt, 32);
+ return;
+ }
+
+	/* If this is a huge cmd, clear the huge flag on the meta.flags
+	 * of the _original_ cmd, so that iwl_legacy_cmd_queue_free won't unmap
+	 * the DMA buffer for the scan (huge) command.
+	 */
+ if (huge) {
+ cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, 0);
+ txq->meta[cmd_index].flags = 0;
+ }
+ cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
+ cmd = txq->cmd[cmd_index];
+ meta = &txq->meta[cmd_index];
+
+ pci_unmap_single(priv->pci_dev,
+ dma_unmap_addr(meta, mapping),
+ dma_unmap_len(meta, len),
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Input error checking is done when commands are added to queue. */
+ if (meta->flags & CMD_WANT_SKB) {
+ meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+ rxb->page = NULL;
+ } else if (meta->callback)
+ meta->callback(priv, cmd, pkt);
+
+ iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
+
+ if (!(meta->flags & CMD_ASYNC)) {
+ clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
+ iwl_legacy_get_cmd_string(cmd->hdr.cmd));
+ wake_up_interruptible(&priv->wait_command_queue);
+ }
+ meta->flags = 0;
+}
+EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index 7edf8c2fb8c7..ab87e1b73529 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -69,7 +69,7 @@
#define DRV_DESCRIPTION \
"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
#define VD "d"
#else
#define VD
@@ -81,7 +81,7 @@
* this was configurable.
*/
#define DRV_VERSION IWLWIFI_VERSION VD "s"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -163,7 +163,7 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
== STA_KEY_FLG_NO_ENC)
priv->stations[sta_id].sta.key.key_offset =
- iwl_get_free_ucode_key_index(priv);
+ iwl_legacy_get_free_ucode_key_index(priv);
 /* else, we are overriding an existing key => no need to allocate room
  * in uCode. */
@@ -176,7 +176,8 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
- ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+ ret = iwl_legacy_send_add_sta(priv,
+ &priv->stations[sta_id].sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -200,7 +201,7 @@ static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
{
unsigned long flags;
- struct iwl_addsta_cmd sta_cmd;
+ struct iwl_legacy_addsta_cmd sta_cmd;
spin_lock_irqsave(&priv->sta_lock, flags);
memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
@@ -209,11 +210,11 @@ static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
- return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
@@ -317,7 +318,7 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
int left)
{
- if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
+ if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
return 0;
if (priv->beacon_skb->len > left)
@@ -343,12 +344,12 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
return -ENOMEM;
}
- rate = iwl_rate_get_lowest_plcp(priv,
+ rate = iwl_legacy_get_lowest_plcp(priv,
&priv->contexts[IWL_RXON_CTX_BSS]);
frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
- rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
+ rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
&frame->u.cmd[0]);
iwl3945_free_frame(priv, frame);
@@ -442,7 +443,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
}
- priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
+ iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
if (ieee80211_is_mgmt(fc)) {
@@ -474,7 +475,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
dma_addr_t phys_addr;
dma_addr_t txcmd_phys;
int txq_id = skb_get_queue_mapping(skb);
- u16 len, idx, len_org, hdr_len; /* TODO: len_org is not used */
+ u16 len, idx, hdr_len;
u8 id;
u8 unicast;
u8 sta_id;
@@ -484,7 +485,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
- if (iwl_is_rfkill(priv)) {
+ if (iwl_legacy_is_rfkill(priv)) {
IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
goto drop_unlock;
}
@@ -499,7 +500,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
fc = hdr->frame_control;
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
if (ieee80211_is_auth(fc))
IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
else if (ieee80211_is_assoc_req(fc))
@@ -513,7 +514,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr_len = ieee80211_hdrlen(fc);
/* Find index into station table for destination station */
- sta_id = iwl_sta_id_or_broadcast(
+ sta_id = iwl_legacy_sta_id_or_broadcast(
priv, &priv->contexts[IWL_RXON_CTX_BSS],
info->control.sta);
if (sta_id == IWL_INVALID_STATION) {
@@ -535,12 +536,12 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txq = &priv->txq[txq_id];
q = &txq->q;
- if ((iwl_queue_space(q) < q->high_mark))
+ if ((iwl_legacy_queue_space(q) < q->high_mark))
goto drop;
spin_lock_irqsave(&priv->lock, flags);
- idx = get_cmd_index(q, q->write_ptr, 0);
+ idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
/* Set up driver data for this TFD */
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
@@ -581,8 +582,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
len = (u16)skb->len;
tx_cmd->len = cpu_to_le16(len);
- iwl_dbg_log_tx_data_frame(priv, len, hdr);
- iwl_update_stats(priv, true, fc, len);
+ iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
+ iwl_legacy_update_stats(priv, true, fc, len);
tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
@@ -611,15 +612,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
*/
len = sizeof(struct iwl3945_tx_cmd) +
sizeof(struct iwl_cmd_header) + hdr_len;
-
- len_org = len;
len = (len + 3) & ~3;
- if (len_org != len)
- len_org = 1;
- else
- len_org = 0;
-
/* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */
txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
@@ -648,20 +642,20 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(priv, txq);
+ q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_legacy_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
- if ((iwl_queue_space(q) < q->high_mark)
+ if ((iwl_legacy_queue_space(q) < q->high_mark)
&& priv->mac80211_registered) {
if (wait_write_ptr) {
spin_lock_irqsave(&priv->lock, flags);
txq->need_update = 1;
- iwl_txq_update_write_ptr(priv, txq);
+ iwl_legacy_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
}
- iwl_stop_queue(priv, skb_get_queue_mapping(skb));
+ iwl_legacy_stop_queue(priv, txq);
}
return 0;
@@ -689,8 +683,8 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
int duration = le16_to_cpu(params->duration);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
- add_time = iwl_usecs_to_beacons(priv,
+ if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
+ add_time = iwl_legacy_usecs_to_beacons(priv,
le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
le16_to_cpu(ctx->timing.beacon_interval));
@@ -703,9 +697,9 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
cmd.len = sizeof(spectrum);
spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
- if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
+ if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
spectrum.start_time =
- iwl_add_beacon_time(priv,
+ iwl_legacy_add_beacon_time(priv,
priv->_3945.last_beacon_time, add_time,
le16_to_cpu(ctx->timing.beacon_interval));
else
@@ -718,7 +712,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
spectrum.flags |= RXON_FLG_BAND_24G_MSK |
RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
- rc = iwl_send_cmd_sync(priv, &cmd);
+ rc = iwl_legacy_send_cmd_sync(priv, &cmd);
if (rc)
return rc;
@@ -745,7 +739,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
break;
}
- iwl_free_pages(priv, cmd.reply_page);
+ iwl_legacy_free_pages(priv, cmd.reply_page);
return rc;
}
@@ -789,45 +783,19 @@ static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
#endif
IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
}
-static void iwl3945_bg_beacon_update(struct work_struct *work)
-{
- struct iwl_priv *priv =
- container_of(work, struct iwl_priv, beacon_update);
- struct sk_buff *beacon;
-
- /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
- beacon = ieee80211_beacon_get(priv->hw,
- priv->contexts[IWL_RXON_CTX_BSS].vif);
-
- if (!beacon) {
- IWL_ERR(priv, "update beacon failed\n");
- return;
- }
-
- mutex_lock(&priv->mutex);
- /* new beacon skb is allocated every time; dispose previous.*/
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = beacon;
- mutex_unlock(&priv->mutex);
-
- iwl3945_send_beacon_cmd(priv);
-}
-
static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
u8 rate = beacon->beacon_notify_hdr.rate;
IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
@@ -841,9 +809,6 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
- if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
- (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
- queue_work(priv->workqueue, &priv->beacon_update);
}
/* Handle notification from uCode that card's power state is changing
@@ -868,7 +833,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
clear_bit(STATUS_RF_KILL_HW, &priv->status);
- iwl_scan_cancel(priv);
+ iwl_legacy_scan_cancel(priv);
if ((test_bit(STATUS_RF_KILL_HW, &status) !=
test_bit(STATUS_RF_KILL_HW, &priv->status)))
@@ -891,13 +856,13 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
{
priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
- priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
- priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
+ priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
+ priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_rx_spectrum_measure_notif;
- priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
+ iwl_legacy_rx_spectrum_measure_notif;
+ priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
- iwl_rx_pm_debug_statistics_notif;
+ iwl_legacy_rx_pm_debug_statistics_notif;
priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
/*
@@ -908,7 +873,7 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
- iwl_setup_rx_scan_handlers(priv);
+ iwl_legacy_setup_rx_scan_handlers(priv);
priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
/* Set up hardware specific Rx handlers */
@@ -1009,7 +974,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
spin_lock_irqsave(&rxq->lock, flags);
write = rxq->write & ~0x7;
- while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+ while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
/* Get next free Rx buffer, remove from free list */
element = rxq->rx_free.next;
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
@@ -1035,7 +1000,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_rx_queue_update_write_ptr(priv, rxq);
+ iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
}
}
@@ -1129,7 +1094,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
+ __iwl_legacy_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -1176,7 +1141,7 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
PAGE_SIZE << priv->hw_params.rx_page_order,
PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
+ __iwl_legacy_free_pages(priv, rxq->pool[i].page);
rxq->pool[i].page = NULL;
}
}
@@ -1281,7 +1246,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
len += sizeof(u32); /* account for status word */
- trace_iwlwifi_dev_rx(priv, pkt, len);
+ trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
/* Reclaim a command buffer only if this packet is a response
* to a (driver-originated) command.
@@ -1298,14 +1263,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
* rx_handlers table. See iwl3945_setup_rx_handlers() */
if (priv->rx_handlers[pkt->hdr.cmd]) {
IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
- get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
} else {
/* No handling needed */
IWL_DEBUG_RX(priv,
"r %d i %d No handler needed for %s, 0x%02x\n",
- r, i, get_cmd_string(pkt->hdr.cmd),
+ r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
pkt->hdr.cmd);
}
@@ -1318,10 +1283,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
if (reclaim) {
/* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking iwl_send_cmd()
+ * and fire off the (possibly) blocking iwl_legacy_send_cmd()
* as we reclaim the driver command queue */
if (rxb->page)
- iwl_tx_cmd_complete(priv, rxb);
+ iwl_legacy_tx_cmd_complete(priv, rxb);
else
IWL_WARN(priv, "Claim null rxb?\n");
}
@@ -1363,14 +1328,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
}
/* call this function to flush any scheduled tasklet */
-static inline void iwl_synchronize_irq(struct iwl_priv *priv)
+static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
{
/* wait to make sure we flush pending tasklet*/
synchronize_irq(priv->pci_dev->irq);
tasklet_kill(&priv->irq_tasklet);
}
-static const char *desc_lookup(int i)
+static const char *iwl3945_desc_lookup(int i)
{
switch (i) {
case 1:
@@ -1407,7 +1372,7 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
}
- count = iwl_read_targ_mem(priv, base);
+ count = iwl_legacy_read_targ_mem(priv, base);
if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
IWL_ERR(priv, "Start IWL Error Log Dump:\n");
@@ -1420,25 +1385,25 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
for (i = ERROR_START_OFFSET;
i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
i += ERROR_ELEM_SIZE) {
- desc = iwl_read_targ_mem(priv, base + i);
+ desc = iwl_legacy_read_targ_mem(priv, base + i);
time =
- iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32));
+ iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
blink1 =
- iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32));
+ iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
blink2 =
- iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32));
+ iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
ilink1 =
- iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32));
+ iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
ilink2 =
- iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32));
+ iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
data1 =
- iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32));
+ iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
IWL_ERR(priv,
"%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
- desc_lookup(desc), desc, time, blink1, blink2,
+ iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
ilink1, ilink2, data1);
- trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0,
+ trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
0, blink1, blink2, ilink1, ilink2);
}
}
@@ -1477,14 +1442,14 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
iwl_grab_nic_access(priv);
/* Set starting address; reads will auto-increment */
- _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
rmb();
/* "time" is actually "data" for mode 0 (no timestamp).
* place event id # at far right for easier visual parsing. */
for (i = 0; i < num_events; i++) {
- ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (mode == 0) {
/* data, ev */
if (bufsz) {
@@ -1493,11 +1458,12 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
time, ev);
} else {
IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
- trace_iwlwifi_dev_ucode_event(priv, 0,
+ trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
time, ev);
}
} else {
- data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ data = _iwl_legacy_read_direct32(priv,
+ HBUS_TARG_MEM_RDAT);
if (bufsz) {
pos += scnprintf(*buf + pos, bufsz - pos,
"%010u:0x%08x:%04u\n",
@@ -1505,7 +1471,7 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
} else {
IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv, time,
+ trace_iwlwifi_legacy_dev_ucode_event(priv, time,
data, ev);
}
}
@@ -1576,10 +1542,10 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
}
/* event log header */
- capacity = iwl_read_targ_mem(priv, base);
- mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
- num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
- next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
+ capacity = iwl_legacy_read_targ_mem(priv, base);
+ mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
+ num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
+ next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
if (capacity > priv->cfg->base_params->max_event_log_size) {
IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
@@ -1601,8 +1567,8 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
return pos;
}
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
#else
@@ -1613,7 +1579,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n",
size);
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
if (display) {
if (full_log)
bufsz = capacity * 48;
@@ -1623,7 +1589,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
if (!*buf)
return -ENOMEM;
}
- if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
+ if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
/* if uCode has wrapped back to top of log,
* start at the oldest entry,
* i.e the next one that uCode would fill.
@@ -1653,7 +1619,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
u32 inta_mask;
#endif
@@ -1671,8 +1637,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
/* just for debug */
inta_mask = iwl_read32(priv, CSR_INT_MASK);
IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
@@ -1696,18 +1662,18 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
IWL_ERR(priv, "Hardware error detected. Restarting.\n");
/* Tell the device to stop sending interrupts */
- iwl_disable_interrupts(priv);
+ iwl_legacy_disable_interrupts(priv);
priv->isr_stats.hw++;
- iwl_irq_handle_error(priv);
+ iwl_legacy_irq_handle_error(priv);
handled |= CSR_INT_BIT_HW_ERR;
return;
}
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
/* NIC fires this, but we don't use it, redundant with WAKEUP */
if (inta & CSR_INT_BIT_SCD) {
IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
@@ -1730,20 +1696,20 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
IWL_ERR(priv, "Microcode SW error detected. "
"Restarting 0x%X.\n", inta);
priv->isr_stats.sw++;
- iwl_irq_handle_error(priv);
+ iwl_legacy_irq_handle_error(priv);
handled |= CSR_INT_BIT_SW_ERR;
}
/* uCode wakes up after power-down sleep */
if (inta & CSR_INT_BIT_WAKEUP) {
IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
- iwl_txq_update_write_ptr(priv, &priv->txq[0]);
- iwl_txq_update_write_ptr(priv, &priv->txq[1]);
- iwl_txq_update_write_ptr(priv, &priv->txq[2]);
- iwl_txq_update_write_ptr(priv, &priv->txq[3]);
- iwl_txq_update_write_ptr(priv, &priv->txq[4]);
- iwl_txq_update_write_ptr(priv, &priv->txq[5]);
+ iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
priv->isr_stats.wakeup++;
handled |= CSR_INT_BIT_WAKEUP;
@@ -1763,7 +1729,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
priv->isr_stats.tx++;
iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
- iwl_write_direct32(priv, FH39_TCSR_CREDIT
+ iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
(FH39_SRVC_CHNL), 0x0);
handled |= CSR_INT_BIT_FH_TX;
}
@@ -1782,10 +1748,10 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
/* Re-enable all interrupts */
/* only Re-enable if disabled by irq */
if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_enable_interrupts(priv);
+ iwl_legacy_enable_interrupts(priv);
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
inta = iwl_read32(priv, CSR_INT);
inta_mask = iwl_read32(priv, CSR_INT_MASK);
inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
@@ -1812,14 +1778,14 @@ static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv,
return added;
}
- active_dwell = iwl_get_active_dwell_time(priv, band, 0);
- passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
+ active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
+ passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
if (passive_dwell <= active_dwell)
passive_dwell = active_dwell + 1;
- channel = iwl_get_single_channel_number(priv, band);
+ channel = iwl_legacy_get_single_channel_number(priv, band);
if (channel) {
scan_ch->channel = channel;
@@ -1855,8 +1821,8 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
if (!sband)
return 0;
- active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
+ active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
+ passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
if (passive_dwell <= active_dwell)
passive_dwell = active_dwell + 1;
@@ -1869,10 +1835,12 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
scan_ch->channel = chan->hw_value;
- ch_info = iwl_get_channel_info(priv, band, scan_ch->channel);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
- scan_ch->channel);
+ ch_info = iwl_legacy_get_channel_info(priv, band,
+ scan_ch->channel);
+ if (!iwl_legacy_is_channel_valid(ch_info)) {
+ IWL_DEBUG_SCAN(priv,
+ "Channel %d is INVALID for this band.\n",
+ scan_ch->channel);
continue;
}
@@ -1881,7 +1849,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
/* If passive , set up for auto-switch
* and use long active_dwell time.
*/
- if (!is_active || is_channel_passive(ch_info) ||
+ if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
(chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
scan_ch->type = 0; /* passive */
if (IWL_UCODE_API(priv->ucode_ver) == 1)
@@ -1961,12 +1929,12 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv,
static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
{
- iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
- iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
- iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
- iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
- iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
- iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
}
/**
@@ -1982,7 +1950,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
- iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
IWL39_RTC_INST_LOWER_BOUND);
errcnt = 0;
@@ -1990,7 +1958,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
- val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
IWL_ERR(priv, "uCode INST section is invalid at "
"offset 0x%x, is 0x%x, s/b 0x%x\n",
@@ -2029,9 +1997,9 @@ static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
- iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
+ iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
i + IWL39_RTC_INST_LOWER_BOUND);
- val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
#if 0 /* Enable this if you want to see details */
IWL_ERR(priv, "uCode INST section is invalid at "
@@ -2107,7 +2075,7 @@ static void iwl3945_nic_start(struct iwl_priv *priv)
#define IWL3945_UCODE_GET(item) \
static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
{ \
- return le32_to_cpu(ucode->u.v1.item); \
+ return le32_to_cpu(ucode->v1.item); \
}
static u32 iwl3945_ucode_get_header_size(u32 api_ver)
@@ -2117,7 +2085,7 @@ static u32 iwl3945_ucode_get_header_size(u32 api_ver)
static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
{
- return (u8 *) ucode->u.v1.data;
+ return (u8 *) ucode->v1.data;
}
IWL3945_UCODE_GET(inst_size);
@@ -2292,13 +2260,13 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
priv->ucode_code.len = inst_size;
- iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
priv->ucode_data.len = data_size;
- iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
priv->ucode_data_backup.len = data_size;
- iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
!priv->ucode_data_backup.v_addr)
@@ -2307,10 +2275,10 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
/* Initialization instructions and data */
if (init_size && init_data_size) {
priv->ucode_init.len = init_size;
- iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
priv->ucode_init_data.len = init_data_size;
- iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
goto err_pci_alloc;
@@ -2319,7 +2287,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
/* Bootstrap (instructions only, no data) */
if (boot_size) {
priv->ucode_boot.len = boot_size;
- iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
if (!priv->ucode_boot.v_addr)
goto err_pci_alloc;
@@ -2406,14 +2374,14 @@ static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
pdata = priv->ucode_data_backup.p_addr;
/* Tell bootstrap uCode where to find image to load */
- iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
+ iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
priv->ucode_data.len);
/* Inst byte count must be last to set up, bit 31 signals uCode
* that all new ptr/size info is in place */
- iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
+ iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
priv->ucode_code.len | BSM_DRAM_INST_LOAD);
IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
@@ -2494,7 +2462,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
goto restart;
}
- rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
+ rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
if (rfkill & 0x1) {
@@ -2515,24 +2483,19 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
/* After the ALIVE response, we can send commands to 3945 uCode */
set_bit(STATUS_ALIVE, &priv->status);
- if (priv->cfg->ops->lib->recover_from_tx_stall) {
- /* Enable timer to monitor the driver queues */
- mod_timer(&priv->monitor_recover,
- jiffies +
- msecs_to_jiffies(
- priv->cfg->base_params->monitor_recover_period));
- }
+ /* Enable watchdog to monitor the driver tx queues */
+ iwl_legacy_setup_watchdog(priv);
- if (iwl_is_rfkill(priv))
+ if (iwl_legacy_is_rfkill(priv))
return;
ieee80211_wake_queues(priv->hw);
- priv->active_rate = IWL_RATES_MASK;
+ priv->active_rate = IWL_RATES_MASK_3945;
- iwl_power_update_mode(priv, true);
+ iwl_legacy_power_update_mode(priv, true);
- if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
+ if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
struct iwl3945_rxon_cmd *active_rxon =
(struct iwl3945_rxon_cmd *)(&ctx->active);
@@ -2540,21 +2503,20 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
/* Initialize our rx_config data */
- iwl_connection_init_rx_config(priv, ctx);
+ iwl_legacy_connection_init_rx_config(priv, ctx);
}
/* Configure Bluetooth device coexistence support */
- priv->cfg->ops->hcmd->send_bt_config(priv);
+ iwl_legacy_send_bt_config(priv);
+
+ set_bit(STATUS_READY, &priv->status);
/* Configure the adapter for unassociated operation */
iwl3945_commit_rxon(priv, ctx);
iwl3945_reg_txpower_periodic(priv);
- iwl_leds_init(priv);
-
IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- set_bit(STATUS_READY, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
return;
@@ -2572,19 +2534,18 @@ static void __iwl3945_down(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
- iwl_scan_cancel_timeout(priv, 200);
+ iwl_legacy_scan_cancel_timeout(priv, 200);
exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
* to prevent rearm timer */
- if (priv->cfg->ops->lib->recover_from_tx_stall)
- del_timer_sync(&priv->monitor_recover);
+ del_timer_sync(&priv->watchdog);
/* Station information will now be cleared in device */
- iwl_clear_ucode_stations(priv, NULL);
- iwl_dealloc_bcast_stations(priv);
- iwl_clear_driver_stations(priv);
+ iwl_legacy_clear_ucode_stations(priv, NULL);
+ iwl_legacy_dealloc_bcast_stations(priv);
+ iwl_legacy_clear_driver_stations(priv);
/* Unblock any waiting calls */
wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2599,16 +2560,16 @@ static void __iwl3945_down(struct iwl_priv *priv)
/* tell the device to stop sending interrupts */
spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
+ iwl_legacy_disable_interrupts(priv);
spin_unlock_irqrestore(&priv->lock, flags);
- iwl_synchronize_irq(priv);
+ iwl3945_synchronize_irq(priv);
if (priv->mac80211_registered)
ieee80211_stop_queues(priv->hw);
/* If we have not previously called iwl3945_init() then
* clear all bits but the RF Kill bits and return */
- if (!iwl_is_init(priv)) {
+ if (!iwl_legacy_is_init(priv)) {
priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
STATUS_RF_KILL_HW |
test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
@@ -2633,11 +2594,11 @@ static void __iwl3945_down(struct iwl_priv *priv)
iwl3945_hw_rxq_stop(priv);
/* Power-down device's busmaster DMA clocks */
- iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
/* Stop the device, and put it in low power state */
- iwl_apm_stop(priv);
+ iwl_legacy_apm_stop(priv);
exit:
memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
@@ -2668,7 +2629,8 @@ static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
u8 sta_id;
spin_lock_irqsave(&priv->sta_lock, flags);
- sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
+ sta_id = iwl_legacy_prep_station(priv, ctx,
+ iwlegacy_bcast_addr, false, NULL);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Unable to prepare broadcast station\n");
spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -2726,7 +2688,7 @@ static int __iwl3945_up(struct iwl_priv *priv)
/* clear (again), then enable host interrupts */
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_enable_interrupts(priv);
+ iwl_legacy_enable_interrupts(priv);
/* really make sure rfkill handshake bits are cleared */
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
@@ -2868,21 +2830,18 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
- if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
+ if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
u16 interval = 0;
u32 extra;
u32 suspend_time = 100;
u32 scan_suspend_time = 100;
- unsigned long flags;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- spin_lock_irqsave(&priv->lock, flags);
if (priv->is_internal_short_scan)
interval = 0;
else
interval = vif->bss_conf.beacon_int;
- spin_unlock_irqrestore(&priv->lock, flags);
scan->suspend_time = 0;
scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -2959,7 +2918,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
if (!priv->is_internal_short_scan) {
scan->tx_cmd.len = cpu_to_le16(
- iwl_fill_probe_req(priv,
+ iwl_legacy_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
vif->addr,
priv->scan_request->ie,
@@ -2968,9 +2927,9 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
} else {
/* use bcast addr, will not be transmitted but must be valid */
scan->tx_cmd.len = cpu_to_le16(
- iwl_fill_probe_req(priv,
+ iwl_legacy_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
- iwl_bcast_addr, NULL, 0,
+ iwlegacy_bcast_addr, NULL, 0,
IWL_MAX_SCAN_SIZE - sizeof(*scan)));
}
/* select Rx antennas */
@@ -2998,7 +2957,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->len = cpu_to_le16(cmd.len);
set_bit(STATUS_SCAN_HW, &priv->status);
- ret = iwl_send_cmd_sync(priv, &cmd);
+ ret = iwl_legacy_send_cmd_sync(priv, &cmd);
if (ret)
clear_bit(STATUS_SCAN_HW, &priv->status);
return ret;
@@ -3057,52 +3016,47 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
mutex_unlock(&priv->mutex);
}
-void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
+void iwl3945_post_associate(struct iwl_priv *priv)
{
int rc = 0;
struct ieee80211_conf *conf = NULL;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- if (!vif || !priv->is_open)
- return;
-
- if (vif->type == NL80211_IFTYPE_AP) {
- IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
+ if (!ctx->vif || !priv->is_open)
return;
- }
IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- vif->bss_conf.aid, ctx->active.bssid_addr);
+ ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- iwl_scan_cancel_timeout(priv, 200);
+ iwl_legacy_scan_cancel_timeout(priv, 200);
- conf = ieee80211_get_hw_conf(priv->hw);
+ conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwl3945_commit_rxon(priv, ctx);
- rc = iwl_send_rxon_timing(priv, ctx);
+ rc = iwl_legacy_send_rxon_timing(priv, ctx);
if (rc)
IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
"Attempting to continue.\n");
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+ ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- vif->bss_conf.aid, vif->bss_conf.beacon_int);
+ ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
- if (vif->bss_conf.use_short_preamble)
+ if (ctx->vif->bss_conf.use_short_preamble)
ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
+ if (ctx->vif->bss_conf.use_short_slot)
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
@@ -3110,7 +3064,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
iwl3945_commit_rxon(priv, ctx);
- switch (vif->type) {
+ switch (ctx->vif->type) {
case NL80211_IFTYPE_STATION:
iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
break;
@@ -3119,7 +3073,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
break;
default:
IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, vif->type);
+ __func__, ctx->vif->type);
break;
}
}
@@ -3182,8 +3136,6 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
* no need to poll the killswitch state anymore */
cancel_delayed_work(&priv->_3945.rfkill_poll);
- iwl_led_start(priv);
-
priv->is_open = 1;
IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
@@ -3218,7 +3170,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct iwl_priv *priv = hw->priv;
@@ -3231,26 +3183,26 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
dev_kfree_skb_any(skb);
IWL_DEBUG_MAC80211(priv, "leave\n");
- return NETDEV_TX_OK;
}
-void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
+void iwl3945_config_ap(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct ieee80211_vif *vif = ctx->vif;
int rc = 0;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
/* The following should be done only at AP bring up */
- if (!(iwl_is_associated(priv, IWL_RXON_CTX_BSS))) {
+ if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
/* RXON - unassoc (to set timing command) */
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwl3945_commit_rxon(priv, ctx);
/* RXON Timing */
- rc = iwl_send_rxon_timing(priv, ctx);
+ rc = iwl_legacy_send_rxon_timing(priv, ctx);
if (rc)
IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
"Attempting to continue.\n");
@@ -3277,10 +3229,6 @@ void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
iwl3945_commit_rxon(priv, ctx);
}
iwl3945_send_beacon_cmd(priv);
-
- /* FIXME - we need to add code here to detect a totally new
- * configuration, reset the AP, unassoc, rxon timing, assoc,
- * clear sta table, add BCAST sta... */
}
static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -3300,17 +3248,25 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS);
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS, the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
if (!static_key) {
- sta_id = iwl_sta_id_or_broadcast(
+ sta_id = iwl_legacy_sta_id_or_broadcast(
priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
if (sta_id == IWL_INVALID_STATION)
return -EINVAL;
}
mutex_lock(&priv->mutex);
- iwl_scan_cancel_timeout(priv, 100);
+ iwl_legacy_scan_cancel_timeout(priv, 100);
switch (cmd) {
case SET_KEY:
@@ -3355,7 +3311,8 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
sta_priv->common.sta_id = IWL_INVALID_STATION;
- ret = iwl_add_station_common(priv, &priv->contexts[IWL_RXON_CTX_BSS],
+ ret = iwl_legacy_add_station_common(priv,
+ &priv->contexts[IWL_RXON_CTX_BSS],
sta->addr, is_ap, sta, &sta_id);
if (ret) {
IWL_ERR(priv, "Unable to add station %pM (%d)\n",
@@ -3407,16 +3364,16 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
ctx->staging.filter_flags |= filter_or;
/*
- * Committing directly here breaks for some reason,
- * but we'll eventually commit the filter flags
- * change anyway.
+ * Not committing directly because hardware can perform a scan,
+ * but even if hw is ready, committing here breaks for some reason;
+ * we'll eventually commit the filter flags change anyway.
*/
mutex_unlock(&priv->mutex);
/*
* Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_connection_init_rx_config()
+ * default flags setup in iwl_legacy_connection_init_rx_config()
* since we currently do not support programming multicast
* filters into the device.
*/
@@ -3431,7 +3388,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
*
*****************************************************************************/
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/*
* The following adds a new attribute to the sysfs representation
@@ -3444,13 +3401,13 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
* level that is used instead of the global debug level if it (the per
* device debug level) is set.
*/
-static ssize_t show_debug_level(struct device *d,
+static ssize_t iwl3945_show_debug_level(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv));
+ return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
}
-static ssize_t store_debug_level(struct device *d,
+static ssize_t iwl3945_store_debug_level(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3463,7 +3420,7 @@ static ssize_t store_debug_level(struct device *d,
IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
else {
priv->debug_level = val;
- if (iwl_alloc_traffic_mem(priv))
+ if (iwl_legacy_alloc_traffic_mem(priv))
IWL_ERR(priv,
"Not enough memory to generate traffic log\n");
}
@@ -3471,31 +3428,31 @@ static ssize_t store_debug_level(struct device *d,
}
static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
- show_debug_level, store_debug_level);
+ iwl3945_show_debug_level, iwl3945_store_debug_level);
-#endif /* CONFIG_IWLWIFI_DEBUG */
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-static ssize_t show_temperature(struct device *d,
+static ssize_t iwl3945_show_temperature(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
}
-static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
+static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
-static ssize_t show_tx_power(struct device *d,
+static ssize_t iwl3945_show_tx_power(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
}
-static ssize_t store_tx_power(struct device *d,
+static ssize_t iwl3945_store_tx_power(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3512,9 +3469,9 @@ static ssize_t store_tx_power(struct device *d,
return count;
}
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
-static ssize_t show_flags(struct device *d,
+static ssize_t iwl3945_show_flags(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3523,7 +3480,7 @@ static ssize_t show_flags(struct device *d,
return sprintf(buf, "0x%04X\n", ctx->active.flags);
}
-static ssize_t store_flags(struct device *d,
+static ssize_t iwl3945_store_flags(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3534,7 +3491,7 @@ static ssize_t store_flags(struct device *d,
mutex_lock(&priv->mutex);
if (le32_to_cpu(ctx->staging.flags) != flags) {
/* Cancel any currently running scans... */
- if (iwl_scan_cancel_timeout(priv, 100))
+ if (iwl_legacy_scan_cancel_timeout(priv, 100))
IWL_WARN(priv, "Could not cancel scan.\n");
else {
IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
@@ -3548,9 +3505,9 @@ static ssize_t store_flags(struct device *d,
return count;
}
-static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
+static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
-static ssize_t show_filter_flags(struct device *d,
+static ssize_t iwl3945_show_filter_flags(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3560,7 +3517,7 @@ static ssize_t show_filter_flags(struct device *d,
le32_to_cpu(ctx->active.filter_flags));
}
-static ssize_t store_filter_flags(struct device *d,
+static ssize_t iwl3945_store_filter_flags(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3571,7 +3528,7 @@ static ssize_t store_filter_flags(struct device *d,
mutex_lock(&priv->mutex);
if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
/* Cancel any currently running scans... */
- if (iwl_scan_cancel_timeout(priv, 100))
+ if (iwl_legacy_scan_cancel_timeout(priv, 100))
IWL_WARN(priv, "Could not cancel scan.\n");
else {
IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
@@ -3586,10 +3543,10 @@ static ssize_t store_filter_flags(struct device *d,
return count;
}
-static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
- store_filter_flags);
+static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
+ iwl3945_store_filter_flags);
-static ssize_t show_measurement(struct device *d,
+static ssize_t iwl3945_show_measurement(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
@@ -3621,7 +3578,7 @@ static ssize_t show_measurement(struct device *d,
return len;
}
-static ssize_t store_measurement(struct device *d,
+static ssize_t iwl3945_store_measurement(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3658,9 +3615,9 @@ static ssize_t store_measurement(struct device *d,
}
static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
- show_measurement, store_measurement);
+ iwl3945_show_measurement, iwl3945_store_measurement);
-static ssize_t store_retry_rate(struct device *d,
+static ssize_t iwl3945_store_retry_rate(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3673,38 +3630,38 @@ static ssize_t store_retry_rate(struct device *d,
return count;
}
-static ssize_t show_retry_rate(struct device *d,
+static ssize_t iwl3945_show_retry_rate(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%d", priv->retry_rate);
}
-static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
- store_retry_rate);
+static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
+ iwl3945_store_retry_rate);
-static ssize_t show_channels(struct device *d,
+static ssize_t iwl3945_show_channels(struct device *d,
struct device_attribute *attr, char *buf)
{
/* all this shit doesn't belong into sysfs anyway */
return 0;
}
-static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
+static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
-static ssize_t show_antenna(struct device *d,
+static ssize_t iwl3945_show_antenna(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
}
-static ssize_t store_antenna(struct device *d,
+static ssize_t iwl3945_store_antenna(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3729,20 +3686,20 @@ static ssize_t store_antenna(struct device *d,
return count;
}
-static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
+static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
-static ssize_t show_status(struct device *d,
+static ssize_t iwl3945_show_status(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
- if (!iwl_is_alive(priv))
+ if (!iwl_legacy_is_alive(priv))
return -EAGAIN;
return sprintf(buf, "0x%08x\n", (int)priv->status);
}
-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
+static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
-static ssize_t dump_error_log(struct device *d,
+static ssize_t iwl3945_dump_error_log(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -3755,7 +3712,7 @@ static ssize_t dump_error_log(struct device *d,
return strnlen(buf, count);
}
-static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
+static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
/*****************************************************************************
*
@@ -3771,21 +3728,17 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->restart, iwl3945_bg_restart);
INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
- INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
- iwl_setup_scan_deferred_work(priv);
+ iwl_legacy_setup_scan_deferred_work(priv);
iwl3945_hw_setup_deferred_work(priv);
- if (priv->cfg->ops->lib->recover_from_tx_stall) {
- init_timer(&priv->monitor_recover);
- priv->monitor_recover.data = (unsigned long)priv;
- priv->monitor_recover.function =
- priv->cfg->ops->lib->recover_from_tx_stall;
- }
+ init_timer(&priv->watchdog);
+ priv->watchdog.data = (unsigned long)priv;
+ priv->watchdog.function = iwl_legacy_bg_watchdog;
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
iwl3945_irq_tasklet, (unsigned long)priv);
@@ -3797,9 +3750,8 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work_sync(&priv->init_alive_start);
cancel_delayed_work(&priv->alive_start);
- cancel_work_sync(&priv->beacon_update);
- iwl_cancel_scan_deferred_work(priv);
+ iwl_legacy_cancel_scan_deferred_work(priv);
}
static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3813,7 +3765,7 @@ static struct attribute *iwl3945_sysfs_entries[] = {
&dev_attr_status.attr,
&dev_attr_temperature.attr,
&dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_DEBUG
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
&dev_attr_debug_level.attr,
#endif
NULL
@@ -3824,22 +3776,23 @@ static struct attribute_group iwl3945_attribute_group = {
.attrs = iwl3945_sysfs_entries,
};
-static struct ieee80211_ops iwl3945_hw_ops = {
+struct ieee80211_ops iwl3945_hw_ops = {
.tx = iwl3945_mac_tx,
.start = iwl3945_mac_start,
.stop = iwl3945_mac_stop,
- .add_interface = iwl_mac_add_interface,
- .remove_interface = iwl_mac_remove_interface,
- .config = iwl_mac_config,
+ .add_interface = iwl_legacy_mac_add_interface,
+ .remove_interface = iwl_legacy_mac_remove_interface,
+ .change_interface = iwl_legacy_mac_change_interface,
+ .config = iwl_legacy_mac_config,
.configure_filter = iwl3945_configure_filter,
.set_key = iwl3945_mac_set_key,
- .conf_tx = iwl_mac_conf_tx,
- .reset_tsf = iwl_mac_reset_tsf,
- .bss_info_changed = iwl_bss_info_changed,
- .hw_scan = iwl_mac_hw_scan,
+ .conf_tx = iwl_legacy_mac_conf_tx,
+ .reset_tsf = iwl_legacy_mac_reset_tsf,
+ .bss_info_changed = iwl_legacy_mac_bss_info_changed,
+ .hw_scan = iwl_legacy_mac_hw_scan,
.sta_add = iwl3945_mac_sta_add,
- .sta_remove = iwl_mac_sta_remove,
- .tx_last_beacon = iwl_mac_tx_last_beacon,
+ .sta_remove = iwl_legacy_mac_sta_remove,
+ .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
};
static int iwl3945_init_drv(struct iwl_priv *priv)
@@ -3865,7 +3818,15 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
+ /* initialize force reset */
+ priv->force_reset[IWL_RF_RESET].reset_duration =
+ IWL_DELAY_NEXT_FORCE_RF_RESET;
+ priv->force_reset[IWL_FW_RESET].reset_duration =
+ IWL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+
priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
+ priv->tx_power_next = IWL_DEFAULT_TX_POWER;
if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
@@ -3873,7 +3834,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
ret = -EINVAL;
goto err;
}
- ret = iwl_init_channel_map(priv);
+ ret = iwl_legacy_init_channel_map(priv);
if (ret) {
IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
goto err;
@@ -3885,7 +3846,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
goto err_free_channel_map;
}
- ret = iwlcore_init_geos(priv);
+ ret = iwl_legacy_init_geos(priv);
if (ret) {
IWL_ERR(priv, "initializing geos failed: %d\n", ret);
goto err_free_channel_map;
@@ -3895,7 +3856,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
return 0;
err_free_channel_map:
- iwl_free_channel_map(priv);
+ iwl_legacy_free_channel_map(priv);
err:
return ret;
}
@@ -3915,15 +3876,12 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SPECTRUM_MGMT;
- if (!priv->cfg->base_params->broken_powersave)
- hw->flags |= IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
-
hw->wiphy->interface_modes =
priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS;
+ WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
/* we create the 802.11 header and a zero-length SSID element */
@@ -3940,6 +3898,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&priv->bands[IEEE80211_BAND_5GHZ];
+ iwl_legacy_leds_init(priv);
+
ret = ieee80211_register_hw(priv->hw);
if (ret) {
IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -3965,7 +3925,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
/* mac80211 allocates memory for this device instance, including
* space for this driver's private structure */
- hw = iwl_alloc_all(cfg, &iwl3945_hw_ops);
+ hw = iwl_legacy_alloc_all(cfg);
if (hw == NULL) {
pr_err("Can not allocate network device\n");
err = -ENOMEM;
@@ -4005,13 +3965,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
iwl3945_hw_ops.hw_scan = NULL;
}
-
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
priv->cfg = cfg;
priv->pci_dev = pdev;
priv->inta_mask = CSR_INI_SET_MASK;
- if (iwl_alloc_traffic_mem(priv))
+ if (iwl_legacy_alloc_traffic_mem(priv))
IWL_ERR(priv, "Not enough memory to generate traffic log\n");
/***************************
@@ -4075,7 +4034,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
* ********************/
/* Read the EEPROM */
- err = iwl_eeprom_init(priv);
+ err = iwl_legacy_eeprom_init(priv);
if (err) {
IWL_ERR(priv, "Unable to init EEPROM\n");
goto out_iounmap;
@@ -4112,12 +4071,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
* ********************/
spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
+ iwl_legacy_disable_interrupts(priv);
spin_unlock_irqrestore(&priv->lock, flags);
pci_enable_msi(priv->pci_dev);
- err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr,
+ err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
IRQF_SHARED, DRV_NAME, priv);
if (err) {
IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4130,24 +4089,24 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
goto out_release_irq;
}
- iwl_set_rxon_channel(priv,
+ iwl_legacy_set_rxon_channel(priv,
&priv->bands[IEEE80211_BAND_2GHZ].channels[5],
&priv->contexts[IWL_RXON_CTX_BSS]);
iwl3945_setup_deferred_work(priv);
iwl3945_setup_rx_handlers(priv);
- iwl_power_initialize(priv);
+ iwl_legacy_power_initialize(priv);
/*********************************
* 8. Setup and Register mac80211
* *******************************/
- iwl_enable_interrupts(priv);
+ iwl_legacy_enable_interrupts(priv);
err = iwl3945_setup_mac(priv);
if (err)
goto out_remove_sysfs;
- err = iwl_dbgfs_register(priv, DRV_NAME);
+ err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
if (err)
IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
@@ -4165,12 +4124,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
free_irq(priv->pci_dev->irq, priv);
out_disable_msi:
pci_disable_msi(priv->pci_dev);
- iwlcore_free_geos(priv);
- iwl_free_channel_map(priv);
+ iwl_legacy_free_geos(priv);
+ iwl_legacy_free_channel_map(priv);
out_unset_hw_params:
iwl3945_unset_hw_params(priv);
out_eeprom_free:
- iwl_eeprom_free(priv);
+ iwl_legacy_eeprom_free(priv);
out_iounmap:
pci_iounmap(pdev, priv->hw_base);
out_pci_release_regions:
@@ -4179,7 +4138,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
out_ieee80211_free_hw:
- iwl_free_traffic_mem(priv);
+ iwl_legacy_free_traffic_mem(priv);
ieee80211_free_hw(priv->hw);
out:
return err;
@@ -4195,10 +4154,12 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
- iwl_dbgfs_unregister(priv);
+ iwl_legacy_dbgfs_unregister(priv);
set_bit(STATUS_EXIT_PENDING, &priv->status);
+ iwl_legacy_leds_exit(priv);
+
if (priv->mac80211_registered) {
ieee80211_unregister_hw(priv->hw);
priv->mac80211_registered = 0;
@@ -4213,16 +4174,16 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
* paths to avoid running iwl_down() at all before leaving driver.
* This (inexpensive) call *makes sure* device is reset.
*/
- iwl_apm_stop(priv);
+ iwl_legacy_apm_stop(priv);
/* make sure we flush any pending irq or
* tasklet for the driver
*/
spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
+ iwl_legacy_disable_interrupts(priv);
spin_unlock_irqrestore(&priv->lock, flags);
- iwl_synchronize_irq(priv);
+ iwl3945_synchronize_irq(priv);
sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
@@ -4244,7 +4205,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
* until now... */
destroy_workqueue(priv->workqueue);
priv->workqueue = NULL;
- iwl_free_traffic_mem(priv);
+ iwl_legacy_free_traffic_mem(priv);
free_irq(pdev->irq, priv);
pci_disable_msi(pdev);
@@ -4254,8 +4215,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
- iwl_free_channel_map(priv);
- iwlcore_free_geos(priv);
+ iwl_legacy_free_channel_map(priv);
+ iwl_legacy_free_geos(priv);
kfree(priv->scan_cmd);
if (priv->beacon_skb)
dev_kfree_skb(priv->beacon_skb);
@@ -4275,10 +4236,7 @@ static struct pci_driver iwl3945_driver = {
.id_table = iwl3945_hw_card_ids,
.probe = iwl3945_pci_probe,
.remove = __devexit_p(iwl3945_pci_remove),
-#ifdef CONFIG_PM
- .suspend = iwl_pci_suspend,
- .resume = iwl_pci_resume,
-#endif
+ .driver.pm = IWL_LEGACY_PM_OPS,
};
static int __init iwl3945_init(void)
@@ -4319,17 +4277,17 @@ module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto,
- "using software crypto (default 1 [software])\n");
-#ifdef CONFIG_IWLWIFI_DEBUG
-module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
+ "using software crypto (default 1 [software])");
module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
- int, S_IRUGO);
+ int, S_IRUGO);
MODULE_PARM_DESC(disable_hw_scan,
- "disable hardware scanning (default 0) (deprecated)");
-module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
+ "disable hardware scanning (default 0) (deprecated)");
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
module_exit(iwl3945_exit);
module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
new file mode 100644
index 000000000000..91b3d8b9d7a5
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -0,0 +1,3632 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME "iwl4965"
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-sta.h"
+#include "iwl-4965-calib.h"
+#include "iwl-4965.h"
+#include "iwl-4965-led.h"
+
+
+/******************************************************************************
+ *
+ * module boilerplate
+ *
+ ******************************************************************************/
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#define DRV_VERSION IWLWIFI_VERSION VD
+
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("iwl4965");
+
+void iwl4965_update_chain_flags(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx;
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain) {
+ for_each_context(priv, ctx) {
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ if (ctx->active.rx_chain != ctx->staging.rx_chain)
+ iwl_legacy_commit_rxon(priv, ctx);
+ }
+ }
+}
+
+static void iwl4965_clear_free_frames(struct iwl_priv *priv)
+{
+ struct list_head *element;
+
+ IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
+ priv->frames_count);
+
+ while (!list_empty(&priv->free_frames)) {
+ element = priv->free_frames.next;
+ list_del(element);
+ kfree(list_entry(element, struct iwl_frame, list));
+ priv->frames_count--;
+ }
+
+ if (priv->frames_count) {
+ IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
+ priv->frames_count);
+ priv->frames_count = 0;
+ }
+}
+
+static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
+{
+ struct iwl_frame *frame;
+ struct list_head *element;
+ if (list_empty(&priv->free_frames)) {
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame) {
+ IWL_ERR(priv, "Could not allocate frame!\n");
+ return NULL;
+ }
+
+ priv->frames_count++;
+ return frame;
+ }
+
+ element = priv->free_frames.next;
+ list_del(element);
+ return list_entry(element, struct iwl_frame, list);
+}
+
+static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+ list_add(&frame->list, &priv->free_frames);
+}
+
+static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ int left)
+{
+ lockdep_assert_held(&priv->mutex);
+
+ if (!priv->beacon_skb)
+ return 0;
+
+ if (priv->beacon_skb->len > left)
+ return 0;
+
+ memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
+
+ return priv->beacon_skb->len;
+}
+
+/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
+static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
+ struct iwl_tx_beacon_cmd *tx_beacon_cmd,
+ u8 *beacon, u32 frame_size)
+{
+ u16 tim_idx;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+ /*
+ * The index is relative to frame start but we start looking at the
+ * variable-length part of the beacon.
+ */
+ tim_idx = mgmt->u.beacon.variable - beacon;
+
+ /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+ while ((tim_idx < (frame_size - 2)) &&
+ (beacon[tim_idx] != WLAN_EID_TIM))
+ tim_idx += beacon[tim_idx+1] + 2;
+
+ /* If TIM field was found, set variables */
+ if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+ tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
+ tx_beacon_cmd->tim_size = beacon[tim_idx+1];
+ } else
+ IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
+}
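+
+/*
+ * Sketch of the element layout walked by iwl4965_set_beacon_tim() above,
+ * starting at mgmt->u.beacon.variable.  Each information element is
+ * ID, length, payload:
+ *
+ *   beacon[tim_idx + 0]    element ID (WLAN_EID_TIM when found)
+ *   beacon[tim_idx + 1]    element length
+ *   beacon[tim_idx + 2..]  element payload
+ *
+ * so "tim_idx += beacon[tim_idx + 1] + 2" steps over one whole element.
+ */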
+
+static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
+ struct iwl_frame *frame)
+{
+ struct iwl_tx_beacon_cmd *tx_beacon_cmd;
+ u32 frame_size;
+ u32 rate_flags;
+ u32 rate;
+ /*
+ * We have to set up the TX command, the TX Beacon command, and the
+ * beacon contents.
+ */
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (!priv->beacon_ctx) {
+ IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
+ return 0;
+ }
+
+ /* Initialize memory */
+ tx_beacon_cmd = &frame->u.beacon;
+ memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+ /* Set up TX beacon contents */
+ frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
+ sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+ if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
+ return 0;
+ if (!frame_size)
+ return 0;
+
+ /* Set up TX command fields */
+ tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
+ tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
+ tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
+ TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
+
+ /* Set up TX beacon command fields */
+ iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
+ frame_size);
+
+ /* Set up packet rate and flags */
+ rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
+ priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+ priv->hw_params.valid_tx_ant);
+ rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
+ if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+ tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
+ rate_flags);
+
+ return sizeof(*tx_beacon_cmd) + frame_size;
+}
+
+int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
+{
+ struct iwl_frame *frame;
+ unsigned int frame_size;
+ int rc;
+
+ frame = iwl4965_get_free_frame(priv);
+ if (!frame) {
+ IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
+ "command.\n");
+ return -ENOMEM;
+ }
+
+ frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
+ if (!frame_size) {
+ IWL_ERR(priv, "Error configuring the beacon command\n");
+ iwl4965_free_frame(priv, frame);
+ return -EINVAL;
+ }
+
+ rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
+ &frame->u.cmd[0]);
+
+ iwl4965_free_frame(priv, frame);
+
+ return rc;
+}
+
+static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ dma_addr_t addr = get_unaligned_le32(&tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ addr |=
+ ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+ return addr;
+}
+
+static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+ dma_addr_t addr, u16 len)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ u16 hi_n_len = len << 4;
+
+ put_unaligned_le32(addr, &tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+ return tfd->num_tbs & 0x1f;
+}
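+
+/*
+ * Layout sketch of one transfer buffer entry as packed/unpacked by the
+ * helpers above, assuming a DMA address of at most 36 bits (which
+ * iwl4965_hw_txq_attach_buf_to_tfd() below checks via DMA_BIT_MASK(36));
+ * values shown before the cpu_to_le conversion:
+ *
+ *   tb->lo       = addr[31:0]
+ *   tb->hi_n_len = (len[11:0] << 4) | addr[35:32]
+ *
+ * e.g. addr = 0x923456780 and len = 0x40 give lo = 0x23456780 and
+ * hi_n_len = 0x0409.
+ */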
+
+/**
+ * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+ struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
+ struct iwl_tfd *tfd;
+ struct pci_dev *dev = priv->pci_dev;
+ int index = txq->q.read_ptr;
+ int i;
+ int num_tbs;
+
+ tfd = &tfd_tmp[index];
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl4965_tfd_get_num_tbs(tfd);
+
+ if (num_tbs >= IWL_NUM_OF_TBS) {
+ IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+ /* @todo issue fatal error, this is quite a serious situation */
+ return;
+ }
+
+ /* Unmap tx_cmd */
+ if (num_tbs)
+ pci_unmap_single(dev,
+ dma_unmap_addr(&txq->meta[index], mapping),
+ dma_unmap_len(&txq->meta[index], len),
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Unmap chunks, if any. */
+ for (i = 1; i < num_tbs; i++)
+ pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
+ iwl4965_tfd_tb_get_len(tfd, i),
+ PCI_DMA_TODEVICE);
+
+ /* free SKB */
+ if (txq->txb) {
+ struct sk_buff *skb;
+
+ skb = txq->txb[txq->q.read_ptr].skb;
+
+ /* can be called from irqs-disabled context */
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ txq->txb[txq->q.read_ptr].skb = NULL;
+ }
+ }
+}
+
+int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr, u16 len,
+ u8 reset, u8 pad)
+{
+ struct iwl_queue *q;
+ struct iwl_tfd *tfd, *tfd_tmp;
+ u32 num_tbs;
+
+ q = &txq->q;
+ tfd_tmp = (struct iwl_tfd *)txq->tfds;
+ tfd = &tfd_tmp[q->write_ptr];
+
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
+
+ num_tbs = iwl4965_tfd_get_num_tbs(tfd);
+
+ /* Each TFD can point to a maximum of 20 Tx buffers */
+ if (num_tbs >= IWL_NUM_OF_TBS) {
+ IWL_ERR(priv, "Error can not send more than %d chunks\n",
+ IWL_NUM_OF_TBS);
+ return -EINVAL;
+ }
+
+ BUG_ON(addr & ~DMA_BIT_MASK(36));
+ if (unlikely(addr & ~IWL_TX_DMA_MASK))
+ IWL_ERR(priv, "Unaligned address = %llx\n",
+ (unsigned long long)addr);
+
+ iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
+
+ return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
+{
+ int txq_id = txq->q.id;
+
+ /* Circular buffer (TFD queue in DRAM) physical base address */
+ iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+
+ return 0;
+}
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_alive_resp *palive;
+ struct delayed_work *pwork;
+
+ palive = &pkt->u.alive_frame;
+
+ IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
+ "0x%01X 0x%01X\n",
+ palive->is_valid, palive->ver_type,
+ palive->ver_subtype);
+
+ if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+ IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
+ memcpy(&priv->card_alive_init,
+ &pkt->u.alive_frame,
+ sizeof(struct iwl_init_alive_resp));
+ pwork = &priv->init_alive_start;
+ } else {
+ IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+ memcpy(&priv->card_alive, &pkt->u.alive_frame,
+ sizeof(struct iwl_alive_resp));
+ pwork = &priv->alive_start;
+ }
+
+ /* We delay the ALIVE response by 5ms to
+ * give the HW RF Kill time to activate... */
+ if (palive->is_valid == UCODE_VALID_OK)
+ queue_delayed_work(priv->workqueue, pwork,
+ msecs_to_jiffies(5));
+ else
+ IWL_WARN(priv, "uCode did not respond OK.\n");
+}
+
+/**
+ * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
+ *
+ * This callback is provided in order to send a statistics request.
+ *
+ * This timer function is continually reset to execute within
+ * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
+ * was received. We need to ensure we receive the statistics in order
+ * to update the temperature used for calibrating the TXPOWER.
+ */
+static void iwl4965_bg_statistics_periodic(unsigned long data)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)data;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ /* don't send host command if rf-kill is on */
+ if (!iwl_legacy_is_ready_rf(priv))
+ return;
+
+ iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
+}
+
+
+static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base,
+ u32 start_idx, u32 num_events,
+ u32 mode)
+{
+ u32 i;
+ u32 ptr; /* SRAM byte address of log data */
+ u32 ev, time, data; /* event log data */
+ unsigned long reg_flags;
+
+ if (mode == 0)
+ ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
+ else
+ ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
+
+ /* Make sure device is powered up for SRAM reads */
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (iwl_grab_nic_access(priv)) {
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return;
+ }
+
+ /* Set starting address; reads will auto-increment */
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+ rmb();
+
+ /*
+ * "time" is actually "data" for mode 0 (no timestamp).
+ * place event id # at far right for easier visual parsing.
+ */
+ for (i = 0; i < num_events; i++) {
+ ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (mode == 0) {
+ trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
+ 0, time, ev);
+ } else {
+ data = _iwl_legacy_read_direct32(priv,
+ HBUS_TARG_MEM_RDAT);
+ trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
+ time, data, ev);
+ }
+ }
+ /* Allow device to power down */
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static void iwl4965_continuous_event_trace(struct iwl_priv *priv)
+{
+ u32 capacity; /* event log capacity in # entries */
+ u32 base; /* SRAM byte address of event log header */
+ u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
+ u32 num_wraps; /* # times uCode wrapped to top of log */
+ u32 next_entry; /* index of next entry to be written by uCode */
+
+ if (priv->ucode_type == UCODE_INIT)
+ base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+ else
+ base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+ if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ capacity = iwl_legacy_read_targ_mem(priv, base);
+ num_wraps = iwl_legacy_read_targ_mem(priv,
+ base + (2 * sizeof(u32)));
+ mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
+ next_entry = iwl_legacy_read_targ_mem(priv,
+ base + (3 * sizeof(u32)));
+ } else
+ return;
+
+ if (num_wraps == priv->event_log.num_wraps) {
+ iwl4965_print_cont_event_trace(priv,
+ base, priv->event_log.next_entry,
+ next_entry - priv->event_log.next_entry,
+ mode);
+ priv->event_log.non_wraps_count++;
+ } else {
+ if ((num_wraps - priv->event_log.num_wraps) > 1)
+ priv->event_log.wraps_more_count++;
+ else
+ priv->event_log.wraps_once_count++;
+ trace_iwlwifi_legacy_dev_ucode_wrap_event(priv,
+ num_wraps - priv->event_log.num_wraps,
+ next_entry, priv->event_log.next_entry);
+ if (next_entry < priv->event_log.next_entry) {
+ iwl4965_print_cont_event_trace(priv, base,
+ priv->event_log.next_entry,
+ capacity - priv->event_log.next_entry,
+ mode);
+
+ iwl4965_print_cont_event_trace(priv, base, 0,
+ next_entry, mode);
+ } else {
+ iwl4965_print_cont_event_trace(priv, base,
+ next_entry, capacity - next_entry,
+ mode);
+
+ iwl4965_print_cont_event_trace(priv, base, 0,
+ next_entry, mode);
+ }
+ }
+ priv->event_log.num_wraps = num_wraps;
+ priv->event_log.next_entry = next_entry;
+}
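+
+/*
+ * Sketch of the uCode event log header read above: four u32 words at
+ * "base" in SRAM, followed by the log entries themselves:
+ *
+ *   base + 0x00  capacity   - number of entries the log can hold
+ *   base + 0x04  mode       - 0 = no timestamp, 1 = timestamp recorded
+ *   base + 0x08  num_wraps  - times uCode wrapped to the top of the log
+ *   base + 0x0c  next_entry - index of the next entry uCode will write
+ *
+ * Each entry is two u32s (event, data) in mode 0 or three u32s
+ * (event, time, data) in mode 1, as walked by
+ * iwl4965_print_cont_event_trace() above.
+ */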
+
+/**
+ * iwl4965_bg_ucode_trace - Timer callback to log ucode event
+ *
+ * The timer is continually set to execute every
+ * UCODE_TRACE_PERIOD milliseconds after the last timer expired.
+ * This function performs the continuous uCode event logging operation,
+ * if enabled.
+ */
+static void iwl4965_bg_ucode_trace(unsigned long data)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)data;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (priv->event_log.ucode_trace) {
+ iwl4965_continuous_event_trace(priv);
+ /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
+ mod_timer(&priv->ucode_trace,
+ jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+ }
+}
+
+static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl4965_beacon_notif *beacon =
+ (struct iwl4965_beacon_notif *)pkt->u.raw;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+ IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
+ "tsf %d %d rate %d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf),
+ le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+ priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ IWL_DEBUG_POWER(priv, "Stop all queues\n");
+
+ if (priv->mac80211_registered)
+ ieee80211_stop_queues(priv->hw);
+
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+ iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+ spin_lock_irqsave(&priv->reg_lock, flags);
+ if (!iwl_grab_nic_access(priv))
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+ unsigned long status = priv->status;
+
+ IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_CARD_DISABLED) ?
+ "Reached" : "Not reached");
+
+ if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
+ CT_CARD_DISABLED)) {
+
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
+ HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+ if (!(flags & RXON_CARD_DISABLED)) {
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+ iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
+ HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+ }
+ }
+
+ if (flags & CT_CARD_DISABLED)
+ iwl4965_perform_ct_kill_task(priv);
+
+ if (flags & HW_CARD_DISABLED)
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+ if (!(flags & RXON_CARD_DISABLED))
+ iwl_legacy_scan_cancel(priv);
+
+ if ((test_bit(STATUS_RF_KILL_HW, &status) !=
+ test_bit(STATUS_RF_KILL_HW, &priv->status)))
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
+ else
+ wake_up_interruptible(&priv->wait_command_queue);
+}
+
+/**
+ * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
+{
+ priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
+ priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
+ priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
+ priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+ iwl_legacy_rx_spectrum_measure_notif;
+ priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
+ priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
+ iwl_legacy_rx_pm_debug_statistics_notif;
+ priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
+
+ /*
+ * The same handler is used both for the REPLY to a discrete
+ * statistics request from the host and for the periodic
+ * statistics notifications (after received beacons) from the uCode.
+ */
+ priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
+ priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
+
+ iwl_legacy_setup_rx_scan_handlers(priv);
+
+ /* status change handler */
+ priv->rx_handlers[CARD_STATE_NOTIFICATION] =
+ iwl4965_rx_card_state_notif;
+
+ priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
+ iwl4965_rx_missed_beacon_notif;
+ /* Rx handlers */
+ priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
+ priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
+ /* block ack */
+ priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
+ /* Set up hardware specific Rx handlers */
+ priv->cfg->ops->lib->rx_handler_setup(priv);
+}
+
+/**
+ * iwl4965_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the priv->rx_handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+void iwl4965_rx_handle(struct iwl_priv *priv)
+{
+ struct iwl_rx_mem_buffer *rxb;
+ struct iwl_rx_packet *pkt;
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ u32 r, i;
+ int reclaim;
+ unsigned long flags;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty;
+
+ /* uCode's read index (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ i = rxq->read;
+
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
+
+ /* calculate total frames that need to be restocked after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+
+ while (i != r) {
+ int len;
+
+ rxb = rxq->queue[i];
+
+ /* If an RXB doesn't have a Rx queue slot associated with it,
+ * then a bug has been introduced in the queue refilling
+ * routines -- catch it here */
+ BUG_ON(rxb == NULL);
+
+ rxq->queue[i] = NULL;
+
+ pci_unmap_page(priv->pci_dev, rxb->page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ pkt = rxb_addr(rxb);
+
+ len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+ trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
+ (pkt->hdr.cmd != REPLY_RX) &&
+ (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
+ (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
+ (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
+ (pkt->hdr.cmd != REPLY_TX);
+
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * rx_handlers table. See iwl4965_setup_rx_handlers() */
+ if (priv->rx_handlers[pkt->hdr.cmd]) {
+ IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
+ i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
+ pkt->hdr.cmd);
+ priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
+ priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
+ } else {
+ /* No handling needed */
+ IWL_DEBUG_RX(priv,
+ "r %d i %d No handler needed for %s, 0x%02x\n",
+ r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
+ pkt->hdr.cmd);
+ }
+
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some rx_handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking iwl_legacy_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxb->page)
+ iwl_legacy_tx_cmd_complete(priv, rxb);
+ else
+ IWL_WARN(priv, "Claim null rxb?\n");
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
+ 0, PAGE_SIZE << priv->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so ucode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ iwl4965_rx_replenish_now(priv);
+ count = 0;
+ }
+ }
+ }
+
+ /* Backtrack one entry */
+ rxq->read = i;
+ if (fill_rx)
+ iwl4965_rx_replenish_now(priv);
+ else
+ iwl4965_rx_queue_restock(priv);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
+{
+ /* wait to make sure we flush pending tasklet */
+ synchronize_irq(priv->pci_dev->irq);
+ tasklet_kill(&priv->irq_tasklet);
+}
+
+static void iwl4965_irq_tasklet(struct iwl_priv *priv)
+{
+ u32 inta, handled = 0;
+ u32 inta_fh;
+ unsigned long flags;
+ u32 i;
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ u32 inta_mask;
+#endif
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ * and will clear only when CSR_FH_INT_STATUS gets cleared. */
+ inta = iwl_read32(priv, CSR_INT);
+ iwl_write32(priv, CSR_INT, inta);
+
+ /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+ * Any new interrupts that happen after this, either while we're
+ * in this tasklet, or later, will show up in next ISR/tasklet. */
+ inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+ iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
+ /* just for debug */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK);
+ IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
+ inta, inta_mask, inta_fh);
+ }
+#endif
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+ * atomic, make sure that inta covers all the interrupts that
+ * we've discovered, even if FH interrupt came in just after
+ * reading CSR_INT. */
+ if (inta_fh & CSR49_FH_INT_RX_MASK)
+ inta |= CSR_INT_BIT_FH_RX;
+ if (inta_fh & CSR49_FH_INT_TX_MASK)
+ inta |= CSR_INT_BIT_FH_TX;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IWL_ERR(priv, "Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ iwl_legacy_disable_interrupts(priv);
+
+ priv->isr_stats.hw++;
+ iwl_legacy_irq_handle_error(priv);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ return;
+ }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
+ "the frame/frames.\n");
+ priv->isr_stats.sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ IWL_DEBUG_ISR(priv, "Alive interrupt\n");
+ priv->isr_stats.alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* HW RF KILL switch toggled */
+ if (inta & CSR_INT_BIT_RF_KILL) {
+ int hw_rf_kill = 0;
+ if (!(iwl_read32(priv, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rf_kill = 1;
+
+ IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
+ hw_rf_kill ? "disable radio" : "enable radio");
+
+ priv->isr_stats.rfkill++;
+
+ /* The driver only loads uCode once, when setting the interface up.
+ * It allows loading the uCode even if the radio is killed,
+ * hence update the killswitch state here. The rfkill handler
+ * will take care of restarting if needed.
+ */
+ if (!test_bit(STATUS_ALIVE, &priv->status)) {
+ if (hw_rf_kill)
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
+ }
+
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta & CSR_INT_BIT_CT_KILL) {
+ IWL_ERR(priv, "Microcode CT kill error detected.\n");
+ priv->isr_stats.ctkill++;
+ handled |= CSR_INT_BIT_CT_KILL;
+ }
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IWL_ERR(priv, "Microcode SW error detected. "
+ " Restarting 0x%X.\n", inta);
+ priv->isr_stats.sw++;
+ iwl_legacy_irq_handle_error(priv);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /*
+ * uCode wakes up after power-down sleep.
+ * Tell device about any new tx or host commands enqueued,
+ * and about any Rx buffers made available while asleep.
+ */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
+ iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
+ for (i = 0; i < priv->hw_params.max_txq_num; i++)
+ iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
+ priv->isr_stats.wakeup++;
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+ * notifications from uCode come through here */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ iwl4965_rx_handle(priv);
+ priv->isr_stats.rx++;
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ }
+
+ /* This "Tx" DMA channel is used only for loading uCode */
+ if (inta & CSR_INT_BIT_FH_TX) {
+ IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
+ priv->isr_stats.tx++;
+ handled |= CSR_INT_BIT_FH_TX;
+ /* Wake up uCode load routine, now that load is complete */
+ priv->ucode_write_complete = 1;
+ wake_up_interruptible(&priv->wait_command_queue);
+ }
+
+ if (inta & ~handled) {
+ IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ priv->isr_stats.unhandled++;
+ }
+
+ if (inta & ~(priv->inta_mask)) {
+ IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
+ inta & ~priv->inta_mask);
+ IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
+ }
+
+ /* Re-enable all interrupts */
+ /* only re-enable if disabled by irq */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status))
+ iwl_legacy_enable_interrupts(priv);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
+ inta = iwl_read32(priv, CSR_INT);
+ inta_mask = iwl_read32(priv, CSR_INT_MASK);
+ inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+ IWL_DEBUG_ISR(priv,
+ "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+ "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+ }
+#endif
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level managed through sysfs below is a per-device debug
+ * level that is used instead of the global debug level when it is set.
+ */
+static ssize_t iwl4965_show_debug_level(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct iwl_priv *priv = dev_get_drvdata(d);
+ return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
+}
+static ssize_t iwl4965_store_debug_level(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iwl_priv *priv = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret)
+ IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
+ else {
+ priv->debug_level = val;
+ if (iwl_legacy_alloc_traffic_mem(priv))
+ IWL_ERR(priv,
+ "Not enough memory to generate traffic log\n");
+ }
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
+ iwl4965_show_debug_level, iwl4965_store_debug_level);
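+
+/*
+ * Usage sketch (the path is illustrative; the device directory depends on
+ * the PCI address and interface name):
+ *
+ *   cat /sys/class/net/wlan0/device/debug_level
+ *   echo 0x47 > /sys/class/net/wlan0/device/debug_level
+ */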
+
+
+#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
+
+
+static ssize_t iwl4965_show_temperature(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct iwl_priv *priv = dev_get_drvdata(d);
+
+ if (!iwl_legacy_is_alive(priv))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", priv->temperature);
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
+
+static ssize_t iwl4965_show_tx_power(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct iwl_priv *priv = dev_get_drvdata(d);
+
+ if (!iwl_legacy_is_ready_rf(priv))
+ return sprintf(buf, "off\n");
+ else
+ return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
+}
+
+static ssize_t iwl4965_store_tx_power(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iwl_priv *priv = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ IWL_INFO(priv, "%s is not in decimal form.\n", buf);
+ else {
+ ret = iwl_legacy_set_tx_power(priv, val, false);
+ if (ret)
+ IWL_ERR(priv, "failed setting tx power (0x%d).\n",
+ ret);
+ else
+ ret = count;
+ }
+ return ret;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
+ iwl4965_show_tx_power, iwl4965_store_tx_power);
+
+static struct attribute *iwl_sysfs_entries[] = {
+ &dev_attr_temperature.attr,
+ &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ &dev_attr_debug_level.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group iwl_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = iwl_sysfs_entries,
+};
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
+{
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+ iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
+}
+
+static void iwl4965_nic_start(struct iwl_priv *priv)
+{
+ /* Remove all resets to allow NIC to operate */
+ iwl_write32(priv, CSR_RESET, 0);
+}
+
+static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
+ void *context);
+static int iwl4965_mac_setup_register(struct iwl_priv *priv,
+ u32 max_probe_length);
+
+static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
+{
+ const char *name_pre = priv->cfg->fw_name_pre;
+ char tag[8];
+
+ if (first) {
+ priv->fw_index = priv->cfg->ucode_api_max;
+ sprintf(tag, "%d", priv->fw_index);
+ } else {
+ priv->fw_index--;
+ sprintf(tag, "%d", priv->fw_index);
+ }
+
+ if (priv->fw_index < priv->cfg->ucode_api_min) {
+ IWL_ERR(priv, "no suitable firmware found!\n");
+ return -ENOENT;
+ }
+
+ sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+
+ IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
+ priv->firmware_name);
+
+ return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
+ &priv->pci_dev->dev, GFP_KERNEL, priv,
+ iwl4965_ucode_callback);
+}
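+
+/*
+ * Example of the request sequence above, assuming a cfg with
+ * fw_name_pre "iwlwifi-4965-", ucode_api_max 2 and ucode_api_min 2:
+ * the first call asks for "iwlwifi-4965-2.ucode"; each retry decrements
+ * the API tag until it drops below ucode_api_min, at which point
+ * -ENOENT is returned.
+ */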
+
+struct iwl4965_firmware_pieces {
+ const void *inst, *data, *init, *init_data, *boot;
+ size_t inst_size, data_size, init_size, init_data_size, boot_size;
+};
+
+static int iwl4965_load_firmware(struct iwl_priv *priv,
+ const struct firmware *ucode_raw,
+ struct iwl4965_firmware_pieces *pieces)
+{
+ struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+ u32 api_ver, hdr_size;
+ const u8 *src;
+
+ priv->ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IWL_UCODE_API(priv->ucode_ver);
+
+ switch (api_ver) {
+ default:
+ case 0:
+ case 1:
+ case 2:
+ hdr_size = 24;
+ if (ucode_raw->size < hdr_size) {
+ IWL_ERR(priv, "File size too small!\n");
+ return -EINVAL;
+ }
+ pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->v1.data_size);
+ pieces->init_size = le32_to_cpu(ucode->v1.init_size);
+ pieces->init_data_size =
+ le32_to_cpu(ucode->v1.init_data_size);
+ pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
+ src = ucode->v1.data;
+ break;
+ }
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size != hdr_size + pieces->inst_size +
+ pieces->data_size + pieces->init_size +
+ pieces->init_data_size + pieces->boot_size) {
+
+ IWL_ERR(priv,
+ "uCode file size %d does not match expected size\n",
+ (int)ucode_raw->size);
+ return -EINVAL;
+ }
+
+ pieces->inst = src;
+ src += pieces->inst_size;
+ pieces->data = src;
+ src += pieces->data_size;
+ pieces->init = src;
+ src += pieces->init_size;
+ pieces->init_data = src;
+ src += pieces->init_data_size;
+ pieces->boot = src;
+ src += pieces->boot_size;
+
+ return 0;
+}
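+
+/*
+ * Sketch of the uCode file layout parsed above for API versions 0-2:
+ * a 24-byte header (six le32 words) followed by the images back to back:
+ *
+ *   ver | inst_size | data_size | init_size | init_data_size | boot_size
+ *   [runtime inst][runtime data][init inst][init data][boot inst]
+ */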
+
+/**
+ * iwl4965_ucode_callback - callback when firmware was loaded
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void
+iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
+{
+ struct iwl_priv *priv = context;
+ struct iwl_ucode_header *ucode;
+ int err;
+ struct iwl4965_firmware_pieces pieces;
+ const unsigned int api_max = priv->cfg->ucode_api_max;
+ const unsigned int api_min = priv->cfg->ucode_api_min;
+ u32 api_ver;
+
+ u32 max_probe_length = 200;
+ u32 standard_phy_calibration_size =
+ IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+ memset(&pieces, 0, sizeof(pieces));
+
+ if (!ucode_raw) {
+ if (priv->fw_index <= priv->cfg->ucode_api_max)
+ IWL_ERR(priv,
+ "request for firmware file '%s' failed.\n",
+ priv->firmware_name);
+ goto try_again;
+ }
+
+ IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
+ priv->firmware_name, ucode_raw->size);
+
+ /* Make sure that we got at least the API version number */
+ if (ucode_raw->size < 4) {
+ IWL_ERR(priv, "File size way too small!\n");
+ goto try_again;
+ }
+
+ /* Data from ucode file: header followed by uCode images */
+ ucode = (struct iwl_ucode_header *)ucode_raw->data;
+
+ err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
+
+ if (err)
+ goto try_again;
+
+ api_ver = IWL_UCODE_API(priv->ucode_ver);
+
+ /*
+ * api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from firmware header from here on forward
+ */
+ if (api_ver < api_min || api_ver > api_max) {
+ IWL_ERR(priv,
+ "Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n",
+ api_max, api_ver);
+ goto try_again;
+ }
+
+ if (api_ver != api_max)
+ IWL_ERR(priv,
+ "Firmware has old API version. Expected v%u, "
+ "got v%u. New firmware can be obtained "
+ "from http://www.intellinuxwireless.org.\n",
+ api_max, api_ver);
+
+ IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
+ IWL_UCODE_MAJOR(priv->ucode_ver),
+ IWL_UCODE_MINOR(priv->ucode_ver),
+ IWL_UCODE_API(priv->ucode_ver),
+ IWL_UCODE_SERIAL(priv->ucode_ver));
+
+ snprintf(priv->hw->wiphy->fw_version,
+ sizeof(priv->hw->wiphy->fw_version),
+ "%u.%u.%u.%u",
+ IWL_UCODE_MAJOR(priv->ucode_ver),
+ IWL_UCODE_MINOR(priv->ucode_ver),
+ IWL_UCODE_API(priv->ucode_ver),
+ IWL_UCODE_SERIAL(priv->ucode_ver));
+
+ /*
+ * For any of the failures below (before allocating pci memory)
+ * we will try to load a version with a smaller API -- maybe the
+ * user just got a corrupted version of the latest API.
+ */
+
+ IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
+ priv->ucode_ver);
+ IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
+ pieces.inst_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
+ pieces.data_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
+ pieces.init_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
+ pieces.init_data_size);
+ IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
+ pieces.boot_size);
+
+ /* Verify that uCode images will fit in card's SRAM */
+ if (pieces.inst_size > priv->hw_params.max_inst_size) {
+ IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
+ pieces.inst_size);
+ goto try_again;
+ }
+
+ if (pieces.data_size > priv->hw_params.max_data_size) {
+ IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
+ pieces.data_size);
+ goto try_again;
+ }
+
+ if (pieces.init_size > priv->hw_params.max_inst_size) {
+ IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
+ pieces.init_size);
+ goto try_again;
+ }
+
+ if (pieces.init_data_size > priv->hw_params.max_data_size) {
+ IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
+ pieces.init_data_size);
+ goto try_again;
+ }
+
+ if (pieces.boot_size > priv->hw_params.max_bsm_size) {
+ IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
+ pieces.boot_size);
+ goto try_again;
+ }
+
+ /* Allocate ucode buffers for card's bus-master loading ... */
+
+ /* Runtime instructions and 2 copies of data:
+ * 1) unmodified from disk
+ * 2) backup cache for save/restore during power-downs */
+ priv->ucode_code.len = pieces.inst_size;
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
+
+ priv->ucode_data.len = pieces.data_size;
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
+
+ priv->ucode_data_backup.len = pieces.data_size;
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+
+ if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
+ !priv->ucode_data_backup.v_addr)
+ goto err_pci_alloc;
+
+ /* Initialization instructions and data */
+ if (pieces.init_size && pieces.init_data_size) {
+ priv->ucode_init.len = pieces.init_size;
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
+
+ priv->ucode_init_data.len = pieces.init_data_size;
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
+
+ if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Bootstrap (instructions only, no data) */
+ if (pieces.boot_size) {
+ priv->ucode_boot.len = pieces.boot_size;
+ iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
+
+ if (!priv->ucode_boot.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Now that we can no longer fail, copy information */
+
+ priv->sta_key_max_num = STA_KEY_MAX_NUM;
+
+ /* Copy images into buffers for card's bus-master reads ... */
+
+ /* Runtime instructions (first block of data in file) */
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
+ pieces.inst_size);
+ memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
+
+ IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+ priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
+
+ /*
+ * Runtime data
+ * NOTE: Copy into backup buffer will be done in iwl_up()
+ */
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
+ pieces.data_size);
+ memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
+ memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+ /* Initialization instructions */
+ if (pieces.init_size) {
+ IWL_DEBUG_INFO(priv,
+ "Copying (but not loading) init instr len %Zd\n",
+ pieces.init_size);
+ memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
+ }
+
+ /* Initialization data */
+ if (pieces.init_data_size) {
+ IWL_DEBUG_INFO(priv,
+ "Copying (but not loading) init data len %Zd\n",
+ pieces.init_data_size);
+ memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
+ pieces.init_data_size);
+ }
+
+ /* Bootstrap instructions */
+ IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
+ pieces.boot_size);
+ memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
+
+ /*
+ * figure out the offset of the chain noise reset and gain commands
+ * based on the size of the standard phy calibration commands table
+ */
+ priv->_4965.phy_calib_chain_noise_reset_cmd =
+ standard_phy_calibration_size;
+ priv->_4965.phy_calib_chain_noise_gain_cmd =
+ standard_phy_calibration_size + 1;
+
+ /**************************************************
+ * This is still part of probe() in a sense...
+ *
+ * 9. Setup and register with mac80211 and debugfs
+ **************************************************/
+ err = iwl4965_mac_setup_register(priv, max_probe_length);
+ if (err)
+ goto out_unbind;
+
+ err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
+ if (err)
+ IWL_ERR(priv,
+ "failed to create debugfs files. Ignoring error: %d\n", err);
+
+ err = sysfs_create_group(&priv->pci_dev->dev.kobj,
+ &iwl_attribute_group);
+ if (err) {
+ IWL_ERR(priv, "failed to create sysfs device attributes\n");
+ goto out_unbind;
+ }
+
+ /* We have our copies now, allow OS release its copies */
+ release_firmware(ucode_raw);
+ complete(&priv->_4965.firmware_loading_complete);
+ return;
+
+ try_again:
+ /* try next, if any */
+ if (iwl4965_request_firmware(priv, false))
+ goto out_unbind;
+ release_firmware(ucode_raw);
+ return;
+
+ err_pci_alloc:
+ IWL_ERR(priv, "failed to allocate pci memory\n");
+ iwl4965_dealloc_ucode_pci(priv);
+ out_unbind:
+ complete(&priv->_4965.firmware_loading_complete);
+ device_release_driver(&priv->pci_dev->dev);
+ release_firmware(ucode_raw);
+}
+
+static const char * const desc_lookup_text[] = {
+ "OK",
+ "FAIL",
+ "BAD_PARAM",
+ "BAD_CHECKSUM",
+ "NMI_INTERRUPT_WDG",
+ "SYSASSERT",
+ "FATAL_ERROR",
+ "BAD_COMMAND",
+ "HW_ERROR_TUNE_LOCK",
+ "HW_ERROR_TEMPERATURE",
+ "ILLEGAL_CHAN_FREQ",
+ "VCC_NOT_STABLE",
+ "FH_ERROR",
+ "NMI_INTERRUPT_HOST",
+ "NMI_INTERRUPT_ACTION_PT",
+ "NMI_INTERRUPT_UNKNOWN",
+ "UCODE_VERSION_MISMATCH",
+ "HW_ERROR_ABS_LOCK",
+ "HW_ERROR_CAL_LOCK_FAIL",
+ "NMI_INTERRUPT_INST_ACTION_PT",
+ "NMI_INTERRUPT_DATA_ACTION_PT",
+ "NMI_TRM_HW_ER",
+ "NMI_INTERRUPT_TRM",
+ "NMI_INTERRUPT_BREAK_POINT"
+ "DEBUG_0",
+ "DEBUG_1",
+ "DEBUG_2",
+ "DEBUG_3",
+};
+
+static struct { char *name; u8 num; } advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *iwl4965_desc_lookup(u32 num)
+{
+ int i;
+ int max = ARRAY_SIZE(desc_lookup_text);
+
+ if (num < max)
+ return desc_lookup_text[num];
+
+ max = ARRAY_SIZE(advanced_lookup) - 1;
+ for (i = 0; i < max; i++) {
+ if (advanced_lookup[i].num == num)
+ break;
+ }
+ return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
+void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
+{
+ u32 data2, line;
+ u32 desc, time, count, base, data1;
+ u32 blink1, blink2, ilink1, ilink2;
+ u32 pc, hcmd;
+
+ if (priv->ucode_type == UCODE_INIT) {
+ base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+ } else {
+ base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
+ }
+
+ if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ IWL_ERR(priv,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
+ return;
+ }
+
+ count = iwl_legacy_read_targ_mem(priv, base);
+
+ if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+ IWL_ERR(priv, "Start IWL Error Log Dump:\n");
+ IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
+ priv->status, count);
+ }
+
+ desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
+ priv->isr_stats.err_code = desc;
+ pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
+ blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
+ blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
+ ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
+ ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
+ data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
+ data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
+ line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
+ time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
+ hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
+
+ trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
+ time, data1, data2, line,
+ blink1, blink2, ilink1, ilink2);
+
+ IWL_ERR(priv, "Desc Time "
+ "data1 data2 line\n");
+ IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
+ iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
+ IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
+ IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
+ pc, blink1, blink2, ilink1, ilink2, hcmd);
+}
+
+#define EVENT_START_OFFSET (4 * sizeof(u32))
+
+/**
+ * iwl4965_print_event_log - Dump uCode event log to syslog or a buffer
+ *
+ */
+static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
+ u32 num_events, u32 mode,
+ int pos, char **buf, size_t bufsz)
+{
+ u32 i;
+ u32 base; /* SRAM byte address of event log header */
+ u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
+ u32 ptr; /* SRAM byte address of log data */
+ u32 ev, time, data; /* event log data */
+ unsigned long reg_flags;
+
+ if (num_events == 0)
+ return pos;
+
+ if (priv->ucode_type == UCODE_INIT) {
+ base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
+ } else {
+ base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+ }
+
+ if (mode == 0)
+ event_size = 2 * sizeof(u32);
+ else
+ event_size = 3 * sizeof(u32);
+
+ ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
+
+ /* Make sure device is powered up for SRAM reads */
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ iwl_grab_nic_access(priv);
+
+ /* Set starting address; reads will auto-increment */
+ _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+ rmb();
+
+ /* "time" is actually "data" for mode 0 (no timestamp).
+ * place event id # at far right for easier visual parsing. */
+ for (i = 0; i < num_events; i++) {
+ ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (mode == 0) {
+ /* data, ev */
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ } else {
+ trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
+ time, ev);
+ IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ }
+ } else {
+ data = _iwl_legacy_read_direct32(priv,
+ HBUS_TARG_MEM_RDAT);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOGT:%010u:0x%08x:%04u\n",
+ time, data, ev);
+ } else {
+ IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
+ time, data, ev);
+ trace_iwlwifi_legacy_dev_ucode_event(priv, time,
+ data, ev);
+ }
+ }
+ }
+
+ /* Allow device to power down */
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return pos;
+}
+
+/**
+ * iwl4965_print_last_event_logs - Dump the newest event log entries to syslog
+ */
+static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+ u32 num_wraps, u32 next_entry,
+ u32 size, u32 mode,
+ int pos, char **buf, size_t bufsz)
+{
+ /*
+ * display the newest DEFAULT_LOG_ENTRIES entries,
+ * i.e. the entries just before the next one that uCode would fill.
+ */
+ if (num_wraps) {
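+ /*
+ * Log has wrapped: the newest entries may straddle the end of the
+ * circular buffer, so dump the tail of the buffer first and then
+ * the beginning.
+ */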
+ if (next_entry < size) {
+ pos = iwl4965_print_event_log(priv,
+ capacity - (size - next_entry),
+ size - next_entry, mode,
+ pos, buf, bufsz);
+ pos = iwl4965_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
+ } else
+ pos = iwl4965_print_event_log(priv, next_entry - size,
+ size, mode, pos, buf, bufsz);
+ } else {
+ if (next_entry < size) {
+ pos = iwl4965_print_event_log(priv, 0, next_entry,
+ mode, pos, buf, bufsz);
+ } else {
+ pos = iwl4965_print_event_log(priv, next_entry - size,
+ size, mode, pos, buf, bufsz);
+ }
+ }
+ return pos;
+}
+
+#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
+
+int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display)
+{
+ u32 base; /* SRAM byte address of event log header */
+ u32 capacity; /* event log capacity in # entries */
+ u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
+ u32 num_wraps; /* # times uCode wrapped to top of log */
+ u32 next_entry; /* index of next entry to be written by uCode */
+ u32 size; /* # entries that we'll print */
+ int pos = 0;
+ size_t bufsz = 0;
+
+ if (priv->ucode_type == UCODE_INIT) {
+ base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
+ } else {
+ base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+ }
+
+ if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ IWL_ERR(priv,
+ "Invalid event log pointer 0x%08X for %s uCode\n",
+ base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
+ return -EINVAL;
+ }
+
+ /* event log header */
+ capacity = iwl_legacy_read_targ_mem(priv, base);
+ mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
+ num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
+ next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
+
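+ /* After a wrap every slot holds a valid entry; before that only
+  * the entries written so far (next_entry) are valid. */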
+ size = num_wraps ? capacity : next_entry;
+
+ /* bail out if nothing in log */
+ if (size == 0) {
+ IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
+ return pos;
+ }
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
+ size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+ ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#else
+ size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+ ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#endif
+ IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
+ size);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+ if (display) {
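+ /* Reserve ~48 bytes per entry, comfortably more than the longest formatted line */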
+ if (full_log)
+ bufsz = capacity * 48;
+ else
+ bufsz = size * 48;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ }
+ if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
+ /*
+ * if uCode has wrapped back to top of log,
+ * start at the oldest entry,
+ * i.e. the next one that uCode would fill.
+ */
+ if (num_wraps)
+ pos = iwl4965_print_event_log(priv, next_entry,
+ capacity - next_entry, mode,
+ pos, buf, bufsz);
+ /* (then/else) start at top of log */
+ pos = iwl4965_print_event_log(priv, 0,
+ next_entry, mode, pos, buf, bufsz);
+ } else
+ pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
+#else
+ pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
+#endif
+ return pos;
+}
+
+static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
+{
+ struct iwl_ct_kill_config cmd;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ cmd.critical_temperature_R =
+ cpu_to_le32(priv->hw_params.ct_kill_threshold);
+
+ ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
+ else
+ IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
+ "succeeded, "
+ "critical temperature is %d\n",
+ priv->hw_params.ct_kill_threshold);
+}
+
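+ /* Queues 0-3 feed the four EDCA FIFOs (VO/VI/BE/BK), queue 4 is the
+  * command FIFO, and the remaining queues start out unused. */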
+static const s8 default_queue_to_tx_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+ IWL49_CMD_FIFO_NUM,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_UNUSED,
+};
+
+static int iwl4965_alive_notify(struct iwl_priv *priv)
+{
+ u32 a;
+ unsigned long flags;
+ int i, chan;
+ u32 reg_val;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Clear 4965's internal Tx Scheduler data base */
+ priv->scd_base_addr = iwl_legacy_read_prph(priv,
+ IWL49_SCD_SRAM_BASE_ADDR);
+ a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
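+ /* Zero the context data, Tx status bitmap, and translation table regions in turn */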
+ for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
+ iwl_legacy_write_targ_mem(priv, a, 0);
+ for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
+ iwl_legacy_write_targ_mem(priv, a, 0);
+ for (; a < priv->scd_base_addr +
+ IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
+ iwl_legacy_write_targ_mem(priv, a, 0);
+
+ /* Tell 4965 where to find Tx byte count tables */
+ iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
+ priv->scd_bc_tbls.dma >> 10);
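+ /* (the scheduler register takes the DRAM address in 1KB units, hence the >> 10) */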
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
+ iwl_legacy_write_direct32(priv,
+ FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
+ iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
+ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ /* Disable chain mode for all queues */
+ iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
+
+ /* Initialize each Tx queue (including the command queue) */
+ for (i = 0; i < priv->hw_params.max_txq_num; i++) {
+
+ /* TFD circular buffer read/write indexes */
+ iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
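+ /* HBUS_TARG_WRPTR takes the write index in bits 0-7 and the queue number in bits 8-15 */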
+ iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
+
+ /* Max Tx Window size for Scheduler-ACK mode */
+ iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+ IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
+ (SCD_WIN_SIZE <<
+ IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+ IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+ /* Frame limit */
+ iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+ IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
+ sizeof(u32),
+ (SCD_FRAME_LIMIT <<
+ IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+ }
+ iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
+ (1 << priv->hw_params.max_txq_num) - 1);
+
+ /* Activate all Tx DMA/FIFO channels */
+ iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
+
+ iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
+
+ /* make sure all queues are not stopped */
+ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&priv->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all queues first */
+ priv->txq_ctx_active_msk = 0;
+ /* Map each Tx/cmd queue to its corresponding fifo */
+ BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
+
+ for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
+ int ac = default_queue_to_tx_fifo[i];
+
+ iwl_txq_ctx_activate(priv, i);
+
+ if (ac == IWL_TX_FIFO_UNUSED)
+ continue;
+
+ iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+/**
+ * iwl4965_alive_start - called after REPLY_ALIVE notification received
+ * from protocol/runtime uCode (initialization uCode's
+ * Alive gets handled by iwl_init_alive_start()).
+ */
+static void iwl4965_alive_start(struct iwl_priv *priv)
+{
+ int ret = 0;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+ IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+
+ if (priv->card_alive.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ IWL_DEBUG_INFO(priv, "Alive failed.\n");
+ goto restart;
+ }
+
+ /* The initialization uCode has loaded the runtime uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "runtime" alive if code weren't properly loaded. */
+ if (iwl4965_verify_ucode(priv)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
+ goto restart;
+ }
+
+ ret = iwl4965_alive_notify(priv);
+ if (ret) {
+ IWL_WARN(priv,
+ "Could not complete ALIVE transition [ntf]: %d\n", ret);
+ goto restart;
+ }
+
+
+ /* After the ALIVE response, we can send host commands to the uCode */
+ set_bit(STATUS_ALIVE, &priv->status);
+
+ /* Enable watchdog to monitor the driver tx queues */
+ iwl_legacy_setup_watchdog(priv);
+
+ if (iwl_legacy_is_rfkill(priv))
+ return;
+
+ ieee80211_wake_queues(priv->hw);
+
+ priv->active_rate = IWL_RATES_MASK;
+
+ if (iwl_legacy_is_associated_ctx(ctx)) {
+ struct iwl_legacy_rxon_cmd *active_rxon =
+ (struct iwl_legacy_rxon_cmd *)&ctx->active;
+ /* apply any changes in staging */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ } else {
+ struct iwl_rxon_context *tmp;
+ /* Initialize our rx_config data */
+ for_each_context(priv, tmp)
+ iwl_legacy_connection_init_rx_config(priv, tmp);
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ }
+
+ /* Configure bluetooth coexistence if enabled */
+ iwl_legacy_send_bt_config(priv);
+
+ iwl4965_reset_run_time_calib(priv);
+
+ set_bit(STATUS_READY, &priv->status);
+
+ /* Configure the adapter for unassociated operation */
+ iwl_legacy_commit_rxon(priv, ctx);
+
+ /* At this point, the NIC is initialized and operational */
+ iwl4965_rf_kill_ct_config(priv);
+
+ IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
+ wake_up_interruptible(&priv->wait_command_queue);
+
+ iwl_legacy_power_update_mode(priv, true);
+ IWL_DEBUG_INFO(priv, "Updated power mode\n");
+
+ return;
+
+ restart:
+ queue_work(priv->workqueue, &priv->restart);
+}
+
+static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
+
+static void __iwl4965_down(struct iwl_priv *priv)
+{
+ unsigned long flags;
+ int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
+
+ IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
+
+ iwl_legacy_scan_cancel_timeout(priv, 200);
+
+ exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+ /* Stop the TX queues watchdog. The STATUS_EXIT_PENDING bit must be set
+ * to prevent the timer from being re-armed */
+ del_timer_sync(&priv->watchdog);
+
+ iwl_legacy_clear_ucode_stations(priv, NULL);
+ iwl_legacy_dealloc_bcast_stations(priv);
+ iwl_legacy_clear_driver_stations(priv);
+
+ /* Unblock any waiting calls */
+ wake_up_interruptible_all(&priv->wait_command_queue);
+
+ /* Wipe out the EXIT_PENDING status bit if we are not actually
+ * exiting the module */
+ if (!exit_pending)
+ clear_bit(STATUS_EXIT_PENDING, &priv->status);
+
+ /* stop and reset the on-board processor */
+ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* tell the device to stop sending interrupts */
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_legacy_disable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ iwl4965_synchronize_irq(priv);
+
+ if (priv->mac80211_registered)
+ ieee80211_stop_queues(priv->hw);
+
+ /* If we have not previously called iwl_init() then
+ * clear all bits but the RF Kill bit and return */
+ if (!iwl_legacy_is_init(priv)) {
+ priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
+ STATUS_RF_KILL_HW |
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
+ STATUS_GEO_CONFIGURED |
+ test_bit(STATUS_EXIT_PENDING, &priv->status) <<
+ STATUS_EXIT_PENDING;
+ goto exit;
+ }
+
+ /* ...otherwise clear out all the status bits but the RF Kill
+ * bit and continue taking the NIC down. */
+ priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
+ STATUS_RF_KILL_HW |
+ test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
+ STATUS_GEO_CONFIGURED |
+ test_bit(STATUS_FW_ERROR, &priv->status) <<
+ STATUS_FW_ERROR |
+ test_bit(STATUS_EXIT_PENDING, &priv->status) <<
+ STATUS_EXIT_PENDING;
+
+ iwl4965_txq_ctx_stop(priv);
+ iwl4965_rxq_stop(priv);
+
+ /* Power-down device's busmaster DMA clocks */
+ iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ iwl_legacy_apm_stop(priv);
+
+ exit:
+ memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
+
+ dev_kfree_skb(priv->beacon_skb);
+ priv->beacon_skb = NULL;
+
+ /* clear out any free frames */
+ iwl4965_clear_free_frames(priv);
+}
+
+static void iwl4965_down(struct iwl_priv *priv)
+{
+ mutex_lock(&priv->mutex);
+ __iwl4965_down(priv);
+ mutex_unlock(&priv->mutex);
+
+ iwl4965_cancel_deferred_work(priv);
+}
+
+#define HW_READY_TIMEOUT (50)
+
+static int iwl4965_set_hw_ready(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ HW_READY_TIMEOUT);
+ if (ret != -ETIMEDOUT)
+ priv->hw_ready = true;
+ else
+ priv->hw_ready = false;
+
+ IWL_DEBUG_INFO(priv, "hardware %s\n",
+ (priv->hw_ready == 1) ? "ready" : "not ready");
+ return ret;
+}
+
+static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
+
+ ret = iwl4965_set_hw_ready(priv);
+ if (priv->hw_ready)
+ return ret;
+
+ /* If HW is not ready, prepare the conditions to check again */
+ iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+ ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+ /* HW should be ready by now, check again. */
+ if (ret != -ETIMEDOUT)
+ iwl4965_set_hw_ready(priv);
+
+ return ret;
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int __iwl4965_up(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx;
+ int i;
+ int ret;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+ IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
+ return -EIO;
+ }
+
+ if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
+ IWL_ERR(priv, "ucode not available for device bringup\n");
+ return -EIO;
+ }
+
+ for_each_context(priv, ctx) {
+ ret = iwl4965_alloc_bcast_station(priv, ctx);
+ if (ret) {
+ iwl_legacy_dealloc_bcast_stations(priv);
+ return ret;
+ }
+ }
+
+ iwl4965_prepare_card_hw(priv);
+
+ if (!priv->hw_ready) {
+ IWL_WARN(priv, "Exit HW not ready\n");
+ return -EIO;
+ }
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (iwl_read32(priv,
+ CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+
+ if (iwl_legacy_is_rfkill(priv)) {
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
+
+ iwl_legacy_enable_interrupts(priv);
+ IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
+ return 0;
+ }
+
+ iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+
+ /* must be initialised before iwl_hw_nic_init */
+ priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+
+ ret = iwl4965_hw_nic_init(priv);
+ if (ret) {
+ IWL_ERR(priv, "Unable to init nic\n");
+ return ret;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+ iwl_legacy_enable_interrupts(priv);
+
+ /* really make sure rfkill handshake bits are cleared */
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Copy original ucode data image from disk into backup cache.
+ * This will be used to initialize the on-board processor's
+ * data SRAM for a clean start when the runtime program first loads. */
+ memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
+ priv->ucode_data.len);
+
+ for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+ /* load bootstrap state machine,
+ * load bootstrap program into processor's memory,
+ * prepare to load the "initialize" uCode */
+ ret = priv->cfg->ops->lib->load_ucode(priv);
+
+ if (ret) {
+ IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
+ ret);
+ continue;
+ }
+
+ /* start card; "initialize" will load runtime ucode */
+ iwl4965_nic_start(priv);
+
+ IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
+
+ return 0;
+ }
+
+ set_bit(STATUS_EXIT_PENDING, &priv->status);
+ __iwl4965_down(priv);
+ clear_bit(STATUS_EXIT_PENDING, &priv->status);
+
+ /* tried to restart and config the device for as long as our
+ * patience could withstand */
+ IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
+ return -EIO;
+}
+
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void iwl4965_bg_init_alive_start(struct work_struct *data)
+{
+ struct iwl_priv *priv =
+ container_of(data, struct iwl_priv, init_alive_start.work);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ priv->cfg->ops->lib->init_alive_start(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_bg_alive_start(struct work_struct *data)
+{
+ struct iwl_priv *priv =
+ container_of(data, struct iwl_priv, alive_start.work);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ iwl4965_alive_start(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
+{
+ struct iwl_priv *priv = container_of(work, struct iwl_priv,
+ run_time_calib_work);
+
+ mutex_lock(&priv->mutex);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+ test_bit(STATUS_SCANNING, &priv->status)) {
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ if (priv->start_calib) {
+ iwl4965_chain_noise_calibration(priv,
+ (void *)&priv->_4965.statistics);
+ iwl4965_sensitivity_calibration(priv,
+ (void *)&priv->_4965.statistics);
+ }
+
+ mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_bg_restart(struct work_struct *data)
+{
+ struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
+ struct iwl_rxon_context *ctx;
+
+ mutex_lock(&priv->mutex);
+ for_each_context(priv, ctx)
+ ctx->vif = NULL;
+ priv->is_open = 0;
+
+ __iwl4965_down(priv);
+
+ mutex_unlock(&priv->mutex);
+ iwl4965_cancel_deferred_work(priv);
+ ieee80211_restart_hw(priv->hw);
+ } else {
+ iwl4965_down(priv);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ __iwl4965_up(priv);
+ mutex_unlock(&priv->mutex);
+ }
+}
+
+static void iwl4965_bg_rx_replenish(struct work_struct *data)
+{
+ struct iwl_priv *priv =
+ container_of(data, struct iwl_priv, rx_replenish);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ iwl4965_rx_replenish(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT (4 * HZ)
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+static int iwl4965_mac_setup_register(struct iwl_priv *priv,
+ u32 max_probe_length)
+{
+ int ret;
+ struct ieee80211_hw *hw = priv->hw;
+ struct iwl_rxon_context *ctx;
+
+ hw->rate_control_algorithm = "iwl-4965-rs";
+
+ /* Tell mac80211 our characteristics */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_NEED_DTIM_PERIOD |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ if (priv->cfg->sku & IWL_SKU_N)
+ hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+ hw->sta_data_size = sizeof(struct iwl_station_priv);
+ hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
+ for_each_context(priv, ctx) {
+ hw->wiphy->interface_modes |= ctx->interface_modes;
+ hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
+ }
+
+ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+ /*
+ * For now, disable PS by default because it affects
+ * RX performance significantly.
+ */
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
+
+ /* Default value; 4 EDCA QOS priorities */
+ hw->queues = 4;
+
+ hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+ if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+ priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &priv->bands[IEEE80211_BAND_2GHZ];
+ if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+ priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &priv->bands[IEEE80211_BAND_5GHZ];
+
+ iwl_legacy_leds_init(priv);
+
+ ret = ieee80211_register_hw(priv->hw);
+ if (ret) {
+ IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+ return ret;
+ }
+ priv->mac80211_registered = 1;
+
+ return 0;
+}
+
+
+int iwl4965_mac_start(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ /* we should be verifying the device is ready to be opened */
+ mutex_lock(&priv->mutex);
+ ret = __iwl4965_up(priv);
+ mutex_unlock(&priv->mutex);
+
+ if (ret)
+ return ret;
+
+ if (iwl_legacy_is_rfkill(priv))
+ goto out;
+
+ IWL_DEBUG_INFO(priv, "Start UP work done.\n");
+
+ /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
+ * mac80211 will not be run successfully. */
+ ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ test_bit(STATUS_READY, &priv->status),
+ UCODE_READY_TIMEOUT);
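+ /* wait_event_interruptible_timeout() returns 0 only on timeout */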
+ if (!ret) {
+ if (!test_bit(STATUS_READY, &priv->status)) {
+ IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
+ jiffies_to_msecs(UCODE_READY_TIMEOUT));
+ return -ETIMEDOUT;
+ }
+ }
+
+ iwl4965_led_enable(priv);
+
+out:
+ priv->is_open = 1;
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return 0;
+}
+
+void iwl4965_mac_stop(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!priv->is_open)
+ return;
+
+ priv->is_open = 0;
+
+ iwl4965_down(priv);
+
+ flush_workqueue(priv->workqueue);
+
+ /* enable interrupts again in order to receive rfkill changes */
+ iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+ iwl_legacy_enable_interrupts(priv);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_MACDUMP(priv, "enter\n");
+
+ IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+ ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+ if (iwl4965_tx_skb(priv, skb))
+ dev_kfree_skb_any(skb);
+
+ IWL_DEBUG_MACDUMP(priv, "leave\n");
+}
+
+void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
+ iv32, phase1key);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ u8 sta_id;
+ bool is_default_wep_key = false;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (priv->cfg->mod_params->sw_crypto) {
+ IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
+ if (sta_id == IWL_INVALID_STATION)
+ return -EINVAL;
+
+ mutex_lock(&priv->mutex);
+ iwl_legacy_scan_cancel_timeout(priv, 100);
+
+ /*
+ * If we are getting a WEP group key and we haven't received any key
+ * mapping so far, we are in legacy WEP mode (group key only); otherwise
+ * we are in 1X mode.
+ * In legacy WEP mode, we use another host command to the uCode.
+ */
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+ !sta) {
+ if (cmd == SET_KEY)
+ is_default_wep_key = !ctx->key_mapping_keys;
+ else
+ is_default_wep_key =
+ (key->hw_key_idx == HW_KEY_DEFAULT);
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ if (is_default_wep_key)
+ ret = iwl4965_set_default_wep_key(priv,
+ vif_priv->ctx, key);
+ else
+ ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
+ key, sta_id);
+
+ IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
+ break;
+ case DISABLE_KEY:
+ if (is_default_wep_key)
+ ret = iwl4965_remove_default_wep_key(priv, ctx, key);
+ else
+ ret = iwl4965_remove_dynamic_key(priv, ctx,
+ key, sta_id);
+
+ IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret = -EINVAL;
+
+ IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
+ sta->addr, tid);
+
+ if (!(priv->cfg->sku & IWL_SKU_N))
+ return -EACCES;
+
+ mutex_lock(&priv->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ IWL_DEBUG_HT(priv, "start Rx\n");
+ ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ IWL_DEBUG_HT(priv, "stop Rx\n");
+ ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ IWL_DEBUG_HT(priv, "start Tx\n");
+ ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
+ if (ret == 0) {
+ priv->_4965.agg_tids_count++;
+ IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
+ priv->_4965.agg_tids_count);
+ }
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ IWL_DEBUG_HT(priv, "stop Tx\n");
+ ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
+ if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) {
+ priv->_4965.agg_tids_count--;
+ IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
+ priv->_4965.agg_tids_count);
+ }
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+
+int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ int ret;
+ u8 sta_id;
+
+ IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
+ sta->addr);
+ mutex_lock(&priv->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
+ sta->addr);
+ sta_priv->common.sta_id = IWL_INVALID_STATION;
+
+ atomic_set(&sta_priv->pending_frames, 0);
+
+ ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
+ is_ap, sta, &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+ sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ mutex_unlock(&priv->mutex);
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl4965_rs_rate_init(priv, sta, sta_id);
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+
+void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct iwl_priv *priv = hw->priv;
+ const struct iwl_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = ch_switch->channel;
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ u16 ch;
+ unsigned long flags = 0;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (iwl_legacy_is_rfkill(priv))
+ goto out_exit;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+ test_bit(STATUS_SCANNING, &priv->status))
+ goto out_exit;
+
+ if (!iwl_legacy_is_associated_ctx(ctx))
+ goto out_exit;
+
+ /* channel switch in progress */
+ if (priv->switch_rxon.switch_in_progress == true)
+ goto out_exit;
+
+ mutex_lock(&priv->mutex);
+ if (priv->cfg->ops->lib->set_channel_switch) {
+
+ ch = channel->hw_value;
+ if (le16_to_cpu(ctx->active.channel) != ch) {
+ ch_info = iwl_legacy_get_channel_info(priv,
+ channel->band,
+ ch);
+ if (!iwl_legacy_is_channel_valid(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+ goto out;
+ }
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /* Configure HT40 channels */
+ ctx->ht.enabled = conf_is_ht(conf);
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+ } else
+ ctx->ht.is_40mhz = false;
+
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ iwl_legacy_set_rxon_channel(priv, channel, ctx);
+ iwl_legacy_set_rxon_ht(priv, ht_conf);
+ iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
+ ctx->vif);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwl_legacy_set_rate(priv);
+ /*
+ * at this point, staging_rxon has the
+ * configuration for channel switch
+ */
+ if (priv->cfg->ops->lib->set_channel_switch(priv,
+ ch_switch))
+ priv->switch_rxon.switch_in_progress = false;
+ }
+ }
+out:
+ mutex_unlock(&priv->mutex);
+out_exit:
+ if (!priv->switch_rxon.switch_in_progress)
+ ieee80211_chswitch_done(ctx->vif, false);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+void iwl4965_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct iwl_priv *priv = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+ struct iwl_rxon_context *ctx;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
+
+ IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+ changed_flags, *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&priv->mutex);
+
+ for_each_context(priv, ctx) {
+ ctx->staging.filter_flags &= ~filter_nand;
+ ctx->staging.filter_flags |= filter_or;
+
+ /*
+ * Not committing directly because hardware can perform a scan,
+ * but we'll eventually commit the filter flags change anyway.
+ */
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in iwl_legacy_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * driver setup and teardown
+ *
+ *****************************************************************************/
+
+static void iwl4965_bg_txpower_work(struct work_struct *work)
+{
+ struct iwl_priv *priv = container_of(work, struct iwl_priv,
+ txpower_work);
+
+ /* If a scan happened to start before we got here
+ * then just return; the statistics notification will
+ * kick off another scheduled work to compensate for
+ * any temperature delta we missed here. */
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+ test_bit(STATUS_SCANNING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+
+ /* Regardless of whether we are associated, we must reconfigure the
+ * TX power since frames can be sent on non-radar channels while
+ * not associated */
+ priv->cfg->ops->lib->send_tx_power(priv);
+
+ /* Update last_temperature to keep is_calib_needed from running
+ * when it isn't needed... */
+ priv->last_temperature = priv->temperature;
+
+ mutex_unlock(&priv->mutex);
+}
+
+static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
+{
+ priv->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+ init_waitqueue_head(&priv->wait_command_queue);
+
+ INIT_WORK(&priv->restart, iwl4965_bg_restart);
+ INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
+ INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
+ INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
+ INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
+
+ iwl_legacy_setup_scan_deferred_work(priv);
+
+ INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
+
+ init_timer(&priv->statistics_periodic);
+ priv->statistics_periodic.data = (unsigned long)priv;
+ priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
+
+ init_timer(&priv->ucode_trace);
+ priv->ucode_trace.data = (unsigned long)priv;
+ priv->ucode_trace.function = iwl4965_bg_ucode_trace;
+
+ init_timer(&priv->watchdog);
+ priv->watchdog.data = (unsigned long)priv;
+ priv->watchdog.function = iwl_legacy_bg_watchdog;
+
+ tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+ iwl4965_irq_tasklet, (unsigned long)priv);
+}
+
+static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
+{
+ cancel_work_sync(&priv->txpower_work);
+ cancel_delayed_work_sync(&priv->init_alive_start);
+ cancel_delayed_work(&priv->alive_start);
+ cancel_work_sync(&priv->run_time_calib_work);
+
+ iwl_legacy_cancel_scan_deferred_work(priv);
+
+ del_timer_sync(&priv->statistics_periodic);
+ del_timer_sync(&priv->ucode_trace);
+}
+
+static void iwl4965_init_hw_rates(struct iwl_priv *priv,
+ struct ieee80211_rate *rates)
+{
+ int i;
+
+ for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
+ rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
+ rates[i].hw_value = i; /* Rate scaling will work on indexes */
+ rates[i].hw_value_short = i;
+ rates[i].flags = 0;
+ if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
+ /*
+ * If CCK != 1M then set short preamble rate flag.
+ */
+ rates[i].flags |=
+ (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
+ 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+ }
+ }
+}
+/*
+ * Acquire priv->lock before calling this function !
+ */
+void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
+{
+ iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
+ (index & 0xff) | (txq_id << 8));
+ iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
+}
+
+void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry)
+{
+ int txq_id = txq->q.id;
+
+ /* Find out whether to activate Tx queue */
+ int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
+
+ /* Set up and activate */
+ iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+ (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+ (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+ IWL49_SCD_QUEUE_STTS_REG_MSK);
+
+ txq->sched_retry = scd_retry;
+
+ IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
+ active ? "Activate" : "Deactivate",
+ scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
+}
+
+
+static int iwl4965_init_drv(struct iwl_priv *priv)
+{
+ int ret;
+
+ spin_lock_init(&priv->sta_lock);
+ spin_lock_init(&priv->hcmd_lock);
+
+ INIT_LIST_HEAD(&priv->free_frames);
+
+ mutex_init(&priv->mutex);
+ mutex_init(&priv->sync_cmd_mutex);
+
+ priv->ieee_channels = NULL;
+ priv->ieee_rates = NULL;
+ priv->band = IEEE80211_BAND_2GHZ;
+
+ priv->iw_mode = NL80211_IFTYPE_STATION;
+ priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+ priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
+ priv->_4965.agg_tids_count = 0;
+
+ /* initialize force reset */
+ priv->force_reset[IWL_RF_RESET].reset_duration =
+ IWL_DELAY_NEXT_FORCE_RF_RESET;
+ priv->force_reset[IWL_FW_RESET].reset_duration =
+ IWL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+ /* Choose which receivers/antennas to use */
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv,
+ &priv->contexts[IWL_RXON_CTX_BSS]);
+
+ iwl_legacy_init_scan_params(priv);
+
+ /* Set the tx_power_user_lmt to the lowest power level
+ * this value will get overwritten by channel max power avg
+ * from eeprom */
+ priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
+ priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
+
+ ret = iwl_legacy_init_channel_map(priv);
+ if (ret) {
+ IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
+ goto err;
+ }
+
+ ret = iwl_legacy_init_geos(priv);
+ if (ret) {
+ IWL_ERR(priv, "initializing geos failed: %d\n", ret);
+ goto err_free_channel_map;
+ }
+ iwl4965_init_hw_rates(priv, priv->ieee_rates);
+
+ return 0;
+
+err_free_channel_map:
+ iwl_legacy_free_channel_map(priv);
+err:
+ return ret;
+}
+
+static void iwl4965_uninit_drv(struct iwl_priv *priv)
+{
+ iwl4965_calib_free_results(priv);
+ iwl_legacy_free_geos(priv);
+ iwl_legacy_free_channel_map(priv);
+ kfree(priv->scan_cmd);
+}
+
+static void iwl4965_hw_detect(struct iwl_priv *priv)
+{
+ priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
+ priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
+ pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
+ IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
+}
+
+static int iwl4965_set_hw_params(struct iwl_priv *priv)
+{
+ priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+ priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+ if (priv->cfg->mod_params->amsdu_size_8K)
+ priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
+ else
+ priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
+
+ priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
+
+ if (priv->cfg->mod_params->disable_11n)
+ priv->cfg->sku &= ~IWL_SKU_N;
+
+ /* Device-specific setup */
+ return priv->cfg->ops->lib->set_hw_params(priv);
+}
+
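+ /* mac80211 AC order is VO, VI, BE, BK; map each AC straight onto its
+  * Tx FIFO and onto hardware queues 0-3. */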
+static const u8 iwl4965_bss_ac_to_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+};
+
+static const u8 iwl4965_bss_ac_to_queue[] = {
+ 0, 1, 2, 3,
+};
+
+static int
+iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err = 0, i;
+ struct iwl_priv *priv;
+ struct ieee80211_hw *hw;
+ struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ unsigned long flags;
+ u16 pci_cmd;
+
+ /************************
+ * 1. Allocating HW data
+ ************************/
+
+ hw = iwl_legacy_alloc_all(cfg);
+ if (!hw) {
+ err = -ENOMEM;
+ goto out;
+ }
+ priv = hw->priv;
+ /* At this point both hw and priv are allocated. */
+
+ /*
+ * The default context is always valid,
+ * more may be discovered when firmware
+ * is loaded.
+ */
+ priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
+
+ for (i = 0; i < NUM_IWL_RXON_CTX; i++)
+ priv->contexts[i].ctxid = i;
+
+ priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
+ priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
+ priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
+ priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
+ priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
+ priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
+ priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
+ priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
+ priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
+ priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
+ priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
+ BIT(NL80211_IFTYPE_ADHOC);
+ priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
+ BIT(NL80211_IFTYPE_STATION);
+ priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
+ priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
+ priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
+ priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
+
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+
+ IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
+ priv->cfg = cfg;
+ priv->pci_dev = pdev;
+ priv->inta_mask = CSR_INI_SET_MASK;
+
+ if (iwl_legacy_alloc_traffic_mem(priv))
+ IWL_ERR(priv, "Not enough memory to generate traffic log\n");
+
+ /**************************
+ * 2. Initializing PCI bus
+ **************************/
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_ieee80211_free_hw;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ /* both attempts failed: */
+ if (err) {
+ IWL_WARN(priv, "No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err)
+ goto out_pci_disable_device;
+
+ pci_set_drvdata(pdev, priv);
+
+
+ /***********************
+ * 3. Read REV register
+ ***********************/
+ priv->hw_base = pci_iomap(pdev, 0, 0);
+ if (!priv->hw_base) {
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
+ (unsigned long long) pci_resource_len(pdev, 0));
+ IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
+
+ /* these spin locks will be used in apm_ops.init and EEPROM access,
+ * so we should init them now
+ */
+ spin_lock_init(&priv->reg_lock);
+ spin_lock_init(&priv->lock);
+
+ /*
+ * stop and reset the on-board processor just in case it is in a
+ * strange state ... like being left stranded by a primary kernel
+ * and this is now the kdump kernel trying to start up
+ */
+ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ iwl4965_hw_detect(priv);
+ IWL_INFO(priv, "Detected %s, REV=0x%X\n",
+ priv->cfg->name, priv->hw_rev);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ iwl4965_prepare_card_hw(priv);
+ if (!priv->hw_ready) {
+ IWL_WARN(priv, "Failed, HW not ready\n");
+ goto out_iounmap;
+ }
+
+ /*****************
+ * 4. Read EEPROM
+ *****************/
+ /* Read the EEPROM */
+ err = iwl_legacy_eeprom_init(priv);
+ if (err) {
+ IWL_ERR(priv, "Unable to init EEPROM\n");
+ goto out_iounmap;
+ }
+ err = iwl4965_eeprom_check_version(priv);
+ if (err)
+ goto out_free_eeprom;
+
+ /* extract MAC Address */
+ iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
+ IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
+ priv->hw->wiphy->addresses = priv->addresses;
+ priv->hw->wiphy->n_addresses = 1;
+
+ /************************
+ * 5. Setup HW constants
+ ************************/
+ if (iwl4965_set_hw_params(priv)) {
+ IWL_ERR(priv, "failed to set hw parameters\n");
+ goto out_free_eeprom;
+ }
+
+ /*******************
+ * 6. Setup priv
+ *******************/
+
+ err = iwl4965_init_drv(priv);
+ if (err)
+ goto out_free_eeprom;
+ /* At this point both hw and priv are initialized. */
+
+ /********************
+ * 7. Setup services
+ ********************/
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_legacy_disable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ pci_enable_msi(priv->pci_dev);
+
+ err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
+ IRQF_SHARED, DRV_NAME, priv);
+ if (err) {
+ IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
+ goto out_disable_msi;
+ }
+
+ iwl4965_setup_deferred_work(priv);
+ iwl4965_setup_rx_handlers(priv);
+
+ /*********************************************
+ * 8. Enable interrupts and read RFKILL state
+ *********************************************/
+
+ /* enable interrupts if needed: hw bug w/a */
+ pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
+ }
+
+ iwl_legacy_enable_interrupts(priv);
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (iwl_read32(priv, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
+
+ iwl_legacy_power_initialize(priv);
+
+ init_completion(&priv->_4965.firmware_loading_complete);
+
+ err = iwl4965_request_firmware(priv, true);
+ if (err)
+ goto out_destroy_workqueue;
+
+ return 0;
+
+ out_destroy_workqueue:
+ destroy_workqueue(priv->workqueue);
+ priv->workqueue = NULL;
+ free_irq(priv->pci_dev->irq, priv);
+ out_disable_msi:
+ pci_disable_msi(priv->pci_dev);
+ iwl4965_uninit_drv(priv);
+ out_free_eeprom:
+ iwl_legacy_eeprom_free(priv);
+ out_iounmap:
+ pci_iounmap(pdev, priv->hw_base);
+ out_pci_release_regions:
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+ out_pci_disable_device:
+ pci_disable_device(pdev);
+ out_ieee80211_free_hw:
+ iwl_legacy_free_traffic_mem(priv);
+ ieee80211_free_hw(priv->hw);
+ out:
+ return err;
+}
+
+static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
+{
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ if (!priv)
+ return;
+
+ wait_for_completion(&priv->_4965.firmware_loading_complete);
+
+ IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
+
+ iwl_legacy_dbgfs_unregister(priv);
+ sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
+
+ /* The ieee80211_unregister_hw() call will cause iwl_mac_stop() to
+ * be called, and then iwl4965_down(); since we are removing the device
+ * we need to set the STATUS_EXIT_PENDING bit.
+ */
+ set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+ iwl_legacy_leds_exit(priv);
+
+ if (priv->mac80211_registered) {
+ ieee80211_unregister_hw(priv->hw);
+ priv->mac80211_registered = 0;
+ } else {
+ iwl4965_down(priv);
+ }
+
+ /*
+ * Make sure device is reset to low power before unloading driver.
+ * This may be redundant with iwl4965_down(), but there are paths to
+ * run iwl4965_down() without calling apm_ops.stop(), and there are
+ * paths to avoid running iwl4965_down() at all before leaving driver.
+ * This (inexpensive) call *makes sure* device is reset.
+ */
+ iwl_legacy_apm_stop(priv);
+
+ /* make sure we flush any pending irq or
+ * tasklet for the driver
+ */
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_legacy_disable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwl4965_synchronize_irq(priv);
+
+ iwl4965_dealloc_ucode_pci(priv);
+
+ if (priv->rxq.bd)
+ iwl4965_rx_queue_free(priv, &priv->rxq);
+ iwl4965_hw_txq_ctx_free(priv);
+
+ iwl_legacy_eeprom_free(priv);
+
+
+ /*netif_stop_queue(dev); */
+ flush_workqueue(priv->workqueue);
+
+ /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
+ * priv->workqueue... so we can't take down the workqueue
+ * until now... */
+ destroy_workqueue(priv->workqueue);
+ priv->workqueue = NULL;
+ iwl_legacy_free_traffic_mem(priv);
+
+ free_irq(priv->pci_dev->irq, priv);
+ pci_disable_msi(priv->pci_dev);
+ pci_iounmap(pdev, priv->hw_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ iwl4965_uninit_drv(priv);
+
+ dev_kfree_skb(priv->beacon_skb);
+
+ ieee80211_free_hw(priv->hw);
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
+ * must be called under priv->lock and mac access
+ */
+void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
+{
+ iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
+#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
+ {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
+ {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
+#endif /* CONFIG_IWL4965 */
+
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
+
+static struct pci_driver iwl4965_driver = {
+ .name = DRV_NAME,
+ .id_table = iwl4965_hw_card_ids,
+ .probe = iwl4965_pci_probe,
+ .remove = __devexit_p(iwl4965_pci_remove),
+ .driver.pm = IWL_LEGACY_PM_OPS,
+};
+
+static int __init iwl4965_init(void)
+{
+
+ int ret;
+ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ pr_info(DRV_COPYRIGHT "\n");
+
+ ret = iwl4965_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_register_driver(&iwl4965_driver);
+ if (ret) {
+ pr_err("Unable to initialize PCI module\n");
+ goto error_register;
+ }
+
+ return ret;
+
+error_register:
+ iwl4965_rate_control_unregister();
+ return ret;
+}
+
+static void __exit iwl4965_exit(void)
+{
+ pci_unregister_driver(&iwl4965_driver);
+ iwl4965_rate_control_unregister();
+}
+
+module_exit(iwl4965_exit);
+module_init(iwl4965_init);
+
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
+module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+
+module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
+ int, S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index b82364258dc5..17d555f2215a 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,14 +1,52 @@
-config IWLWIFI
- tristate "Intel Wireless Wifi"
+config IWLAGN
+ tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlagn) "
depends on PCI && MAC80211
select FW_LOADER
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LEDS_TRIGGERS
+ select MAC80211_LEDS
+ ---help---
+ Select to build the driver supporting the:
+
+ Intel Wireless WiFi Link Next-Gen AGN
+
+ This option enables support for use with the following hardware:
+ Intel Wireless WiFi Link 6250AGN Adapter
+ Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
+ Intel WiFi Link 1000BGN
+ Intel Wireless WiFi 5150AGN
+ Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
+ Intel 6005 Series Wi-Fi Adapters
+ Intel 6030 Series Wi-Fi Adapters
+ Intel Wireless WiFi Link 6150BGN 2 Adapter
+ Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
+ Intel 2000 Series Wi-Fi Adapters
+
+
+ This driver uses the kernel's mac80211 subsystem.
+
+ In order to use this driver, you will need a microcode (uCode)
+ image for it. You can obtain the microcode from:
+
+ <http://intellinuxwireless.org/>.
+
+ The microcode is typically installed in /lib/firmware. You can
+ look in the hotplug script /etc/hotplug/firmware.agent to
+ determine which directory FIRMWARE_DIR is set to when the script
+ runs.
+
+ If you want to compile the driver as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here and read <file:Documentation/kbuild/modules.txt>. The
+ module will be called iwlagn.
menu "Debugging Options"
- depends on IWLWIFI
+ depends on IWLAGN
config IWLWIFI_DEBUG
- bool "Enable full debugging output in iwlagn and iwl3945 drivers"
- depends on IWLWIFI
+ bool "Enable full debugging output in the iwlagn driver"
+ depends on IWLAGN
---help---
This option will enable debug tracing output for the iwlwifi drivers
@@ -33,7 +71,7 @@ config IWLWIFI_DEBUG
config IWLWIFI_DEBUGFS
bool "iwlagn debugfs support"
- depends on IWLWIFI && MAC80211_DEBUGFS
+ depends on IWLAGN && MAC80211_DEBUGFS
---help---
Enable creation of debugfs files for the iwlwifi drivers. This
is a low-impact option that allows getting insight into the
@@ -41,13 +79,13 @@ config IWLWIFI_DEBUGFS
config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
bool "Experimental uCode support"
- depends on IWLWIFI && IWLWIFI_DEBUG
+ depends on IWLAGN && IWLWIFI_DEBUG
---help---
Enable use of experimental ucode for testing and debugging.
config IWLWIFI_DEVICE_TRACING
bool "iwlwifi device access tracing"
- depends on IWLWIFI
+ depends on IWLAGN
depends on EVENT_TRACING
help
Say Y here to trace all commands, including TX frames and IO
@@ -64,70 +102,19 @@ config IWLWIFI_DEVICE_TRACING
occur.
endmenu
-config IWLAGN
- tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)"
- depends on IWLWIFI
- ---help---
- Select to build the driver supporting the:
-
- Intel Wireless WiFi Link Next-Gen AGN
-
- This driver uses the kernel's mac80211 subsystem.
-
- In order to use this driver, you will need a microcode (uCode)
- image for it. You can obtain the microcode from:
-
- <http://intellinuxwireless.org/>.
-
- The microcode is typically installed in /lib/firmware. You can
- look in the hotplug script /etc/hotplug/firmware.agent to
- determine which directory FIRMWARE_DIR is set to when the script
- runs.
-
- If you want to compile the driver as a module ( = code which can be
- inserted in and removed from the running kernel whenever you want),
- say M here and read <file:Documentation/kbuild/modules.txt>. The
- module will be called iwlagn.
-
-
-config IWL4965
- bool "Intel Wireless WiFi 4965AGN"
- depends on IWLAGN
- ---help---
- This option enables support for Intel Wireless WiFi Link 4965AGN
-
-config IWL5000
- bool "Intel Wireless-N/Advanced-N/Ultimate-N WiFi Link"
+config IWL_P2P
+ bool "iwlwifi experimental P2P support"
depends on IWLAGN
- ---help---
- This option enables support for use with the following hardware:
- Intel Wireless WiFi Link 6250AGN Adapter
- Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
- Intel WiFi Link 1000BGN
- Intel Wireless WiFi 5150AGN
- Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
-
-config IWL3945
- tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
- depends on IWLWIFI
- ---help---
- Select to build the driver supporting the:
-
- Intel PRO/Wireless 3945ABG/BG Network Connection
-
- This driver uses the kernel's mac80211 subsystem.
-
- In order to use this driver, you will need a microcode (uCode)
- image for it. You can obtain the microcode from:
+ help
+ This option enables experimental P2P support for some devices
+ based on microcode support. Since P2P support is still under
+ development, this option may enable it now for some devices
+ that later turn out not to support it, due to microcode
+ restrictions.
- <http://intellinuxwireless.org/>.
+ To determine if your microcode supports the experimental P2P
+ offered by this option, check if the driver advertises AP
+ support when it is loaded.
- The microcode is typically installed in /lib/firmware. You can
- look in the hotplug script /etc/hotplug/firmware.agent to
- determine which directory FIRMWARE_DIR is set to when the script
- runs.
+ Say Y only if you want to experiment with P2P.
- If you want to compile the driver as a module ( = code which can be
- inserted in and removed from the running kernel whenever you want),
- say M here and read <file:Documentation/kbuild/modules.txt>. The
- module will be called iwl3945.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 63edbe2e557f..9d6ee836426c 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,28 +1,23 @@
-obj-$(CONFIG_IWLWIFI) += iwlcore.o
-iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
-iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o
-iwlcore-objs += iwl-scan.o iwl-led.o
-iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
-iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-
-CFLAGS_iwl-devtrace.o := -I$(src)
-
# AGN
obj-$(CONFIG_IWLAGN) += iwlagn.o
-iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
-iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
-iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
+iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
+iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
+iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o
iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
-iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
-iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
-iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
-iwlagn-$(CONFIG_IWL5000) += iwl-6000.o
-iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
+iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
+iwlagn-objs += iwl-rx.o iwl-tx.o iwl-sta.o
+iwlagn-objs += iwl-scan.o iwl-led.o
+iwlagn-objs += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
+iwlagn-objs += iwl-5000.o
+iwlagn-objs += iwl-6000.o
+iwlagn-objs += iwl-1000.o
+iwlagn-objs += iwl-2000.o
-# 3945
-obj-$(CONFIG_IWL3945) += iwl3945.o
-iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
-iwl3945-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-3945-debugfs.o
+iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
+iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
+iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
+
+CFLAGS_iwl-devtrace.o := -I$(src)
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index db540910b110..27c5007e577c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -49,7 +49,7 @@
#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
-#define IWL1000_UCODE_API_MAX 3
+#define IWL1000_UCODE_API_MAX 5
#define IWL100_UCODE_API_MAX 5
/* Lowest firmware API version supported */
@@ -147,7 +147,11 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ if (priv->cfg->rx_with_siso_diversity)
+ priv->hw_params.rx_chains_num = 1;
+ else
+ priv->hw_params.rx_chains_num =
+ num_of_ant(priv->cfg->valid_rx_ant);
priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
@@ -211,14 +215,16 @@ static struct iwl_lib_ops iwl1000_lib = {
.calib_version = iwlagn_eeprom_calib_version,
.query_addr = iwlagn_eeprom_query_addr,
},
- .post_associate = iwl_post_associate,
- .isr = iwl_isr_ict,
- .config_ap = iwl_config_ap,
+ .isr_ops = {
+ .isr = iwl_isr_ict,
+ .free = iwl_free_isr_ict,
+ .alloc = iwl_alloc_isr_ict,
+ .reset = iwl_reset_ict,
+ .disable = iwl_disable_ict,
+ },
.temp_ops = {
.temperature = iwlagn_temperature,
},
- .manage_ibss_station = iwlagn_manage_ibss_station,
- .update_bcast_stations = iwl_update_bcast_stations,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -226,9 +232,6 @@ static struct iwl_lib_ops iwl1000_lib = {
.bt_stats_read = iwl_ucode_bt_stats_read,
.reply_tx_error = iwl_reply_tx_error_read,
},
- .recover_from_tx_stall = iwl_bg_monitor_recover,
- .check_plcp_health = iwl_good_plcp_health,
- .check_ack_health = iwl_good_ack_health,
.txfifo_flush = iwlagn_txfifo_flush,
.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
.tt_ops = {
@@ -243,6 +246,7 @@ static const struct iwl_ops iwl1000_ops = {
.hcmd = &iwlagn_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
};
static struct iwl_base_params iwl1000_base_params = {
@@ -259,7 +263,7 @@ static struct iwl_base_params iwl1000_base_params = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 128,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -270,66 +274,49 @@ static struct iwl_ht_params iwl1000_ht_params = {
.use_rts_for_aggregation = true, /* use rts/cts protection */
};
+#define IWL_DEVICE_1000 \
+ .fw_name_pre = IWL1000_FW_PRE, \
+ .ucode_api_max = IWL1000_UCODE_API_MAX, \
+ .ucode_api_min = IWL1000_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
+ .ops = &iwl1000_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl1000_base_params, \
+ .led_mode = IWL_LED_BLINK
+
struct iwl_cfg iwl1000_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
- .fw_name_pre = IWL1000_FW_PRE,
- .ucode_api_max = IWL1000_UCODE_API_MAX,
- .ucode_api_min = IWL1000_UCODE_API_MIN,
- .sku = IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_A,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
- .ops = &iwl1000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl1000_base_params,
+ IWL_DEVICE_1000,
.ht_params = &iwl1000_ht_params,
};
struct iwl_cfg iwl1000_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
- .fw_name_pre = IWL1000_FW_PRE,
- .ucode_api_max = IWL1000_UCODE_API_MAX,
- .ucode_api_min = IWL1000_UCODE_API_MIN,
- .sku = IWL_SKU_G,
- .valid_tx_ant = ANT_A,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
- .ops = &iwl1000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl1000_base_params,
+ IWL_DEVICE_1000,
};
+#define IWL_DEVICE_100 \
+ .fw_name_pre = IWL100_FW_PRE, \
+ .ucode_api_max = IWL100_UCODE_API_MAX, \
+ .ucode_api_min = IWL100_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
+ .ops = &iwl1000_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl1000_base_params, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .rx_with_siso_diversity = true
+
struct iwl_cfg iwl100_bgn_cfg = {
- .name = "Intel(R) 100 Series 1x1 BGN",
- .fw_name_pre = IWL100_FW_PRE,
- .ucode_api_max = IWL100_UCODE_API_MAX,
- .ucode_api_min = IWL100_UCODE_API_MIN,
- .sku = IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_A,
- .valid_rx_ant = ANT_A,
- .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
- .ops = &iwl1000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl1000_base_params,
+ .name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
+ IWL_DEVICE_100,
.ht_params = &iwl1000_ht_params,
};
struct iwl_cfg iwl100_bg_cfg = {
- .name = "Intel(R) 100 Series 1x1 BG",
- .fw_name_pre = IWL100_FW_PRE,
- .ucode_api_max = IWL100_UCODE_API_MAX,
- .ucode_api_min = IWL100_UCODE_API_MIN,
- .sku = IWL_SKU_G,
- .valid_tx_ant = ANT_A,
- .valid_rx_ant = ANT_A,
- .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
- .ops = &iwl1000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl1000_base_params,
+ .name = "Intel(R) Centrino(R) Wireless-N 100 BG",
+ IWL_DEVICE_100,
};
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
new file mode 100644
index 000000000000..d7b6126408c9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -0,0 +1,560 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-sta.h"
+#include "iwl-agn.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-6000-hw.h"
+#include "iwl-agn-led.h"
+#include "iwl-agn-debugfs.h"
+
+/* Highest firmware API version supported */
+#define IWL2030_UCODE_API_MAX 5
+#define IWL2000_UCODE_API_MAX 5
+#define IWL200_UCODE_API_MAX 5
+
+/* Lowest firmware API version supported */
+#define IWL2030_UCODE_API_MIN 5
+#define IWL2000_UCODE_API_MIN 5
+#define IWL200_UCODE_API_MIN 5
+
+#define IWL2030_FW_PRE "iwlwifi-2030-"
+#define _IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode"
+#define IWL2030_MODULE_FIRMWARE(api) _IWL2030_MODULE_FIRMWARE(api)
+
+#define IWL2000_FW_PRE "iwlwifi-2000-"
+#define _IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode"
+#define IWL2000_MODULE_FIRMWARE(api) _IWL2000_MODULE_FIRMWARE(api)
+
+#define IWL200_FW_PRE "iwlwifi-200-"
+#define _IWL200_MODULE_FIRMWARE(api) IWL200_FW_PRE #api ".ucode"
+#define IWL200_MODULE_FIRMWARE(api) _IWL200_MODULE_FIRMWARE(api)
+
+static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
+{
+ /* want Celsius */
+ priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
+ priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+}
+
+/* NIC configuration for 2000 series */
+static void iwl2000_nic_config(struct iwl_priv *priv)
+{
+ u16 radio_cfg;
+
+ radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+
+ /* write radio config values to register */
+ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX)
+ iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+ EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+ EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+ /* set CSR_HW_CONFIG_REG for uCode use */
+ iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+ if (priv->cfg->iq_invert)
+ iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
+
+}
+
+static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
+ .min_nrg_cck = 97,
+ .max_nrg_cck = 0, /* not used, set to 0 */
+ .auto_corr_min_ofdm = 80,
+ .auto_corr_min_ofdm_mrc = 128,
+ .auto_corr_min_ofdm_x1 = 105,
+ .auto_corr_min_ofdm_mrc_x1 = 192,
+
+ .auto_corr_max_ofdm = 145,
+ .auto_corr_max_ofdm_mrc = 232,
+ .auto_corr_max_ofdm_x1 = 110,
+ .auto_corr_max_ofdm_mrc_x1 = 232,
+
+ .auto_corr_min_cck = 125,
+ .auto_corr_max_cck = 175,
+ .auto_corr_min_cck_mrc = 160,
+ .auto_corr_max_cck_mrc = 310,
+ .nrg_th_cck = 97,
+ .nrg_th_ofdm = 100,
+
+ .barker_corr_th_min = 190,
+ .barker_corr_th_min_mrc = 390,
+ .nrg_th_cca = 62,
+};
+
+static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
+{
+ if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
+ priv->cfg->base_params->num_of_queues =
+ priv->cfg->mod_params->num_of_queues;
+
+ priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
+ priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+ priv->hw_params.scd_bc_tbls_size =
+ priv->cfg->base_params->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+ priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+ priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
+
+ priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+
+ priv->hw_params.max_bsm_size = 0;
+ priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ BIT(IEEE80211_BAND_5GHZ);
+ priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+ priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ if (priv->cfg->rx_with_siso_diversity)
+ priv->hw_params.rx_chains_num = 1;
+ else
+ priv->hw_params.rx_chains_num =
+ num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+ priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+ iwl2000_set_ct_threshold(priv);
+
+ /* Set initial sensitivity parameters */
+ /* Set initial calibration set */
+ priv->hw_params.sens = &iwl2000_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
+ if (priv->cfg->need_dc_calib)
+ priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
+ if (priv->cfg->need_temp_offset_calib)
+ priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
+
+ priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+
+ return 0;
+}
+
+static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ /*
+ * MULTI-FIXME
+ * See iwl_mac_channel_switch.
+ */
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl6000_channel_switch_cmd cmd;
+ const struct iwl_channel_info *ch_info;
+ u32 switch_time_in_usec, ucode_switch_time;
+ u16 ch;
+ u32 tsf_low;
+ u8 switch_count;
+ u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+ struct ieee80211_vif *vif = ctx->vif;
+ struct iwl_host_cmd hcmd = {
+ .id = REPLY_CHANNEL_SWITCH,
+ .len = sizeof(cmd),
+ .flags = CMD_SYNC,
+ .data = &cmd,
+ };
+
+ cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+ ch = ch_switch->channel->hw_value;
+ IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+ cmd.channel = cpu_to_le16(ch);
+ cmd.rxon_flags = ctx->staging.flags;
+ cmd.rxon_filter_flags = ctx->staging.filter_flags;
+ switch_count = ch_switch->count;
+ tsf_low = ch_switch->timestamp & 0x0ffffffff;
+ /*
+ * calculate the ucode channel switch time
+ * adding TSF as one of the factor for when to switch
+ */
+ if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+ if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+ beacon_interval)) {
+ switch_count -= (priv->ucode_beacon_time -
+ tsf_low) / beacon_interval;
+ } else
+ switch_count = 0;
+ }
+ if (switch_count <= 1)
+ cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+ else {
+ switch_time_in_usec =
+ vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+ ucode_switch_time = iwl_usecs_to_beacons(priv,
+ switch_time_in_usec,
+ beacon_interval);
+ cmd.switch_time = iwl_add_beacon_time(priv,
+ priv->ucode_beacon_time,
+ ucode_switch_time,
+ beacon_interval);
+ }
+ IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+ cmd.switch_time);
+ ch_info = iwl_get_channel_info(priv, priv->band, ch);
+ if (ch_info)
+ cmd.expect_beacon = is_channel_radar(ch_info);
+ else {
+ IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+ return -EFAULT;
+ }
+ priv->switch_rxon.channel = cmd.channel;
+ priv->switch_rxon.switch_in_progress = true;
+
+ return iwl_send_cmd_sync(priv, &hcmd);
+}
+
+static struct iwl_lib_ops iwl2000_lib = {
+ .set_hw_params = iwl2000_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
+ .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = iwl_hw_txq_free_tfd,
+ .txq_init = iwl_hw_tx_queue_init,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_bt_setup_deferred_work,
+ .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+ .load_ucode = iwlagn_load_ucode,
+ .dump_nic_event_log = iwl_dump_nic_event_log,
+ .dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
+ .update_chain_flags = iwl_update_chain_flags,
+ .set_channel_switch = iwl2030_hw_channel_switch,
+ .apm_ops = {
+ .init = iwl_apm_init,
+ .config = iwl2000_nic_config,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
+ },
+ .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
+ .release_semaphore = iwlcore_eeprom_release_semaphore,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
+ .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ },
+ .isr_ops = {
+ .isr = iwl_isr_ict,
+ .free = iwl_free_isr_ict,
+ .alloc = iwl_alloc_isr_ict,
+ .reset = iwl_reset_ict,
+ .disable = iwl_disable_ict,
+ },
+ .temp_ops = {
+ .temperature = iwlagn_temperature,
+ },
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ .bt_stats_read = iwl_ucode_bt_stats_read,
+ .reply_tx_error = iwl_reply_tx_error_read,
+ },
+ .txfifo_flush = iwlagn_txfifo_flush,
+ .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
+ .tt_ops = {
+ .lower_power_detection = iwl_tt_is_low_power_state,
+ .tt_power_mode = iwl_tt_current_power_mode,
+ .ct_kill_check = iwl_check_for_ct_kill,
+ }
+};
+
+static const struct iwl_ops iwl2000_ops = {
+ .lib = &iwl2000_lib,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
+ .led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl2030_ops = {
+ .lib = &iwl2000_lib,
+ .hcmd = &iwlagn_bt_hcmd,
+ .utils = &iwlagn_hcmd_utils,
+ .led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl200_ops = {
+ .lib = &iwl2000_lib,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
+ .led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl230_ops = {
+ .lib = &iwl2000_lib,
+ .hcmd = &iwlagn_bt_hcmd,
+ .utils = &iwlagn_hcmd_utils,
+ .led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static struct iwl_base_params iwl2000_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+ .shadow_ram_support = true,
+ .led_compensation = 51,
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_DEF_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .shadow_reg_enable = true,
+};
+
+
+static struct iwl_base_params iwl2030_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .shadow_reg_enable = true,
+};
+
+static struct iwl_ht_params iwl2000_ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+};
+
+static struct iwl_bt_params iwl2030_bt_params = {
+ .bt_statistics = true,
+ /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+ .advanced_bt_coexist = true,
+ .agg_time_limit = BT_AGG_THRESHOLD_DEF,
+ .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+ .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+ .bt_sco_disable = true,
+ .bt_session_2 = true,
+};
+
+#define IWL_DEVICE_2000 \
+ .fw_name_pre = IWL2000_FW_PRE, \
+ .ucode_api_max = IWL2000_UCODE_API_MAX, \
+ .ucode_api_min = IWL2000_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .ops = &iwl2000_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2000_base_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .iq_invert = true \
+
+struct iwl_cfg iwl2000_2bgn_cfg = {
+ .name = "2000 Series 2x2 BGN",
+ IWL_DEVICE_2000,
+ .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl2000_2bg_cfg = {
+ .name = "2000 Series 2x2 BG",
+ IWL_DEVICE_2000,
+};
+
+#define IWL_DEVICE_2030 \
+ .fw_name_pre = IWL2030_FW_PRE, \
+ .ucode_api_max = IWL2030_UCODE_API_MAX, \
+ .ucode_api_min = IWL2030_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .ops = &iwl2030_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2030_base_params, \
+ .bt_params = &iwl2030_bt_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true, \
+ .iq_invert = true \
+
+struct iwl_cfg iwl2030_2bgn_cfg = {
+ .name = "2000 Series 2x2 BGN/BT",
+ IWL_DEVICE_2030,
+ .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl2030_2bg_cfg = {
+ .name = "2000 Series 2x2 BG/BT",
+ IWL_DEVICE_2030,
+};
+
+#define IWL_DEVICE_6035 \
+ .fw_name_pre = IWL2030_FW_PRE, \
+ .ucode_api_max = IWL2030_UCODE_API_MAX, \
+ .ucode_api_min = IWL2030_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_6035_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6035_TX_POWER_VERSION, \
+ .ops = &iwl2030_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2030_base_params, \
+ .bt_params = &iwl2030_bt_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true \
+
+struct iwl_cfg iwl6035_2agn_cfg = {
+ .name = "2000 Series 2x2 AGN/BT",
+ IWL_DEVICE_6035,
+ .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl6035_2abg_cfg = {
+ .name = "2000 Series 2x2 ABG/BT",
+ IWL_DEVICE_6035,
+};
+
+struct iwl_cfg iwl6035_2bg_cfg = {
+ .name = "2000 Series 2x2 BG/BT",
+ IWL_DEVICE_6035,
+};
+
+#define IWL_DEVICE_200 \
+ .fw_name_pre = IWL200_FW_PRE, \
+ .ucode_api_max = IWL200_UCODE_API_MAX, \
+ .ucode_api_min = IWL200_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .ops = &iwl200_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2000_base_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true, \
+ .rx_with_siso_diversity = true \
+
+struct iwl_cfg iwl200_bg_cfg = {
+ .name = "200 Series 1x1 BG",
+ IWL_DEVICE_200,
+};
+
+struct iwl_cfg iwl200_bgn_cfg = {
+ .name = "200 Series 1x1 BGN",
+ IWL_DEVICE_200,
+ .ht_params = &iwl2000_ht_params,
+};
+
+#define IWL_DEVICE_230 \
+ .fw_name_pre = IWL200_FW_PRE, \
+ .ucode_api_max = IWL200_UCODE_API_MAX, \
+ .ucode_api_min = IWL200_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .ops = &iwl230_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2030_base_params, \
+ .bt_params = &iwl2030_bt_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true, \
+ .rx_with_siso_diversity = true \
+
+struct iwl_cfg iwl230_bg_cfg = {
+ .name = "200 Series 1x1 BG/BT",
+ IWL_DEVICE_230,
+};
+
+struct iwl_cfg iwl230_bgn_cfg = {
+ .name = "200 Series 1x1 BGN/BT",
+ IWL_DEVICE_230,
+ .ht_params = &iwl2000_ht_params,
+};
+
+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL200_MODULE_FIRMWARE(IWL200_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index fd9fbc93ea1b..3ea31b659d1a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -51,7 +51,7 @@
#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
-#define IWL5000_UCODE_API_MAX 2
+#define IWL5000_UCODE_API_MAX 5
#define IWL5150_UCODE_API_MAX 2
/* Lowest firmware API version supported */
@@ -385,14 +385,16 @@ static struct iwl_lib_ops iwl5000_lib = {
.calib_version = iwlagn_eeprom_calib_version,
.query_addr = iwlagn_eeprom_query_addr,
},
- .post_associate = iwl_post_associate,
- .isr = iwl_isr_ict,
- .config_ap = iwl_config_ap,
+ .isr_ops = {
+ .isr = iwl_isr_ict,
+ .free = iwl_free_isr_ict,
+ .alloc = iwl_alloc_isr_ict,
+ .reset = iwl_reset_ict,
+ .disable = iwl_disable_ict,
+ },
.temp_ops = {
.temperature = iwlagn_temperature,
},
- .manage_ibss_station = iwlagn_manage_ibss_station,
- .update_bcast_stations = iwl_update_bcast_stations,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -400,9 +402,6 @@ static struct iwl_lib_ops iwl5000_lib = {
.bt_stats_read = iwl_ucode_bt_stats_read,
.reply_tx_error = iwl_reply_tx_error_read,
},
- .recover_from_tx_stall = iwl_bg_monitor_recover,
- .check_plcp_health = iwl_good_plcp_health,
- .check_ack_health = iwl_good_ack_health,
.txfifo_flush = iwlagn_txfifo_flush,
.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
.tt_ops = {
@@ -453,14 +452,16 @@ static struct iwl_lib_ops iwl5150_lib = {
.calib_version = iwlagn_eeprom_calib_version,
.query_addr = iwlagn_eeprom_query_addr,
},
- .post_associate = iwl_post_associate,
- .isr = iwl_isr_ict,
- .config_ap = iwl_config_ap,
+ .isr_ops = {
+ .isr = iwl_isr_ict,
+ .free = iwl_free_isr_ict,
+ .alloc = iwl_alloc_isr_ict,
+ .reset = iwl_reset_ict,
+ .disable = iwl_disable_ict,
+ },
.temp_ops = {
.temperature = iwl5150_temperature,
},
- .manage_ibss_station = iwlagn_manage_ibss_station,
- .update_bcast_stations = iwl_update_bcast_stations,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -468,9 +469,6 @@ static struct iwl_lib_ops iwl5150_lib = {
.bt_stats_read = iwl_ucode_bt_stats_read,
.reply_tx_error = iwl_reply_tx_error_read,
},
- .recover_from_tx_stall = iwl_bg_monitor_recover,
- .check_plcp_health = iwl_good_plcp_health,
- .check_ack_health = iwl_good_ack_health,
.txfifo_flush = iwlagn_txfifo_flush,
.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
.tt_ops = {
@@ -485,6 +483,7 @@ static const struct iwl_ops iwl5000_ops = {
.hcmd = &iwlagn_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
};
static const struct iwl_ops iwl5150_ops = {
@@ -492,6 +491,7 @@ static const struct iwl_ops iwl5150_ops = {
.hcmd = &iwlagn_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
};
static struct iwl_base_params iwl5000_base_params = {
@@ -505,7 +505,7 @@ static struct iwl_base_params iwl5000_base_params = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
@@ -516,66 +516,43 @@ static struct iwl_ht_params iwl5000_ht_params = {
.use_rts_for_aggregation = true, /* use rts/cts protection */
};
+#define IWL_DEVICE_5000 \
+ .fw_name_pre = IWL5000_FW_PRE, \
+ .ucode_api_max = IWL5000_UCODE_API_MAX, \
+ .ucode_api_min = IWL5000_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
+ .ops = &iwl5000_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl5000_base_params, \
+ .led_mode = IWL_LED_BLINK
+
struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
- .fw_name_pre = IWL5000_FW_PRE,
- .ucode_api_max = IWL5000_UCODE_API_MAX,
- .ucode_api_min = IWL5000_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_ABC,
- .valid_rx_ant = ANT_ABC,
- .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .ops = &iwl5000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl5000_base_params,
+ IWL_DEVICE_5000,
.ht_params = &iwl5000_ht_params,
};
struct iwl_cfg iwl5100_bgn_cfg = {
.name = "Intel(R) WiFi Link 5100 BGN",
- .fw_name_pre = IWL5000_FW_PRE,
- .ucode_api_max = IWL5000_UCODE_API_MAX,
- .ucode_api_min = IWL5000_UCODE_API_MIN,
- .sku = IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_B,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .ops = &iwl5000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl5000_base_params,
+ IWL_DEVICE_5000,
+ .valid_tx_ant = ANT_B, /* .cfg overwrite */
+ .valid_rx_ant = ANT_AB, /* .cfg overwrite */
.ht_params = &iwl5000_ht_params,
};
struct iwl_cfg iwl5100_abg_cfg = {
.name = "Intel(R) WiFi Link 5100 ABG",
- .fw_name_pre = IWL5000_FW_PRE,
- .ucode_api_max = IWL5000_UCODE_API_MAX,
- .ucode_api_min = IWL5000_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G,
- .valid_tx_ant = ANT_B,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .ops = &iwl5000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl5000_base_params,
+ IWL_DEVICE_5000,
+ .valid_tx_ant = ANT_B, /* .cfg overwrite */
+ .valid_rx_ant = ANT_AB, /* .cfg overwrite */
};
struct iwl_cfg iwl5100_agn_cfg = {
.name = "Intel(R) WiFi Link 5100 AGN",
- .fw_name_pre = IWL5000_FW_PRE,
- .ucode_api_max = IWL5000_UCODE_API_MAX,
- .ucode_api_min = IWL5000_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_B,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
- .ops = &iwl5000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl5000_base_params,
+ IWL_DEVICE_5000,
+ .valid_tx_ant = ANT_B, /* .cfg overwrite */
+ .valid_rx_ant = ANT_AB, /* .cfg overwrite */
.ht_params = &iwl5000_ht_params,
};
@@ -584,48 +561,39 @@ struct iwl_cfg iwl5350_agn_cfg = {
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
.ucode_api_min = IWL5000_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_ABC,
- .valid_rx_ant = ANT_ABC,
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
.ops = &iwl5000_ops,
.mod_params = &iwlagn_mod_params,
.base_params = &iwl5000_base_params,
.ht_params = &iwl5000_ht_params,
+ .led_mode = IWL_LED_BLINK,
+ .internal_wimax_coex = true,
};
+#define IWL_DEVICE_5150 \
+ .fw_name_pre = IWL5150_FW_PRE, \
+ .ucode_api_max = IWL5150_UCODE_API_MAX, \
+ .ucode_api_min = IWL5150_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
+ .ops = &iwl5150_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl5000_base_params, \
+ .need_dc_calib = true, \
+ .led_mode = IWL_LED_BLINK, \
+ .internal_wimax_coex = true
+
struct iwl_cfg iwl5150_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
- .fw_name_pre = IWL5150_FW_PRE,
- .ucode_api_max = IWL5150_UCODE_API_MAX,
- .ucode_api_min = IWL5150_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_A,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .ops = &iwl5150_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl5000_base_params,
+ IWL_DEVICE_5150,
.ht_params = &iwl5000_ht_params,
- .need_dc_calib = true,
+
};
struct iwl_cfg iwl5150_abg_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
- .fw_name_pre = IWL5150_FW_PRE,
- .ucode_api_max = IWL5150_UCODE_API_MAX,
- .ucode_api_min = IWL5150_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G,
- .valid_tx_ant = ANT_A,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .ops = &iwl5150_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl5000_base_params,
- .need_dc_calib = true,
+ IWL_DEVICE_5150,
};
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 11e6532fc573..a745b01c0ec1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -53,13 +53,11 @@
#define IWL6000_UCODE_API_MAX 4
#define IWL6050_UCODE_API_MAX 5
#define IWL6000G2_UCODE_API_MAX 5
-#define IWL130_UCODE_API_MAX 5
/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
#define IWL6050_UCODE_API_MIN 4
#define IWL6000G2_UCODE_API_MIN 4
-#define IWL130_UCODE_API_MIN 5
#define IWL6000_FW_PRE "iwlwifi-6000-"
#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
@@ -69,17 +67,13 @@
#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
-#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-"
-#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
-#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
+#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
+#define _IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
+#define IWL6005_MODULE_FIRMWARE(api) _IWL6005_MODULE_FIRMWARE(api)
-#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-"
-#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
-#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
-
-#define IWL130_FW_PRE "iwlwifi-130-"
-#define _IWL130_MODULE_FIRMWARE(api) IWL130_FW_PRE #api ".ucode"
-#define IWL130_MODULE_FIRMWARE(api) _IWL130_MODULE_FIRMWARE(api)
+#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
+#define _IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
+#define IWL6030_MODULE_FIRMWARE(api) _IWL6030_MODULE_FIRMWARE(api)
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
@@ -96,7 +90,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
}
-static void iwl6050g2_additional_nic_config(struct iwl_priv *priv)
+static void iwl6150_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
@@ -188,7 +182,11 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ if (priv->cfg->rx_with_siso_diversity)
+ priv->hw_params.rx_chains_num = 1;
+ else
+ priv->hw_params.rx_chains_num =
+ num_of_ant(priv->cfg->valid_rx_ant);
priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
@@ -328,14 +326,16 @@ static struct iwl_lib_ops iwl6000_lib = {
.query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
- .post_associate = iwl_post_associate,
- .isr = iwl_isr_ict,
- .config_ap = iwl_config_ap,
+ .isr_ops = {
+ .isr = iwl_isr_ict,
+ .free = iwl_free_isr_ict,
+ .alloc = iwl_alloc_isr_ict,
+ .reset = iwl_reset_ict,
+ .disable = iwl_disable_ict,
+ },
.temp_ops = {
.temperature = iwlagn_temperature,
},
- .manage_ibss_station = iwlagn_manage_ibss_station,
- .update_bcast_stations = iwl_update_bcast_stations,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -343,9 +343,6 @@ static struct iwl_lib_ops iwl6000_lib = {
.bt_stats_read = iwl_ucode_bt_stats_read,
.reply_tx_error = iwl_reply_tx_error_read,
},
- .recover_from_tx_stall = iwl_bg_monitor_recover,
- .check_plcp_health = iwl_good_plcp_health,
- .check_ack_health = iwl_good_ack_health,
.txfifo_flush = iwlagn_txfifo_flush,
.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
.tt_ops = {
@@ -355,7 +352,7 @@ static struct iwl_lib_ops iwl6000_lib = {
}
};
-static struct iwl_lib_ops iwl6000g2b_lib = {
+static struct iwl_lib_ops iwl6030_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
@@ -399,14 +396,16 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
.query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
- .post_associate = iwl_post_associate,
- .isr = iwl_isr_ict,
- .config_ap = iwl_config_ap,
+ .isr_ops = {
+ .isr = iwl_isr_ict,
+ .free = iwl_free_isr_ict,
+ .alloc = iwl_alloc_isr_ict,
+ .reset = iwl_reset_ict,
+ .disable = iwl_disable_ict,
+ },
.temp_ops = {
.temperature = iwlagn_temperature,
},
- .manage_ibss_station = iwlagn_manage_ibss_station,
- .update_bcast_stations = iwl_update_bcast_stations,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
@@ -414,9 +413,6 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
.bt_stats_read = iwl_ucode_bt_stats_read,
.reply_tx_error = iwl_reply_tx_error_read,
},
- .recover_from_tx_stall = iwl_bg_monitor_recover,
- .check_plcp_health = iwl_good_plcp_health,
- .check_ack_health = iwl_good_ack_health,
.txfifo_flush = iwlagn_txfifo_flush,
.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
.tt_ops = {
@@ -430,8 +426,8 @@ static struct iwl_nic_ops iwl6050_nic_ops = {
.additional_nic_config = &iwl6050_additional_nic_config,
};
-static struct iwl_nic_ops iwl6050g2_nic_ops = {
- .additional_nic_config = &iwl6050g2_additional_nic_config,
+static struct iwl_nic_ops iwl6150_nic_ops = {
+ .additional_nic_config = &iwl6150_additional_nic_config,
};
static const struct iwl_ops iwl6000_ops = {
@@ -439,6 +435,7 @@ static const struct iwl_ops iwl6000_ops = {
.hcmd = &iwlagn_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
};
static const struct iwl_ops iwl6050_ops = {
@@ -447,21 +444,24 @@ static const struct iwl_ops iwl6050_ops = {
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
.nic = &iwl6050_nic_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
};
-static const struct iwl_ops iwl6050g2_ops = {
+static const struct iwl_ops iwl6150_ops = {
.lib = &iwl6000_lib,
.hcmd = &iwlagn_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
- .nic = &iwl6050g2_nic_ops,
+ .nic = &iwl6150_nic_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
};
-static const struct iwl_ops iwl6000g2b_ops = {
- .lib = &iwl6000g2b_lib,
+static const struct iwl_ops iwl6030_ops = {
+ .lib = &iwl6030_lib,
.hcmd = &iwlagn_bt_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
};
static struct iwl_base_params iwl6000_base_params = {
@@ -475,16 +475,16 @@ static struct iwl_base_params iwl6000_base_params = {
.shadow_ram_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
+ .shadow_reg_enable = true,
};
static struct iwl_base_params iwl6050_base_params = {
@@ -498,18 +498,18 @@ static struct iwl_base_params iwl6050_base_params = {
.shadow_ram_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1500,
- .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
+ .wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 1024,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
+ .shadow_reg_enable = true,
};
-static struct iwl_base_params iwl6000_coex_base_params = {
+static struct iwl_base_params iwl6000_g2_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -518,18 +518,18 @@ static struct iwl_base_params iwl6000_coex_base_params = {
.use_bsm = false,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
- .led_compensation = 51,
+ .led_compensation = 57,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
+ .shadow_reg_enable = true,
};
static struct iwl_ht_params iwl6000_ht_params = {
@@ -541,285 +541,175 @@ static struct iwl_bt_params iwl6000_bt_params = {
.bt_statistics = true,
/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
.advanced_bt_coexist = true,
+ .agg_time_limit = BT_AGG_THRESHOLD_DEF,
.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+ .bt_sco_disable = true,
+};
+
+#define IWL_DEVICE_6005 \
+ .fw_name_pre = IWL6005_FW_PRE, \
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
+ .ops = &iwl6000_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl6000_g2_base_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE
+
+struct iwl_cfg iwl6005_2agn_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
+ IWL_DEVICE_6005,
+ .ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6000g2a_2agn_cfg = {
- .name = "6000 Series 2x2 AGN Gen2a",
- .fw_name_pre = IWL6000G2A_FW_PRE,
- .ucode_api_max = IWL6000G2_UCODE_API_MAX,
- .ucode_api_min = IWL6000G2_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_AB,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
- .ops = &iwl6000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_base_params,
+struct iwl_cfg iwl6005_2abg_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
+ IWL_DEVICE_6005,
+};
+
+struct iwl_cfg iwl6005_2bg_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
+ IWL_DEVICE_6005,
+};
+
+#define IWL_DEVICE_6030 \
+ .fw_name_pre = IWL6030_FW_PRE, \
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
+ .ops = &iwl6030_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl6000_g2_base_params, \
+ .bt_params = &iwl6000_bt_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true \
+
+struct iwl_cfg iwl6030_2agn_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
+ IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
- .need_dc_calib = true,
- .need_temp_offset_calib = true,
-};
-
-struct iwl_cfg iwl6000g2a_2abg_cfg = {
- .name = "6000 Series 2x2 ABG Gen2a",
- .fw_name_pre = IWL6000G2A_FW_PRE,
- .ucode_api_max = IWL6000G2_UCODE_API_MAX,
- .ucode_api_min = IWL6000G2_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G,
- .valid_tx_ant = ANT_AB,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
- .ops = &iwl6000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_base_params,
- .need_dc_calib = true,
- .need_temp_offset_calib = true,
-};
-
-struct iwl_cfg iwl6000g2a_2bg_cfg = {
- .name = "6000 Series 2x2 BG Gen2a",
- .fw_name_pre = IWL6000G2A_FW_PRE,
- .ucode_api_max = IWL6000G2_UCODE_API_MAX,
- .ucode_api_min = IWL6000G2_UCODE_API_MIN,
- .sku = IWL_SKU_G,
- .valid_tx_ant = ANT_AB,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
- .ops = &iwl6000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_base_params,
- .need_dc_calib = true,
- .need_temp_offset_calib = true,
-};
-
-struct iwl_cfg iwl6000g2b_2agn_cfg = {
- .name = "6000 Series 2x2 AGN Gen2b",
- .fw_name_pre = IWL6000G2B_FW_PRE,
- .ucode_api_max = IWL6000G2_UCODE_API_MAX,
- .ucode_api_min = IWL6000G2_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_AB,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
- .ops = &iwl6000g2b_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_coex_base_params,
- .bt_params = &iwl6000_bt_params,
+};
+
+struct iwl_cfg iwl6030_2abg_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
+ IWL_DEVICE_6030,
+};
+
+struct iwl_cfg iwl6030_2bgn_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
+ IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
- .need_dc_calib = true,
- .need_temp_offset_calib = true,
- /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
- .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
-};
-
-struct iwl_cfg iwl6000g2b_2abg_cfg = {
- .name = "6000 Series 2x2 ABG Gen2b",
- .fw_name_pre = IWL6000G2B_FW_PRE,
- .ucode_api_max = IWL6000G2_UCODE_API_MAX,
- .ucode_api_min = IWL6000G2_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G,
- .valid_tx_ant = ANT_AB,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
- .ops = &iwl6000g2b_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_coex_base_params,
- .bt_params = &iwl6000_bt_params,
- .need_dc_calib = true,
- .need_temp_offset_calib = true,
- /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
- .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
-};
-
-struct iwl_cfg iwl6000g2b_2bgn_cfg = {
- .name = "6000 Series 2x2 BGN Gen2b",
- .fw_name_pre = IWL6000G2B_FW_PRE,
- .ucode_api_max = IWL6000G2_UCODE_API_MAX,
- .ucode_api_min = IWL6000G2_UCODE_API_MIN,
- .sku = IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_AB,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
- .ops = &iwl6000g2b_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_coex_base_params,
- .bt_params = &iwl6000_bt_params,
+};
+
+struct iwl_cfg iwl6030_2bg_cfg = {
+ .name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
+ IWL_DEVICE_6030,
+};
+
+struct iwl_cfg iwl1030_bgn_cfg = {
+ .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
+ IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
- .need_dc_calib = true,
- .need_temp_offset_calib = true,
- /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
- .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
-};
-
-struct iwl_cfg iwl6000g2b_2bg_cfg = {
- .name = "6000 Series 2x2 BG Gen2b",
- .fw_name_pre = IWL6000G2B_FW_PRE,
- .ucode_api_max = IWL6000G2_UCODE_API_MAX,
- .ucode_api_min = IWL6000G2_UCODE_API_MIN,
- .sku = IWL_SKU_G,
- .valid_tx_ant = ANT_AB,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
- .ops = &iwl6000g2b_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_coex_base_params,
- .bt_params = &iwl6000_bt_params,
- .need_dc_calib = true,
- .need_temp_offset_calib = true,
- /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
- .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
-};
-
-struct iwl_cfg iwl6000g2b_bgn_cfg = {
- .name = "6000 Series 1x2 BGN Gen2b",
- .fw_name_pre = IWL6000G2B_FW_PRE,
- .ucode_api_max = IWL6000G2_UCODE_API_MAX,
- .ucode_api_min = IWL6000G2_UCODE_API_MIN,
- .sku = IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_A,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
- .ops = &iwl6000g2b_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_coex_base_params,
- .bt_params = &iwl6000_bt_params,
+};
+
+struct iwl_cfg iwl1030_bg_cfg = {
+ .name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
+ IWL_DEVICE_6030,
+};
+
+struct iwl_cfg iwl130_bgn_cfg = {
+ .name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
+ IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
- .need_dc_calib = true,
- .need_temp_offset_calib = true,
- /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
- .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
-};
-
-struct iwl_cfg iwl6000g2b_bg_cfg = {
- .name = "6000 Series 1x2 BG Gen2b",
- .fw_name_pre = IWL6000G2B_FW_PRE,
- .ucode_api_max = IWL6000G2_UCODE_API_MAX,
- .ucode_api_min = IWL6000G2_UCODE_API_MIN,
- .sku = IWL_SKU_G,
- .valid_tx_ant = ANT_A,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
- .ops = &iwl6000g2b_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_coex_base_params,
- .bt_params = &iwl6000_bt_params,
- .need_dc_calib = true,
- .need_temp_offset_calib = true,
- /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
- .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+ .rx_with_siso_diversity = true,
+};
+
+struct iwl_cfg iwl130_bg_cfg = {
+ .name = "Intel(R) Centrino(R) Wireless-N 130 BG",
+ IWL_DEVICE_6030,
+ .rx_with_siso_diversity = true,
};
/*
* "i": Internal configuration, use internal Power Amplifier
*/
+#define IWL_DEVICE_6000i \
+ .fw_name_pre = IWL6000_FW_PRE, \
+ .ucode_api_max = IWL6000_UCODE_API_MAX, \
+ .ucode_api_min = IWL6000_UCODE_API_MIN, \
+ .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
+ .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
+ .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
+ .ops = &iwl6000_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl6000_base_params, \
+ .pa_type = IWL_PA_INTERNAL, \
+ .led_mode = IWL_LED_BLINK
+
struct iwl_cfg iwl6000i_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
- .fw_name_pre = IWL6000_FW_PRE,
- .ucode_api_max = IWL6000_UCODE_API_MAX,
- .ucode_api_min = IWL6000_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_BC,
- .valid_rx_ant = ANT_BC,
- .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
- .ops = &iwl6000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_base_params,
+ IWL_DEVICE_6000i,
.ht_params = &iwl6000_ht_params,
- .pa_type = IWL_PA_INTERNAL,
};
struct iwl_cfg iwl6000i_2abg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
- .fw_name_pre = IWL6000_FW_PRE,
- .ucode_api_max = IWL6000_UCODE_API_MAX,
- .ucode_api_min = IWL6000_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G,
- .valid_tx_ant = ANT_BC,
- .valid_rx_ant = ANT_BC,
- .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
- .ops = &iwl6000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_base_params,
- .pa_type = IWL_PA_INTERNAL,
+ IWL_DEVICE_6000i,
};
struct iwl_cfg iwl6000i_2bg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
- .fw_name_pre = IWL6000_FW_PRE,
- .ucode_api_max = IWL6000_UCODE_API_MAX,
- .ucode_api_min = IWL6000_UCODE_API_MIN,
- .sku = IWL_SKU_G,
- .valid_tx_ant = ANT_BC,
- .valid_rx_ant = ANT_BC,
- .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
- .ops = &iwl6000_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_base_params,
- .pa_type = IWL_PA_INTERNAL,
-};
+ IWL_DEVICE_6000i,
+};
+
+#define IWL_DEVICE_6050 \
+ .fw_name_pre = IWL6050_FW_PRE, \
+ .ucode_api_max = IWL6050_UCODE_API_MAX, \
+ .ucode_api_min = IWL6050_UCODE_API_MIN, \
+ .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
+ .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
+ .ops = &iwl6050_ops, \
+ .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl6050_base_params, \
+ .need_dc_calib = true, \
+ .led_mode = IWL_LED_BLINK, \
+ .internal_wimax_coex = true
struct iwl_cfg iwl6050_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
- .fw_name_pre = IWL6050_FW_PRE,
- .ucode_api_max = IWL6050_UCODE_API_MAX,
- .ucode_api_min = IWL6050_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_AB,
- .valid_rx_ant = ANT_AB,
- .ops = &iwl6050_ops,
- .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6050_base_params,
- .ht_params = &iwl6000_ht_params,
- .need_dc_calib = true,
-};
-
-struct iwl_cfg iwl6050g2_bgn_cfg = {
- .name = "6050 Series 1x2 BGN Gen2",
- .fw_name_pre = IWL6050_FW_PRE,
- .ucode_api_max = IWL6050_UCODE_API_MAX,
- .ucode_api_min = IWL6050_UCODE_API_MIN,
- .sku = IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_A,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION,
- .ops = &iwl6050g2_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6050_base_params,
+ IWL_DEVICE_6050,
.ht_params = &iwl6000_ht_params,
- .need_dc_calib = true,
};
struct iwl_cfg iwl6050_2abg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
+ IWL_DEVICE_6050,
+};
+
+struct iwl_cfg iwl6150_bgn_cfg = {
+ .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
.fw_name_pre = IWL6050_FW_PRE,
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G,
- .valid_tx_ant = ANT_AB,
- .valid_rx_ant = ANT_AB,
- .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
- .ops = &iwl6050_ops,
+ .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
+ .ops = &iwl6150_ops,
.mod_params = &iwlagn_mod_params,
.base_params = &iwl6050_base_params,
+ .ht_params = &iwl6000_ht_params,
.need_dc_calib = true,
+ .led_mode = IWL_LED_RF_STATE,
+ .internal_wimax_coex = true,
};
struct iwl_cfg iwl6000_3agn_cfg = {
@@ -827,9 +717,6 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
.ucode_api_min = IWL6000_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_ABC,
- .valid_rx_ant = ANT_ABC,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.ops = &iwl6000_ops,
@@ -837,49 +724,10 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.base_params = &iwl6000_base_params,
.ht_params = &iwl6000_ht_params,
.need_dc_calib = true,
-};
-
-struct iwl_cfg iwl130_bgn_cfg = {
- .name = "Intel(R) 130 Series 1x1 BGN",
- .fw_name_pre = IWL6000G2B_FW_PRE,
- .ucode_api_max = IWL130_UCODE_API_MAX,
- .ucode_api_min = IWL130_UCODE_API_MIN,
- .sku = IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_A,
- .valid_rx_ant = ANT_A,
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
- .ops = &iwl6000g2b_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_coex_base_params,
- .bt_params = &iwl6000_bt_params,
- .ht_params = &iwl6000_ht_params,
- .need_dc_calib = true,
- /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
- .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
-};
-
-struct iwl_cfg iwl130_bg_cfg = {
- .name = "Intel(R) 130 Series 1x2 BG",
- .fw_name_pre = IWL6000G2B_FW_PRE,
- .ucode_api_max = IWL130_UCODE_API_MAX,
- .ucode_api_min = IWL130_UCODE_API_MIN,
- .sku = IWL_SKU_G,
- .valid_tx_ant = ANT_A,
- .valid_rx_ant = ANT_A,
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
- .ops = &iwl6000g2b_ops,
- .mod_params = &iwlagn_mod_params,
- .base_params = &iwl6000_coex_base_params,
- .bt_params = &iwl6000_bt_params,
- .need_dc_calib = true,
- /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
- .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
+ .led_mode = IWL_LED_BLINK,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL130_MODULE_FIRMWARE(IWL130_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
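The IWL_DEVICE_6000i and IWL_DEVICE_6050 macros above collapse the per-device boilerplate into shared designated initializers; each cfg then adds only what differs (name, ht_params, diversity flags). A minimal standalone analog of that pattern, using made-up types rather than the driver's, would be:

#include <stdio.h>

struct cfg { const char *name; int api_max; int ht; };

/* shared fields, expanded into each initializer like IWL_DEVICE_6050 above */
#define DEVICE_COMMON .api_max = 41

static const struct cfg agn = { .name = "2agn", DEVICE_COMMON, .ht = 1 };
static const struct cfg abg = { .name = "2abg", DEVICE_COMMON };

int main(void)
{
	printf("%s: api_max=%d ht=%d\n", agn.name, agn.api_max, agn.ht);
	printf("%s: api_max=%d ht=%d\n", abg.name, abg.api_max, abg.ht);
	return 0;
}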
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index e2019e756936..9006293e740c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -631,8 +631,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
}
spin_lock_irqsave(&priv->lock, flags);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
rx_info = &(((struct iwl_bt_notif_statistics *)resp)->
rx.general.common);
ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm);
@@ -732,8 +731,122 @@ static inline u8 find_first_chain(u8 mask)
return CHAIN_C;
}
+/*
+ * Run disconnected antenna algorithm to find out which antennas are
+ * disconnected.
+ */
+static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32 *average_sig,
+ struct iwl_chain_noise_data *data)
+{
+ u32 active_chains = 0;
+ u32 max_average_sig;
+ u16 max_average_sig_antenna_i;
+ u8 num_tx_chains;
+ u8 first_chain;
+ u16 i = 0;
+
+ average_sig[0] = data->chain_signal_a /
+ priv->cfg->base_params->chain_noise_num_beacons;
+ average_sig[1] = data->chain_signal_b /
+ priv->cfg->base_params->chain_noise_num_beacons;
+ average_sig[2] = data->chain_signal_c /
+ priv->cfg->base_params->chain_noise_num_beacons;
+
+ if (average_sig[0] >= average_sig[1]) {
+ max_average_sig = average_sig[0];
+ max_average_sig_antenna_i = 0;
+ active_chains = (1 << max_average_sig_antenna_i);
+ } else {
+ max_average_sig = average_sig[1];
+ max_average_sig_antenna_i = 1;
+ active_chains = (1 << max_average_sig_antenna_i);
+ }
+
+ if (average_sig[2] >= max_average_sig) {
+ max_average_sig = average_sig[2];
+ max_average_sig_antenna_i = 2;
+ active_chains = (1 << max_average_sig_antenna_i);
+ }
+
+ IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
+ average_sig[0], average_sig[1], average_sig[2]);
+ IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
+ max_average_sig, max_average_sig_antenna_i);
+
+ /* Compare signal strengths for all 3 receivers. */
+ for (i = 0; i < NUM_RX_CHAINS; i++) {
+ if (i != max_average_sig_antenna_i) {
+ s32 rssi_delta = (max_average_sig - average_sig[i]);
+
+ /* If signal is very weak, compared with
+ * strongest, mark it as disconnected. */
+ if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
+ data->disconn_array[i] = 1;
+ else
+ active_chains |= (1 << i);
+ IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
+ "disconn_array[i] = %d\n",
+ i, rssi_delta, data->disconn_array[i]);
+ }
+ }
+
+ /*
+ * The above algorithm sometimes fails when the ucode
+ * reports 0 for all chains. It's not clear why that
+ * happens to start with, but it is then causing trouble
+ * because this can make us enable more chains than the
+ * hardware really has.
+ *
+ * To be safe, simply mask out any chains that we know
+ * are not on the device.
+ */
+ active_chains &= priv->hw_params.valid_rx_ant;
+
+ num_tx_chains = 0;
+ for (i = 0; i < NUM_RX_CHAINS; i++) {
+ /* loops on all the bits of
+ * priv->hw_setting.valid_tx_ant */
+ u8 ant_msk = (1 << i);
+ if (!(priv->hw_params.valid_tx_ant & ant_msk))
+ continue;
+
+ num_tx_chains++;
+ if (data->disconn_array[i] == 0)
+ /* there is a Tx antenna connected */
+ break;
+ if (num_tx_chains == priv->hw_params.tx_chains_num &&
+ data->disconn_array[i]) {
+ /*
+ * If all chains are disconnected
+ * connect the first valid tx chain
+ */
+ first_chain =
+ find_first_chain(priv->cfg->valid_tx_ant);
+ data->disconn_array[first_chain] = 0;
+ active_chains |= BIT(first_chain);
+ IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected \
+ W/A - declare %d as connected\n",
+ first_chain);
+ break;
+ }
+ }
+
+ if (active_chains != priv->hw_params.valid_rx_ant &&
+ active_chains != priv->chain_noise_data.active_chains)
+ IWL_DEBUG_CALIB(priv,
+ "Detected that not all antennas are connected! "
+ "Connected: %#x, valid: %#x.\n",
+ active_chains, priv->hw_params.valid_rx_ant);
+
+ /* Save for use within RXON, TX, SCAN commands, etc. */
+ data->active_chains = active_chains;
+ IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
+ active_chains);
+}
+
+
/*
- * Accumulate 20 beacons of signal and noise statistics for each of
+ * Accumulate 16 beacons of signal and noise statistics for each of
* 3 receivers/antennas/rx-chains, then figure out:
* 1) Which antennas are connected.
* 2) Differential rx gain settings to balance the 3 receivers.
@@ -750,8 +863,6 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
u32 chain_sig_c;
u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
- u32 max_average_sig;
- u16 max_average_sig_antenna_i;
u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
u16 i = 0;
@@ -759,11 +870,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
u16 stat_chnum = INITIALIZATION_VALUE;
u8 rxon_band24;
u8 stat_band24;
- u32 active_chains = 0;
- u8 num_tx_chains;
unsigned long flags;
struct statistics_rx_non_phy *rx_info;
- u8 first_chain;
+
/*
* MULTI-FIXME:
* When we support multiple interfaces on different channels,
@@ -787,8 +896,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
}
spin_lock_irqsave(&priv->lock, flags);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)->
rx.general.common);
} else {
@@ -803,8 +911,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
rxon_chnum = le16_to_cpu(ctx->staging.channel);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
stat_band24 = !!(((struct iwl_bt_notif_statistics *)
stat_resp)->flag &
STATISTICS_REPLY_FLG_BAND_24G_MSK);
@@ -869,108 +976,16 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
return;
/* Analyze signal for disconnected antenna */
- average_sig[0] = data->chain_signal_a /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[1] = data->chain_signal_b /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[2] = data->chain_signal_c /
- priv->cfg->base_params->chain_noise_num_beacons;
-
- if (average_sig[0] >= average_sig[1]) {
- max_average_sig = average_sig[0];
- max_average_sig_antenna_i = 0;
- active_chains = (1 << max_average_sig_antenna_i);
- } else {
- max_average_sig = average_sig[1];
- max_average_sig_antenna_i = 1;
- active_chains = (1 << max_average_sig_antenna_i);
- }
-
- if (average_sig[2] >= max_average_sig) {
- max_average_sig = average_sig[2];
- max_average_sig_antenna_i = 2;
- active_chains = (1 << max_average_sig_antenna_i);
- }
-
- IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
- average_sig[0], average_sig[1], average_sig[2]);
- IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
- max_average_sig, max_average_sig_antenna_i);
-
- /* Compare signal strengths for all 3 receivers. */
- for (i = 0; i < NUM_RX_CHAINS; i++) {
- if (i != max_average_sig_antenna_i) {
- s32 rssi_delta = (max_average_sig - average_sig[i]);
-
- /* If signal is very weak, compared with
- * strongest, mark it as disconnected. */
- if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
- data->disconn_array[i] = 1;
- else
- active_chains |= (1 << i);
- IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
- "disconn_array[i] = %d\n",
- i, rssi_delta, data->disconn_array[i]);
- }
- }
-
- /*
- * The above algorithm sometimes fails when the ucode
- * reports 0 for all chains. It's not clear why that
- * happens to start with, but it is then causing trouble
- * because this can make us enable more chains than the
- * hardware really has.
- *
- * To be safe, simply mask out any chains that we know
- * are not on the device.
- */
if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
- priv->bt_full_concurrent) {
- /* operated as 1x1 in full concurrency mode */
- active_chains &= first_antenna(priv->hw_params.valid_rx_ant);
+ priv->cfg->bt_params->advanced_bt_coexist) {
+ /*
+ * Disable disconnected antenna algorithm for advanced
+ * bt coex, assuming valid antennas are connected
+ */
+ data->active_chains = priv->hw_params.valid_rx_ant;
+ for (i = 0; i < NUM_RX_CHAINS; i++)
+ if (!(data->active_chains & (1<<i)))
+ data->disconn_array[i] = 1;
} else
- active_chains &= priv->hw_params.valid_rx_ant;
-
- num_tx_chains = 0;
- for (i = 0; i < NUM_RX_CHAINS; i++) {
- /* loops on all the bits of
- * priv->hw_setting.valid_tx_ant */
- u8 ant_msk = (1 << i);
- if (!(priv->hw_params.valid_tx_ant & ant_msk))
- continue;
-
- num_tx_chains++;
- if (data->disconn_array[i] == 0)
- /* there is a Tx antenna connected */
- break;
- if (num_tx_chains == priv->hw_params.tx_chains_num &&
- data->disconn_array[i]) {
- /*
- * If all chains are disconnected
- * connect the first valid tx chain
- */
- first_chain =
- find_first_chain(priv->cfg->valid_tx_ant);
- data->disconn_array[first_chain] = 0;
- active_chains |= BIT(first_chain);
- IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected W/A - declare %d as connected\n",
- first_chain);
- break;
- }
- }
-
- if (active_chains != priv->hw_params.valid_rx_ant &&
- active_chains != priv->chain_noise_data.active_chains)
- IWL_DEBUG_CALIB(priv,
- "Detected that not all antennas are connected! "
- "Connected: %#x, valid: %#x.\n",
- active_chains, priv->hw_params.valid_rx_ant);
-
- /* Save for use within RXON, TX, SCAN commands, etc. */
- priv->chain_noise_data.active_chains = active_chains;
- IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
- active_chains);
+ iwl_find_disconn_antenna(priv, average_sig, data);
/* Analyze noise for rx balance */
average_noise[0] = data->chain_noise_a /
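The core of iwl_find_disconn_antenna() above is a threshold test: any chain whose averaged signal falls more than MAXIMUM_ALLOWED_PATHLOSS below the strongest chain is marked disconnected. A standalone sketch of that test, with an assumed threshold and example signal values (not driver code):

#include <stdio.h>

#define MAXIMUM_ALLOWED_PATHLOSS 15	/* assumed threshold, in dB */
#define NUM_RX_CHAINS 3

int main(void)
{
	int average_sig[NUM_RX_CHAINS] = { 42, 40, 20 };	/* example averages */
	int max = average_sig[0];
	int i;

	for (i = 1; i < NUM_RX_CHAINS; i++)
		if (average_sig[i] > max)
			max = average_sig[i];

	for (i = 0; i < NUM_RX_CHAINS; i++)
		printf("chain %d (%d): %s\n", i, average_sig[i],
		       (max - average_sig[i]) > MAXIMUM_ALLOWED_PATHLOSS ?
		       "disconnected" : "connected");
	return 0;
}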
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
index a358d4334a1a..b500aaae53ec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -39,8 +39,7 @@ static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
int p = 0;
u32 flag;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics)
+ if (iwl_bt_statistics(priv))
flag = le32_to_cpu(priv->_agn.statistics_bt.flag);
else
flag = le32_to_cpu(priv->_agn.statistics.flag);
@@ -89,8 +88,7 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
ofdm = &priv->_agn.statistics_bt.rx.ofdm;
cck = &priv->_agn.statistics_bt.rx.cck;
general = &priv->_agn.statistics_bt.rx.general.common;
@@ -536,8 +534,7 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
tx = &priv->_agn.statistics_bt.tx;
accum_tx = &priv->_agn.accum_statistics_bt.tx;
delta_tx = &priv->_agn.delta_statistics_bt.tx;
@@ -737,8 +734,7 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
general = &priv->_agn.statistics_bt.general.common;
dbg = &priv->_agn.statistics_bt.general.common.dbg;
div = &priv->_agn.statistics_bt.general.common.div;
@@ -856,6 +852,9 @@ ssize_t iwl_ucode_bt_stats_read(struct file *file,
if (!iwl_is_alive(priv))
return -EAGAIN;
+ if (!priv->bt_enable_flag)
+ return -EINVAL;
+
/* make request to uCode to retrieve statistics information */
mutex_lock(&priv->mutex);
ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
index a650baba0809..27b5a3eec9dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
@@ -75,109 +75,6 @@
#include "iwl-agn.h"
#include "iwl-io.h"
-/************************** EEPROM BANDS ****************************
- *
- * The iwl_eeprom_band definitions below provide the mapping from the
- * EEPROM contents to the specific channel number supported for each
- * band.
- *
- * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
- * definition below maps to physical channel 42 in the 5.2GHz spectrum.
- * The specific geography and calibration information for that channel
- * is contained in the eeprom map itself.
- *
- * During init, we copy the eeprom information and channel map
- * information into priv->channel_info_24/52 and priv->channel_map_24/52
- *
- * channel_map_24/52 provides the index in the channel_info array for a
- * given channel. We have to have two separate maps as there is channel
- * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
- * band_2
- *
- * A value of 0xff stored in the channel_map indicates that the channel
- * is not supported by the hardware at all.
- *
- * A value of 0xfe in the channel_map indicates that the channel is not
- * valid for Tx with the current hardware. This means that
- * while the system can tune and receive on a given channel, it may not
- * be able to associate or transmit any frames on that
- * channel. There is no corresponding channel information for that
- * entry.
- *
- *********************************************************************/
-
-/**
- * struct iwl_txpwr_section: eeprom section information
- * @offset: indirect address into eeprom image
- * @count: number of "struct iwl_eeprom_enhanced_txpwr" in this section
- * @band: band type for the section
- * @is_common - true: common section, false: channel section
- * @is_cck - true: cck section, false: not cck section
- * @is_ht_40 - true: all channel in the section are HT40 channel,
- * false: legacy or HT 20 MHz
- * ignore if it is common section
- * @iwl_eeprom_section_channel: channel array in the section,
- * ignore if common section
- */
-struct iwl_txpwr_section {
- u32 offset;
- u8 count;
- enum ieee80211_band band;
- bool is_common;
- bool is_cck;
- bool is_ht40;
- u8 iwl_eeprom_section_channel[EEPROM_MAX_TXPOWER_SECTION_ELEMENTS];
-};
-
-/**
- * section 1 - 3 are regulatory tx power apply to all channels based on
- * modulation: CCK, OFDM
- * Band: 2.4GHz, 5.2GHz
- * section 4 - 10 are regulatory tx power apply to specified channels
- * For example:
- * 1L - Channel 1 Legacy
- * 1HT - Channel 1 HT
- * (1,+1) - Channel 1 HT40 "_above_"
- *
- * Section 1: all CCK channels
- * Section 2: all 2.4 GHz OFDM (Legacy, HT and HT40) channels
- * Section 3: all 5.2 GHz OFDM (Legacy, HT and HT40) channels
- * Section 4: 2.4 GHz 20MHz channels: 1L, 1HT, 2L, 2HT, 10L, 10HT, 11L, 11HT
- * Section 5: 2.4 GHz 40MHz channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1)
- * Section 6: 5.2 GHz 20MHz channels: 36L, 64L, 100L, 36HT, 64HT, 100HT
- * Section 7: 5.2 GHz 40MHz channels: (36,+1) (60,+1) (100,+1)
- * Section 8: 2.4 GHz channel: 13L, 13HT
- * Section 9: 2.4 GHz channel: 140L, 140HT
- * Section 10: 2.4 GHz 40MHz channels: (132,+1) (44,+1)
- *
- */
-static const struct iwl_txpwr_section enhinfo[] = {
- { EEPROM_LB_CCK_20_COMMON, 1, IEEE80211_BAND_2GHZ, true, true, false },
- { EEPROM_LB_OFDM_COMMON, 3, IEEE80211_BAND_2GHZ, true, false, false },
- { EEPROM_HB_OFDM_COMMON, 3, IEEE80211_BAND_5GHZ, true, false, false },
- { EEPROM_LB_OFDM_20_BAND, 8, IEEE80211_BAND_2GHZ,
- false, false, false,
- {1, 1, 2, 2, 10, 10, 11, 11 } },
- { EEPROM_LB_OFDM_HT40_BAND, 5, IEEE80211_BAND_2GHZ,
- false, false, true,
- { 1, 2, 6, 7, 9 } },
- { EEPROM_HB_OFDM_20_BAND, 6, IEEE80211_BAND_5GHZ,
- false, false, false,
- { 36, 64, 100, 36, 64, 100 } },
- { EEPROM_HB_OFDM_HT40_BAND, 3, IEEE80211_BAND_5GHZ,
- false, false, true,
- { 36, 60, 100 } },
- { EEPROM_LB_OFDM_20_CHANNEL_13, 2, IEEE80211_BAND_2GHZ,
- false, false, false,
- { 13, 13 } },
- { EEPROM_HB_OFDM_20_CHANNEL_140, 2, IEEE80211_BAND_5GHZ,
- false, false, false,
- { 140, 140 } },
- { EEPROM_HB_OFDM_HT40_BAND_1, 2, IEEE80211_BAND_5GHZ,
- false, false, true,
- { 132, 44 } },
-};
-
/******************************************************************************
*
* EEPROM related functions
@@ -248,6 +145,50 @@ err:
}
+int iwl_eeprom_check_sku(struct iwl_priv *priv)
+{
+ u16 eeprom_sku;
+ u16 radio_cfg;
+
+ eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
+
+ if (!priv->cfg->sku) {
+ /* not using sku overwrite */
+ priv->cfg->sku =
+ ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >>
+ EEPROM_SKU_CAP_BAND_POS);
+ if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE)
+ priv->cfg->sku |= IWL_SKU_N;
+ }
+ if (!priv->cfg->sku) {
+ IWL_ERR(priv, "Invalid device sku\n");
+ return -EINVAL;
+ }
+
+ IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku);
+
+ if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
+ /* not using .cfg overwrite */
+ radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+ priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+ if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
+ IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
+ priv->cfg->valid_tx_ant,
+ priv->cfg->valid_rx_ant);
+ return -EINVAL;
+ }
+ IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n",
+ priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant);
+ }
+ /*
+ * For some special cases the EEPROM does not reflect the correct
+ * antenna setting, so the valid tx/rx antennas are overridden from .cfg.
+ */
+ return 0;
+}
+
void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
{
const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv,
@@ -265,15 +206,6 @@ static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
{
s8 max_txpower_avg = 0; /* (dBm) */
- IWL_DEBUG_INFO(priv, "%d - "
- "chain_a: %d dB chain_b: %d dB "
- "chain_c: %d dB mimo2: %d dB mimo3: %d dB\n",
- element,
- enhanced_txpower[element].chain_a_max >> 1,
- enhanced_txpower[element].chain_b_max >> 1,
- enhanced_txpower[element].chain_c_max >> 1,
- enhanced_txpower[element].mimo2_max >> 1,
- enhanced_txpower[element].mimo3_max >> 1);
/* Take the highest tx power from any valid chains */
if ((priv->cfg->valid_tx_ant & ANT_A) &&
(enhanced_txpower[element].chain_a_max > max_txpower_avg))
@@ -303,152 +235,106 @@ static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
}
-/**
- * iwl_update_common_txpower: update channel tx power
- * update tx power per band based on EEPROM enhanced tx power info.
- */
-static s8 iwl_update_common_txpower(struct iwl_priv *priv,
- struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
- int section, int element, s8 *max_txpower_in_half_dbm)
+static void
+iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv,
+ struct iwl_eeprom_enhanced_txpwr *txp,
+ s8 max_txpower_avg)
{
- struct iwl_channel_info *ch_info;
- int ch;
- bool is_ht40 = false;
- s8 max_txpower_avg; /* (dBm) */
-
- /* it is common section, contain all type (Legacy, HT and HT40)
- * based on the element in the section to determine
- * is it HT 40 or not
- */
- if (element == EEPROM_TXPOWER_COMMON_HT40_INDEX)
- is_ht40 = true;
- max_txpower_avg =
- iwl_get_max_txpower_avg(priv, enhanced_txpower,
- element, max_txpower_in_half_dbm);
-
- ch_info = priv->channel_info;
-
- for (ch = 0; ch < priv->channel_count; ch++) {
- /* find matching band and update tx power if needed */
- if ((ch_info->band == enhinfo[section].band) &&
- (ch_info->max_power_avg < max_txpower_avg) &&
- (!is_ht40)) {
- /* Update regulatory-based run-time data */
- ch_info->max_power_avg = ch_info->curr_txpow =
- max_txpower_avg;
+ int ch_idx;
+ bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ;
+ enum ieee80211_band band;
+
+ band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
+ IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+
+ for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) {
+ struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx];
+
+ /* update matching channel or from common data only */
+ if (txp->channel != 0 && ch_info->channel != txp->channel)
+ continue;
+
+ /* update matching band only */
+ if (band != ch_info->band)
+ continue;
+
+ if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) {
+ ch_info->max_power_avg = max_txpower_avg;
+ ch_info->curr_txpow = max_txpower_avg;
ch_info->scan_power = max_txpower_avg;
}
- if ((ch_info->band == enhinfo[section].band) && is_ht40 &&
- (ch_info->ht40_max_power_avg < max_txpower_avg)) {
- /* Update regulatory-based run-time data */
+
+ if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg)
ch_info->ht40_max_power_avg = max_txpower_avg;
- }
- ch_info++;
}
- return max_txpower_avg;
}
-/**
- * iwl_update_channel_txpower: update channel tx power
- * update channel tx power based on EEPROM enhanced tx power info.
- */
-static s8 iwl_update_channel_txpower(struct iwl_priv *priv,
- struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
- int section, int element, s8 *max_txpower_in_half_dbm)
-{
- struct iwl_channel_info *ch_info;
- int ch;
- u8 channel;
- s8 max_txpower_avg; /* (dBm) */
-
- channel = enhinfo[section].iwl_eeprom_section_channel[element];
- max_txpower_avg =
- iwl_get_max_txpower_avg(priv, enhanced_txpower,
- element, max_txpower_in_half_dbm);
-
- ch_info = priv->channel_info;
- for (ch = 0; ch < priv->channel_count; ch++) {
- /* find matching channel and update tx power if needed */
- if (ch_info->channel == channel) {
- if ((ch_info->max_power_avg < max_txpower_avg) &&
- (!enhinfo[section].is_ht40)) {
- /* Update regulatory-based run-time data */
- ch_info->max_power_avg = max_txpower_avg;
- ch_info->curr_txpow = max_txpower_avg;
- ch_info->scan_power = max_txpower_avg;
- }
- if ((enhinfo[section].is_ht40) &&
- (ch_info->ht40_max_power_avg < max_txpower_avg)) {
- /* Update regulatory-based run-time data */
- ch_info->ht40_max_power_avg = max_txpower_avg;
- }
- break;
- }
- ch_info++;
- }
- return max_txpower_avg;
-}
+#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
+#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
+#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
+
+#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
+ ? # x " " : "")
-/**
- * iwlcore_eeprom_enhanced_txpower: process enhanced tx power info
- */
void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
{
- int eeprom_section_count = 0;
- int section, element;
- struct iwl_eeprom_enhanced_txpwr *enhanced_txpower;
- u32 offset;
- s8 max_txpower_avg; /* (dBm) */
- s8 max_txpower_in_half_dbm; /* (half-dBm) */
-
- /* Loop through all the sections
- * adjust bands and channel's max tx power
- * Set the tx_power_user_lmt to the highest power
- * supported by any channels and chains
- */
- for (section = 0; section < ARRAY_SIZE(enhinfo); section++) {
- eeprom_section_count = enhinfo[section].count;
- offset = enhinfo[section].offset;
- enhanced_txpower = (struct iwl_eeprom_enhanced_txpwr *)
- iwl_eeprom_query_addr(priv, offset);
+ struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
+ int idx, entries;
+ __le16 *txp_len;
+ s8 max_txp_avg, max_txp_avg_halfdbm;
+
+ BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
+
+ /* the length is in 16-bit words, but we want entries */
+ txp_len = (__le16 *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
+ entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
+
+ txp_array = (void *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
+
+ for (idx = 0; idx < entries; idx++) {
+ txp = &txp_array[idx];
+ /* skip invalid entries */
+ if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
+ continue;
+
+ IWL_DEBUG_EEPROM(priv, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
+ (txp->channel && (txp->flags &
+ IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
+ "Common " : (txp->channel) ?
+ "Channel" : "Common",
+ (txp->channel),
+ TXP_CHECK_AND_PRINT(VALID),
+ TXP_CHECK_AND_PRINT(BAND_52G),
+ TXP_CHECK_AND_PRINT(OFDM),
+ TXP_CHECK_AND_PRINT(40MHZ),
+ TXP_CHECK_AND_PRINT(HT_AP),
+ TXP_CHECK_AND_PRINT(RES1),
+ TXP_CHECK_AND_PRINT(RES2),
+ TXP_CHECK_AND_PRINT(COMMON_TYPE),
+ txp->flags);
+ IWL_DEBUG_EEPROM(priv, "\t\t chain_A: 0x%02x "
+ "chain_B: 0X%02x chain_C: 0X%02x\n",
+ txp->chain_a_max, txp->chain_b_max,
+ txp->chain_c_max);
+ IWL_DEBUG_EEPROM(priv, "\t\t MIMO2: 0x%02x "
+ "MIMO3: 0x%02x High 20_on_40: 0x%02x "
+ "Low 20_on_40: 0x%02x\n",
+ txp->mimo2_max, txp->mimo3_max,
+ ((txp->delta_20_in_40 & 0xf0) >> 4),
+ (txp->delta_20_in_40 & 0x0f));
+
+ max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
+ &max_txp_avg_halfdbm);
/*
- * check for valid entry -
- * different version of EEPROM might contain different set
- * of enhanced tx power table
- * always check for valid entry before process
- * the information
+ * Update the user limit values to the highest
+ * power supported by any channel
*/
- if (!enhanced_txpower->common || enhanced_txpower->reserved)
- continue;
+ if (max_txp_avg > priv->tx_power_user_lmt)
+ priv->tx_power_user_lmt = max_txp_avg;
+ if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
+ priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
- for (element = 0; element < eeprom_section_count; element++) {
- if (enhinfo[section].is_common)
- max_txpower_avg =
- iwl_update_common_txpower(priv,
- enhanced_txpower, section,
- element,
- &max_txpower_in_half_dbm);
- else
- max_txpower_avg =
- iwl_update_channel_txpower(priv,
- enhanced_txpower, section,
- element,
- &max_txpower_in_half_dbm);
-
- /* Update the tx_power_user_lmt to the highest power
- * supported by any channel */
- if (max_txpower_avg > priv->tx_power_user_lmt)
- priv->tx_power_user_lmt = max_txpower_avg;
-
- /*
- * Update the tx_power_lmt_in_half_dbm to
- * the highest power supported by any channel
- */
- if (max_txpower_in_half_dbm >
- priv->tx_power_lmt_in_half_dbm)
- priv->tx_power_lmt_in_half_dbm =
- max_txpower_in_half_dbm;
- }
+ iwlcore_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
}
}
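Two small calculations in the rewritten iwlcore_eeprom_enhanced_txpower() are easy to miss: the EEPROM length field counts 16-bit words, so the number of 8-byte entries is len * 2 / sizeof(entry), and iwl_get_max_txpower_avg() rounds a half-dBm value up to dBm with (v & 0x01) + (v >> 1). A standalone sketch with example numbers (not driver code):

#include <stdio.h>

#define ENTRY_LEN 8	/* sizeof(struct iwl_eeprom_enhanced_txpwr) */

int main(void)
{
	int txp_len_words = 40;				/* example EEPROM value */
	int entries = txp_len_words * 2 / ENTRY_LEN;	/* 80 bytes -> 10 entries */
	int half_dbm = 31;				/* example: 15.5 dBm */
	int dbm = (half_dbm & 0x01) + (half_dbm >> 1);	/* rounds up to 16 dBm */

	printf("entries=%d dbm=%d\n", entries, dbm);
	return 0;
}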
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index ffb2f4111ad0..41543ad4cb84 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -305,8 +305,13 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
cmd.slots[0].type = 0; /* BSS */
cmd.slots[1].type = 1; /* PAN */
- if (ctx_bss->vif && ctx_pan->vif) {
+ if (priv->_agn.hw_roc_channel) {
+ /* both contexts must be used for this to happen */
+ slot1 = priv->_agn.hw_roc_duration;
+ slot0 = IWL_MIN_SLOT_TIME;
+ } else if (ctx_bss->vif && ctx_pan->vif) {
int bcnint = ctx_pan->vif->bss_conf.beacon_int;
+ int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
/* should be set, but seems unused?? */
cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
@@ -329,12 +334,12 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
if (test_bit(STATUS_SCAN_HW, &priv->status) ||
(!ctx_bss->vif->bss_conf.idle &&
!ctx_bss->vif->bss_conf.assoc)) {
- slot0 = bcnint * 3 - 20;
- slot1 = 20;
+ slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
+ slot1 = IWL_MIN_SLOT_TIME;
} else if (!ctx_pan->vif->bss_conf.idle &&
- !ctx_pan->vif->bss_conf.assoc) {
- slot1 = bcnint * 3 - 20;
- slot0 = 20;
+ !ctx_pan->vif->bss_conf.assoc) {
+ slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
+ slot0 = IWL_MIN_SLOT_TIME;
}
} else if (ctx_pan->vif) {
slot0 = 0;
@@ -343,8 +348,8 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- slot0 = slot1 * 3 - 20;
- slot1 = 20;
+ slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
+ slot1 = IWL_MIN_SLOT_TIME;
}
}
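The slot arithmetic in iwlagn_set_pan_params() follows the literals it replaces (a 20 TU minimum slot): the context that is still coming up (not idle, not yet associated, or with a scan in progress) keeps nearly the whole three-beacon window, and the other context gets IWL_MIN_SLOT_TIME. A standalone sketch with example numbers, assuming IWL_MIN_SLOT_TIME is 20 TU as the old literal suggests:

#include <stdio.h>

#define IWL_MIN_SLOT_TIME 20	/* assumed value, in TU, matching the old literal */

int main(void)
{
	int bcnint = 100;	/* example beacon interval, TU */
	int dtim = 1;		/* example DTIM period */
	/* BSS not yet associated (or HW scan running): BSS side keeps 280 TU */
	int slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
	int slot1 = IWL_MIN_SLOT_TIME;	/* PAN side gets the minimum slot */

	printf("slot0=%d slot1=%d\n", slot0, slot1);
	return 0;
}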
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
index a5dbfea1bfad..b5cb3be0eb4b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -197,7 +197,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
none:
/* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if diabled by irq and no schedules tasklet. */
+ /* only re-enable if disabled by irq and no tasklet is scheduled. */
if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
iwl_enable_interrupts(priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
index 1a24946bc203..c1190d965614 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
@@ -63,23 +63,11 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
}
/* Set led register off */
-static int iwl_led_on_reg(struct iwl_priv *priv)
+void iwlagn_led_enable(struct iwl_priv *priv)
{
- IWL_DEBUG_LED(priv, "led on\n");
iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
- return 0;
-}
-
-/* Set led register off */
-static int iwl_led_off_reg(struct iwl_priv *priv)
-{
- IWL_DEBUG_LED(priv, "LED Reg off\n");
- iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
- return 0;
}
const struct iwl_led_ops iwlagn_led_ops = {
.cmd = iwl_send_led_cmd,
- .on = iwl_led_on_reg,
- .off = iwl_led_off_reg,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
index a594e4fdc6b8..96f323dc5dd6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
@@ -28,5 +28,6 @@
#define __iwl_agn_led_h__
extern const struct iwl_led_ops iwlagn_led_ops;
+void iwlagn_led_enable(struct iwl_priv *priv);
#endif /* __iwl_agn_led_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index b555edd53354..2003c1d4295f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -405,6 +405,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
return;
}
+ txq->time_stamp = jiffies;
info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
memset(&info->status, 0, sizeof(info->status));
@@ -445,22 +446,17 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
- if (agg->state == IWL_AGG_OFF)
- iwl_wake_queue(priv, txq_id);
- else
- iwl_wake_queue(priv, txq->swq_id);
- }
+ (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
+ iwl_wake_queue(priv, txq);
}
} else {
- BUG_ON(txq_id != txq->swq_id);
iwlagn_set_tx_status(priv, info, tx_resp, txq_id, false);
freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark))
- iwl_wake_queue(priv, txq_id);
+ iwl_wake_queue(priv, txq);
}
iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
@@ -477,6 +473,11 @@ void iwlagn_rx_handler_setup(struct iwl_priv *priv)
priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
iwlagn_rx_calib_complete;
priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
+
+ /* set up notification wait support */
+ spin_lock_init(&priv->_agn.notif_wait_lock);
+ INIT_LIST_HEAD(&priv->_agn.notif_waits);
+ init_waitqueue_head(&priv->_agn.notif_waitq);
}
void iwlagn_setup_deferred_work(struct iwl_priv *priv)
@@ -496,6 +497,10 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
u8 tx_ant_cfg_cmd;
+ if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
+ "TX Power requested while scanning!\n"))
+ return -EAGAIN;
+
/* half dBm need to multiply */
tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
@@ -522,16 +527,16 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
else
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
- return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
- sizeof(tx_power_cmd), &tx_power_cmd,
- NULL);
+ return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd),
+ &tx_power_cmd);
}
void iwlagn_temperature(struct iwl_priv *priv)
{
- /* store temperature from statistics (in Celsius) */
- priv->temperature =
- le32_to_cpu(priv->_agn.statistics.general.common.temperature);
+ /* store temperature from correct statistics (in Celsius) */
+ priv->temperature = le32_to_cpu((iwl_bt_statistics(priv)) ?
+ priv->_agn.statistics_bt.general.common.temperature :
+ priv->_agn.statistics.general.common.temperature);
iwl_tt_handler(priv);
}
@@ -569,6 +574,12 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
case INDIRECT_REGULATORY:
offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
break;
+ case INDIRECT_TXP_LIMIT:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
+ break;
+ case INDIRECT_TXP_LIMIT_SIZE:
+ offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
+ break;
case INDIRECT_CALIBRATION:
offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
break;
@@ -599,6 +610,7 @@ const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
struct iwl_mod_params iwlagn_mod_params = {
.amsdu_size_8K = 1,
.restart_fw = 1,
+ .plcp_check = true,
/* the rest are 0 by default */
};
@@ -750,6 +762,12 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv)
} else
iwlagn_txq_ctx_reset(priv);
+ if (priv->cfg->base_params->shadow_reg_enable) {
+ /* enable shadow regs in HW */
+ iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
+ 0x800FFFFF);
+ }
+
set_bit(STATUS_INIT, &priv->status);
return 0;
@@ -977,240 +995,6 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
return -1;
}
-/* Calc max signal level (dBm) among 3 possible receivers */
-static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
-{
- return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
-}
-
-static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
-{
- u32 decrypt_out = 0;
-
- if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
- RX_RES_STATUS_STATION_FOUND)
- decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
- RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
-
- decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
-
- /* packet was not encrypted */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_NONE)
- return decrypt_out;
-
- /* packet was encrypted with unknown alg */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_ERR)
- return decrypt_out;
-
- /* decryption was not done in HW */
- if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
- RX_MPDU_RES_STATUS_DEC_DONE_MSK)
- return decrypt_out;
-
- switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
-
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- /* alg is CCM: check MIC only */
- if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
- /* Bad MIC */
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-
- break;
-
- case RX_RES_STATUS_SEC_TYPE_TKIP:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
- /* Bad TTAK */
- decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
- break;
- }
- /* fall through if TTAK OK */
- default:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
- break;
- }
-
- IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
- decrypt_in, decrypt_out);
-
- return decrypt_out;
-}
-
-static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u16 len,
- u32 ampdu_status,
- struct iwl_rx_mem_buffer *rxb,
- struct ieee80211_rx_status *stats)
-{
- struct sk_buff *skb;
- __le16 fc = hdr->frame_control;
-
- /* We only process data packets if the interface is open */
- if (unlikely(!priv->is_open)) {
- IWL_DEBUG_DROP_LIMIT(priv,
- "Dropping packet while interface is not open.\n");
- return;
- }
-
- /* In case of HW accelerated crypto and bad decryption, drop */
- if (!priv->cfg->mod_params->sw_crypto &&
- iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
- return;
-
- skb = dev_alloc_skb(128);
- if (!skb) {
- IWL_ERR(priv, "dev_alloc_skb failed\n");
- return;
- }
-
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
-
- iwl_update_stats(priv, false, fc, len);
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
- ieee80211_rx(priv->hw, skb);
- priv->alloc_rxb_page--;
- rxb->page = NULL;
-}
-
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
-void iwlagn_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_rx_phy_res *phy_res;
- __le32 rx_pkt_status;
- struct iwl_rx_mpdu_res_start *amsdu;
- u32 len;
- u32 ampdu_status;
- u32 rate_n_flags;
-
- /**
- * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
- * REPLY_RX: physical layer info is in this buffer
- * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
- * command and cached in priv->last_phy_res
- *
- * Here we set up local variables depending on which command is
- * received.
- */
- if (pkt->hdr.cmd == REPLY_RX) {
- phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
- + phy_res->cfg_phy_cnt);
-
- len = le16_to_cpu(phy_res->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
- phy_res->cfg_phy_cnt + len);
- ampdu_status = le32_to_cpu(rx_pkt_status);
- } else {
- if (!priv->_agn.last_phy_res_valid) {
- IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return;
- }
- phy_res = &priv->_agn.last_phy_res;
- amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
- len = le16_to_cpu(amsdu->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
- ampdu_status = iwlagn_translate_rx_status(priv,
- le32_to_cpu(rx_pkt_status));
- }
-
- if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
- IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
- phy_res->cfg_phy_cnt);
- return;
- }
-
- if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
- !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
- IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
- le32_to_cpu(rx_pkt_status));
- return;
- }
-
- /* This will be used in several places later */
- rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
- /* rx_status carries information about the packet to mac80211 */
- rx_status.mactime = le64_to_cpu(phy_res->timestamp);
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
- rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.rate_idx =
- iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
- rx_status.flag = 0;
-
- /* TSF isn't reliable. In order to allow smooth user experience,
- * this W/A doesn't propagate it to the mac80211 */
- /*rx_status.flag |= RX_FLAG_TSFT;*/
-
- priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
-
- /* Find max signal strength (dBm) among 3 antenna/receiver chains */
- rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
-
- iwl_dbg_log_rx_data_frame(priv, len, header);
- IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
- rx_status.signal, (unsigned long long)rx_status.mactime);
-
- /*
- * "antenna number"
- *
- * It seems that the antenna field in the phy flags value
- * is actually a bit field. This is undefined by radiotap,
- * it wants an actual antenna number but I always get "7"
- * for most legacy frames I receive indicating that the
- * same frame was received on all three RX chains.
- *
- * I think this field should be removed in favor of a
- * new 802.11n radiotap field "RX chains" that is defined
- * as a bitmask.
- */
- rx_status.antenna =
- (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
- /* set the preamble flag if appropriate */
- if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
-
- /* Set up the HT phy flags */
- if (rate_n_flags & RATE_MCS_HT_MSK)
- rx_status.flag |= RX_FLAG_HT;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.flag |= RX_FLAG_40MHZ;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
-
- iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
- rxb, &rx_status);
-}
-
-/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
- * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- priv->_agn.last_phy_res_valid = true;
- memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
- sizeof(struct iwl_rx_phy_res));
-}
-
static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
struct ieee80211_vif *vif,
enum ieee80211_band band,
@@ -1331,6 +1115,18 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
return added;
}
+static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
+{
+ struct sk_buff *skb = priv->_agn.offchan_tx_skb;
+
+ if (skb->len < maxlen)
+ maxlen = skb->len;
+
+ memcpy(data, skb->data, maxlen);
+
+ return maxlen;
+}
+
int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
struct iwl_host_cmd cmd = {
@@ -1373,20 +1169,25 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
- if (iwl_is_any_associated(priv)) {
+ if (priv->scan_type != IWL_SCAN_OFFCH_TX &&
+ iwl_is_any_associated(priv)) {
u16 interval = 0;
u32 extra;
u32 suspend_time = 100;
u32 scan_suspend_time = 100;
- unsigned long flags;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- spin_lock_irqsave(&priv->lock, flags);
- if (priv->is_internal_short_scan)
+ switch (priv->scan_type) {
+ case IWL_SCAN_OFFCH_TX:
+ WARN_ON(1);
+ break;
+ case IWL_SCAN_RADIO_RESET:
interval = 0;
- else
+ break;
+ case IWL_SCAN_NORMAL:
interval = vif->bss_conf.beacon_int;
- spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ }
scan->suspend_time = 0;
scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -1399,29 +1200,41 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->suspend_time = cpu_to_le32(scan_suspend_time);
IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
scan_suspend_time, interval);
+ } else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
+ scan->suspend_time = 0;
+ scan->max_out_time =
+ cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout);
}
- if (priv->is_internal_short_scan) {
+ switch (priv->scan_type) {
+ case IWL_SCAN_RADIO_RESET:
IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
- } else if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+ break;
+ case IWL_SCAN_NORMAL:
+ if (priv->scan_request->n_ssids) {
+ int i, p = 0;
+ IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+ for (i = 0; i < priv->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+ break;
+ case IWL_SCAN_OFFCH_TX:
+ IWL_DEBUG_SCAN(priv, "Start offchannel TX scan.\n");
+ break;
+ }
scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
scan->tx_cmd.sta_id = ctx->bcast_sta_id;
@@ -1481,15 +1294,11 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
if (priv->cfg->scan_rx_antennas[band])
rx_ant = priv->cfg->scan_rx_antennas[band];
- if (priv->cfg->scan_tx_antennas[band])
- scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
-
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
- priv->bt_full_concurrent) {
- /* operated as 1x1 in full concurrency mode */
- scan_tx_antennas = first_antenna(
- priv->cfg->scan_tx_antennas[band]);
+ if (band == IEEE80211_BAND_2GHZ &&
+ priv->cfg->bt_params &&
+ priv->cfg->bt_params->advanced_bt_coexist) {
+ /* transmit 2.4 GHz probes only on first antenna */
+ scan_tx_antennas = first_antenna(scan_tx_antennas);
}
priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
@@ -1523,38 +1332,77 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
scan->rx_chain = cpu_to_le16(rx_chain);
- if (!priv->is_internal_short_scan) {
+ switch (priv->scan_type) {
+ case IWL_SCAN_NORMAL:
cmd_len = iwl_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
vif->addr,
priv->scan_request->ie,
priv->scan_request->ie_len,
IWL_MAX_SCAN_SIZE - sizeof(*scan));
- } else {
+ break;
+ case IWL_SCAN_RADIO_RESET:
/* use bcast addr, will not be transmitted but must be valid */
cmd_len = iwl_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
iwl_bcast_addr, NULL, 0,
IWL_MAX_SCAN_SIZE - sizeof(*scan));
-
+ break;
+ case IWL_SCAN_OFFCH_TX:
+ cmd_len = iwl_fill_offch_tx(priv, scan->data,
+ IWL_MAX_SCAN_SIZE
+ - sizeof(*scan)
+ - sizeof(struct iwl_scan_channel));
+ scan->scan_flags |= IWL_SCAN_FLAGS_ACTION_FRAME_TX;
+ break;
+ default:
+ BUG();
}
scan->tx_cmd.len = cpu_to_le16(cmd_len);
scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
RXON_FILTER_BCON_AWARE_MSK);
- if (priv->is_internal_short_scan) {
+ switch (priv->scan_type) {
+ case IWL_SCAN_RADIO_RESET:
scan->channel_count =
iwl_get_single_channel_for_scan(priv, vif, band,
- (void *)&scan->data[le16_to_cpu(
- scan->tx_cmd.len)]);
- } else {
+ (void *)&scan->data[cmd_len]);
+ break;
+ case IWL_SCAN_NORMAL:
scan->channel_count =
iwl_get_channels_for_scan(priv, vif, band,
is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(
- scan->tx_cmd.len)]);
+ (void *)&scan->data[cmd_len]);
+ break;
+ case IWL_SCAN_OFFCH_TX: {
+ struct iwl_scan_channel *scan_ch;
+
+ scan->channel_count = 1;
+
+ scan_ch = (void *)&scan->data[cmd_len];
+ scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+ scan_ch->channel =
+ cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value);
+ scan_ch->active_dwell =
+ cpu_to_le16(priv->_agn.offchan_tx_timeout);
+ scan_ch->passive_dwell = 0;
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+ }
+ break;
}
+
if (scan->channel_count == 0) {
IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
return -EIO;
@@ -1584,22 +1432,6 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
return ret;
}
-void iwlagn_post_scan(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
-
- /*
- * Since setting the RXON may have been deferred while
- * performing the scan, fire one off if needed
- */
- for_each_context(priv, ctx)
- if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
- iwlagn_commit_rxon(priv, ctx);
-
- if (priv->cfg->ops->hcmd->set_pan_params)
- priv->cfg->ops->hcmd->set_pan_params(priv);
-}
-
int iwlagn_manage_ibss_station(struct iwl_priv *priv,
struct ieee80211_vif *vif, bool add)
{
@@ -1790,7 +1622,7 @@ static const __le32 iwlagn_def_3w_lookup[12] = {
cpu_to_le32(0xc0004000),
cpu_to_le32(0x00004000),
cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0004000),
+ cpu_to_le32(0xf0005000),
};
static const __le32 iwlagn_concurrent_lookup[12] = {
@@ -1810,25 +1642,39 @@ static const __le32 iwlagn_concurrent_lookup[12] = {
void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
{
- struct iwlagn_bt_cmd bt_cmd = {
+ struct iwl_basic_bt_cmd basic = {
.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
};
+ struct iwl6000_bt_cmd bt_cmd_6000;
+ struct iwl2000_bt_cmd bt_cmd_2000;
+ int ret;
BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
- sizeof(bt_cmd.bt3_lookup_table));
+ sizeof(basic.bt3_lookup_table));
+
+ if (priv->cfg->bt_params) {
+ if (priv->cfg->bt_params->bt_session_2) {
+ bt_cmd_2000.prio_boost = cpu_to_le32(
+ priv->cfg->bt_params->bt_prio_boost);
+ bt_cmd_2000.tx_prio_boost = 0;
+ bt_cmd_2000.rx_prio_boost = 0;
+ } else {
+ bt_cmd_6000.prio_boost =
+ priv->cfg->bt_params->bt_prio_boost;
+ bt_cmd_6000.tx_prio_boost = 0;
+ bt_cmd_6000.rx_prio_boost = 0;
+ }
+ } else {
+ IWL_ERR(priv, "failed to construct BT Coex Config\n");
+ return;
+ }
- if (priv->cfg->bt_params)
- bt_cmd.prio_boost = priv->cfg->bt_params->bt_prio_boost;
- else
- bt_cmd.prio_boost = 0;
- bt_cmd.kill_ack_mask = priv->kill_ack_mask;
- bt_cmd.kill_cts_mask = priv->kill_cts_mask;
- bt_cmd.valid = priv->bt_valid;
- bt_cmd.tx_prio_boost = 0;
- bt_cmd.rx_prio_boost = 0;
+ basic.kill_ack_mask = priv->kill_ack_mask;
+ basic.kill_cts_mask = priv->kill_cts_mask;
+ basic.valid = priv->bt_valid;
/*
* Configure BT coex mode to "no coexistence" when the
@@ -1837,44 +1683,45 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
* IBSS mode (no proper uCode support for coex then).
*/
if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
- bt_cmd.flags = 0;
+ basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
} else {
- bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
+ basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
+ if (priv->cfg->bt_params &&
+ priv->cfg->bt_params->bt_sco_disable)
+ basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
+
if (priv->bt_ch_announce)
- bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
- IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags);
+ basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
+ IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", basic.flags);
}
+ priv->bt_enable_flag = basic.flags;
if (priv->bt_full_concurrent)
- memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup,
+ memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
sizeof(iwlagn_concurrent_lookup));
else
- memcpy(bt_cmd.bt3_lookup_table, iwlagn_def_3w_lookup,
+ memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
sizeof(iwlagn_def_3w_lookup));
IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
- bt_cmd.flags ? "active" : "disabled",
+ basic.flags ? "active" : "disabled",
priv->bt_full_concurrent ?
"full concurrency" : "3-wire");
- if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd))
+ if (priv->cfg->bt_params->bt_session_2) {
+ memcpy(&bt_cmd_2000.basic, &basic,
+ sizeof(basic));
+ ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ sizeof(bt_cmd_2000), &bt_cmd_2000);
+ } else {
+ memcpy(&bt_cmd_6000.basic, &basic,
+ sizeof(basic));
+ ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ sizeof(bt_cmd_6000), &bt_cmd_6000);
+ }
+ if (ret)
IWL_ERR(priv, "failed to send BT Coex Config\n");
- /*
- * When we are doing a restart, need to also reconfigure BT
- * SCO to the device. If not doing a restart, bt_sco_active
- * will always be false, so there's no need to have an extra
- * variable to check for it.
- */
- if (priv->bt_sco_active) {
- struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
-
- if (priv->bt_sco_active)
- sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
- if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_SCO,
- sizeof(sco_cmd), &sco_cmd))
- IWL_ERR(priv, "failed to send BT SCO command\n");
- }
}
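For reference, the rework above keeps one device-independent block of BT-coex parameters and wraps it in whichever command layout the hardware family expects (2000-series vs 6000-series) before sending. Below is a minimal sketch of that "shared basic block embedded in per-family wrappers" pattern, written in plain C with made-up stand-in types and field sizes, not the driver's real structures or command API:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	struct bt_basic_cfg {            /* shared, device-independent fields */
		uint8_t  flags;
		uint32_t kill_ack_mask;
		uint32_t kill_cts_mask;
	};

	struct bt_cfg_gen6 {             /* 6000-style wrapper: narrow boost field */
		struct bt_basic_cfg basic;
		uint8_t  prio_boost;
	};

	struct bt_cfg_gen2 {             /* 2000-style wrapper: wider boost field */
		struct bt_basic_cfg basic;
		uint32_t prio_boost;
	};

	/* Build the shared part once, then copy it into whichever wrapper the
	 * hardware generation expects; the caller sends that wrapper whole. */
	static size_t build_bt_cmd(int is_gen2, const struct bt_basic_cfg *basic,
				   void *out)
	{
		if (is_gen2) {
			struct bt_cfg_gen2 cmd = { .prio_boost = 0xF0 };
			memcpy(&cmd.basic, basic, sizeof(*basic));
			memcpy(out, &cmd, sizeof(cmd));
			return sizeof(cmd);
		} else {
			struct bt_cfg_gen6 cmd = { .prio_boost = 0xF0 };
			memcpy(&cmd.basic, basic, sizeof(*basic));
			memcpy(out, &cmd, sizeof(cmd));
			return sizeof(cmd);
		}
	}

	int main(void)
	{
		struct bt_basic_cfg basic = { .flags = 1 };
		unsigned char buf[64];
		printf("gen2 len %zu, gen6 len %zu\n",
		       build_bt_cmd(1, &basic, buf), build_bt_cmd(0, &basic, buf));
		return 0;
	}

The point of the layout is that the shared fields are filled exactly once, so the two families can never drift apart on the common parameters.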
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
@@ -1884,12 +1731,25 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
struct iwl_rxon_context *ctx;
int smps_request = -1;
+ if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
+ /* bt coex disabled */
+ return;
+ }
+
+ /*
+ * Note: bt_traffic_load can be overridden by scan complete and
+ * coex profile notifications. Ignore that here; the only possible
+ * consequence is a debug print that does not match the actual state.
+ */
IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
priv->bt_traffic_load);
switch (priv->bt_traffic_load) {
case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
- smps_request = IEEE80211_SMPS_AUTOMATIC;
+ if (priv->bt_status)
+ smps_request = IEEE80211_SMPS_DYNAMIC;
+ else
+ smps_request = IEEE80211_SMPS_AUTOMATIC;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
smps_request = IEEE80211_SMPS_DYNAMIC;
@@ -1906,6 +1766,16 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
mutex_lock(&priv->mutex);
+ /*
+ * We cannot send a command to the firmware while scanning. When the
+ * scan completes, this work will be scheduled again. The check is done
+ * with the mutex held to prevent a new scan request from arriving in
+ * the meantime. We do not check STATUS_SCANNING, to avoid a race where
+ * queue_work() from two different notifications would make us quit
+ * without performing any work at all.
+ */
+ if (test_bit(STATUS_SCAN_HW, &priv->status))
+ goto out;
+
if (priv->cfg->ops->lib->update_chain_flags)
priv->cfg->ops->lib->update_chain_flags(priv);
@@ -1915,7 +1785,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
ieee80211_request_smps(ctx->vif, smps_request);
}
}
-
+out:
mutex_unlock(&priv->mutex);
}
@@ -1976,34 +1846,41 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
BT_UART_MSG_FRAME6DISCOVERABLE_POS);
- IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Inquiry/Page SR Mode = "
- "0x%X, Connectable = 0x%X",
+ IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Page = "
+ "0x%X, Inquiry = 0x%X, Connectable = 0x%X",
(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
- (BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK & uart_msg->frame7) >>
- BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS,
+ (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
+ BT_UART_MSG_FRAME7PAGE_POS,
+ (BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
+ BT_UART_MSG_FRAME7INQUIRY_POS,
(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
BT_UART_MSG_FRAME7CONNECTABLE_POS);
}
-static void iwlagn_set_kill_ack_msk(struct iwl_priv *priv,
- struct iwl_bt_uart_msg *uart_msg)
+static void iwlagn_set_kill_msk(struct iwl_priv *priv,
+ struct iwl_bt_uart_msg *uart_msg)
{
- u8 kill_ack_msk;
- __le32 bt_kill_ack_msg[2] = {
- cpu_to_le32(0xFFFFFFF), cpu_to_le32(0xFFFFFC00) };
-
- kill_ack_msk = (((BT_UART_MSG_FRAME3A2DP_MSK |
- BT_UART_MSG_FRAME3SNIFF_MSK |
- BT_UART_MSG_FRAME3SCOESCO_MSK) &
- uart_msg->frame3) == 0) ? 1 : 0;
- if (priv->kill_ack_mask != bt_kill_ack_msg[kill_ack_msk]) {
+ u8 kill_msk;
+ static const __le32 bt_kill_ack_msg[2] = {
+ IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
+ IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
+ static const __le32 bt_kill_cts_msg[2] = {
+ IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
+ IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
+
+ kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
+ ? 1 : 0;
+ if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
+ priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
- priv->kill_ack_mask = bt_kill_ack_msg[kill_ack_msk];
+ priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
+ priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
+ priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
+
/* schedule to send runtime bt_config */
queue_work(priv->workqueue, &priv->bt_runtime_config);
}
-
}
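The rewritten iwlagn_set_kill_msk() chooses both the ACK-kill and CTS-kill masks from small two-entry tables indexed by whether SCO/eSCO traffic is reported, and only schedules a new runtime config when one of the values actually changed. A compact model of that selection-and-dirty-check pattern, in plain C with illustrative mask values (not the driver's real constants):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define KILL_ACK_DEFAULT  0x0ffffffful   /* illustrative values only */
	#define KILL_CTS_DEFAULT  0xffffffa0ul
	#define KILL_ACK_CTS_SCO  0xfffffc00ul

	static uint32_t cur_ack = KILL_ACK_DEFAULT;
	static uint32_t cur_cts = KILL_CTS_DEFAULT;

	/* Returns true when the runtime BT config needs to be resent. */
	static bool update_kill_masks(bool sco_active)
	{
		static const uint32_t ack_tbl[2] = { KILL_ACK_DEFAULT, KILL_ACK_CTS_SCO };
		static const uint32_t cts_tbl[2] = { KILL_CTS_DEFAULT, KILL_ACK_CTS_SCO };
		unsigned idx = sco_active ? 1 : 0;

		if (cur_ack == ack_tbl[idx] && cur_cts == cts_tbl[idx])
			return false;            /* nothing changed, skip the command */

		cur_ack = ack_tbl[idx];
		cur_cts = cts_tbl[idx];
		return true;                     /* caller queues bt_runtime_config */
	}

	int main(void)
	{
		printf("%d %d %d\n", update_kill_masks(true),
		       update_kill_masks(true), update_kill_masks(false));
		return 0;
	}

The second call returns 0 because the masks are already the SCO ones, which is exactly why the driver can call this on every coex notification without flooding the firmware with commands.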
void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
@@ -2012,9 +1889,12 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
unsigned long flags;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
- struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
- u8 last_traffic_load;
+
+ if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
+ /* bt coex disabled */
+ return;
+ }
IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status);
@@ -2023,11 +1903,10 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
coex->bt_ci_compliance);
iwlagn_print_uartmsg(priv, uart_msg);
- last_traffic_load = priv->notif_bt_traffic_load;
- priv->notif_bt_traffic_load = coex->bt_traffic_load;
+ priv->last_bt_traffic_load = priv->bt_traffic_load;
if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
if (priv->bt_status != coex->bt_status ||
- last_traffic_load != coex->bt_traffic_load) {
+ priv->last_bt_traffic_load != coex->bt_traffic_load) {
if (coex->bt_status) {
/* BT on */
if (!priv->bt_ch_announce)
@@ -2045,18 +1924,9 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
queue_work(priv->workqueue,
&priv->bt_traffic_change_work);
}
- if (priv->bt_sco_active !=
- (uart_msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK)) {
- priv->bt_sco_active = uart_msg->frame3 &
- BT_UART_MSG_FRAME3SCOESCO_MSK;
- if (priv->bt_sco_active)
- sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
- iwl_send_cmd_pdu_async(priv, REPLY_BT_COEX_SCO,
- sizeof(sco_cmd), &sco_cmd, NULL);
- }
}
- iwlagn_set_kill_ack_msk(priv, uart_msg);
+ iwlagn_set_kill_msk(priv, uart_msg);
/* FIXME: based on notification, adjust the prio_boost */
@@ -2276,7 +2146,7 @@ static const char *get_csr_string(int cmd)
void iwl_dump_csr(struct iwl_priv *priv)
{
int i;
- u32 csr_tbl[] = {
+ static const u32 csr_tbl[] = {
CSR_HW_IF_CONFIG_REG,
CSR_INT_COALESCING,
CSR_INT,
@@ -2335,7 +2205,7 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
int pos = 0;
size_t bufsz = 0;
#endif
- u32 fh_tbl[] = {
+ static const u32 fh_tbl[] = {
FH_RSCSR_CHNL0_STTS_WPTR_REG,
FH_RSCSR_CHNL0_RBDCB_BASE_REG,
FH_RSCSR_CHNL0_WPTR,
@@ -2371,3 +2241,44 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
}
return 0;
}
+
+/* notification wait support */
+void iwlagn_init_notification_wait(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry,
+ void (*fn)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt),
+ u8 cmd)
+{
+ wait_entry->fn = fn;
+ wait_entry->cmd = cmd;
+ wait_entry->triggered = false;
+
+ spin_lock_bh(&priv->_agn.notif_wait_lock);
+ list_add(&wait_entry->list, &priv->_agn.notif_waits);
+ spin_unlock_bh(&priv->_agn.notif_wait_lock);
+}
+
+signed long iwlagn_wait_notification(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(priv->_agn.notif_waitq,
+ wait_entry->triggered,
+ timeout);
+
+ spin_lock_bh(&priv->_agn.notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&priv->_agn.notif_wait_lock);
+
+ return ret;
+}
+
+void iwlagn_remove_notification(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry)
+{
+ spin_lock_bh(&priv->_agn.notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&priv->_agn.notif_wait_lock);
+}
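The three helpers above form a small notification-wait facility: register an entry on a list, send the command, sleep on a wait queue until the matching notification sets the entry's triggered flag (or the timeout expires), then unlink the entry. iwlagn_disable_pan() in the new RXON file below is the first user. A stripped-down model of the same wait/signal flow, using pthreads instead of the kernel wait-queue primitives and omitting the timeout, purely for illustration:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* One pending waiter: the notification handler sets 'triggered'
	 * and signals; the waiter blocks until then. */
	struct notif_wait {
		pthread_mutex_t lock;
		pthread_cond_t  cond;
		bool            triggered;
	};

	static struct notif_wait w = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
	};

	static void *notifier(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&w.lock);
		w.triggered = true;              /* e.g. deactivation complete */
		pthread_cond_signal(&w.cond);
		pthread_mutex_unlock(&w.lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		pthread_create(&t, NULL, notifier, NULL);

		pthread_mutex_lock(&w.lock);
		while (!w.triggered)             /* wait_event_timeout() analogue */
			pthread_cond_wait(&w.cond, &w.lock);
		pthread_mutex_unlock(&w.lock);

		pthread_join(t, NULL);
		puts("notification received");
		return 0;
	}

Note that the condition passed to the wait must be the flag's value, not its address; an always-true pointer would make the wait return immediately.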
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 065553629de5..d03b4734c892 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -179,31 +179,31 @@ static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
};
static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
- {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
- {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
- {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
+ {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
+ {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
};
static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
- {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
- {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
+ {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
+ {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
};
static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
- {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
- {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
- {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
+ {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
+ {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
+ {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
};
static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
- {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
- {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
+ {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
+ {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
};
static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
@@ -387,7 +387,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
if (load > IWL_AGG_LOAD_THRESHOLD) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
- ret = ieee80211_start_tx_ba_session(sta, tid);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
if (ret == -EAGAIN) {
/*
* driver and mac80211 is out of sync
@@ -833,17 +833,23 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_lq_sta *lq_sta)
{
struct iwl_scale_tbl_info *tbl;
- bool full_concurrent;
+ bool full_concurrent = priv->bt_full_concurrent;
unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
- if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
- full_concurrent = true;
- else
- full_concurrent = false;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (priv->bt_full_concurrent != full_concurrent) {
+ if (priv->bt_ant_couple_ok) {
+ /*
+ * Is there a need to switch between
+ * full concurrency and 3-wire?
+ */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
+ full_concurrent = true;
+ else
+ full_concurrent = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
+ (priv->bt_full_concurrent != full_concurrent)) {
priv->bt_full_concurrent = full_concurrent;
/* Update uCode's rate table. */
@@ -1040,8 +1046,7 @@ done:
if (sta && sta->supp_rates[sband->band])
rs_rate_scale_perform(priv, skb, sta, lq_sta);
- /* Is there a need to switch between full concurrency and 3-wire? */
- if (priv->bt_ant_couple_ok)
+ if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
rs_bt_update_lq(priv, ctx, lq_sta);
}
@@ -2868,6 +2873,10 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
lq_sta->is_agg = 0;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->dbg_fixed_rate = 0;
+#endif
+
rs_initialize_lq(priv, conf, sta, lq_sta);
}
@@ -2881,6 +2890,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
u8 ant_toggle_cnt = 0;
u8 use_ht_possible = 1;
u8 valid_tx_ant = 0;
+ struct iwl_station_priv *sta_priv =
+ container_of(lq_sta, struct iwl_station_priv, lq_sta);
struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
/* Override starting rate (index 0) if needed for debug purposes */
@@ -2999,7 +3010,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
repeat_rate--;
}
- lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ lq_cmd->agg_params.agg_frame_cnt_limit =
+ sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF;
lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
lq_cmd->agg_params.agg_time_limit =
@@ -3010,10 +3022,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
*/
if (priv && priv->cfg->bt_params &&
priv->cfg->bt_params->agg_time_limit &&
- priv->cfg->bt_params->agg_time_limit >=
- LINK_QUAL_AGG_TIME_LIMIT_MIN &&
- priv->cfg->bt_params->agg_time_limit <=
- LINK_QUAL_AGG_TIME_LIMIT_MAX)
+ priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
lq_cmd->agg_params.agg_time_limit =
cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 75e50d33ecb3..184828c72b31 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -213,6 +213,7 @@ enum {
IWL_CCK_BASIC_RATES_MASK)
#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
#define IWL_INVALID_VALUE -1
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
new file mode 100644
index 000000000000..dfdbea6e8f99
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -0,0 +1,691 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include "iwl-dev.h"
+#include "iwl-agn.h"
+#include "iwl-sta.h"
+#include "iwl-core.h"
+#include "iwl-agn-calib.h"
+
+static int iwlagn_disable_bss(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct iwl_rxon_cmd *send)
+{
+ __le32 old_filter = send->filter_flags;
+ int ret;
+
+ send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
+
+ send->filter_flags = old_filter;
+
+ if (ret)
+ IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
+
+ return ret;
+}
+
+static int iwlagn_disable_pan(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct iwl_rxon_cmd *send)
+{
+ struct iwl_notification_wait disable_wait;
+ __le32 old_filter = send->filter_flags;
+ u8 old_dev_type = send->dev_type;
+ int ret;
+
+ iwlagn_init_notification_wait(priv, &disable_wait, NULL,
+ REPLY_WIPAN_DEACTIVATION_COMPLETE);
+
+ send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ send->dev_type = RXON_DEV_TYPE_P2P;
+ ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
+
+ send->filter_flags = old_filter;
+ send->dev_type = old_dev_type;
+
+ if (ret) {
+ IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
+ iwlagn_remove_notification(priv, &disable_wait);
+ } else {
+ signed long wait_res;
+
+ wait_res = iwlagn_wait_notification(priv, &disable_wait, HZ);
+ if (wait_res == 0) {
+ IWL_ERR(priv, "Timed out waiting for PAN disable\n");
+ ret = -EIO;
+ }
+ }
+
+ return ret;
+}
+
+static void iwlagn_update_qos(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ int ret;
+
+ if (!ctx->is_active)
+ return;
+
+ ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+ if (ctx->qos_data.qos_active)
+ ctx->qos_data.def_qos_parm.qos_flags |=
+ QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+ if (ctx->ht.enabled)
+ ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+ IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+ ctx->qos_data.qos_active,
+ ctx->qos_data.def_qos_parm.qos_flags);
+
+ ret = iwl_send_cmd_pdu(priv, ctx->qos_cmd,
+ sizeof(struct iwl_qosparam_cmd),
+ &ctx->qos_data.def_qos_parm);
+ if (ret)
+ IWL_ERR(priv, "Failed to update QoS\n");
+}
+
+static int iwlagn_update_beacon(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
+{
+ lockdep_assert_held(&priv->mutex);
+
+ dev_kfree_skb(priv->beacon_skb);
+ priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
+ if (!priv->beacon_skb)
+ return -ENOMEM;
+ return iwlagn_send_beacon_cmd(priv);
+}
+
+/**
+ * iwlagn_commit_rxon - commit staging_rxon to hardware
+ *
+ * The RXON command in staging_rxon is committed to the hardware and
+ * the active_rxon structure is updated with the new data. This
+ * function correctly transitions out of the RXON_ASSOC_MSK state if
+ * a HW tune is required based on the RXON structure changes.
+ */
+int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ /* cast away the const for active_rxon in this function */
+ struct iwl_rxon_cmd *active = (void *)&ctx->active;
+ bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+ bool old_assoc = !!(ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK);
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return -EINVAL;
+
+ if (!iwl_is_alive(priv))
+ return -EBUSY;
+
+ /* This function hardcodes a bunch of dual-mode assumptions */
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+ if (!ctx->is_active)
+ return 0;
+
+ /* always get timestamp with Rx frame */
+ ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+
+ if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) {
+ struct ieee80211_channel *chan = priv->_agn.hw_roc_channel;
+
+ iwl_set_rxon_channel(priv, chan, ctx);
+ iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
+ ctx->staging.filter_flags |=
+ RXON_FILTER_ASSOC_MSK |
+ RXON_FILTER_PROMISC_MSK |
+ RXON_FILTER_CTL2HOST_MSK;
+ ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
+ new_assoc = true;
+
+ if (memcmp(&ctx->staging, &ctx->active,
+ sizeof(ctx->staging)) == 0)
+ return 0;
+ }
+
+ if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
+ !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+ ret = iwl_check_rxon_cmd(priv, ctx);
+ if (ret) {
+ IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * On receiving a commit_rxon request, abort any previous
+ * channel switch that is still in progress.
+ */
+ if (priv->switch_rxon.switch_in_progress &&
+ (priv->switch_rxon.channel != ctx->staging.channel)) {
+ IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
+ le16_to_cpu(priv->switch_rxon.channel));
+ iwl_chswitch_done(priv, false);
+ }
+
+ /*
+ * If we don't need to send a full RXON, we can use
+ * iwl_rxon_assoc_cmd which is used to reconfigure filter
+ * and other flags for the current radio configuration.
+ */
+ if (!iwl_full_rxon_required(priv, ctx)) {
+ ret = iwl_send_rxon_assoc(priv, ctx);
+ if (ret) {
+ IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
+ return ret;
+ }
+
+ memcpy(active, &ctx->staging, sizeof(*active));
+ iwl_print_rx_config_cmd(priv, ctx);
+ return 0;
+ }
+
+ if (priv->cfg->ops->hcmd->set_pan_params) {
+ ret = priv->cfg->ops->hcmd->set_pan_params(priv);
+ if (ret)
+ return ret;
+ }
+
+ iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
+
+ IWL_DEBUG_INFO(priv,
+ "Going to commit RXON\n"
+ " * with%s RXON_FILTER_ASSOC_MSK\n"
+ " * channel = %d\n"
+ " * bssid = %pM\n",
+ (new_assoc ? "" : "out"),
+ le16_to_cpu(ctx->staging.channel),
+ ctx->staging.bssid_addr);
+
+ /*
+ * Always clear associated first, but with the correct config.
+ * This is required as for example station addition for the
+ * AP station must be done after the BSSID is set to correctly
+ * set up filters in the device.
+ */
+ if ((old_assoc && new_assoc) || !new_assoc) {
+ if (ctx->ctxid == IWL_RXON_CTX_BSS)
+ ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
+ else
+ ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
+ if (ret)
+ return ret;
+
+ memcpy(active, &ctx->staging, sizeof(*active));
+
+ /*
+ * Un-assoc RXON clears the station table and WEP
+ * keys, so we have to restore those afterwards.
+ */
+ iwl_clear_ucode_stations(priv, ctx);
+ iwl_restore_stations(priv, ctx);
+ ret = iwl_restore_default_wep_keys(priv, ctx);
+ if (ret) {
+ IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ /* RXON timing must be before associated RXON */
+ ret = iwl_send_rxon_timing(priv, ctx);
+ if (ret) {
+ IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
+ return ret;
+ }
+
+ if (new_assoc) {
+ /* QoS info may be cleared by previous un-assoc RXON */
+ iwlagn_update_qos(priv, ctx);
+
+ /*
+ * We'll run into this code path when beaconing is
+ * enabled, but then we also need to send the beacon
+ * to the device.
+ */
+ if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
+ ret = iwlagn_update_beacon(priv, ctx->vif);
+ if (ret) {
+ IWL_ERR(priv,
+ "Error sending required beacon (%d)!\n",
+ ret);
+ return ret;
+ }
+ }
+
+ priv->start_calib = 0;
+ /*
+ * Apply the new configuration.
+ *
+ * Associated RXON doesn't clear the station table in uCode,
+ * so we don't need to restore stations etc. after this.
+ */
+ ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
+ sizeof(struct iwl_rxon_cmd), &ctx->staging);
+ if (ret) {
+ IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
+ return ret;
+ }
+ memcpy(active, &ctx->staging, sizeof(*active));
+
+ iwl_reprogram_ap_sta(priv, ctx);
+
+ /* IBSS beacon needs to be sent after setting assoc */
+ if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
+ if (iwlagn_update_beacon(priv, ctx->vif))
+ IWL_ERR(priv, "Error sending IBSS beacon\n");
+ }
+
+ iwl_print_rx_config_cmd(priv, ctx);
+
+ iwl_init_sensitivity(priv);
+
+ /*
+ * If we issue a new RXON command which required a tune then we must
+ * send a new TXPOWER command or we won't be able to Tx any frames.
+ *
+ * It's expected we set power here if channel is changing.
+ */
+ ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
+ if (ret) {
+ IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = conf->channel;
+ const struct iwl_channel_info *ch_info;
+ int ret = 0;
+ bool ht_changed[NUM_IWL_RXON_CTX] = {};
+
+ IWL_DEBUG_MAC80211(priv, "changed %#x", changed);
+
+ mutex_lock(&priv->mutex);
+
+ if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
+ IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
+ goto out;
+ }
+
+ if (!iwl_is_ready(priv)) {
+ IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+ goto out;
+ }
+
+ if (changed & (IEEE80211_CONF_CHANGE_SMPS |
+ IEEE80211_CONF_CHANGE_CHANNEL)) {
+ /* mac80211 uses static for non-HT which is what we want */
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /*
+ * Recalculate chain counts.
+ *
+ * If monitor mode is enabled then mac80211 will
+ * set up the SM PS mode to OFF if an HT channel is
+ * configured.
+ */
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ for_each_context(priv, ctx)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ unsigned long flags;
+
+ ch_info = iwl_get_channel_info(priv, channel->band,
+ channel->hw_value);
+ if (!is_channel_valid(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ for_each_context(priv, ctx) {
+ /* Configure HT40 channels */
+ if (ctx->ht.enabled != conf_is_ht(conf)) {
+ ctx->ht.enabled = conf_is_ht(conf);
+ ht_changed[ctx->ctxid] = true;
+ }
+
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+ } else
+ ctx->ht.is_40mhz = false;
+
+ /*
+ * Default to no protection. Protection mode will
+ * later be set from BSS config in iwl_ht_conf
+ */
+ ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+ /* if we are switching from HT to 2.4 GHz, clear any
+ * HT-related flags since 2.4 GHz does not support HT */
+ if (le16_to_cpu(ctx->staging.channel) !=
+ channel->hw_value)
+ ctx->staging.flags = 0;
+
+ iwl_set_rxon_channel(priv, channel, ctx);
+ iwl_set_rxon_ht(priv, &priv->current_ht_config);
+
+ iwl_set_flags_for_band(priv, ctx, channel->band,
+ ctx->vif);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwl_update_bcast_stations(priv);
+
+ /*
+ * The list of supported rates and rate mask can be different
+ * for each band; since the band may have changed, reset
+ * the rate mask to what mac80211 lists.
+ */
+ iwl_set_rate(priv);
+ }
+
+ if (changed & (IEEE80211_CONF_CHANGE_PS |
+ IEEE80211_CONF_CHANGE_IDLE)) {
+ ret = iwl_power_update_mode(priv, false);
+ if (ret)
+ IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
+ priv->tx_power_user_lmt, conf->power_level);
+
+ iwl_set_tx_power(priv, conf->power_level, false);
+ }
+
+ for_each_context(priv, ctx) {
+ if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+ continue;
+ iwlagn_commit_rxon(priv, ctx);
+ if (ht_changed[ctx->ctxid])
+ iwlagn_update_qos(priv, ctx);
+ }
+ out:
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+
+static void iwlagn_check_needed_chains(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ieee80211_vif *vif = ctx->vif;
+ struct iwl_rxon_context *tmp;
+ struct ieee80211_sta *sta;
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+ struct ieee80211_sta_ht_cap *ht_cap;
+ bool need_multiple;
+
+ lockdep_assert_held(&priv->mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!sta) {
+ /*
+ * If at all, this can only happen through a race
+ * when the AP disconnects us while we're still
+ * setting up the connection, in that case mac80211
+ * will soon tell us about that.
+ */
+ need_multiple = false;
+ rcu_read_unlock();
+ break;
+ }
+
+ ht_cap = &sta->ht_cap;
+
+ need_multiple = true;
+
+ /*
+ * If the peer advertises no support for receiving 2 and 3
+ * stream MCS rates, it can't be transmitting them either.
+ */
+ if (ht_cap->mcs.rx_mask[1] == 0 &&
+ ht_cap->mcs.rx_mask[2] == 0) {
+ need_multiple = false;
+ } else if (!(ht_cap->mcs.tx_params &
+ IEEE80211_HT_MCS_TX_DEFINED)) {
+ /* If it can't TX MCS at all ... */
+ need_multiple = false;
+ } else if (ht_cap->mcs.tx_params &
+ IEEE80211_HT_MCS_TX_RX_DIFF) {
+ int maxstreams;
+
+ /*
+ * But if it can receive them, it might still not
+ * be able to transmit them, which is what we need
+ * to check here -- so check the number of streams
+ * it advertises for TX (if different from RX).
+ */
+
+ maxstreams = (ht_cap->mcs.tx_params &
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
+ maxstreams >>=
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ maxstreams += 1;
+
+ if (maxstreams <= 1)
+ need_multiple = false;
+ }
+
+ rcu_read_unlock();
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ /* currently */
+ need_multiple = false;
+ break;
+ default:
+ /* only AP really */
+ need_multiple = true;
+ break;
+ }
+
+ ctx->ht_need_multiple_chains = need_multiple;
+
+ if (!need_multiple) {
+ /* check all contexts */
+ for_each_context(priv, tmp) {
+ if (!tmp->vif)
+ continue;
+ if (tmp->ht_need_multiple_chains) {
+ need_multiple = true;
+ break;
+ }
+ }
+ }
+
+ ht_conf->single_chain_sufficient = !need_multiple;
+}
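iwlagn_check_needed_chains() decides whether more than one TX chain is worthwhile from the peer's advertised MCS set: the 2-/3-stream RX masks, the TX-defined bit, and, when TX differs from RX, the encoded maximum number of TX streams. A standalone sketch of that last extraction step in plain C; the bit positions mirror the tx_params encoding used above, and the helper name and return convention are made up for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* HT capabilities tx_params octet: bit 0 = TX MCS set defined,
	 * bit 1 = TX differs from RX, bits 2-3 = max TX streams - 1. */
	#define MCS_TX_DEFINED            0x01
	#define MCS_TX_RX_DIFF            0x02
	#define MCS_TX_MAX_STREAMS_MSK    0x0c
	#define MCS_TX_MAX_STREAMS_SHIFT  2

	static int peer_tx_streams(uint8_t tx_params)
	{
		if (!(tx_params & MCS_TX_DEFINED))
			return 1;                /* can't TX MCS at all */
		if (!(tx_params & MCS_TX_RX_DIFF))
			return -1;               /* same as RX; consult rx_mask instead */
		return ((tx_params & MCS_TX_MAX_STREAMS_MSK)
			>> MCS_TX_MAX_STREAMS_SHIFT) + 1;
	}

	int main(void)
	{
		/* defined, TX != RX, max-streams field = 1 -> 2 spatial streams */
		printf("%d\n", peer_tx_streams(MCS_TX_DEFINED | MCS_TX_RX_DIFF |
					       (1 << MCS_TX_MAX_STREAMS_SHIFT)));
		return 0;
	}

In the driver, a result of one stream (or an undefined TX MCS set) is what lets it mark a single chain as sufficient and save power.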
+
+void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+ int ret;
+ bool force = false;
+
+ mutex_lock(&priv->mutex);
+
+ if (unlikely(!iwl_is_ready(priv))) {
+ IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ if (unlikely(!ctx->vif)) {
+ IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+
+ if (changes & BSS_CHANGED_BEACON_INT)
+ force = true;
+
+ if (changes & BSS_CHANGED_QOS) {
+ ctx->qos_data.qos_active = bss_conf->qos;
+ iwlagn_update_qos(priv, ctx);
+ }
+
+ ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ if (bss_conf->assoc) {
+ priv->timestamp = bss_conf->timestamp;
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ } else {
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ }
+ }
+
+ if (ctx->ht.enabled) {
+ ctx->ht.protection = bss_conf->ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION;
+ ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ iwlagn_check_needed_chains(priv, ctx, bss_conf);
+ iwl_set_rxon_ht(priv, &priv->current_ht_config);
+ }
+
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+
+ if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
+ ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+
+ if (bss_conf->use_cts_prot)
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+
+ memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
+
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ if (vif->bss_conf.enable_beacon) {
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ priv->beacon_ctx = ctx;
+ } else {
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ priv->beacon_ctx = NULL;
+ }
+ }
+
+ if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+ iwlagn_commit_rxon(priv, ctx);
+
+ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+ /*
+ * The chain noise calibration will enable PM upon
+ * completion. If calibration has already been run
+ * then we need to enable power management here.
+ */
+ if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
+ iwl_power_update_mode(priv, false);
+
+ /* Enable RX differential gain and sensitivity calibrations */
+ iwl_chain_noise_reset(priv);
+ priv->start_calib = 1;
+ }
+
+ if (changes & BSS_CHANGED_IBSS) {
+ ret = iwlagn_manage_ibss_station(priv, vif,
+ bss_conf->ibss_joined);
+ if (ret)
+ IWL_ERR(priv, "failed to %s IBSS station %pM\n",
+ bss_conf->ibss_joined ? "add" : "remove",
+ bss_conf->bssid);
+ }
+
+ if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC &&
+ priv->beacon_ctx) {
+ if (iwlagn_update_beacon(priv, vif))
+ IWL_ERR(priv, "Error sending IBSS beacon\n");
+ }
+
+ mutex_unlock(&priv->mutex);
+}
+
+void iwlagn_post_scan(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx;
+
+ /*
+ * Since setting the RXON may have been deferred while
+ * performing the scan, fire one off if needed
+ */
+ for_each_context(priv, ctx)
+ if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+ iwlagn_commit_rxon(priv, ctx);
+
+ if (priv->cfg->ops->hcmd->set_pan_params)
+ priv->cfg->ops->hcmd->set_pan_params(priv);
+}
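Several of the new paths (iwlagn_mac_config(), iwlagn_bss_info_changed(), iwlagn_post_scan()) share the same guard: build the desired configuration in staging, then commit only when it differs from what the device is already running. A minimal model of that staging/active compare-and-commit pattern, in plain C with a simplified stand-in for the RXON state:

	#include <string.h>
	#include <stdio.h>

	/* Simplified stand-in for the driver's per-context RXON state. */
	struct rxon_cfg { unsigned short channel; unsigned int filter_flags; };

	struct rxon_ctx {
		struct rxon_cfg staging;   /* what we want */
		struct rxon_cfg active;    /* what the device currently runs */
	};

	/* Commit only when something actually changed, mirroring the memcmp()
	 * guards used before calling iwlagn_commit_rxon(). */
	static int commit_if_needed(struct rxon_ctx *ctx)
	{
		if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
			return 0;                        /* nothing to do */
		/* ...send the RXON command here... */
		ctx->active = ctx->staging;              /* mirror on success */
		return 1;
	}

	int main(void)
	{
		struct rxon_ctx ctx = { .staging = { .channel = 6 } };
		printf("%d %d\n", commit_if_needed(&ctx), commit_if_needed(&ctx));
		return 0;
	}

This is also why a scan can safely defer the commit: whatever accumulated in staging while scanning is picked up by the post-scan pass above.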
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index 35a30d2e0734..35f085ac336b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -684,7 +684,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
-void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
+static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
{
unsigned long flags;
@@ -714,3 +714,33 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
+
+void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ int sta_id;
+
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ WARN_ON(!sta_priv->client);
+ sta_priv->asleep = true;
+ if (atomic_read(&sta_priv->pending_frames) > 0)
+ ieee80211_sta_block_awake(hw, sta, true);
+ break;
+ case STA_NOTIFY_AWAKE:
+ WARN_ON(!sta_priv->client);
+ if (!sta_priv->asleep)
+ break;
+ sta_priv->asleep = false;
+ sta_id = iwl_sta_id(sta);
+ if (sta_id != IWL_INVALID_STATION)
+ iwl_sta_modify_ps_wake(priv, sta_id);
+ break;
+ default:
+ break;
+ }
+}
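The new iwlagn_mac_sta_notify() moves the power-save bookkeeping out of iwl-agn.c; it is intended to be wired into the driver's ieee80211_ops as the sta_notify callback, with mac80211 invoking it on STA_NOTIFY_SLEEP/AWAKE transitions. A plain-C model of the sleep/awake state machine it implements, using invented types rather than the real mac80211 ones:

	#include <stdbool.h>
	#include <stdio.h>

	enum sta_notify_cmd_model { NOTIFY_SLEEP, NOTIFY_AWAKE };

	struct sta_state {
		bool asleep;
		int  pending_frames;   /* frames queued to the device for this STA */
		bool blocked;          /* mac80211 told to buffer frames for it */
	};

	static void sta_notify_model(struct sta_state *sta,
				     enum sta_notify_cmd_model cmd)
	{
		switch (cmd) {
		case NOTIFY_SLEEP:
			sta->asleep = true;
			/* Block mac80211 only while frames are still in flight. */
			if (sta->pending_frames > 0)
				sta->blocked = true;
			break;
		case NOTIFY_AWAKE:
			if (!sta->asleep)
				break;             /* spurious wake, nothing to undo */
			sta->asleep = false;
			sta->blocked = false;      /* ~ iwl_sta_modify_ps_wake() */
			break;
		}
	}

	int main(void)
	{
		struct sta_state sta = { .pending_frames = 3 };
		sta_notify_model(&sta, NOTIFY_SLEEP);
		sta_notify_model(&sta, NOTIFY_AWAKE);
		printf("asleep=%d blocked=%d\n", sta.asleep, sta.blocked);
		return 0;
	}

With iwl_sta_modify_ps_wake() now static, this callback is the only path that clears the device-side power-save state for a client.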
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 2b078a995729..a709d05c5868 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -67,8 +67,14 @@
*/
static const u8 tid_to_ac[] = {
- /* this matches the mac80211 numbers */
- 2, 3, 3, 2, 1, 1, 0, 0
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
};
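As shown above, the table now spells out the standard 802.11 TID-to-access-category mapping with the IEEE80211_AC_* names instead of bare numbers (in mac80211's ordering VO=0, VI=1, BE=2, BK=3, so the values are unchanged). For reference, the same mapping as a tiny self-contained lookup helper in plain C, with locally defined enum values standing in for the mac80211 constants:

	#include <stdio.h>

	enum ac { AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3 };

	/* TIDs 0..7 map onto four access categories. */
	static const enum ac tid_to_ac_map[8] = {
		AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
	};

	static enum ac ac_from_tid(unsigned tid)
	{
		return tid < 8 ? tid_to_ac_map[tid] : AC_BE;  /* BE as a safe default */
	}

	int main(void)
	{
		printf("tid 5 -> AC %d, tid 7 -> AC %d\n",
		       ac_from_tid(5), ac_from_tid(7));
		return 0;
	}
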
static inline int get_ac_from_tid(u16 tid)
@@ -518,11 +524,11 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
struct iwl_cmd_meta *out_meta;
struct iwl_tx_cmd *tx_cmd;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- int swq_id, txq_id;
+ int txq_id;
dma_addr_t phys_addr;
dma_addr_t txcmd_phys;
dma_addr_t scratch_phys;
- u16 len, len_org, firstlen, secondlen;
+ u16 len, firstlen, secondlen;
u16 seq_number = 0;
__le16 fc;
u8 hdr_len;
@@ -531,8 +537,16 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
u8 tid = 0;
u8 *qc = NULL;
unsigned long flags;
+ bool is_agg = false;
- if (info->control.vif)
+ /*
+ * If the frame needs to go out off-channel, then
+ * we'll have put the PAN context to that channel,
+ * so make the frame go out there.
+ */
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+ ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+ else if (info->control.vif)
ctx = iwl_rxon_ctx_from_vif(info->control.vif);
spin_lock_irqsave(&priv->lock, flags);
@@ -567,8 +581,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (sta)
sta_priv = (void *)sta->drv_priv;
- if (sta_priv && sta_priv->asleep) {
- WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
+ if (sta_priv && sta_priv->asleep &&
+ (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
/*
* This sends an asynchronous command to the device,
* but we can rely on it being processed before the
@@ -616,11 +630,11 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_CTL_AMPDU &&
priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+ is_agg = true;
}
}
txq = &priv->txq[txq_id];
- swq_id = txq->swq_id;
q = &txq->q;
if (unlikely(iwl_queue_space(q) < q->high_mark)) {
@@ -687,30 +701,23 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
*/
len = sizeof(struct iwl_tx_cmd) +
sizeof(struct iwl_cmd_header) + hdr_len;
-
- len_org = len;
- firstlen = len = (len + 3) & ~3;
-
- if (len_org != len)
- len_org = 1;
- else
- len_org = 0;
+ firstlen = (len + 3) & ~3;
/* Tell NIC about any 2-byte padding after MAC header */
- if (len_org)
+ if (firstlen != len)
tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
/* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */
txcmd_phys = pci_map_single(priv->pci_dev,
- &out_cmd->hdr, len,
+ &out_cmd->hdr, firstlen,
PCI_DMA_BIDIRECTIONAL);
dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, len);
+ dma_unmap_len_set(out_meta, len, firstlen);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, len, 1, 0);
+ txcmd_phys, firstlen, 1, 0);
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
@@ -721,23 +728,21 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
- secondlen = len = skb->len - hdr_len;
- if (len) {
+ secondlen = skb->len - hdr_len;
+ if (secondlen > 0) {
phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
- len, PCI_DMA_TODEVICE);
+ secondlen, PCI_DMA_TODEVICE);
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, len,
+ phys_addr, secondlen,
0, 0);
}
scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
/* take back ownership of DMA buffer to enable update */
pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
- len, PCI_DMA_BIDIRECTIONAL);
+ firstlen, PCI_DMA_BIDIRECTIONAL);
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
@@ -753,7 +758,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
le16_to_cpu(tx_cmd->len));
pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
- len, PCI_DMA_BIDIRECTIONAL);
+ firstlen, PCI_DMA_BIDIRECTIONAL);
trace_iwlwifi_dev_tx(priv,
&((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
@@ -773,8 +778,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
* whether or not we should update the write pointer.
*/
- /* avoid atomic ops if it isn't an associated client */
- if (sta_priv && sta_priv->client)
+ /*
+ * Avoid atomic ops if it isn't an associated client.
+ * Also, if this is a packet for aggregation, don't
+ * increase the counter because the ucode will stop
+ * aggregation queues when their respective station
+ * goes to sleep.
+ */
+ if (sta_priv && sta_priv->client && !is_agg)
atomic_inc(&sta_priv->pending_frames);
if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
@@ -784,7 +795,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
} else {
- iwl_stop_queue(priv, txq->swq_id);
+ iwl_stop_queue(priv, txq);
}
}
@@ -936,7 +947,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
*/
void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
{
- int ch;
+ int ch, txq_id;
unsigned long flags;
/* Turn off all Tx DMA fifos */
@@ -955,6 +966,16 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
}
spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (!priv->txq)
+ return;
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ if (txq_id == priv->cmd_queue)
+ iwl_cmd_queue_unmap(priv);
+ else
+ iwl_tx_queue_unmap(priv, txq_id);
}
/*
@@ -1013,7 +1034,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
tid_data = &priv->stations[sta_id].tid[tid];
*ssn = SEQ_TO_SN(tid_data->seq_number);
tid_data->agg.txq_id = txq_id;
- priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(get_ac_from_tid(tid), txq_id);
+ iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
spin_unlock_irqrestore(&priv->sta_lock, flags);
ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
@@ -1153,14 +1174,15 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
return 0;
}
-static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info)
+static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ const u8 *addr1)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
struct ieee80211_sta *sta;
struct iwl_station_priv *sta_priv;
rcu_read_lock();
- sta = ieee80211_find_sta(tx_info->ctx->vif, hdr->addr1);
+ sta = ieee80211_find_sta(ctx->vif, addr1);
if (sta) {
sta_priv = (void *)sta->drv_priv;
/* avoid atomic ops if this isn't a client */
@@ -1169,6 +1191,15 @@ static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info)
ieee80211_sta_block_awake(priv->hw, sta, false);
}
rcu_read_unlock();
+}
+
+static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
+ bool is_agg)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
+
+ if (!is_agg)
+ iwlagn_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
}
@@ -1193,7 +1224,8 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
tx_info = &txq->txb[txq->q.read_ptr];
- iwlagn_tx_status(priv, tx_info);
+ iwlagn_tx_status(priv, tx_info,
+ txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
hdr = (struct ieee80211_hdr *)tx_info->skb->data;
if (hdr && ieee80211_is_data_qos(hdr->frame_control))
@@ -1222,7 +1254,6 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
int i, sh, ack;
u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
- u64 bitmap, sent_bitmap;
int successes = 0;
struct ieee80211_tx_info *info;
@@ -1241,40 +1272,68 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
if (sh < 0) /* tbw something is wrong with indices */
sh += 0x100;
- /* don't use 64-bit values for now */
- bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
-
if (agg->frame_count > (64 - sh)) {
IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
return -1;
}
+ if (!priv->cfg->base_params->no_agg_framecnt_info && ba_resp->txed) {
+ /*
+ * Sent and ack information is provided by the uCode;
+ * use it instead of computing it ourselves.
+ */
+ if (ba_resp->txed_2_done > ba_resp->txed) {
+ IWL_DEBUG_TX_REPLY(priv,
+ "bogus sent(%d) and ack(%d) count\n",
+ ba_resp->txed, ba_resp->txed_2_done);
+ /*
+ * set txed_2_done = txed,
+ * so it won't impact rate scale
+ */
+ ba_resp->txed = ba_resp->txed_2_done;
+ }
+ IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
+ ba_resp->txed, ba_resp->txed_2_done);
+ } else {
+ u64 bitmap, sent_bitmap;
+
+ /* don't use 64-bit values for now */
+ bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+ /* check for success or failure according to the
+ * transmitted bitmap and block-ack bitmap */
+ sent_bitmap = bitmap & agg->bitmap;
+
+ /* For each frame attempted in aggregation,
+ * update driver's record of tx frame's status. */
+ i = 0;
+ while (sent_bitmap) {
+ ack = sent_bitmap & 1ULL;
+ successes += ack;
+ IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
+ ack ? "ACK" : "NACK", i,
+ (agg->start_idx + i) & 0xff,
+ agg->start_idx + i);
+ sent_bitmap >>= 1;
+ ++i;
+ }
- /* check for success or failure according to the
- * transmitted bitmap and block-ack bitmap */
- sent_bitmap = bitmap & agg->bitmap;
-
- /* For each frame attempted in aggregation,
- * update driver's record of tx frame's status. */
- i = 0;
- while (sent_bitmap) {
- ack = sent_bitmap & 1ULL;
- successes += ack;
- IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
- ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
- agg->start_idx + i);
- sent_bitmap >>= 1;
- ++i;
+ IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
+ (unsigned long long)bitmap);
}
info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
memset(&info->status, 0, sizeof(info->status));
info->flags |= IEEE80211_TX_STAT_ACK;
info->flags |= IEEE80211_TX_STAT_AMPDU;
- info->status.ampdu_ack_len = successes;
- info->status.ampdu_len = agg->frame_count;
- iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
+ if (!priv->cfg->base_params->no_agg_framecnt_info && ba_resp->txed) {
+ info->status.ampdu_ack_len = ba_resp->txed_2_done;
+ info->status.ampdu_len = ba_resp->txed;
- IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
+ } else {
+ info->status.ampdu_ack_len = successes;
+ info->status.ampdu_len = agg->frame_count;
+ }
+ iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
return 0;
}
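When the uCode does not report the sent/acked counts directly, the driver still derives them by ANDing the block-ack bitmap against the bitmap of frames it actually transmitted and counting the set bits, as in the loop above. A standalone version of just that counting step, in plain C with illustrative bitmap values:

	#include <stdint.h>
	#include <stdio.h>

	/* Count acknowledged frames: walk the intersection of the
	 * "transmitted" and "block-acked" bitmaps, one aggregated
	 * frame per bit. */
	static int count_acked(uint64_t ba_bitmap, uint64_t agg_bitmap)
	{
		uint64_t sent = ba_bitmap & agg_bitmap;
		int successes = 0;

		while (sent) {
			successes += (int)(sent & 1ULL);
			sent >>= 1;
		}
		return successes;        /* a popcount builtin would also do */
	}

	int main(void)
	{
		/* 5 frames in the aggregate, 4 of them acknowledged */
		printf("%d\n", count_acked(0x1bULL, 0x1fULL));
		return 0;
	}

The resulting success count feeds info->status.ampdu_ack_len, which is what rate scaling uses to judge the aggregate.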
@@ -1385,7 +1444,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
priv->mac80211_registered &&
(agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_wake_queue(priv, txq->swq_id);
+ iwl_wake_queue(priv, txq);
iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 703621107dac..d807e5e2b718 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -40,30 +40,36 @@
#include "iwl-agn.h"
#include "iwl-agn-calib.h"
-static const s8 iwlagn_default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
- IWLAGN_CMD_FIFO_NUM,
- IWL_TX_FIFO_UNUSED,
- IWL_TX_FIFO_UNUSED,
- IWL_TX_FIFO_UNUSED,
- IWL_TX_FIFO_UNUSED,
- IWL_TX_FIFO_UNUSED,
+#define IWL_AC_UNSET -1
+
+struct queue_to_fifo_ac {
+ s8 fifo, ac;
+};
+
+static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
+ { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
+ { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
+ { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
+ { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
+ { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};
-static const s8 iwlagn_ipan_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
- IWL_TX_FIFO_BK_IPAN,
- IWL_TX_FIFO_BE_IPAN,
- IWL_TX_FIFO_VI_IPAN,
- IWL_TX_FIFO_VO_IPAN,
- IWL_TX_FIFO_BE_IPAN,
- IWLAGN_CMD_FIFO_NUM,
+static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
+ { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
+ { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
+ { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
+ { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
+ { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
+ { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
+ { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
+ { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
+ { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
+ { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
};
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
@@ -302,14 +308,6 @@ void iwlagn_init_alive_start(struct iwl_priv *priv)
{
int ret = 0;
- /* Check alive response for "valid" sign from uCode */
- if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
- goto restart;
- }
-
/* initialize uCode was loaded... verify inst image.
* This is a paranoid check, because we would not have gotten the
* "initialize" alive if code weren't properly loaded. */
@@ -429,7 +427,7 @@ void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
int iwlagn_alive_notify(struct iwl_priv *priv)
{
- const s8 *queues;
+ const struct queue_to_fifo_ac *queue_to_fifo;
u32 a;
unsigned long flags;
int i, chan;
@@ -492,9 +490,9 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
/* map queues to FIFOs */
if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
- queues = iwlagn_ipan_queue_to_tx_fifo;
+ queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
else
- queues = iwlagn_default_queue_to_tx_fifo;
+ queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);
@@ -510,18 +508,25 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);
for (i = 0; i < 10; i++) {
- int ac = queues[i];
+ int fifo = queue_to_fifo[i].fifo;
+ int ac = queue_to_fifo[i].ac;
iwl_txq_ctx_activate(priv, i);
- if (ac == IWL_TX_FIFO_UNUSED)
+ if (fifo == IWL_TX_FIFO_UNUSED)
continue;
- iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
+ if (ac != IWL_AC_UNSET)
+ iwl_set_swq_id(&priv->txq[i], ac, i);
+ iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
}
spin_unlock_irqrestore(&priv->lock, flags);
+ /* Enable L1-Active */
+ iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
iwlagn_send_wimax_coex(priv);
iwlagn_set_Xtal_calib(priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index c2636a7ab9ee..581dc9f10273 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -59,6 +59,7 @@
#include "iwl-sta.h"
#include "iwl-agn-calib.h"
#include "iwl-agn.h"
+#include "iwl-agn-led.h"
/******************************************************************************
@@ -85,175 +86,10 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
-MODULE_ALIAS("iwl4965");
static int iwlagn_ant_coupling;
static bool iwlagn_bt_ch_announce = 1;
-/**
- * iwlagn_commit_rxon - commit staging_rxon to hardware
- *
- * The RXON command in staging_rxon is committed to the hardware and
- * the active_rxon structure is updated with the new data. This
- * function correctly transitions out of the RXON_ASSOC_MSK state if
- * a HW tune is required based on the RXON structure changes.
- */
-int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- /* cast away the const for active_rxon in this function */
- struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
- int ret;
- bool new_assoc =
- !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
- bool old_assoc = !!(ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK);
-
- if (!iwl_is_alive(priv))
- return -EBUSY;
-
- if (!ctx->is_active)
- return 0;
-
- /* always get timestamp with Rx frame */
- ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
-
- ret = iwl_check_rxon_cmd(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
- return -EINVAL;
- }
-
- /*
- * receive commit_rxon request
- * abort any previous channel switch if still in process
- */
- if (priv->switch_rxon.switch_in_progress &&
- (priv->switch_rxon.channel != ctx->staging.channel)) {
- IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
- le16_to_cpu(priv->switch_rxon.channel));
- iwl_chswitch_done(priv, false);
- }
-
- /* If we don't need to send a full RXON, we can use
- * iwl_rxon_assoc_cmd which is used to reconfigure filter
- * and other flags for the current radio configuration. */
- if (!iwl_full_rxon_required(priv, ctx)) {
- ret = iwl_send_rxon_assoc(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
- return ret;
- }
-
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- iwl_print_rx_config_cmd(priv, ctx);
- return 0;
- }
-
- /* If we are currently associated and the new config requires
- * an RXON_ASSOC and the new config wants the associated mask enabled,
- * we must clear the associated from the active configuration
- * before we apply the new config */
- if (iwl_is_associated_ctx(ctx) && new_assoc) {
- IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
- ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_rxon_cmd),
- active_rxon);
-
- /* If the mask clearing failed then we set
- * active_rxon back to what it was previously */
- if (ret) {
- active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
- IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
- return ret;
- }
- iwl_clear_ucode_stations(priv, ctx);
- iwl_restore_stations(priv, ctx);
- ret = iwl_restore_default_wep_keys(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
- return ret;
- }
- }
-
- IWL_DEBUG_INFO(priv, "Sending RXON\n"
- "* with%s RXON_FILTER_ASSOC_MSK\n"
- "* channel = %d\n"
- "* bssid = %pM\n",
- (new_assoc ? "" : "out"),
- le16_to_cpu(ctx->staging.channel),
- ctx->staging.bssid_addr);
-
- iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
-
- if (!old_assoc) {
- /*
- * First of all, before setting associated, we need to
- * send RXON timing so the device knows about the DTIM
- * period and other timing values
- */
- ret = iwl_send_rxon_timing(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Error setting RXON timing!\n");
- return ret;
- }
- }
-
- if (priv->cfg->ops->hcmd->set_pan_params) {
- ret = priv->cfg->ops->hcmd->set_pan_params(priv);
- if (ret)
- return ret;
- }
-
- /* Apply the new configuration
- * RXON unassoc clears the station table in uCode so restoration of
- * stations is needed after it (the RXON command) completes
- */
- if (!new_assoc) {
- ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_rxon_cmd), &ctx->staging);
- if (ret) {
- IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
- return ret;
- }
- IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- iwl_clear_ucode_stations(priv, ctx);
- iwl_restore_stations(priv, ctx);
- ret = iwl_restore_default_wep_keys(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
- return ret;
- }
- }
- if (new_assoc) {
- priv->start_calib = 0;
- /* Apply the new configuration
- * RXON assoc doesn't clear the station table in uCode,
- */
- ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_rxon_cmd), &ctx->staging);
- if (ret) {
- IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
- return ret;
- }
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- }
- iwl_print_rx_config_cmd(priv, ctx);
-
- iwl_init_sensitivity(priv);
-
- /* If we issue a new RXON command which required a tune then we must
- * send a new TXPOWER command or we won't be able to Tx any frames */
- ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
- if (ret) {
- IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
void iwl_update_chain_flags(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx;
@@ -261,7 +97,8 @@ void iwl_update_chain_flags(struct iwl_priv *priv)
if (priv->cfg->ops->hcmd->set_rxon_chain) {
for_each_context(priv, ctx) {
priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- iwlcore_commit_rxon(priv, ctx);
+ if (ctx->active.rx_chain != ctx->staging.rx_chain)
+ iwlcore_commit_rxon(priv, ctx);
}
}
}
@@ -411,7 +248,8 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
return sizeof(*tx_beacon_cmd) + frame_size;
}
-static int iwl_send_beacon_cmd(struct iwl_priv *priv)
+
+int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
{
struct iwl_frame *frame;
unsigned int frame_size;
@@ -586,47 +424,6 @@ int iwl_hw_tx_queue_init(struct iwl_priv *priv,
return 0;
}
-/******************************************************************************
- *
- * Generic RX handler implementations
- *
- ******************************************************************************/
-static void iwl_rx_reply_alive(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_alive_resp *palive;
- struct delayed_work *pwork;
-
- palive = &pkt->u.alive_frame;
-
- IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
- "0x%01X 0x%01X\n",
- palive->is_valid, palive->ver_type,
- palive->ver_subtype);
-
- if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- memcpy(&priv->card_alive_init,
- &pkt->u.alive_frame,
- sizeof(struct iwl_init_alive_resp));
- pwork = &priv->init_alive_start;
- } else {
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
- memcpy(&priv->card_alive, &pkt->u.alive_frame,
- sizeof(struct iwl_alive_resp));
- pwork = &priv->alive_start;
- }
-
- /* We delay the ALIVE response by 5ms to
- * give the HW RF Kill time to activate... */
- if (palive->is_valid == UCODE_VALID_OK)
- queue_delayed_work(priv->workqueue, pwork,
- msecs_to_jiffies(5));
- else
- IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
static void iwl_bg_beacon_update(struct work_struct *work)
{
struct iwl_priv *priv =
@@ -661,7 +458,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
priv->beacon_skb = beacon;
- iwl_send_beacon_cmd(priv);
+ iwlagn_send_beacon_cmd(priv);
out:
mutex_unlock(&priv->mutex);
}
@@ -861,83 +658,6 @@ static void iwl_bg_ucode_trace(unsigned long data)
}
}
-static void iwl_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl4965_beacon_notif *beacon =
- (struct iwl4965_beacon_notif *)pkt->u.raw;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
- IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
- "tsf %d %d rate %d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
- queue_work(priv->workqueue, &priv->beacon_update);
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
- unsigned long status = priv->status;
-
- IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
- (flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On",
- (flags & CT_CARD_DISABLED) ?
- "Reached" : "Not reached");
-
- if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
- CT_CARD_DISABLED)) {
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- iwl_write_direct32(priv, HBUS_TARG_MBX_C,
- HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
-
- if (!(flags & RXON_CARD_DISABLED)) {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
- iwl_write_direct32(priv, HBUS_TARG_MBX_C,
- HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
- }
- if (flags & CT_CARD_DISABLED)
- iwl_tt_enter_ct_kill(priv);
- }
- if (!(flags & CT_CARD_DISABLED))
- iwl_tt_exit_ct_kill(priv);
-
- if (flags & HW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-
- if (!(flags & RXON_CARD_DISABLED))
- iwl_scan_cancel(priv);
-
- if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->status)))
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- else
- wake_up_interruptible(&priv->wait_command_queue);
-}
-
static void iwl_bg_tx_flush(struct work_struct *work)
{
struct iwl_priv *priv =
@@ -957,58 +677,13 @@ static void iwl_bg_tx_flush(struct work_struct *work)
}
/**
- * iwl_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl_setup_rx_handlers(struct iwl_priv *priv)
-{
- priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
- priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
- priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
- priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_rx_spectrum_measure_notif;
- priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
- priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
- iwl_rx_pm_debug_statistics_notif;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
-
- /*
- * The same handler is used for both the REPLY to a discrete
- * statistics request from the host as well as for the periodic
- * statistics notifications (after received beacons) from the uCode.
- */
- priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics;
- priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
-
- iwl_setup_rx_scan_handlers(priv);
-
- /* status change handler */
- priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
-
- priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
- iwl_rx_missed_beacon_notif;
- /* Rx handlers */
- priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
- priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
- /* block ack */
- priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
- /* Set up hardware specific Rx handlers */
- priv->cfg->ops->lib->rx_handler_setup(priv);
-}
-
-/**
* iwl_rx_handle - Main entry function for receiving responses from uCode
*
* Uses the priv->rx_handlers callback function array to invoke
* the appropriate handlers, including command responses,
* frame-received notifications, and other notifications.
*/
-void iwl_rx_handle(struct iwl_priv *priv)
+static void iwl_rx_handle(struct iwl_priv *priv)
{
struct iwl_rx_mem_buffer *rxb;
struct iwl_rx_packet *pkt;
@@ -1072,6 +747,27 @@ void iwl_rx_handle(struct iwl_priv *priv)
(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
(pkt->hdr.cmd != REPLY_TX);
+ /*
+ * Do the notification wait before RX handlers so
+ * even if the RX handler consumes the RXB we have
+ * access to it in the notification wait entry.
+ */
+ if (!list_empty(&priv->_agn.notif_waits)) {
+ struct iwl_notification_wait *w;
+
+ spin_lock(&priv->_agn.notif_wait_lock);
+ list_for_each_entry(w, &priv->_agn.notif_waits, list) {
+ if (w->cmd == pkt->hdr.cmd) {
+ w->triggered = true;
+ if (w->fn)
+ w->fn(priv, pkt);
+ }
+ }
+ spin_unlock(&priv->_agn.notif_wait_lock);
+
+ wake_up_all(&priv->_agn.notif_waitq);
+ }
+
/* Based on type of command response or notification,
* handle those that need handling via function in
* rx_handlers table. See iwl_setup_rx_handlers() */
@@ -1316,9 +1012,12 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
}
/* Re-enable all interrupts */
- /* only Re-enable if diabled by irq */
+ /* only Re-enable if disabled by irq */
if (test_bit(STATUS_INT_ENABLED, &priv->status))
iwl_enable_interrupts(priv);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(priv);
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
@@ -1530,71 +1229,14 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
}
/* Re-enable all interrupts */
- /* only Re-enable if diabled by irq */
+ /* only Re-enable if disabled by irq */
if (test_bit(STATUS_INT_ENABLED, &priv->status))
iwl_enable_interrupts(priv);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(priv);
}
-/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
-#define ACK_CNT_RATIO (50)
-#define BA_TIMEOUT_CNT (5)
-#define BA_TIMEOUT_MAX (16)
-
-/**
- * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
- *
- * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding
- * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
- * operation state.
- */
-bool iwl_good_ack_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt)
-{
- bool rc = true;
- int actual_ack_cnt_delta, expected_ack_cnt_delta;
- int ba_timeout_delta;
-
- actual_ack_cnt_delta =
- le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
- le32_to_cpu(priv->_agn.statistics.tx.actual_ack_cnt);
- expected_ack_cnt_delta =
- le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
- le32_to_cpu(priv->_agn.statistics.tx.expected_ack_cnt);
- ba_timeout_delta =
- le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
- le32_to_cpu(priv->_agn.statistics.tx.agg.ba_timeout);
- if ((priv->_agn.agg_tids_count > 0) &&
- (expected_ack_cnt_delta > 0) &&
- (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
- < ACK_CNT_RATIO) &&
- (ba_timeout_delta > BA_TIMEOUT_CNT)) {
- IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
- " expected_ack_cnt = %d\n",
- actual_ack_cnt_delta, expected_ack_cnt_delta);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- /*
- * This is ifdef'ed on DEBUGFS because otherwise the
- * statistics aren't available. If DEBUGFS is set but
- * DEBUG is not, these will just compile out.
- */
- IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
- priv->_agn.delta_statistics.tx.rx_detected_cnt);
- IWL_DEBUG_RADIO(priv,
- "ack_or_ba_timeout_collision delta = %d\n",
- priv->_agn.delta_statistics.tx.
- ack_or_ba_timeout_collision);
-#endif
- IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
- ba_timeout_delta);
- if (!actual_ack_cnt_delta &&
- (ba_timeout_delta >= BA_TIMEOUT_MAX))
- rc = false;
- }
- return rc;
-}
-
-
/*****************************************************************************
*
* sysfs attributes
@@ -2664,7 +2306,7 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
return pos;
}
- /* enable/disable bt channel announcement */
+ /* enable/disable bt channel inhibition */
priv->bt_ch_announce = iwlagn_bt_ch_announce;
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -2788,13 +2430,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
- if (priv->card_alive.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Alive failed.\n");
- goto restart;
- }
-
/* Initialize uCode has loaded Runtime uCode ... verify inst image.
* This is a paranoid check, because we would not have gotten the
* "runtime" alive if code weren't properly loaded. */
@@ -2816,13 +2451,8 @@ static void iwl_alive_start(struct iwl_priv *priv)
/* After the ALIVE response, we can send host commands to the uCode */
set_bit(STATUS_ALIVE, &priv->status);
- if (priv->cfg->ops->lib->recover_from_tx_stall) {
- /* Enable timer to monitor the driver queues */
- mod_timer(&priv->monitor_recover,
- jiffies +
- msecs_to_jiffies(
- priv->cfg->base_params->monitor_recover_period));
- }
+ /* Enable watchdog to monitor the driver tx queues */
+ iwl_setup_watchdog(priv);
if (iwl_is_rfkill(priv))
return;
@@ -2871,24 +2501,25 @@ static void iwl_alive_start(struct iwl_priv *priv)
priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
}
- if (priv->cfg->bt_params &&
- !priv->cfg->bt_params->advanced_bt_coexist) {
- /* Configure Bluetooth device coexistence support */
+ if (!priv->cfg->bt_params || (priv->cfg->bt_params &&
+ !priv->cfg->bt_params->advanced_bt_coexist)) {
+ /*
+	 * default is 2-wire BT coexistence support
+ */
priv->cfg->ops->hcmd->send_bt_config(priv);
}
iwl_reset_run_time_calib(priv);
+ set_bit(STATUS_READY, &priv->status);
+
/* Configure the adapter for unassociated operation */
iwlcore_commit_rxon(priv, ctx);
/* At this point, the NIC is initialized and operational */
iwl_rf_kill_ct_config(priv);
- iwl_leds_init(priv);
-
IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- set_bit(STATUS_READY, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
iwl_power_update_mode(priv, true);
@@ -2916,8 +2547,7 @@ static void __iwl_down(struct iwl_priv *priv)
/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
* to prevent rearm timer */
- if (priv->cfg->ops->lib->recover_from_tx_stall)
- del_timer_sync(&priv->monitor_recover);
+ del_timer_sync(&priv->watchdog);
iwl_clear_ucode_stations(priv, NULL);
iwl_dealloc_bcast_stations(priv);
@@ -2930,7 +2560,6 @@ static void __iwl_down(struct iwl_priv *priv)
priv->cfg->bt_params->bt_init_traffic_load;
else
priv->bt_traffic_load = 0;
- priv->bt_sco_active = false;
priv->bt_full_concurrent = false;
priv->bt_ci_compliance = 0;
@@ -2978,7 +2607,8 @@ static void __iwl_down(struct iwl_priv *priv)
STATUS_EXIT_PENDING;
/* device going down, Stop using ICT table */
- iwl_disable_ict(priv);
+ if (priv->cfg->ops->lib->isr_ops.disable)
+ priv->cfg->ops->lib->isr_ops.disable(priv);
iwlagn_txq_ctx_stop(priv);
iwlagn_rxq_stop(priv);
@@ -3201,7 +2831,8 @@ static void iwl_bg_alive_start(struct work_struct *data)
return;
/* enable dram interrupt */
- iwl_reset_ict(priv);
+ if (priv->cfg->ops->lib->isr_ops.reset)
+ priv->cfg->ops->lib->isr_ops.reset(priv);
mutex_lock(&priv->mutex);
iwl_alive_start(priv);
@@ -3222,8 +2853,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
}
if (priv->start_calib) {
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
iwl_chain_noise_calibration(priv,
(void *)&priv->_agn.statistics_bt);
iwl_sensitivity_calibration(priv,
@@ -3248,7 +2878,7 @@ static void iwl_bg_restart(struct work_struct *data)
if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
struct iwl_rxon_context *ctx;
- bool bt_sco, bt_full_concurrent;
+ bool bt_full_concurrent;
u8 bt_ci_compliance;
u8 bt_load;
u8 bt_status;
@@ -3267,7 +2897,6 @@ static void iwl_bg_restart(struct work_struct *data)
* re-configure the hw when we reconfigure the BT
* command.
*/
- bt_sco = priv->bt_sco_active;
bt_full_concurrent = priv->bt_full_concurrent;
bt_ci_compliance = priv->bt_ci_compliance;
bt_load = priv->bt_traffic_load;
@@ -3275,7 +2904,6 @@ static void iwl_bg_restart(struct work_struct *data)
__iwl_down(priv);
- priv->bt_sco_active = bt_sco;
priv->bt_full_concurrent = bt_full_concurrent;
priv->bt_ci_compliance = bt_ci_compliance;
priv->bt_traffic_load = bt_load;
@@ -3309,90 +2937,89 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
mutex_unlock(&priv->mutex);
}
-#define IWL_DELAY_NEXT_SCAN (HZ*2)
-
-void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
+static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int wait)
{
- struct iwl_rxon_context *ctx;
- struct ieee80211_conf *conf = NULL;
- int ret = 0;
-
- if (!vif || !priv->is_open)
- return;
-
- ctx = iwl_rxon_ctx_from_vif(vif);
+ struct iwl_priv *priv = hw->priv;
+ int ret;
- if (vif->type == NL80211_IFTYPE_AP) {
- IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
- return;
+ /* Not supported if we don't have PAN */
+ if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) {
+ ret = -EOPNOTSUPP;
+ goto free;
}
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- iwl_scan_cancel_timeout(priv, 200);
-
- conf = ieee80211_get_hw_conf(priv->hw);
-
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwlcore_commit_rxon(priv, ctx);
-
- ret = iwl_send_rxon_timing(priv, ctx);
- if (ret)
- IWL_WARN(priv, "RXON timing - "
- "Attempting to continue.\n");
+ /* Not supported on pre-P2P firmware */
+ if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
+ BIT(NL80211_IFTYPE_P2P_CLIENT))) {
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ mutex_lock(&priv->mutex);
- iwl_set_rxon_ht(priv, &priv->current_ht_config);
+ if (!priv->contexts[IWL_RXON_CTX_PAN].is_active) {
+ /*
+ * If the PAN context is free, use the normal
+ * way of doing remain-on-channel offload + TX.
+ */
+ ret = 1;
+ goto out;
+ }
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ /* TODO: queue up if scanning? */
+ if (test_bit(STATUS_SCANNING, &priv->status) ||
+ priv->_agn.offchan_tx_skb) {
+ ret = -EBUSY;
+ goto out;
+ }
- ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+ /*
+ * max_scan_ie_len doesn't include the blank SSID or the header,
+ * so need to add that again here.
+ */
+ if (skb->len > hw->wiphy->max_scan_ie_len + 24 + 2) {
+ ret = -ENOBUFS;
+ goto out;
+ }
- IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- vif->bss_conf.aid, vif->bss_conf.beacon_int);
+ priv->_agn.offchan_tx_skb = skb;
+ priv->_agn.offchan_tx_timeout = wait;
+ priv->_agn.offchan_tx_chan = chan;
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ ret = iwl_scan_initiate(priv, priv->contexts[IWL_RXON_CTX_PAN].vif,
+ IWL_SCAN_OFFCH_TX, chan->band);
+ if (ret)
+ priv->_agn.offchan_tx_skb = NULL;
+ out:
+ mutex_unlock(&priv->mutex);
+ free:
+ if (ret < 0)
+ kfree_skb(skb);
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- }
+ return ret;
+}
- iwlcore_commit_rxon(priv, ctx);
+static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret;
- IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- vif->bss_conf.aid, ctx->active.bssid_addr);
+ mutex_lock(&priv->mutex);
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- break;
- case NL80211_IFTYPE_ADHOC:
- iwl_send_beacon_cmd(priv);
- break;
- default:
- IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, vif->type);
- break;
- }
+ if (!priv->_agn.offchan_tx_skb)
+ return -EINVAL;
- /* the chain noise calibration will enabled PM upon completion
- * If chain noise has already been run, then we need to enable
- * power management here */
- if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
- iwl_power_update_mode(priv, false);
+ priv->_agn.offchan_tx_skb = NULL;
- /* Enable Rx differential gain and sensitivity calibrations */
- iwl_chain_noise_reset(priv);
- priv->start_calib = 1;
+ ret = iwl_scan_cancel_timeout(priv, 200);
+ if (ret)
+ ret = -EIO;
+ mutex_unlock(&priv->mutex);
+ return ret;
}
/*****************************************************************************
@@ -3420,7 +3047,10 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_NEED_DTIM_PERIOD |
- IEEE80211_HW_SPECTRUM_MGMT;
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
if (!priv->cfg->base_params->broken_powersave)
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
@@ -3438,8 +3068,11 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
}
+ hw->wiphy->max_remain_on_channel_duration = 1000;
+
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS;
+ WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
/*
* For now, disable PS by default because it affects
@@ -3463,6 +3096,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&priv->bands[IEEE80211_BAND_5GHZ];
+ iwl_leds_init(priv);
+
ret = ieee80211_register_hw(priv->hw);
if (ret) {
IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -3474,7 +3109,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
}
-static int iwl_mac_start(struct ieee80211_hw *hw)
+int iwlagn_mac_start(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = hw->priv;
int ret;
@@ -3507,7 +3142,7 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
}
}
- iwl_led_start(priv);
+ iwlagn_led_enable(priv);
out:
priv->is_open = 1;
@@ -3515,7 +3150,7 @@ out:
return 0;
}
-static void iwl_mac_stop(struct ieee80211_hw *hw)
+void iwlagn_mac_stop(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = hw->priv;
@@ -3530,14 +3165,15 @@ static void iwl_mac_stop(struct ieee80211_hw *hw)
flush_workqueue(priv->workqueue);
- /* enable interrupts again in order to receive rfkill changes */
+ /* User space software may expect getting rfkill changes
+ * even if interface is down */
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_enable_interrupts(priv);
+ iwl_enable_rfkill_int(priv);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct iwl_priv *priv = hw->priv;
@@ -3550,76 +3186,14 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
dev_kfree_skb_any(skb);
IWL_DEBUG_MACDUMP(priv, "leave\n");
- return NETDEV_TX_OK;
}
-void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- int ret = 0;
-
- lockdep_assert_held(&priv->mutex);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- /* The following should be done only at AP bring up */
- if (!iwl_is_associated_ctx(ctx)) {
-
- /* RXON - unassoc (to set timing command) */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwlcore_commit_rxon(priv, ctx);
-
- /* RXON Timing */
- ret = iwl_send_rxon_timing(priv, ctx);
- if (ret)
- IWL_WARN(priv, "RXON timing failed - "
- "Attempting to continue.\n");
-
- /* AP has all antennas */
- priv->chain_noise_data.active_chains =
- priv->hw_params.valid_rx_ant;
- iwl_set_rxon_ht(priv, &priv->current_ht_config);
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- ctx->staging.assoc_id = 0;
-
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |=
- RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |=
- RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_SLOT_MSK;
- }
- /* need to send beacon cmd before committing assoc RXON! */
- iwl_send_beacon_cmd(priv);
- /* restore RXON assoc */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- iwlcore_commit_rxon(priv, ctx);
- }
- iwl_send_beacon_cmd(priv);
-
- /* FIXME - we need to add code here to detect a totally new
- * configuration, reset the AP, unassoc, rxon timing, assoc,
- * clear sta table, add BCAST sta... */
-}
-
-static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key)
+void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
{
-
struct iwl_priv *priv = hw->priv;
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -3631,10 +3205,9 @@ static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
{
struct iwl_priv *priv = hw->priv;
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -3650,6 +3223,14 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS, the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
if (sta_id == IWL_INVALID_STATION)
return -EINVAL;
@@ -3701,13 +3282,15 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
-static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
struct iwl_priv *priv = hw->priv;
int ret = -EINVAL;
+ struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
sta->addr, tid);
@@ -3762,11 +3345,28 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
}
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /*
+ * If the limit is 0, then it wasn't initialised yet,
+ * use the default. We can do that since we take the
+ * minimum below, and we don't want to go above our
+ * default due to hardware restrictions.
+ */
+ if (sta_priv->max_agg_bufsize == 0)
+ sta_priv->max_agg_bufsize =
+ LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+ /*
+ * Even though in theory the peer could have different
+ * aggregation reorder buffer sizes for different sessions,
+ * our ucode doesn't allow for that and has a global limit
+ * for each station. Therefore, use the minimum of all the
+ * aggregation sessions and our default value.
+ */
+ sta_priv->max_agg_bufsize =
+ min(sta_priv->max_agg_bufsize, buf_size);
+
if (priv->cfg->ht_params &&
priv->cfg->ht_params->use_rts_for_aggregation) {
- struct iwl_station_priv *sta_priv =
- (void *) sta->drv_priv;
-
/*
			 * switch to RTS/CTS if it is the preferred protection
* method for HT traffic
@@ -3774,9 +3374,13 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
sta_priv->lq_sta.lq.general_params.flags |=
LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
- iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
- &sta_priv->lq_sta.lq, CMD_ASYNC, false);
}
+
+ sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
+ sta_priv->max_agg_bufsize;
+
+ iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
+ &sta_priv->lq_sta.lq, CMD_ASYNC, false);
ret = 0;
break;
}
@@ -3785,39 +3389,9 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return ret;
}
-static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- int sta_id;
-
- switch (cmd) {
- case STA_NOTIFY_SLEEP:
- WARN_ON(!sta_priv->client);
- sta_priv->asleep = true;
- if (atomic_read(&sta_priv->pending_frames) > 0)
- ieee80211_sta_block_awake(hw, sta, true);
- break;
- case STA_NOTIFY_AWAKE:
- WARN_ON(!sta_priv->client);
- if (!sta_priv->asleep)
- break;
- sta_priv->asleep = false;
- sta_id = iwl_sta_id(sta);
- if (sta_id != IWL_INVALID_STATION)
- iwl_sta_modify_ps_wake(priv, sta_id);
- break;
- default:
- break;
- }
-}
-
-static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct iwl_priv *priv = hw->priv;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
@@ -3858,8 +3432,8 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
return 0;
}
-static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch)
+void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
{
struct iwl_priv *priv = hw->priv;
const struct iwl_channel_info *ch_info;
@@ -3956,10 +3530,10 @@ out_exit:
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-static void iwlagn_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
+void iwlagn_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
{
struct iwl_priv *priv = hw->priv;
__le32 filter_or = 0, filter_nand = 0;
@@ -3976,7 +3550,8 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
changed_flags, *total_flags);
CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+ /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
#undef CHK
@@ -3986,7 +3561,11 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
for_each_context(priv, ctx) {
ctx->staging.filter_flags &= ~filter_nand;
ctx->staging.filter_flags |= filter_or;
- iwlcore_commit_rxon(priv, ctx);
+
+ /*
+ * Not committing directly because hardware can perform a scan,
+ * but we'll eventually commit the filter flags change anyway.
+ */
}
mutex_unlock(&priv->mutex);
@@ -4001,7 +3580,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
-static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop)
+void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
{
struct iwl_priv *priv = hw->priv;
@@ -4039,6 +3618,95 @@ done:
IWL_DEBUG_MAC80211(priv, "leave\n");
}
+static void iwlagn_disable_roc(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+ struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel);
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (!ctx->is_active)
+ return;
+
+ ctx->staging.dev_type = RXON_DEV_TYPE_2STA;
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwl_set_rxon_channel(priv, chan, ctx);
+ iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
+
+ priv->_agn.hw_roc_channel = NULL;
+
+ iwlcore_commit_rxon(priv, ctx);
+
+ ctx->is_active = false;
+}
+
+static void iwlagn_bg_roc_done(struct work_struct *work)
+{
+ struct iwl_priv *priv = container_of(work, struct iwl_priv,
+ _agn.hw_roc_work.work);
+
+ mutex_lock(&priv->mutex);
+ ieee80211_remain_on_channel_expired(priv->hw);
+ iwlagn_disable_roc(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type,
+ int duration)
+{
+ struct iwl_priv *priv = hw->priv;
+ int err = 0;
+
+ if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ return -EOPNOTSUPP;
+
+ if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
+ BIT(NL80211_IFTYPE_P2P_CLIENT)))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->mutex);
+
+ if (priv->contexts[IWL_RXON_CTX_PAN].is_active ||
+ test_bit(STATUS_SCAN_HW, &priv->status)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
+ priv->_agn.hw_roc_channel = channel;
+ priv->_agn.hw_roc_chantype = channel_type;
+ priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
+ iwlcore_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
+ queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work,
+ msecs_to_jiffies(duration + 20));
+
+ msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
+ ieee80211_ready_on_channel(priv->hw);
+
+ out:
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ return -EOPNOTSUPP;
+
+ cancel_delayed_work_sync(&priv->_agn.hw_roc_work);
+
+ mutex_lock(&priv->mutex);
+ iwlagn_disable_roc(priv);
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+
/*****************************************************************************
*
* driver setup and teardown
@@ -4060,6 +3728,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
+ INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done);
iwl_setup_scan_deferred_work(priv);
@@ -4074,12 +3743,9 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
priv->ucode_trace.data = (unsigned long)priv;
priv->ucode_trace.function = iwl_bg_ucode_trace;
- if (priv->cfg->ops->lib->recover_from_tx_stall) {
- init_timer(&priv->monitor_recover);
- priv->monitor_recover.data = (unsigned long)priv;
- priv->monitor_recover.function =
- priv->cfg->ops->lib->recover_from_tx_stall;
- }
+ init_timer(&priv->watchdog);
+ priv->watchdog.data = (unsigned long)priv;
+ priv->watchdog.function = iwl_bg_watchdog;
if (!priv->cfg->base_params->use_isr_legacy)
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
@@ -4156,6 +3822,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->force_reset[IWL_FW_RESET].reset_duration =
IWL_DELAY_NEXT_FORCE_FW_RELOAD;
+ priv->rx_statistics_jiffies = jiffies;
+
/* Choose which receivers/antennas to use */
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv,
@@ -4172,13 +3840,13 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
priv->bt_duration = BT_DURATION_LIMIT_DEF;
priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
- priv->dynamic_agg_thresh = BT_AGG_THRESHOLD_DEF;
}
/* Set the tx_power_user_lmt to the lowest power level
* this value will get overwritten by channel max power avg
* from eeprom */
priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;
+ priv->tx_power_next = IWLAGN_TX_POWER_TARGET_POWER_MIN;
ret = iwl_init_channel_map(priv);
if (ret) {
@@ -4209,34 +3877,38 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
kfree(priv->scan_cmd);
}
-static struct ieee80211_ops iwl_hw_ops = {
- .tx = iwl_mac_tx,
- .start = iwl_mac_start,
- .stop = iwl_mac_stop,
+struct ieee80211_ops iwlagn_hw_ops = {
+ .tx = iwlagn_mac_tx,
+ .start = iwlagn_mac_start,
+ .stop = iwlagn_mac_stop,
.add_interface = iwl_mac_add_interface,
.remove_interface = iwl_mac_remove_interface,
- .config = iwl_mac_config,
+ .change_interface = iwl_mac_change_interface,
+ .config = iwlagn_mac_config,
.configure_filter = iwlagn_configure_filter,
- .set_key = iwl_mac_set_key,
- .update_tkip_key = iwl_mac_update_tkip_key,
+ .set_key = iwlagn_mac_set_key,
+ .update_tkip_key = iwlagn_mac_update_tkip_key,
.conf_tx = iwl_mac_conf_tx,
- .reset_tsf = iwl_mac_reset_tsf,
- .bss_info_changed = iwl_bss_info_changed,
- .ampdu_action = iwl_mac_ampdu_action,
+ .bss_info_changed = iwlagn_bss_info_changed,
+ .ampdu_action = iwlagn_mac_ampdu_action,
.hw_scan = iwl_mac_hw_scan,
- .sta_notify = iwl_mac_sta_notify,
+ .sta_notify = iwlagn_mac_sta_notify,
.sta_add = iwlagn_mac_sta_add,
.sta_remove = iwl_mac_sta_remove,
- .channel_switch = iwl_mac_channel_switch,
- .flush = iwl_mac_flush,
+ .channel_switch = iwlagn_mac_channel_switch,
+ .flush = iwlagn_mac_flush,
.tx_last_beacon = iwl_mac_tx_last_beacon,
+ .remain_on_channel = iwl_mac_remain_on_channel,
+ .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
+ .offchannel_tx = iwl_mac_offchannel_tx,
+ .offchannel_tx_cancel_wait = iwl_mac_offchannel_tx_cancel_wait,
};
static void iwl_hw_detect(struct iwl_priv *priv)
{
priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
- pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
+ priv->rev_id = priv->pci_dev->revision;
IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
}
@@ -4298,10 +3970,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (cfg->mod_params->disable_hw_scan) {
dev_printk(KERN_DEBUG, &(pdev->dev),
"sw scan support is deprecated\n");
- iwl_hw_ops.hw_scan = NULL;
+ iwlagn_hw_ops.hw_scan = NULL;
}
- hw = iwl_alloc_all(cfg, &iwl_hw_ops);
+ hw = iwl_alloc_all(cfg);
if (!hw) {
err = -ENOMEM;
goto out;
@@ -4333,6 +4005,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
BIT(NL80211_IFTYPE_ADHOC);
priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
BIT(NL80211_IFTYPE_STATION);
+ priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
@@ -4350,6 +4023,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
+#ifdef CONFIG_IWL_P2P
+ priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
+#endif
priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
@@ -4368,8 +4045,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(iwlagn_ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
true : false;
- /* enable/disable bt channel announcement */
+ /* enable/disable bt channel inhibition */
priv->bt_ch_announce = iwlagn_bt_ch_announce;
+ IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
+ (priv->bt_ch_announce) ? "On" : "Off");
if (iwl_alloc_traffic_mem(priv))
IWL_ERR(priv, "Not enough memory to generate traffic log\n");
@@ -4461,6 +4140,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_free_eeprom;
+ err = iwl_eeprom_check_sku(priv);
+ if (err)
+ goto out_free_eeprom;
+
/* extract MAC Address */
iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
@@ -4500,8 +4183,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_enable_msi(priv->pci_dev);
- iwl_alloc_isr_ict(priv);
- err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr,
+ if (priv->cfg->ops->lib->isr_ops.alloc)
+ priv->cfg->ops->lib->isr_ops.alloc(priv);
+
+ err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr,
IRQF_SHARED, DRV_NAME, priv);
if (err) {
IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4515,14 +4200,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* 8. Enable interrupts and read RFKILL state
*********************************************/
- /* enable interrupts if needed: hw bug w/a */
+ /* enable rfkill interrupt: hw bug w/a */
pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
}
- iwl_enable_interrupts(priv);
+ iwl_enable_rfkill_int(priv);
/* If platform's RF_KILL switch is NOT set to KILL */
if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
@@ -4548,7 +4233,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
destroy_workqueue(priv->workqueue);
priv->workqueue = NULL;
free_irq(priv->pci_dev->irq, priv);
- iwl_free_isr_ict(priv);
+ if (priv->cfg->ops->lib->isr_ops.free)
+ priv->cfg->ops->lib->isr_ops.free(priv);
out_disable_msi:
pci_disable_msi(priv->pci_dev);
iwl_uninit_drv(priv);
@@ -4588,6 +4274,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
* we need to set STATUS_EXIT_PENDING bit.
*/
set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+ iwl_leds_exit(priv);
+
if (priv->mac80211_registered) {
ieee80211_unregister_hw(priv->hw);
priv->mac80211_registered = 0;
@@ -4643,7 +4332,8 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
iwl_uninit_drv(priv);
- iwl_free_isr_ict(priv);
+ if (priv->cfg->ops->lib->isr_ops.free)
+ priv->cfg->ops->lib->isr_ops.free(priv);
dev_kfree_skb(priv->beacon_skb);
@@ -4659,12 +4349,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
/* Hardware specific file defines the PCI IDs table for that hardware module */
static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
-#ifdef CONFIG_IWL4965
- {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
- {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
-#endif /* CONFIG_IWL4965 */
-#ifdef CONFIG_IWL5000
-/* 5100 Series WiFi */
{IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
@@ -4734,51 +4418,32 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
-/* 6x00 Series Gen2a */
- {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1206, iwl6000g2a_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0x1216, iwl6000g2a_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1226, iwl6000g2a_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1207, iwl6000g2a_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6000g2a_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6000g2a_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6000g2a_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6000g2a_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6000g2a_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6000g2a_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6000g2a_2abg_cfg)},
-
-/* 6x00 Series Gen2b */
- {IWL_PCI_DEVICE(0x008F, 0x5105, iwl6000g2b_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5115, iwl6000g2b_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008F, 0x5125, iwl6000g2b_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008F, 0x5107, iwl6000g2b_bg_cfg)},
- {IWL_PCI_DEVICE(0x008F, 0x5201, iwl6000g2b_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
- {IWL_PCI_DEVICE(0x008F, 0x5221, iwl6000g2b_2agn_cfg)},
- {IWL_PCI_DEVICE(0x008F, 0x5206, iwl6000g2b_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
- {IWL_PCI_DEVICE(0x008F, 0x5226, iwl6000g2b_2abg_cfg)},
- {IWL_PCI_DEVICE(0x008F, 0x5207, iwl6000g2b_2bg_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5301, iwl6000g2b_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5305, iwl6000g2b_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5307, iwl6000g2b_bg_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5321, iwl6000g2b_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5325, iwl6000g2b_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008B, 0x5311, iwl6000g2b_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008B, 0x5315, iwl6000g2b_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6000g2b_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6000g2b_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6000g2b_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6000g2b_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6000g2b_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6000g2b_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6000g2b_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6000g2b_2abg_cfg)},
+/* 6x05 Series */
+ {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
+
+/* 6x30 Series */
+ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
/* 6x50 WiFi/WiMax Series */
{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
@@ -4788,13 +4453,13 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
{IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
-/* 6x50 WiFi/WiMax Series Gen2 */
- {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6050g2_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6050g2_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6050g2_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6050g2_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6050g2_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6050g2_bgn_cfg)},
+/* 6150 WiFi/WiMax Series */
+ {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6150_bgn_cfg)},
/* 1000 Series WiFi */
{IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
@@ -4812,10 +4477,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 100 Series WiFi */
{IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
{IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
{IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
- {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
- {IWL_PCI_DEVICE(0x08AE, 0x1017, iwl100_bg_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
/* 130 Series WiFi */
{IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
@@ -4825,7 +4491,48 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
{IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
-#endif /* CONFIG_IWL5000 */
+/* 2x00 Series */
+ {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
+
+/* 2x30 Series */
+ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
+
+/* 6x35 Series */
+ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
+
+/* 200 Series */
+ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl200_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0222, iwl200_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0422, iwl200_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0026, iwl200_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0226, iwl200_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0426, iwl200_bg_cfg)},
+
+/* 230 Series */
+ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl230_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0262, iwl230_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0462, iwl230_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0066, iwl230_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)},
{0}
};
@@ -4836,10 +4543,7 @@ static struct pci_driver iwl_driver = {
.id_table = iwl_hw_card_ids,
.probe = iwl_pci_probe,
.remove = __devexit_p(iwl_pci_remove),
-#ifdef CONFIG_PM
- .suspend = iwl_pci_suspend,
- .resume = iwl_pci_resume,
-#endif
+ .driver.pm = IWL_PM_OPS,
};
static int __init iwl_init(void)
@@ -4925,6 +4629,12 @@ module_param_named(antenna_coupling, iwlagn_ant_coupling, int, S_IRUGO);
MODULE_PARM_DESC(antenna_coupling,
"specify antenna coupling in dB (defualt: 0 dB)");
-module_param_named(bt_ch_announce, iwlagn_bt_ch_announce, bool, S_IRUGO);
-MODULE_PARM_DESC(bt_ch_announce,
- "Enable BT channel announcement mode (default: enable)");
+module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_ch_inhibition,
+ "Disable BT channel inhibition (default: enable)");
+
+module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
+MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
+
+module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
+MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index f525d55f2c0f..20f8e4188994 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -74,41 +74,53 @@ extern struct iwl_cfg iwl5100_bgn_cfg;
extern struct iwl_cfg iwl5100_abg_cfg;
extern struct iwl_cfg iwl5150_agn_cfg;
extern struct iwl_cfg iwl5150_abg_cfg;
-extern struct iwl_cfg iwl6000g2a_2agn_cfg;
-extern struct iwl_cfg iwl6000g2a_2abg_cfg;
-extern struct iwl_cfg iwl6000g2a_2bg_cfg;
-extern struct iwl_cfg iwl6000g2b_bgn_cfg;
-extern struct iwl_cfg iwl6000g2b_bg_cfg;
-extern struct iwl_cfg iwl6000g2b_2agn_cfg;
-extern struct iwl_cfg iwl6000g2b_2abg_cfg;
-extern struct iwl_cfg iwl6000g2b_2bgn_cfg;
-extern struct iwl_cfg iwl6000g2b_2bg_cfg;
+extern struct iwl_cfg iwl6005_2agn_cfg;
+extern struct iwl_cfg iwl6005_2abg_cfg;
+extern struct iwl_cfg iwl6005_2bg_cfg;
+extern struct iwl_cfg iwl1030_bgn_cfg;
+extern struct iwl_cfg iwl1030_bg_cfg;
+extern struct iwl_cfg iwl6030_2agn_cfg;
+extern struct iwl_cfg iwl6030_2abg_cfg;
+extern struct iwl_cfg iwl6030_2bgn_cfg;
+extern struct iwl_cfg iwl6030_2bg_cfg;
extern struct iwl_cfg iwl6000i_2agn_cfg;
extern struct iwl_cfg iwl6000i_2abg_cfg;
extern struct iwl_cfg iwl6000i_2bg_cfg;
extern struct iwl_cfg iwl6000_3agn_cfg;
extern struct iwl_cfg iwl6050_2agn_cfg;
extern struct iwl_cfg iwl6050_2abg_cfg;
-extern struct iwl_cfg iwl6050g2_bgn_cfg;
+extern struct iwl_cfg iwl6150_bgn_cfg;
extern struct iwl_cfg iwl1000_bgn_cfg;
extern struct iwl_cfg iwl1000_bg_cfg;
extern struct iwl_cfg iwl100_bgn_cfg;
extern struct iwl_cfg iwl100_bg_cfg;
extern struct iwl_cfg iwl130_bgn_cfg;
extern struct iwl_cfg iwl130_bg_cfg;
+extern struct iwl_cfg iwl2000_2bgn_cfg;
+extern struct iwl_cfg iwl2000_2bg_cfg;
+extern struct iwl_cfg iwl2030_2bgn_cfg;
+extern struct iwl_cfg iwl2030_2bg_cfg;
+extern struct iwl_cfg iwl6035_2agn_cfg;
+extern struct iwl_cfg iwl6035_2abg_cfg;
+extern struct iwl_cfg iwl6035_2bg_cfg;
+extern struct iwl_cfg iwl200_bg_cfg;
+extern struct iwl_cfg iwl200_bgn_cfg;
+extern struct iwl_cfg iwl230_bg_cfg;
+extern struct iwl_cfg iwl230_bgn_cfg;
extern struct iwl_mod_params iwlagn_mod_params;
extern struct iwl_hcmd_ops iwlagn_hcmd;
extern struct iwl_hcmd_ops iwlagn_bt_hcmd;
extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
+extern struct ieee80211_ops iwlagn_hw_ops;
+extern struct ieee80211_ops iwl4965_hw_ops;
+
int iwl_reset_ict(struct iwl_priv *priv);
void iwl_disable_ict(struct iwl_priv *priv);
int iwl_alloc_isr_ict(struct iwl_priv *priv);
void iwl_free_isr_ict(struct iwl_priv *priv);
irqreturn_t iwl_isr_ict(int irq, void *data);
-bool iwl_good_ack_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
/* tx queue */
void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
@@ -132,6 +144,11 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv,
/* RXON */
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed);
+void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes);
/* uCode */
int iwlagn_load_ucode(struct iwl_priv *priv);
@@ -173,11 +190,7 @@ void iwlagn_rx_replenish_now(struct iwl_priv *priv);
void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
int iwlagn_rxq_stop(struct iwl_priv *priv);
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
-void iwlagn_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_rx_handle(struct iwl_priv *priv);
+void iwl_setup_rx_handlers(struct iwl_priv *priv);
/* tx */
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
@@ -227,16 +240,6 @@ static inline bool iwl_is_tx_success(u32 status)
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
-/* rx */
-void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-bool iwl_good_plcp_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
-void iwl_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-
/* scan */
int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwlagn_post_scan(struct iwl_priv *priv);
@@ -249,6 +252,7 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant);
+int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
/* bt coex */
void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
@@ -292,9 +296,12 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid, u16 ssn);
int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid);
-void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id);
void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
int iwl_update_bcast_stations(struct iwl_priv *priv);
+void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta);
/* rate */
static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
@@ -318,4 +325,47 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
+/* notification wait support */
+void __acquires(wait_entry)
+iwlagn_init_notification_wait(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry,
+ void (*fn)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt),
+ u8 cmd);
+signed long __releases(wait_entry)
+iwlagn_wait_notification(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout);
+void __releases(wait_entry)
+iwlagn_remove_notification(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry);
+
+/* mac80211 handlers (for 4965) */
+void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int iwlagn_mac_start(struct ieee80211_hw *hw);
+void iwlagn_mac_stop(struct ieee80211_hw *hw);
+void iwlagn_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast);
+int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key);
+int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size);
+int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch);
+void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop);
+
#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 424801abc80e..ca42ffa63ed7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -178,7 +178,6 @@ enum {
REPLY_BT_COEX_PRIO_TABLE = 0xcc,
REPLY_BT_COEX_PROT_ENV = 0xcd,
REPLY_BT_COEX_PROFILE_NOTIF = 0xce,
- REPLY_BT_COEX_SCO = 0xcf,
/* PAN commands */
REPLY_WIPAN_PARAMS = 0xb2,
@@ -189,6 +188,7 @@ enum {
REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */
REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
+ REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd,
REPLY_MAX = 0xff
};
@@ -2022,6 +2022,9 @@ struct iwl_compressed_ba_resp {
__le64 bitmap;
__le16 scd_flow;
__le16 scd_ssn;
+ /* following only for 5000 series and up */
+ u8 txed; /* number of frames sent */
+ u8 txed_2_done; /* number of frames acked */
} __packed;
/*
@@ -2407,9 +2410,9 @@ struct iwl_link_quality_cmd {
#define BT_FRAG_THRESHOLD_MAX 0
#define BT_FRAG_THRESHOLD_MIN 0
-#define BT_AGG_THRESHOLD_DEF 0
-#define BT_AGG_THRESHOLD_MAX 0
-#define BT_AGG_THRESHOLD_MIN 0
+#define BT_AGG_THRESHOLD_DEF 1200
+#define BT_AGG_THRESHOLD_MAX 8000
+#define BT_AGG_THRESHOLD_MIN 400
/*
* REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
@@ -2436,8 +2439,9 @@ struct iwl_bt_cmd {
#define IWLAGN_BT_FLAG_COEX_MODE_3W 2
#define IWLAGN_BT_FLAG_COEX_MODE_4W 3
-#define IWLAGN_BT_FLAG_UCODE_DEFAULT BIT(6)
-#define IWLAGN_BT_FLAG_NOCOEX_NOTIF BIT(7)
+#define IWLAGN_BT_FLAG_UCODE_DEFAULT BIT(6)
+/* Disable Sync PSPoll on SCO/eSCO */
+#define IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE BIT(7)
#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF
#define IWLAGN_BT_PRIO_BOOST_MIN 0x00
@@ -2447,8 +2451,9 @@ struct iwl_bt_cmd {
#define IWLAGN_BT3_T7_DEFAULT 1
-#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffffffff)
-#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffffffff)
+#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffff0000)
+#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffff0000)
+#define IWLAGN_BT_KILL_ACK_CTS_MASK_SCO cpu_to_le32(0xffffffff)
#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2
@@ -2472,7 +2477,7 @@ struct iwl_bt_cmd {
IWLAGN_BT_VALID_BT4_TIMES | \
IWLAGN_BT_VALID_3W_LUT)
-struct iwlagn_bt_cmd {
+struct iwl_basic_bt_cmd {
u8 flags;
u8 ledtime; /* unused */
u8 max_kill;
@@ -2485,6 +2490,10 @@ struct iwlagn_bt_cmd {
__le32 bt3_lookup_table[12];
__le16 bt4_decision_time; /* unused */
__le16 valid;
+};
+
+struct iwl6000_bt_cmd {
+ struct iwl_basic_bt_cmd basic;
u8 prio_boost;
/*
* set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
@@ -2494,6 +2503,18 @@ struct iwlagn_bt_cmd {
__le16 rx_prio_boost; /* SW boost of WiFi rx priority */
};
+struct iwl2000_bt_cmd {
+ struct iwl_basic_bt_cmd basic;
+ __le32 prio_boost;
+ /*
+ * set IWLAGN_BT_VALID_BOOST to "1" in the "valid" bitmask
+ * if the following patterns are configured
+ */
+ u8 reserved;
+ u8 tx_prio_boost; /* SW boost of WiFi tx priority */
+ __le16 rx_prio_boost; /* SW boost of WiFi rx priority */
+};
+
#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0))
struct iwlagn_bt_sco_cmd {
@@ -2664,9 +2685,16 @@ struct iwl_spectrum_notification {
#define IWL_POWER_VEC_SIZE 5
#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
+#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0))
+#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1))
#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4))
+#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5))
+#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6))
+#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7))
+#define IWL_POWER_BT_SCO_ENA cpu_to_le16(BIT(8))
+#define IWL_POWER_ADVANCE_PM_ENA_MSK cpu_to_le16(BIT(9))
struct iwl3945_powertable_cmd {
__le16 flags;
@@ -2936,9 +2964,15 @@ struct iwl3945_scan_cmd {
u8 data[0];
} __packed;
+enum iwl_scan_flags {
+ /* BIT(0) currently unused */
+ IWL_SCAN_FLAGS_ACTION_FRAME_TX = BIT(1),
+ /* bits 2-7 reserved */
+};
+
struct iwl_scan_cmd {
__le16 len;
- u8 reserved0;
+ u8 scan_flags; /* scan flags: see enum iwl_scan_flags */
u8 channel_count; /* # channels in channel list */
__le16 quiet_time; /* dwell only this # millisecs on quiet channel
* (only for active scan) */
@@ -3070,6 +3104,13 @@ struct iwl4965_beacon_notif {
__le32 ibss_mgr_status;
} __packed;
+struct iwlagn_beacon_notif {
+ struct iwlagn_tx_resp beacon_notify_hdr;
+ __le32 low_tsf;
+ __le32 high_tsf;
+ __le32 ibss_mgr_status;
+} __packed;
+
/*
* REPLY_TX_BEACON = 0x91 (command, has simple generic response)
*/
@@ -4131,6 +4172,10 @@ enum iwl_bt_coex_profile_traffic_load {
*/
};
+#define BT_SESSION_ACTIVITY_1_UART_MSG 0x1
+#define BT_SESSION_ACTIVITY_2_UART_MSG 0x2
+
+/* BT UART message - Share Part (BT -> WiFi) */
#define BT_UART_MSG_FRAME1MSGTYPE_POS (0)
#define BT_UART_MSG_FRAME1MSGTYPE_MSK \
(0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS)
@@ -4215,9 +4260,12 @@ enum iwl_bt_coex_profile_traffic_load {
#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0)
#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \
(0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS)
-#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS (3)
-#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK \
- (0x3 << BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS)
+#define BT_UART_MSG_FRAME7PAGE_POS (3)
+#define BT_UART_MSG_FRAME7PAGE_MSK \
+ (0x1 << BT_UART_MSG_FRAME7PAGE_POS)
+#define BT_UART_MSG_FRAME7INQUIRY_POS (4)
+#define BT_UART_MSG_FRAME7INQUIRY_MSK \
+ (0x1 << BT_UART_MSG_FRAME7INQUIRY_POS)
#define BT_UART_MSG_FRAME7CONNECTABLE_POS (5)
#define BT_UART_MSG_FRAME7CONNECTABLE_MSK \
(0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS)
@@ -4225,6 +4273,83 @@ enum iwl_bt_coex_profile_traffic_load {
#define BT_UART_MSG_FRAME7RESERVED_MSK \
(0x3 << BT_UART_MSG_FRAME7RESERVED_POS)
+/* BT Session Activity 2 UART message (BT -> WiFi) */
+#define BT_UART_MSG_2_FRAME1RESERVED1_POS (5)
+#define BT_UART_MSG_2_FRAME1RESERVED1_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME1RESERVED1_POS)
+#define BT_UART_MSG_2_FRAME1RESERVED2_POS (6)
+#define BT_UART_MSG_2_FRAME1RESERVED2_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME1RESERVED2_POS)
+
+#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS (0)
+#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_MSK \
+ (0x3F<<BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS)
+#define BT_UART_MSG_2_FRAME2RESERVED_POS (6)
+#define BT_UART_MSG_2_FRAME2RESERVED_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME2RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS (0)
+#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_MSK \
+ (0xF<<BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS)
+#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS (4)
+#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS)
+#define BT_UART_MSG_2_FRAME3LEMASTER_POS (5)
+#define BT_UART_MSG_2_FRAME3LEMASTER_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME3LEMASTER_POS)
+#define BT_UART_MSG_2_FRAME3RESERVED_POS (6)
+#define BT_UART_MSG_2_FRAME3RESERVED_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME3RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS (0)
+#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_MSK \
+ (0xF<<BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS)
+#define BT_UART_MSG_2_FRAME4NUMLECONN_POS (4)
+#define BT_UART_MSG_2_FRAME4NUMLECONN_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME4NUMLECONN_POS)
+#define BT_UART_MSG_2_FRAME4RESERVED_POS (6)
+#define BT_UART_MSG_2_FRAME4RESERVED_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME4RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME5BTMINRSSI_POS (0)
+#define BT_UART_MSG_2_FRAME5BTMINRSSI_MSK \
+ (0xF<<BT_UART_MSG_2_FRAME5BTMINRSSI_POS)
+#define BT_UART_MSG_2_FRAME5LESCANINITMODE_POS (4)
+#define BT_UART_MSG_2_FRAME5LESCANINITMODE_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME5LESCANINITMODE_POS)
+#define BT_UART_MSG_2_FRAME5LEADVERMODE_POS (5)
+#define BT_UART_MSG_2_FRAME5LEADVERMODE_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME5LEADVERMODE_POS)
+#define BT_UART_MSG_2_FRAME5RESERVED_POS (6)
+#define BT_UART_MSG_2_FRAME5RESERVED_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME5RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS (0)
+#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_MSK \
+ (0x1F<<BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS)
+#define BT_UART_MSG_2_FRAME6RFU_POS (5)
+#define BT_UART_MSG_2_FRAME6RFU_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME6RFU_POS)
+#define BT_UART_MSG_2_FRAME6RESERVED_POS (6)
+#define BT_UART_MSG_2_FRAME6RESERVED_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME6RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS (0)
+#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_MSK \
+ (0x7<<BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILE1_POS (3)
+#define BT_UART_MSG_2_FRAME7LEPROFILE1_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE1_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILE2_POS (4)
+#define BT_UART_MSG_2_FRAME7LEPROFILE2_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE2_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS (5)
+#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_MSK \
+ (0x1<<BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS)
+#define BT_UART_MSG_2_FRAME7RESERVED_POS (6)
+#define BT_UART_MSG_2_FRAME7RESERVED_MSK \
+ (0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS)
+
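Each field in the Session Activity 2 frames above is described by a POS/MSK pair: the mask selects the field's bits within the frame byte and the position shifts them down to bit 0. A hedged decoding sketch (not part of the patch; the helper name and the frame2 parameter are illustrative):

static inline u8 example_bt_uart2_agg_traffic_load(u8 frame2)
{
	/* mask out the field, then shift it down to bit 0 */
	return (frame2 & BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_MSK) >>
		BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS;
}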
struct iwl_bt_uart_msg {
u8 header;
@@ -4357,6 +4482,11 @@ int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
* REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
*/
+/*
+ * Minimum slot time in TU
+ */
+#define IWL_MIN_SLOT_TIME 20
+
/**
* struct iwl_wipan_slot
* @width: Time in TU
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 25fb3912342c..6c30fa652e27 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -43,11 +43,6 @@
#include "iwl-helpers.h"
-MODULE_DESCRIPTION("iwl core");
-MODULE_VERSION(IWLWIFI_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
/*
* set bt_coex_active to true, uCode will do kill/defer
* every time the priority line is asserted (BT is sending signals on the
@@ -65,27 +60,24 @@ MODULE_LICENSE("GPL");
* default: bt_coex_active = true (BT_COEX_ENABLE)
*/
bool bt_coex_active = true;
-EXPORT_SYMBOL_GPL(bt_coex_active);
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
u32 iwl_debug_level;
-EXPORT_SYMBOL(iwl_debug_level);
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-EXPORT_SYMBOL(iwl_bcast_addr);
/* This function both allocates and initializes hw and priv. */
-struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
- struct ieee80211_ops *hw_ops)
+struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
{
struct iwl_priv *priv;
-
/* mac80211 allocates memory for this device instance, including
* space for this driver's private structure */
- struct ieee80211_hw *hw =
- ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops);
+ struct ieee80211_hw *hw;
+
+ hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
+ cfg->ops->ieee80211_ops);
if (hw == NULL) {
pr_err("%s: Can not allocate network device\n",
cfg->name);
@@ -98,36 +90,6 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
out:
return hw;
}
-EXPORT_SYMBOL(iwl_alloc_all);
-
-/*
- * QoS support
-*/
-static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (!ctx->is_active)
- return;
-
- ctx->qos_data.def_qos_parm.qos_flags = 0;
-
- if (ctx->qos_data.qos_active)
- ctx->qos_data.def_qos_parm.qos_flags |=
- QOS_PARAM_FLG_UPDATE_EDCA_MSK;
-
- if (ctx->ht.enabled)
- ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
-
- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
- ctx->qos_data.qos_active,
- ctx->qos_data.def_qos_parm.qos_flags);
-
- iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
- sizeof(struct iwl_qosparam_cmd),
- &ctx->qos_data.def_qos_parm, NULL);
-}
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
@@ -248,15 +210,12 @@ int iwlcore_init_geos(struct iwl_priv *priv)
if (!is_channel_valid(ch))
continue;
- if (is_channel_a_band(ch))
- sband = &priv->bands[IEEE80211_BAND_5GHZ];
- else
- sband = &priv->bands[IEEE80211_BAND_2GHZ];
+ sband = &priv->bands[ch->band];
geo_ch = &sband->channels[sband->n_channels++];
geo_ch->center_freq =
- ieee80211_channel_to_frequency(ch->channel);
+ ieee80211_channel_to_frequency(ch->channel, ch->band);
geo_ch->max_power = ch->max_power_avg;
geo_ch->max_antenna_gain = 0xff;
geo_ch->hw_value = ch->channel;
@@ -304,7 +263,6 @@ int iwlcore_init_geos(struct iwl_priv *priv)
return 0;
}
-EXPORT_SYMBOL(iwlcore_init_geos);
/*
* iwlcore_free_geos - undo allocations in iwlcore_init_geos
@@ -315,41 +273,6 @@ void iwlcore_free_geos(struct iwl_priv *priv)
kfree(priv->ieee_rates);
clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
}
-EXPORT_SYMBOL(iwlcore_free_geos);
-
-/*
- * iwlcore_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
- * function.
- */
-void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags)
-{
- if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
- *tx_flags |= TX_CMD_FLG_RTS_MSK;
- *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
- *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
- if (!ieee80211_is_mgmt(fc))
- return;
-
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- *tx_flags |= TX_CMD_FLG_CTS_MSK;
- break;
- }
- } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
- *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- *tx_flags |= TX_CMD_FLG_CTS_MSK;
- *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
- }
-}
-EXPORT_SYMBOL(iwlcore_tx_cmd_protection);
-
static bool iwl_is_channel_extension(struct iwl_priv *priv,
enum ieee80211_band band,
@@ -394,7 +317,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
le16_to_cpu(ctx->staging.channel),
ctx->ht.extension_chan_offset);
}
-EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
@@ -495,7 +417,6 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
sizeof(ctx->timing), &ctx->timing);
}
-EXPORT_SYMBOL(iwl_send_rxon_timing);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
int hw_decrypt)
@@ -508,7 +429,6 @@ void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
}
-EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
/* validate RXON structure is valid */
int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
@@ -581,7 +501,6 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
}
return 0;
}
-EXPORT_SYMBOL(iwl_check_rxon_cmd);
/**
* iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
@@ -645,7 +564,6 @@ int iwl_full_rxon_required(struct iwl_priv *priv,
return 0;
}
-EXPORT_SYMBOL(iwl_full_rxon_required);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
@@ -659,7 +577,6 @@ u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
else
return IWL_RATE_6M_PLCP;
}
-EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
static void _iwl_set_rxon_ht(struct iwl_priv *priv,
struct iwl_ht_config *ht_conf,
@@ -736,7 +653,6 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
for_each_context(priv, ctx)
_iwl_set_rxon_ht(priv, ht_conf, ctx);
}
-EXPORT_SYMBOL(iwl_set_rxon_ht);
/* Return valid, unused, channel for a passive scan to reset the RF */
u8 iwl_get_single_channel_number(struct iwl_priv *priv,
@@ -777,7 +693,6 @@ u8 iwl_get_single_channel_number(struct iwl_priv *priv,
return channel;
}
-EXPORT_SYMBOL(iwl_get_single_channel_number);
/**
* iwl_set_rxon_channel - Set the band and channel values in staging RXON
@@ -808,7 +723,6 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
return 0;
}
-EXPORT_SYMBOL(iwl_set_rxon_channel);
void iwl_set_flags_for_band(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
@@ -832,7 +746,6 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
}
}
-EXPORT_SYMBOL(iwl_set_flags_for_band);
/*
* initialize rxon structure with default values from eeprom
@@ -904,7 +817,6 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
}
-EXPORT_SYMBOL(iwl_connection_init_rx_config);
void iwl_set_rate(struct iwl_priv *priv)
{
@@ -937,7 +849,6 @@ void iwl_set_rate(struct iwl_priv *priv)
(IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
}
}
-EXPORT_SYMBOL(iwl_set_rate);
void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
{
@@ -957,35 +868,6 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
mutex_unlock(&priv->mutex);
}
}
-EXPORT_SYMBOL(iwl_chswitch_done);
-
-void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
- /*
- * MULTI-FIXME
- * See iwl_mac_channel_switch.
- */
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
-
- if (priv->switch_rxon.switch_in_progress) {
- if (!le32_to_cpu(csa->status) &&
- (csa->channel == priv->switch_rxon.channel)) {
- rxon->channel = csa->channel;
- ctx->staging.channel = csa->channel;
- IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_chswitch_done(priv, true);
- } else {
- IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_chswitch_done(priv, false);
- }
- }
-}
-EXPORT_SYMBOL(iwl_rx_csa);
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
@@ -1007,19 +889,37 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
-EXPORT_SYMBOL(iwl_print_rx_config_cmd);
#endif
/**
* iwl_irq_handle_error - called for HW or SW error interrupt from card
*/
void iwl_irq_handle_error(struct iwl_priv *priv)
{
+ unsigned int reload_msec;
+ unsigned long reload_jiffies;
+
/* Set the FW error flag -- cleared on iwl_down */
set_bit(STATUS_FW_ERROR, &priv->status);
/* Cancel currently queued command. */
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ /* W/A for WiFi/WiMAX coex when WiMAX owns the RF */
+ if (priv->cfg->internal_wimax_coex &&
+ (!(iwl_read_prph(priv, APMG_CLK_CTRL_REG) &
+ APMS_CLK_VAL_MRB_FUNC_MODE) ||
+ (iwl_read_prph(priv, APMG_PS_CTRL_REG) &
+ APMG_PS_CTRL_VAL_RESET_REQ))) {
+ wake_up_interruptible(&priv->wait_command_queue);
+ /*
+ * Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit
+ */
+ clear_bit(STATUS_READY, &priv->status);
+ IWL_ERR(priv, "RF is used by WiMAX\n");
+ return;
+ }
+
IWL_ERR(priv, "Loaded firmware version: %s\n",
priv->hw->wiphy->fw_version);
@@ -1041,6 +941,25 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
* commands by clearing the INIT status bit */
clear_bit(STATUS_READY, &priv->status);
+ /*
+ * If the firmware keeps reloading, it indicates something
+ * seriously wrong and that the firmware is having trouble
+ * recovering from it. Instead of retrying forever, which would
+ * fill the syslog and hang the system, just stop it.
+ */
+ reload_jiffies = jiffies;
+ reload_msec = jiffies_to_msecs((long) reload_jiffies -
+ (long) priv->reload_jiffies);
+ priv->reload_jiffies = reload_jiffies;
+ if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
+ priv->reload_count++;
+ if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
+ IWL_ERR(priv, "BUG_ON, Stop restarting\n");
+ return;
+ }
+ } else
+ priv->reload_count = 0;
+
if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
"Restarting adapter due to uCode error.\n");
@@ -1049,7 +968,6 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
queue_work(priv->workqueue, &priv->restart);
}
}
-EXPORT_SYMBOL(iwl_irq_handle_error);
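The reload throttle added to iwl_irq_handle_error() above measures the time since the previous firmware error and gives up after too many back-to-back reloads. The same pattern in isolation, as a hedged sketch (not part of the patch; the function and parameter names are illustrative, and the driver's actual IWL_MIN_RELOAD_DURATION / IWL_MAX_CONTINUE_RELOAD_CNT thresholds are defined elsewhere in iwlwifi):

static bool example_reload_allowed(unsigned long *last_error_jiffies,
				   unsigned int *reload_count,
				   unsigned int min_duration_ms,
				   unsigned int max_reloads)
{
	unsigned long now = jiffies;
	unsigned int delta_ms = jiffies_to_msecs((long)now -
						 (long)*last_error_jiffies);

	*last_error_jiffies = now;

	if (delta_ms <= min_duration_ms) {
		/* errors arriving back to back: count them */
		if (++(*reload_count) >= max_reloads)
			return false;	/* give up restarting */
	} else {
		*reload_count = 0;	/* quiet period, reset the counter */
	}

	return true;
}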
static int iwl_apm_stop_master(struct iwl_priv *priv)
{
@@ -1086,7 +1004,6 @@ void iwl_apm_stop(struct iwl_priv *priv)
*/
iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
-EXPORT_SYMBOL(iwl_apm_stop);
/*
@@ -1201,13 +1118,22 @@ int iwl_apm_init(struct iwl_priv *priv)
out:
return ret;
}
-EXPORT_SYMBOL(iwl_apm_init);
int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
- int ret = 0;
- s8 prev_tx_power = priv->tx_power_user_lmt;
+ int ret;
+ s8 prev_tx_power;
+ bool defer;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (priv->tx_power_user_lmt == tx_power && !force)
+ return 0;
+
+ if (!priv->cfg->ops->lib->send_tx_power)
+ return -EOPNOTSUPP;
if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
IWL_WARN(priv,
@@ -1224,93 +1150,33 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
return -EINVAL;
}
- if (priv->tx_power_user_lmt != tx_power)
- force = true;
-
- /* if nic is not up don't send command */
- if (iwl_is_ready_rf(priv)) {
- priv->tx_power_user_lmt = tx_power;
- if (force && priv->cfg->ops->lib->send_tx_power)
- ret = priv->cfg->ops->lib->send_tx_power(priv);
- else if (!priv->cfg->ops->lib->send_tx_power)
- ret = -EOPNOTSUPP;
- /*
- * if fail to set tx_power, restore the orig. tx power
- */
- if (ret)
- priv->tx_power_user_lmt = prev_tx_power;
- }
-
- /*
- * Even this is an async host command, the command
- * will always report success from uCode
- * So once driver can placing the command into the queue
- * successfully, driver can use priv->tx_power_user_lmt
- * to reflect the current tx power
- */
- return ret;
-}
-EXPORT_SYMBOL(iwl_set_tx_power);
-
-irqreturn_t iwl_isr_legacy(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- u32 inta_fh;
- unsigned long flags;
- if (!priv)
- return IRQ_NONE;
-
- spin_lock_irqsave(&priv->lock, flags);
+ if (!iwl_is_ready_rf(priv))
+ return -EIO;
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(priv, CSR_INT);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta && !inta_fh) {
- IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0, inta_fh == 0\n");
- goto none;
- }
+ /* scan complete and commit_rxon use the tx_power_next value;
+ * it always needs to be updated with the newest request */
+ priv->tx_power_next = tx_power;
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
+ /* do not set tx power when scanning or channel changing */
+ defer = test_bit(STATUS_SCANNING, &priv->status) ||
+ memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+ if (defer && !force) {
+ IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
+ return 0;
}
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
+ prev_tx_power = priv->tx_power_user_lmt;
+ priv->tx_power_user_lmt = tx_power;
- inta &= ~CSR_INT_BIT_SCD;
+ ret = priv->cfg->ops->lib->send_tx_power(priv);
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta || inta_fh))
- tasklet_schedule(&priv->irq_tasklet);
-
- unplugged:
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if diabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_enable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_NONE;
+ /* if we fail to set tx_power, restore the original tx power */
+ if (ret) {
+ priv->tx_power_user_lmt = prev_tx_power;
+ priv->tx_power_next = prev_tx_power;
+ }
+ return ret;
}
-EXPORT_SYMBOL(iwl_isr_legacy);
void iwl_send_bt_config(struct iwl_priv *priv)
{
@@ -1326,6 +1192,7 @@ void iwl_send_bt_config(struct iwl_priv *priv)
else
bt_cmd.flags = BT_COEX_ENABLE;
+ priv->bt_enable_flag = bt_cmd.flags;
IWL_DEBUG_INFO(priv, "BT coex %s\n",
(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
@@ -1333,7 +1200,6 @@ void iwl_send_bt_config(struct iwl_priv *priv)
sizeof(struct iwl_bt_cmd), &bt_cmd))
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
-EXPORT_SYMBOL(iwl_send_bt_config);
int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
{
@@ -1351,46 +1217,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
}
-EXPORT_SYMBOL(iwl_send_statistics_request);
-
-void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
- IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
- sleep->pm_sleep_mode, sleep->pm_wakeup_src);
-#endif
-}
-EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
-
-void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
- "notification for %s:\n", len,
- get_cmd_string(pkt->hdr.cmd));
- iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
-}
-EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
-
-void iwl_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
- "seq 0x%04X ser 0x%08X\n",
- le32_to_cpu(pkt->u.err_resp.error_type),
- get_cmd_string(pkt->u.err_resp.cmd_id),
- pkt->u.err_resp.cmd_id,
- le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
- le32_to_cpu(pkt->u.err_resp.error_info));
-}
-EXPORT_SYMBOL(iwl_rx_reply_error);
void iwl_clear_isr_stats(struct iwl_priv *priv)
{
@@ -1442,7 +1268,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
}
-EXPORT_SYMBOL(iwl_mac_conf_tx);
int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
@@ -1450,320 +1275,52 @@ int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
return priv->ibss_manager == IWL_IBSS_MANAGER;
}
-EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);
-static void iwl_ht_conf(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
+static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- struct ieee80211_sta *sta;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_MAC80211(priv, "enter:\n");
-
- if (!ctx->ht.enabled)
- return;
-
- ctx->ht.protection =
- bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
- ctx->ht.non_gf_sta_present =
- !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
- ht_conf->single_chain_sufficient = false;
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, bss_conf->bssid);
- if (sta) {
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- int maxstreams;
-
- maxstreams = (ht_cap->mcs.tx_params &
- IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
- >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
- maxstreams += 1;
-
- if ((ht_cap->mcs.rx_mask[1] == 0) &&
- (ht_cap->mcs.rx_mask[2] == 0))
- ht_conf->single_chain_sufficient = true;
- if (maxstreams <= 1)
- ht_conf->single_chain_sufficient = true;
- } else {
- /*
- * If at all, this can only happen through a race
- * when the AP disconnects us while we're still
- * setting up the connection, in that case mac80211
- * will soon tell us about that.
- */
- ht_conf->single_chain_sufficient = true;
- }
- rcu_read_unlock();
- break;
- case NL80211_IFTYPE_ADHOC:
- ht_conf->single_chain_sufficient = true;
- break;
- default:
- break;
- }
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
+ iwl_connection_init_rx_config(priv, ctx);
-static inline void iwl_set_no_assoc(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- iwl_led_disassociate(priv);
- /*
- * inform the ucode that there is no longer an
- * association and that no more packets should be
- * sent
- */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ctx->staging.assoc_id = 0;
- iwlcore_commit_rxon(priv, ctx);
+ return iwlcore_commit_rxon(priv, ctx);
}
-static void iwlcore_beacon_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int iwl_setup_interface(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- __le64 timestamp;
- struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
-
- if (!skb)
- return;
-
- IWL_DEBUG_ASSOC(priv, "enter\n");
+ struct ieee80211_vif *vif = ctx->vif;
+ int err;
lockdep_assert_held(&priv->mutex);
- if (!priv->beacon_ctx) {
- IWL_ERR(priv, "update beacon but no beacon context!\n");
- dev_kfree_skb(skb);
- return;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = skb;
-
- timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
- priv->timestamp = le64_to_cpu(timestamp);
-
- IWL_DEBUG_ASSOC(priv, "leave\n");
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return;
- }
-
- priv->cfg->ops->lib->post_associate(priv, priv->beacon_ctx->vif);
-}
-
-void iwl_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
-
- if (!iwl_is_alive(priv))
- return;
-
- mutex_lock(&priv->mutex);
-
- if (changes & BSS_CHANGED_QOS) {
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- ctx->qos_data.qos_active = bss_conf->qos;
- iwl_update_qos(priv, ctx);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- if (changes & BSS_CHANGED_BEACON_ENABLED) {
- /*
- * the add_interface code must make sure we only ever
- * have a single interface that could be beaconing at
- * any time.
- */
- if (vif->bss_conf.enable_beacon)
- priv->beacon_ctx = ctx;
- else
- priv->beacon_ctx = NULL;
- }
-
- if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
- dev_kfree_skb(priv->beacon_skb);
- priv->beacon_skb = ieee80211_beacon_get(hw, vif);
- }
-
- if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
- iwl_send_rxon_timing(priv, ctx);
-
- if (changes & BSS_CHANGED_BSSID) {
- IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
-
- /*
- * If there is currently a HW scan going on in the
- * background then we need to cancel it else the RXON
- * below/in post_associate will fail.
- */
- if (iwl_scan_cancel_timeout(priv, 100)) {
- IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
- IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
- /* mac80211 only sets assoc when in STATION mode */
- if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
- memcpy(ctx->staging.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
-
- /* currently needed in a few places */
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- } else {
- ctx->staging.filter_flags &=
- ~RXON_FILTER_ASSOC_MSK;
- }
-
- }
-
/*
- * This needs to be after setting the BSSID in case
- * mac80211 decides to do both changes at once because
- * it will invoke post_associate.
+ * This variable will be correct only when there's just
+ * a single context, but all code using it is for hardware
+ * that supports only one context.
*/
- if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
- iwlcore_beacon_update(hw, vif);
-
- if (changes & BSS_CHANGED_ERP_PREAMBLE) {
- IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
- bss_conf->use_short_preamble);
- if (bss_conf->use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- }
-
- if (changes & BSS_CHANGED_ERP_CTS_PROT) {
- IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
- if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
- ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
- if (bss_conf->use_cts_prot)
- ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
- else
- ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
- }
-
- if (changes & BSS_CHANGED_BASIC_RATES) {
- /* XXX use this information
- *
- * To do that, remove code from iwl_set_rate() and put something
- * like this here:
- *
- if (A-band)
- ctx->staging.ofdm_basic_rates =
- bss_conf->basic_rates;
- else
- ctx->staging.ofdm_basic_rates =
- bss_conf->basic_rates >> 4;
- ctx->staging.cck_basic_rates =
- bss_conf->basic_rates & 0xF;
- */
- }
-
- if (changes & BSS_CHANGED_HT) {
- iwl_ht_conf(priv, vif);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- if (changes & BSS_CHANGED_ASSOC) {
- IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
- if (bss_conf->assoc) {
- priv->timestamp = bss_conf->timestamp;
-
- iwl_led_associate(priv);
-
- if (!iwl_is_rfkill(priv))
- priv->cfg->ops->lib->post_associate(priv, vif);
- } else
- iwl_set_no_assoc(priv, vif);
- }
-
- if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
- IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
- changes);
- ret = iwl_send_rxon_assoc(priv, ctx);
- if (!ret) {
- /* Sync active_rxon with latest change. */
- memcpy((void *)&ctx->active,
- &ctx->staging,
- sizeof(struct iwl_rxon_cmd));
- }
- }
+ priv->iw_mode = vif->type;
- if (changes & BSS_CHANGED_BEACON_ENABLED) {
- if (vif->bss_conf.enable_beacon) {
- memcpy(ctx->staging.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- iwl_led_associate(priv);
- iwlcore_config_ap(priv, vif);
- } else
- iwl_set_no_assoc(priv, vif);
- }
+ ctx->is_active = true;
- if (changes & BSS_CHANGED_IBSS) {
- ret = priv->cfg->ops->lib->manage_ibss_station(priv, vif,
- bss_conf->ibss_joined);
- if (ret)
- IWL_ERR(priv, "failed to %s IBSS station %pM\n",
- bss_conf->ibss_joined ? "add" : "remove",
- bss_conf->bssid);
+ err = iwl_set_mode(priv, ctx);
+ if (err) {
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ return err;
}
- if (changes & BSS_CHANGED_IDLE &&
- priv->cfg->ops->hcmd->set_pan_params) {
- if (priv->cfg->ops->hcmd->set_pan_params(priv))
- IWL_ERR(priv, "failed to update PAN params\n");
+ if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ /*
+ * pretend to have high BT traffic as long as we
+ * are operating in IBSS mode, as this will cause
+ * the rate scaling etc. to behave as intended.
+ */
+ priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
}
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_bss_info_changed);
-
-static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
- iwl_connection_init_rx_config(priv, ctx);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- return iwlcore_commit_rxon(priv, ctx);
+ return 0;
}
int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
@@ -1771,10 +1328,11 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct iwl_priv *priv = hw->priv;
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
struct iwl_rxon_context *tmp, *ctx = NULL;
- int err = 0;
+ int err;
+ enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
- vif->type, vif->addr);
+ viftype, vif->addr);
mutex_lock(&priv->mutex);
@@ -1798,7 +1356,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
continue;
}
- if (!(possible_modes & BIT(vif->type)))
+ if (!(possible_modes & BIT(viftype)))
continue;
/* have maybe usable context w/o interface */
@@ -1813,36 +1371,11 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
vif_priv->ctx = ctx;
ctx->vif = vif;
- /*
- * This variable will be correct only when there's just
- * a single context, but all code using it is for hardware
- * that supports only one context.
- */
- priv->iw_mode = vif->type;
-
- ctx->is_active = true;
-
- err = iwl_set_mode(priv, vif);
- if (err) {
- if (!ctx->always_active)
- ctx->is_active = false;
- goto out_err;
- }
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
- vif->type == NL80211_IFTYPE_ADHOC) {
- /*
- * pretend to have high BT traffic as long as we
- * are operating in IBSS mode, as this will cause
- * the rate scaling etc. to behave as intended.
- */
- priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
- }
-
- goto out;
+ err = iwl_setup_interface(priv, ctx);
+ if (!err)
+ goto out;
- out_err:
ctx->vif = NULL;
priv->iw_mode = NL80211_IFTYPE_STATION;
out:
@@ -1851,29 +1384,25 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
}
-EXPORT_SYMBOL(iwl_mac_add_interface);
-void iwl_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void iwl_teardown_interface(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ bool mode_change)
{
- struct iwl_priv *priv = hw->priv;
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->mutex);
-
- WARN_ON(ctx->vif != vif);
- ctx->vif = NULL;
+ lockdep_assert_held(&priv->mutex);
if (priv->scan_vif == vif) {
iwl_scan_cancel_timeout(priv, 200);
iwl_force_scan_end(priv);
}
- iwl_set_mode(priv, vif);
- if (!ctx->always_active)
- ctx->is_active = false;
+ if (!mode_change) {
+ iwl_set_mode(priv, ctx);
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ }
/*
* When removing the IBSS interface, overwrite the
@@ -1883,211 +1412,30 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
* both values are the same and zero.
*/
if (vif->type == NL80211_IFTYPE_ADHOC)
- priv->bt_traffic_load = priv->notif_bt_traffic_load;
-
- memset(priv->bssid, 0, ETH_ALEN);
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
+ priv->bt_traffic_load = priv->last_bt_traffic_load;
}
-EXPORT_SYMBOL(iwl_mac_remove_interface);
-
-/**
- * iwl_mac_config - mac80211 config callback
- */
-int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_channel *channel = conf->channel;
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- struct iwl_rxon_context *ctx;
- unsigned long flags = 0;
- int ret = 0;
- u16 ch;
- int scan_active = 0;
- mutex_lock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
- channel->hw_value, changed);
-
- if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
- test_bit(STATUS_SCANNING, &priv->status))) {
- scan_active = 1;
- IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_SMPS |
- IEEE80211_CONF_CHANGE_CHANNEL)) {
- /* mac80211 uses static for non-HT which is what we want */
- priv->current_ht_config.smps = conf->smps_mode;
-
- /*
- * Recalculate chain counts.
- *
- * If monitor mode is enabled then mac80211 will
- * set up the SM PS mode to OFF if an HT channel is
- * configured.
- */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- for_each_context(priv, ctx)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- /* during scanning mac80211 will delay channel setting until
- * scan finish with changed = 0
- */
- if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
- if (scan_active)
- goto set_ch_out;
-
- ch = channel->hw_value;
- ch_info = iwl_get_channel_info(priv, channel->band, ch);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
- ret = -EINVAL;
- goto set_ch_out;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for_each_context(priv, ctx) {
- /* Configure HT40 channels */
- ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- /*
- * Default to no protection. Protection mode will
- * later be set from BSS config in iwl_ht_conf
- */
- ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
-
- /* if we are switching from ht to 2.4 clear flags
- * from any ht related info since 2.4 does not
- * support ht */
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_set_rxon_channel(priv, channel, ctx);
- iwl_set_rxon_ht(priv, ht_conf);
-
- iwl_set_flags_for_band(priv, ctx, channel->band,
- ctx->vif);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (priv->cfg->ops->lib->update_bcast_stations)
- ret = priv->cfg->ops->lib->update_bcast_stations(priv);
-
- set_ch_out:
- /* The list of supported rates and rate mask can be different
- * for each band; since the band may have changed, reset
- * the rate mask to what mac80211 lists */
- iwl_set_rate(priv);
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_PS |
- IEEE80211_CONF_CHANGE_IDLE)) {
- ret = iwl_power_update_mode(priv, false);
- if (ret)
- IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
- }
-
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
- priv->tx_power_user_lmt, conf->power_level);
-
- iwl_set_tx_power(priv, conf->power_level, false);
- }
-
- if (!iwl_is_ready(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- goto out;
- }
-
- if (scan_active)
- goto out;
-
- for_each_context(priv, ctx) {
- if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
- iwlcore_commit_rxon(priv, ctx);
- else
- IWL_DEBUG_INFO(priv,
- "Not re-sending same RXON configuration.\n");
- }
-
-out:
- IWL_DEBUG_MAC80211(priv, "leave\n");
- mutex_unlock(&priv->mutex);
- return ret;
-}
-EXPORT_SYMBOL(iwl_mac_config);
-
-void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
+void iwl_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- /* IBSS can only be the IWL_RXON_CTX_BSS context */
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- mutex_lock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "enter\n");
- spin_lock_irqsave(&priv->lock, flags);
- memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
- spin_unlock_irqrestore(&priv->lock, flags);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* new association get rid of ibss beacon skb */
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = NULL;
-
- priv->timestamp = 0;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl_scan_cancel_timeout(priv, 100);
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- mutex_unlock(&priv->mutex);
- return;
- }
+ mutex_lock(&priv->mutex);
- /* we are restarting association process
- * clear RXON_FILTER_ASSOC_MSK bit
- */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwlcore_commit_rxon(priv, ctx);
+ WARN_ON(ctx->vif != vif);
+ ctx->vif = NULL;
- iwl_set_rate(priv);
+ iwl_teardown_interface(priv, vif, false);
+ memset(priv->bssid, 0, ETH_ALEN);
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
+
}
-EXPORT_SYMBOL(iwl_mac_reset_tsf);
int iwl_alloc_txq_mem(struct iwl_priv *priv)
{
@@ -2102,14 +1450,12 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
}
return 0;
}
-EXPORT_SYMBOL(iwl_alloc_txq_mem);
void iwl_free_txq_mem(struct iwl_priv *priv)
{
kfree(priv->txq);
priv->txq = NULL;
}
-EXPORT_SYMBOL(iwl_free_txq_mem);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -2148,7 +1494,6 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
iwl_reset_traffic_log(priv);
return 0;
}
-EXPORT_SYMBOL(iwl_alloc_traffic_mem);
void iwl_free_traffic_mem(struct iwl_priv *priv)
{
@@ -2158,7 +1503,6 @@ void iwl_free_traffic_mem(struct iwl_priv *priv)
kfree(priv->rx_traffic);
priv->rx_traffic = NULL;
}
-EXPORT_SYMBOL(iwl_free_traffic_mem);
void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
u16 length, struct ieee80211_hdr *header)
@@ -2183,7 +1527,6 @@ void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
(priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
}
}
-EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame);
void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
u16 length, struct ieee80211_hdr *header)
@@ -2208,7 +1551,6 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
(priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
}
}
-EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame);
const char *get_mgmt_string(int cmd)
{
@@ -2252,7 +1594,6 @@ void iwl_clear_traffic_stats(struct iwl_priv *priv)
{
memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
- priv->led_tpt = 0;
}
/*
@@ -2345,9 +1686,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
stats->data_cnt++;
stats->data_bytes += len;
}
- iwl_leds_background(priv);
}
-EXPORT_SYMBOL(iwl_update_stats);
#endif
static void iwl_force_rf_reset(struct iwl_priv *priv)
@@ -2431,77 +1770,114 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
return 0;
}
-/**
- * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover
- *
- * During normal condition (no queue is stuck), the timer is continually set to
- * execute every monitor_recover_period milliseconds after the last timer
- * expired. When the queue read_ptr is at the same place, the timer is
- * shorten to 100mSecs. This is
- * 1) to reduce the chance that the read_ptr may wrap around (not stuck)
- * 2) to detect the stuck queues quicker before the station and AP can
- * disassociate each other.
- *
- * This function monitors all the tx queues and recover from it if any
- * of the queues are stuck.
- * 1. It first check the cmd queue for stuck conditions. If it is stuck,
- * it will recover by resetting the firmware and return.
- * 2. Then, it checks for station association. If it associates it will check
- * other queues. If any queue is stuck, it will recover by resetting
- * the firmware.
- * Note: It the number of times the queue read_ptr to be at the same place to
- * be MAX_REPEAT+1 in order to consider to be stuck.
- */
+int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+ struct iwl_rxon_context *tmp;
+ u32 interface_modes;
+ int err;
+
+ newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+ mutex_lock(&priv->mutex);
+
+ interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+
+ if (!(interface_modes & BIT(newtype))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (ctx->exclusive_interface_modes & BIT(newtype)) {
+ for_each_context(priv, tmp) {
+ if (ctx == tmp)
+ continue;
+
+ if (!tmp->vif)
+ continue;
+
+ /*
+ * The current mode switch would be exclusive, but
+ * another context is active ... refuse the switch.
+ */
+ err = -EBUSY;
+ goto out;
+ }
+ }
+
+ /* success */
+ iwl_teardown_interface(priv, vif, true);
+ vif->type = newtype;
+ err = iwl_setup_interface(priv, ctx);
+ WARN_ON(err);
+ /*
+ * We've switched internally, but submitting to the
+ * device may have failed for some reason. Mask this
+ * error, because otherwise mac80211 will not switch
+ * (and set the interface type back) and we'll be
+ * out of sync with it.
+ */
+ err = 0;
+
+ out:
+ mutex_unlock(&priv->mutex);
+ return err;
+}
+
/*
- * The maximum number of times the read pointer of the tx queue at the
- * same place without considering to be stuck.
+ * On every watchdog tick we check the (latest) time stamp. If it has not
+ * changed during the timeout period and the queue is not empty, we reset
+ * the firmware.
*/
-#define MAX_REPEAT (2)
static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
{
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
+ struct iwl_tx_queue *txq = &priv->txq[cnt];
+ struct iwl_queue *q = &txq->q;
+ unsigned long timeout;
+ int ret;
- txq = &priv->txq[cnt];
- q = &txq->q;
- /* queue is empty, skip */
- if (q->read_ptr == q->write_ptr)
+ if (q->read_ptr == q->write_ptr) {
+ txq->time_stamp = jiffies;
return 0;
+ }
- if (q->read_ptr == q->last_read_ptr) {
- /* a queue has not been read from last time */
- if (q->repeat_same_read_ptr > MAX_REPEAT) {
- IWL_ERR(priv,
- "queue %d stuck %d time. Fw reload.\n",
- q->id, q->repeat_same_read_ptr);
- q->repeat_same_read_ptr = 0;
- iwl_force_reset(priv, IWL_FW_RESET, false);
- } else {
- q->repeat_same_read_ptr++;
- IWL_DEBUG_RADIO(priv,
- "queue %d, not read %d time\n",
- q->id,
- q->repeat_same_read_ptr);
- mod_timer(&priv->monitor_recover,
- jiffies + msecs_to_jiffies(
- IWL_ONE_HUNDRED_MSECS));
- return 1;
- }
- } else {
- q->last_read_ptr = q->read_ptr;
- q->repeat_same_read_ptr = 0;
+ timeout = txq->time_stamp +
+ msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
+
+ if (time_after(jiffies, timeout)) {
+ IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
+ q->id, priv->cfg->base_params->wd_timeout);
+ ret = iwl_force_reset(priv, IWL_FW_RESET, false);
+ return (ret == -EAGAIN) ? 0 : 1;
}
+
return 0;
}
-void iwl_bg_monitor_recover(unsigned long data)
+/*
+ * Making the watchdog tick a quarter of the timeout ensures we will
+ * discover a hung queue between timeout and 1.25*timeout
+ */
+#define IWL_WD_TICK(timeout) ((timeout) / 4)
+
+/*
+ * Watchdog timer callback: we check each tx queue for a stuck condition;
+ * if a queue is hung we reset the firmware. If everything is fine, just
+ * rearm the timer.
+ */
+void iwl_bg_watchdog(unsigned long data)
{
struct iwl_priv *priv = (struct iwl_priv *)data;
int cnt;
+ unsigned long timeout;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
+ timeout = priv->cfg->base_params->wd_timeout;
+ if (timeout == 0)
+ return;
+
/* monitor and check for stuck cmd queue */
if (iwl_check_stuck_queue(priv, priv->cmd_queue))
return;
@@ -2516,17 +1892,21 @@ void iwl_bg_monitor_recover(unsigned long data)
return;
}
}
- if (priv->cfg->base_params->monitor_recover_period) {
- /*
- * Reschedule the timer to occur in
- * priv->cfg->base_params->monitor_recover_period
- */
- mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies(
- priv->cfg->base_params->monitor_recover_period));
- }
+
+ mod_timer(&priv->watchdog, jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
}
-EXPORT_SYMBOL(iwl_bg_monitor_recover);
+void iwl_setup_watchdog(struct iwl_priv *priv)
+{
+ unsigned int timeout = priv->cfg->base_params->wd_timeout;
+
+ if (timeout)
+ mod_timer(&priv->watchdog,
+ jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+}
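To make the detection bound concrete, a worked example with an illustrative timeout (the real value comes from the per-device base params):

	unsigned int wd_timeout = 2000;			/* ms, illustrative */
	unsigned int tick = IWL_WD_TICK(wd_timeout);	/* 2000 / 4 = 500 ms */

A queue that stalls right after a check is first flagged at the tick where jiffies has moved past time_stamp + wd_timeout, i.e. at most wd_timeout + tick = 2500 ms after its last activity, which is the 1.25*timeout bound mentioned in the comment above.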
/*
* extended beacon time format
@@ -2552,7 +1932,6 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
}
-EXPORT_SYMBOL(iwl_usecs_to_beacons);
/* base is usually what we get from ucode with each received frame,
* the same as HW timer counter counting down
@@ -2580,12 +1959,12 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
return cpu_to_le32(res);
}
-EXPORT_SYMBOL(iwl_add_beacon_time);
#ifdef CONFIG_PM
-int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+int iwl_pci_suspend(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct iwl_priv *priv = pci_get_drvdata(pdev);
/*
@@ -2597,18 +1976,13 @@ int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
*/
iwl_apm_stop(priv);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
-
return 0;
}
-EXPORT_SYMBOL(iwl_pci_suspend);
-int iwl_pci_resume(struct pci_dev *pdev)
+int iwl_pci_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct iwl_priv *priv = pci_get_drvdata(pdev);
- int ret;
bool hw_rfkill = false;
/*
@@ -2617,11 +1991,6 @@ int iwl_pci_resume(struct pci_dev *pdev)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
- pci_set_power_state(pdev, PCI_D0);
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
- pci_restore_state(pdev);
iwl_enable_interrupts(priv);
if (!(iwl_read32(priv, CSR_GP_CNTRL) &
@@ -2637,6 +2006,14 @@ int iwl_pci_resume(struct pci_dev *pdev)
return 0;
}
-EXPORT_SYMBOL(iwl_pci_resume);
+
+const struct dev_pm_ops iwl_pm_ops = {
+ .suspend = iwl_pci_suspend,
+ .resume = iwl_pci_resume,
+ .freeze = iwl_pci_suspend,
+ .thaw = iwl_pci_resume,
+ .poweroff = iwl_pci_suspend,
+ .restore = iwl_pci_resume,
+};
#endif /* CONFIG_PM */
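With the dev_pm_ops table above, the PCI glue no longer needs legacy .suspend/.resume pci_driver callbacks or the manual pci_save_state()/pci_set_power_state() calls removed earlier; the PCI core handles device power state around the generic PM hooks. A hedged sketch (not part of the patch) of how a driver structure would reference the table; the structure name and the omitted fields are illustrative:

static struct pci_driver iwl_example_driver = {
	.name = "iwlagn",
	/* .id_table, .probe and .remove omitted from this sketch */
#ifdef CONFIG_PM
	.driver.pm = &iwl_pm_ops,
#endif
};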
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 64527def059f..b316d833d9a2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -63,6 +63,8 @@
#ifndef __iwl_core_h__
#define __iwl_core_h__
+#include "iwl-dev.h"
+
/************************
* forward declarations *
************************/
@@ -120,6 +122,14 @@ struct iwl_apm_ops {
void (*config)(struct iwl_priv *priv);
};
+struct iwl_isr_ops {
+ irqreturn_t (*isr) (int irq, void *data);
+ void (*free)(struct iwl_priv *priv);
+ int (*alloc)(struct iwl_priv *priv);
+ int (*reset)(struct iwl_priv *priv);
+ void (*disable)(struct iwl_priv *priv);
+};
+
struct iwl_debugfs_ops {
ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
@@ -193,28 +203,16 @@ struct iwl_lib_ops {
/* power */
int (*send_tx_power) (struct iwl_priv *priv);
void (*update_chain_flags)(struct iwl_priv *priv);
- void (*post_associate)(struct iwl_priv *priv,
- struct ieee80211_vif *vif);
- void (*config_ap)(struct iwl_priv *priv, struct ieee80211_vif *vif);
- irqreturn_t (*isr) (int irq, void *data);
+
+ /* isr */
+ struct iwl_isr_ops isr_ops;
/* eeprom operations (as defined in iwl-eeprom.h) */
struct iwl_eeprom_ops eeprom_ops;
/* temperature */
struct iwl_temp_ops temp_ops;
- /* station management */
- int (*manage_ibss_station)(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add);
- int (*update_bcast_stations)(struct iwl_priv *priv);
- /* recover from tx queue stall */
- void (*recover_from_tx_stall)(unsigned long data);
- /* check for plcp health */
- bool (*check_plcp_health)(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
- /* check for ack health */
- bool (*check_ack_health)(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
+
int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
@@ -226,8 +224,6 @@ struct iwl_lib_ops {
struct iwl_led_ops {
int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
- int (*on)(struct iwl_priv *priv);
- int (*off)(struct iwl_priv *priv);
};
/* NIC specific ops */
@@ -235,12 +231,23 @@ struct iwl_nic_ops {
void (*additional_nic_config)(struct iwl_priv *priv);
};
+struct iwl_legacy_ops {
+ void (*post_associate)(struct iwl_priv *priv);
+ void (*config_ap)(struct iwl_priv *priv);
+ /* station management */
+ int (*update_bcast_stations)(struct iwl_priv *priv);
+ int (*manage_ibss_station)(struct iwl_priv *priv,
+ struct ieee80211_vif *vif, bool add);
+};
+
struct iwl_ops {
const struct iwl_lib_ops *lib;
const struct iwl_hcmd_ops *hcmd;
const struct iwl_hcmd_utils_ops *utils;
const struct iwl_led_ops *led;
const struct iwl_nic_ops *nic;
+ const struct iwl_legacy_ops *legacy;
+ const struct ieee80211_ops *ieee80211_ops;
};
struct iwl_mod_params {
@@ -251,6 +258,8 @@ struct iwl_mod_params {
int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
int antenna; /* def: 0 = both antennas (use diversity) */
int restart_fw; /* def: 1 = restart firmware */
+ bool plcp_check; /* def: true = enable plcp health check */
+ bool ack_check; /* def: false = disable ack health check */
};
/*
@@ -266,7 +275,7 @@ struct iwl_mod_params {
* @plcp_delta_threshold: plcp error rate threshold used to trigger
* radio tuning when there is a high receiving plcp error rate
* @chain_noise_scale: default chain noise scale used for gain computation
- * @monitor_recover_period: default timer used to check stuck queues
+ * @wd_timeout: TX queues watchdog timeout
* @temperature_kelvin: temperature report by uCode in kelvin
* @max_event_log_size: size of event log buffer size for ucode event logging
* @tx_power_by_driver: tx power calibration performed by driver
@@ -276,7 +285,10 @@ struct iwl_mod_params {
* sensitivity calibration operation
* @chain_noise_calib_by_driver: driver has the capability to perform
* chain noise calibration operation
-*/
+ * @shadow_reg_enable: HW shadow register support
+ * @no_agg_framecnt_info: uCode does not provide aggregation frame count
+ * information
+ */
struct iwl_base_params {
int eeprom_size;
int num_of_queues; /* def: HW dependent */
@@ -292,20 +304,20 @@ struct iwl_base_params {
u16 led_compensation;
const bool broken_powersave;
int chain_noise_num_beacons;
- const bool supports_idle;
bool adv_thermal_throttle;
bool support_ct_kill_exit;
const bool support_wimax_coexist;
u8 plcp_delta_threshold;
s32 chain_noise_scale;
- /* timer period for monitor the driver queues */
- u32 monitor_recover_period;
+ unsigned int wd_timeout;
bool temperature_kelvin;
u32 max_event_log_size;
const bool tx_power_by_driver;
const bool ucode_tracing;
const bool sensitivity_calib_by_driver;
const bool chain_noise_calib_by_driver;
+ const bool shadow_reg_enable;
+ const bool no_agg_framecnt_info;
};
/*
* @advanced_bt_coexist: support advanced bt coexist
@@ -315,6 +327,7 @@ struct iwl_base_params {
* @agg_time_limit: maximum number of uSec in aggregation
* @ampdu_factor: Maximum A-MPDU length factor
* @ampdu_density: Minimum A-MPDU spacing
+ * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
*/
struct iwl_bt_params {
bool advanced_bt_coexist;
@@ -324,6 +337,8 @@ struct iwl_bt_params {
u16 agg_time_limit;
u8 ampdu_factor;
u8 ampdu_density;
+ bool bt_sco_disable;
+ bool bt_session_2;
};
/*
* @use_rts_for_aggregation: use rts/cts protection for HT traffic
@@ -344,6 +359,11 @@ struct iwl_ht_params {
* @need_dc_calib: need to perform init dc calibration
* @need_temp_offset_calib: need to perform temperature offset calibration
* @scan_antennas: available antenna for scan operation
+ * @led_mode: 0=system default, 1=On(RF On)/Off(RF Off), 2=blinking
+ * @adv_pm: advanced power management
+ * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
+ * @internal_wimax_coex: internal wifi/wimax combo device
+ * @iq_invert: I/Q inversion
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
@@ -389,15 +409,18 @@ struct iwl_cfg {
const bool need_dc_calib; /* if used set to true */
const bool need_temp_offset_calib; /* if used set to true */
u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
- u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
+ enum iwl_led_mode led_mode;
+ const bool adv_pm;
+ const bool rx_with_siso_diversity;
+ const bool internal_wimax_coex;
+ const bool iq_invert;
};
/***************************
* L i b *
***************************/
-struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
- struct ieee80211_ops *hw_ops);
+struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg);
int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params);
int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw);
@@ -420,28 +443,17 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
void iwl_set_rate(struct iwl_priv *priv);
-int iwl_set_decrypted_flag(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u32 decrypt_res,
- struct ieee80211_rx_status *stats);
void iwl_irq_handle_error(struct iwl_priv *priv);
-void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
-void iwl_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
-int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif);
-void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
+int iwl_mac_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p);
int iwl_alloc_txq_mem(struct iwl_priv *priv);
void iwl_free_txq_mem(struct iwl_priv *priv);
-void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags);
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_alloc_traffic_mem(struct iwl_priv *priv);
void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -477,46 +489,21 @@ static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
__le16 fc, u16 len)
{
- struct traffic_stats *stats;
-
- if (is_tx)
- stats = &priv->tx_stats;
- else
- stats = &priv->rx_stats;
-
- if (ieee80211_is_data(fc)) {
- /* data */
- stats->data_bytes += len;
- }
- iwl_leds_background(priv);
}
#endif
-/*****************************************************
- * RX handlers.
- * **************************************************/
-void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
/*****************************************************
* RX
******************************************************/
void iwl_cmd_queue_free(struct iwl_priv *priv);
+void iwl_cmd_queue_unmap(struct iwl_priv *priv);
int iwl_rx_queue_alloc(struct iwl_priv *priv);
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-/* Handlers */
-void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_recover_from_statistics(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
+
void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
-void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/* TX helpers */
@@ -529,6 +516,8 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
+void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
+void iwl_setup_watchdog(struct iwl_priv *priv);
/*****************************************************
* TX power
****************************************************/
@@ -564,6 +553,10 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
struct ieee80211_vif *vif);
void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
+int __must_check iwl_scan_initiate(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum iwl_scan_type scan_type,
+ enum ieee80211_band band);
/* For faster active scanning, scan will move to the next channel if fewer than
* PLCP_QUIET_THRESH packets are heard on this channel within
@@ -598,7 +591,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
/*****************************************************
* PCI *
*****************************************************/
-irqreturn_t iwl_isr_legacy(int irq, void *data);
static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
{
@@ -609,15 +601,23 @@ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
return pci_lnk_ctl;
}
-void iwl_bg_monitor_recover(unsigned long data);
+void iwl_bg_watchdog(unsigned long data);
u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
u32 addon, u32 beacon_interval);
#ifdef CONFIG_PM
-int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
-int iwl_pci_resume(struct pci_dev *pdev);
-#endif /* CONFIG_PM */
+int iwl_pci_suspend(struct device *device);
+int iwl_pci_resume(struct device *device);
+extern const struct dev_pm_ops iwl_pm_ops;
+
+#define IWL_PM_OPS (&iwl_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define IWL_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
/*****************************************************
* Error Handling Debugging
@@ -724,17 +724,23 @@ static inline int iwlcore_commit_rxon(struct iwl_priv *priv,
{
return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
}
-static inline void iwlcore_config_ap(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- priv->cfg->ops->lib->config_ap(priv, vif);
-}
static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
struct iwl_priv *priv, enum ieee80211_band band)
{
return priv->hw->wiphy->bands[band];
}
+static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
+{
+ return priv->cfg->bt_params &&
+ priv->cfg->bt_params->advanced_bt_coexist;
+}
+
+static inline bool iwl_bt_statistics(struct iwl_priv *priv)
+{
+ return priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics;
+}
+
extern bool bt_coex_active;
extern bool bt_siso_mode;
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 2aa15ab13892..f52bc040bcbf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -132,6 +132,8 @@
#define CSR_LED_REG (CSR_BASE+0x094)
#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
+#define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE+0x0A8) /* 6000 and up */
+
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
@@ -288,7 +290,7 @@
/* HW REV */
-#define CSR_HW_REV_TYPE_MSK (0x00000F0)
+#define CSR_HW_REV_TYPE_MSK (0x00001F0)
#define CSR_HW_REV_TYPE_3945 (0x00000D0)
#define CSR_HW_REV_TYPE_4965 (0x0000000)
#define CSR_HW_REV_TYPE_5300 (0x0000020)
@@ -298,9 +300,15 @@
#define CSR_HW_REV_TYPE_1000 (0x0000060)
#define CSR_HW_REV_TYPE_6x00 (0x0000070)
#define CSR_HW_REV_TYPE_6x50 (0x0000080)
-#define CSR_HW_REV_TYPE_6x50g2 (0x0000084)
-#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0)
-#define CSR_HW_REV_TYPE_NONE (0x00000F0)
+#define CSR_HW_REV_TYPE_6150 (0x0000084)
+#define CSR_HW_REV_TYPE_6x05 (0x00000B0)
+#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
+#define CSR_HW_REV_TYPE_2x00 (0x0000100)
+#define CSR_HW_REV_TYPE_200 (0x0000110)
+#define CSR_HW_REV_TYPE_230 (0x0000120)
+#define CSR_HW_REV_TYPE_NONE (0x00001F0)
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
@@ -374,6 +382,8 @@
#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004)
#define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008)
+#define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER (0x00000080)
+
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 0b961a353ff6..ebdea3be3ef9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -120,6 +120,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
/* 0x000000F0 - 0x00000010 */
#define IWL_DL_MACDUMP (1 << 4)
#define IWL_DL_HCMD_DUMP (1 << 5)
+#define IWL_DL_EEPROM (1 << 6)
#define IWL_DL_RADIO (1 << 7)
/* 0x00000F00 - 0x00000100 */
#define IWL_DL_POWER (1 << 8)
@@ -164,6 +165,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
+#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 8fdd4efdb1d3..8842411f1cf3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -207,18 +207,19 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
return ret;
}
-#define BYTE1_MASK 0x000000ff;
-#define BYTE2_MASK 0x0000ffff;
-#define BYTE3_MASK 0x00ffffff;
static ssize_t iwl_dbgfs_sram_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- u32 val;
+ u32 val = 0;
char *buf;
ssize_t ret;
- int i;
+ int i = 0;
+ bool device_format = false;
+ int offset = 0;
+ int len = 0;
int pos = 0;
+ int sram;
struct iwl_priv *priv = file->private_data;
size_t bufsz;
@@ -230,35 +231,62 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
else
priv->dbgfs_sram_len = priv->ucode_data.len;
}
- bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
+ len = priv->dbgfs_sram_len;
+
+ if (len == -4) {
+ device_format = true;
+ len = 4;
+ }
+
+ bufsz = 50 + len * 4;
buf = kmalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
+
pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
- priv->dbgfs_sram_len);
+ len);
pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
priv->dbgfs_sram_offset);
- for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
- val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \
- priv->dbgfs_sram_len - i);
- if (i < 4) {
- switch (i) {
- case 1:
- val &= BYTE1_MASK;
- break;
- case 2:
- val &= BYTE2_MASK;
- break;
- case 3:
- val &= BYTE3_MASK;
- break;
- }
+
+ /* adjust sram address since reads are only on aligned u32 boundaries */
+ offset = priv->dbgfs_sram_offset & 0x3;
+ sram = priv->dbgfs_sram_offset & ~0x3;
+
+ /* read the first u32 from sram */
+ val = iwl_read_targ_mem(priv, sram);
+
+ for (; len; len--) {
+ /* put the address at the start of every line */
+ if (i == 0)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%08X: ", sram + offset);
+
+ if (device_format)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%02x", (val >> (8 * (3 - offset))) & 0xff);
+ else
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%02x ", (val >> (8 * offset)) & 0xff);
+
+ /* if all bytes processed, read the next u32 from sram */
+ if (++offset == 4) {
+ sram += 4;
+ offset = 0;
+ val = iwl_read_targ_mem(priv, sram);
}
- if (!(i % 16))
+
+ /* put in extra spaces and split lines for human readability */
+ if (++i == 16) {
+ i = 0;
pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+ } else if (!(i & 7)) {
+ pos += scnprintf(buf + pos, bufsz - pos, " ");
+ } else if (!(i & 3)) {
+ pos += scnprintf(buf + pos, bufsz - pos, " ");
+ }
}
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ if (i)
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -282,6 +310,9 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
priv->dbgfs_sram_offset = offset;
priv->dbgfs_sram_len = len;
+ } else if (sscanf(buf, "%x", &offset) == 1) {
+ priv->dbgfs_sram_offset = offset;
+ priv->dbgfs_sram_len = -4;
} else {
priv->dbgfs_sram_offset = 0;
priv->dbgfs_sram_len = 0;
@@ -668,29 +699,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char buf[256];
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "allow blinking: %s\n",
- (priv->allow_blinking) ? "True" : "False");
- if (priv->allow_blinking) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "Led blinking rate: %u\n",
- priv->last_blink_rate);
- pos += scnprintf(buf + pos, bufsz - pos,
- "Last blink time: %lu\n",
- priv->last_blink_time);
- }
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -856,7 +864,6 @@ DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(qos);
-DEBUGFS_READ_FILE_OPS(led);
DEBUGFS_READ_FILE_OPS(thermal_throttling);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
@@ -992,11 +999,8 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
" swq_id=%#.2x (ac %d/hwq %d)\n",
cnt, q->read_ptr, q->write_ptr,
!!test_bit(cnt, priv->queue_stopped),
- txq->swq_id,
- txq->swq_id & 0x80 ? txq->swq_id & 3 :
- txq->swq_id,
- txq->swq_id & 0x80 ? (txq->swq_id >> 2) &
- 0x1f : txq->swq_id);
+ txq->swq_id, txq->swq_id & 3,
+ (txq->swq_id >> 2) & 0x1f);
if (cnt >= 4)
continue;
/* for the ACs, display the stop count too */
@@ -1537,32 +1541,26 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
user_buf, count, ppos);
}
-static ssize_t iwl_dbgfs_monitor_period_write(struct file *file,
+static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos) {
struct iwl_priv *priv = file->private_data;
char buf[8];
int buf_size;
- int period;
+ int timeout;
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
- if (sscanf(buf, "%d", &period) != 1)
+ if (sscanf(buf, "%d", &timeout) != 1)
return -EINVAL;
- if (period < 0 || period > IWL_MAX_MONITORING_PERIOD)
- priv->cfg->base_params->monitor_recover_period =
- IWL_DEF_MONITORING_PERIOD;
- else
- priv->cfg->base_params->monitor_recover_period = period;
+ if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
+ timeout = IWL_DEF_WD_TIMEOUT;
- if (priv->cfg->base_params->monitor_recover_period)
- mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies(
- priv->cfg->base_params->monitor_recover_period));
- else
- del_timer_sync(&priv->monitor_recover);
+ priv->cfg->base_params->wd_timeout = timeout;
+ iwl_setup_watchdog(priv);
return count;
}
@@ -1576,16 +1574,22 @@ static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
const size_t bufsz = sizeof(buf);
ssize_t ret;
+ if (!priv->bt_enable_flag) {
+ pos += scnprintf(buf + pos, bufsz - pos, "BT coex disabled\n");
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "BT enable flag: 0x%x\n",
+ priv->bt_enable_flag);
pos += scnprintf(buf + pos, bufsz - pos, "BT in %s mode\n",
priv->bt_full_concurrent ? "full concurrency" : "3-wire");
pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, "
"last traffic notif: %d\n",
- priv->bt_status ? "On" : "Off", priv->notif_bt_traffic_load);
+ priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load);
pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
- "sco_active: %d, kill_ack_mask: %x, "
- "kill_cts_mask: %x\n",
- priv->bt_ch_announce, priv->bt_sco_active,
- priv->kill_ack_mask, priv->kill_cts_mask);
+ "kill_ack_mask: %x, kill_cts_mask: %x\n",
+ priv->bt_ch_announce, priv->kill_ack_mask,
+ priv->kill_cts_mask);
pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
switch (priv->bt_traffic_load) {
@@ -1689,7 +1693,7 @@ DEBUGFS_READ_FILE_OPS(rxon_flags);
DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
-DEBUGFS_WRITE_FILE_OPS(monitor_period);
+DEBUGFS_WRITE_FILE_OPS(wd_timeout);
DEBUGFS_READ_FILE_OPS(bt_traffic);
DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
DEBUGFS_READ_FILE_OPS(reply_tx_error);
@@ -1727,7 +1731,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
if (!priv->cfg->base_params->broken_powersave) {
DEBUGFS_ADD_FILE(sleep_level_override, dir_data,
S_IWUSR | S_IRUSR);
@@ -1761,13 +1764,13 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
if (priv->cfg->base_params->ucode_tracing)
DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
- if (priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics)
+ if (iwl_bt_statistics(priv))
DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(monitor_period, dir_debug, S_IWUSR);
- if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
+ DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
+ if (iwl_advanced_bt_coexist(priv))
DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
if (priv->cfg->base_params->sensitivity_calib_by_driver)
DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
@@ -1785,7 +1788,6 @@ err:
iwl_dbgfs_unregister(priv);
return -ENOMEM;
}
-EXPORT_SYMBOL(iwl_dbgfs_register);
/**
* Remove the debugfs files and directories
@@ -1799,7 +1801,6 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
debugfs_remove_recursive(priv->debugfs_dir);
priv->debugfs_dir = NULL;
}
-EXPORT_SYMBOL(iwl_dbgfs_unregister);
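
For the reworked SRAM dump above: writing "offset,len" to the debugfs file sets both fields, while writing a bare "offset" sets dbgfs_sram_len to -4 and selects the single-word "device format" output. A minimal standalone sketch (not driver code) of the per-byte extraction the new dump loop performs:

/* "val" is a u32 read from an aligned SRAM address, "offset" is the
 * byte index 0..3 within it. */
static u8 example_sram_byte(u32 val, int offset, bool device_format)
{
	if (device_format)			/* high byte first, "%02x"  */
		return (val >> (8 * (3 - offset))) & 0xff;
	return (val >> (8 * offset)) & 0xff;	/* low byte first, "%02x " */
}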
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 70e07fa48405..68b953f2bdc7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -34,6 +34,8 @@
#include <linux/pci.h> /* for struct pci_device_id */
#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
#include <net/ieee80211_radiotap.h>
#include "iwl-eeprom.h"
@@ -41,14 +43,14 @@
#include "iwl-prph.h"
#include "iwl-fh.h"
#include "iwl-debug.h"
-#include "iwl-4965-hw.h"
-#include "iwl-3945-hw.h"
#include "iwl-agn-hw.h"
#include "iwl-led.h"
#include "iwl-power.h"
#include "iwl-agn-rs.h"
#include "iwl-agn-tt.h"
+#define U32_PAD(n) ((4-(n))&0x3)
+
struct iwl_tx_queue;
/* CT-KILL constants */
@@ -129,9 +131,6 @@ struct iwl_queue {
int write_ptr; /* 1-st empty entry (index) host_w*/
int read_ptr; /* last used entry (index) host_r*/
/* use for monitoring and recovering the stuck queue */
- int last_read_ptr; /* storing the last read_ptr */
- /* number of time read_ptr and last_read_ptr are the same */
- u8 repeat_same_read_ptr;
dma_addr_t dma_addr; /* physical addr for BD's */
int n_window; /* safe queue window */
u32 id;
@@ -139,7 +138,7 @@ struct iwl_queue {
* space more than this */
int high_mark; /* high watermark, stop queue if free
* space less than this */
-} __packed;
+};
/* One for each TFD */
struct iwl_tx_info {
@@ -155,6 +154,7 @@ struct iwl_tx_info {
* @meta: array of meta data for each command/tx buffer
* @dma_addr_cmd: physical address of cmd/tx buffer array
* @txb: array of per-TFD driver data
+ * @time_stamp: time (in jiffies) of last read_ptr change
* @need_update: indicates need to update read/write index
* @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
*
@@ -170,6 +170,7 @@ struct iwl_tx_queue {
struct iwl_device_cmd **cmd;
struct iwl_cmd_meta *meta;
struct iwl_tx_info *txb;
+ unsigned long time_stamp;
u8 need_update;
u8 sched_retry;
u8 active;
@@ -508,6 +509,7 @@ struct iwl_station_priv {
atomic_t pending_frames;
bool client;
bool asleep;
+ u8 max_agg_bufsize;
};
/**
@@ -996,7 +998,6 @@ struct reply_agg_tx_error_statistics {
u32 unknown;
};
-#ifdef CONFIG_IWLWIFI_DEBUGFS
/* management statistics */
enum iwl_mgmt_stats {
MANAGEMENT_ASSOC_REQ = 0,
@@ -1027,16 +1028,13 @@ enum iwl_ctrl_stats {
};
struct traffic_stats {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
u32 mgmt[MANAGEMENT_MAX];
u32 ctrl[CONTROL_MAX];
u32 data_cnt;
u64 data_bytes;
-};
-#else
-struct traffic_stats {
- u64 data_bytes;
-};
#endif
+};
/*
* iwl_switch_rxon: "channel switch" structure
@@ -1104,15 +1102,19 @@ struct iwl_event_log {
#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
-/* timer constants use to monitor and recover stuck tx queues in mSecs */
-#define IWL_DEF_MONITORING_PERIOD (1000)
-#define IWL_LONG_MONITORING_PERIOD (5000)
-#define IWL_ONE_HUNDRED_MSECS (100)
-#define IWL_MAX_MONITORING_PERIOD (60000)
+/* TX queue watchdog timeouts in mSecs */
+#define IWL_DEF_WD_TIMEOUT (2000)
+#define IWL_LONG_WD_TIMEOUT (10000)
+#define IWL_MAX_WD_TIMEOUT (120000)
/* BT Antenna Coupling Threshold (dB) */
#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
+/* Firmware reload counter and Timestamp */
+#define IWL_MIN_RELOAD_DURATION 1000 /* 1000 ms */
+#define IWL_MAX_CONTINUE_RELOAD_CNT 4
+
+
enum iwl_reset {
IWL_RF_RESET = 0,
IWL_FW_RESET,
@@ -1141,6 +1143,33 @@ struct iwl_force_reset {
*/
#define IWLAGN_EXT_BEACON_TIME_POS 22
+/**
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly; to wait for a
+ * notification, declare it on the stack and call
+ * iwlagn_init_notification_wait() with appropriate
+ * parameters. Then do whatever will cause the ucode
+ * to notify the driver, and call
+ * iwlagn_wait_notification() to wait for that notification.
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
+ */
+struct iwl_notification_wait {
+ struct list_head list;
+
+ void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt);
+
+ u8 cmd;
+ bool triggered;
+};
+
enum iwl_rxon_context_id {
IWL_RXON_CTX_BSS,
IWL_RXON_CTX_PAN,
@@ -1162,6 +1191,8 @@ struct iwl_rxon_context {
*/
bool always_active, is_active;
+ bool ht_need_multiple_chains;
+
enum iwl_rxon_context_id ctxid;
u32 interface_modes, exclusive_interface_modes;
@@ -1199,6 +1230,12 @@ struct iwl_rxon_context {
} ht;
};
+enum iwl_scan_type {
+ IWL_SCAN_NORMAL,
+ IWL_SCAN_RADIO_RESET,
+ IWL_SCAN_OFFCH_TX,
+};
+
struct iwl_priv {
/* ieee device used by generic ieee processing code */
@@ -1230,12 +1267,16 @@ struct iwl_priv {
/* track IBSS manager (last beacon) status */
u32 ibss_manager;
- /* storing the jiffies when the plcp error rate is received */
- unsigned long plcp_jiffies;
+ /* jiffies when last recovery from statistics was performed */
+ unsigned long rx_statistics_jiffies;
/* force reset */
struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
+ /* firmware reload counter and timestamp */
+ unsigned long reload_jiffies;
+ int reload_count;
+
/* we allocate array of iwl_channel_info for NIC's valid channels.
* Access via channel # using indirect index array */
struct iwl_channel_info *channel_info; /* channel info array */
@@ -1255,7 +1296,7 @@ struct iwl_priv {
enum ieee80211_band scan_band;
struct cfg80211_scan_request *scan_request;
struct ieee80211_vif *scan_vif;
- bool is_internal_short_scan;
+ enum iwl_scan_type scan_type;
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
u8 mgmt_tx_ant;
@@ -1310,11 +1351,6 @@ struct iwl_priv {
struct iwl_init_alive_resp card_alive_init;
struct iwl_alive_resp card_alive;
- unsigned long last_blink_time;
- u8 last_blink_rate;
- u8 allow_blinking;
- u64 led_tpt;
-
u16 active_rate;
u8 start_calib;
@@ -1463,15 +1499,30 @@ struct iwl_priv {
struct iwl_bt_notif_statistics delta_statistics_bt;
struct iwl_bt_notif_statistics max_delta_bt;
#endif
+
+ /* notification wait support */
+ struct list_head notif_waits;
+ spinlock_t notif_wait_lock;
+ wait_queue_head_t notif_waitq;
+
+ /* remain-on-channel offload support */
+ struct ieee80211_channel *hw_roc_channel;
+ struct delayed_work hw_roc_work;
+ enum nl80211_channel_type hw_roc_chantype;
+ int hw_roc_duration;
+
+ struct sk_buff *offchan_tx_skb;
+ int offchan_tx_timeout;
+ struct ieee80211_channel *offchan_tx_chan;
} _agn;
#endif
};
/* bt coex */
+ u8 bt_enable_flag;
u8 bt_status;
- u8 bt_traffic_load, notif_bt_traffic_load;
+ u8 bt_traffic_load, last_bt_traffic_load;
bool bt_ch_announce;
- bool bt_sco_active;
bool bt_full_concurrent;
bool bt_ant_couple_ok;
__le32 kill_ack_mask;
@@ -1480,7 +1531,6 @@ struct iwl_priv {
u16 bt_on_thresh;
u16 bt_duration;
u16 dynamic_frag_thresh;
- u16 dynamic_agg_thresh;
u8 bt_ci_compliance;
struct work_struct bt_traffic_change_work;
@@ -1517,6 +1567,7 @@ struct iwl_priv {
s8 tx_power_user_lmt;
s8 tx_power_device_lmt;
s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
+ s8 tx_power_next;
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1542,10 +1593,14 @@ struct iwl_priv {
struct work_struct run_time_calib_work;
struct timer_list statistics_periodic;
struct timer_list ucode_trace;
- struct timer_list monitor_recover;
+ struct timer_list watchdog;
bool hw_ready;
struct iwl_event_log event_log;
+
+ struct led_classdev led;
+ unsigned long blink_on, blink_off;
+ bool led_registered;
}; /*iwl_priv */
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
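
The iwl_notification_wait comment in the iwl-dev.h hunk above describes a one-shot, stack-allocated wait. A hedged sketch of that calling pattern (the prototypes of iwlagn_init_notification_wait() and iwlagn_wait_notification() are not part of this hunk, so the argument order and the timeout here are assumptions):

static void example_notif_fn(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
{
	/* record whatever the notification carries */
}

static int example_wait_for_notif(struct iwl_priv *priv, u8 cmd_id)
{
	struct iwl_notification_wait wait_entry;	/* lives on the stack */

	iwlagn_init_notification_wait(priv, &wait_entry, cmd_id,
				      example_notif_fn);
	/* ...send the host command that makes the uCode notify us... */
	return iwlagn_wait_notification(priv, &wait_entry, HZ);	/* assumed 1 s timeout */
}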
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 87cd10ff285d..833194a2c639 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -147,7 +147,7 @@ static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
int ret = 0;
- IWL_DEBUG_INFO(priv, "EEPROM signature=0x%08x\n", gp);
+ IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
switch (gp) {
case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
@@ -222,7 +222,6 @@ const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
return &priv->eeprom[offset];
}
-EXPORT_SYMBOL(iwlcore_eeprom_query_addr);
static int iwl_init_otp_access(struct iwl_priv *priv)
{
@@ -354,7 +353,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
*/
valid_addr = next_link_addr;
next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
- IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
+ IWL_DEBUG_EEPROM(priv, "OTP blocks %d addr 0x%x\n",
usedblocks, next_link_addr);
if (iwl_read_otp_word(priv, next_link_addr, &link_value))
return -EINVAL;
@@ -374,7 +373,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
} while (usedblocks <= priv->cfg->base_params->max_ll_items);
/* OTP has no valid blocks */
- IWL_DEBUG_INFO(priv, "OTP has no valid blocks\n");
+ IWL_DEBUG_EEPROM(priv, "OTP has no valid blocks\n");
return -EINVAL;
}
@@ -382,7 +381,6 @@ const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
{
return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset);
}
-EXPORT_SYMBOL(iwl_eeprom_query_addr);
u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
{
@@ -390,7 +388,6 @@ u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
return 0;
return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
}
-EXPORT_SYMBOL(iwl_eeprom_query16);
/**
* iwl_eeprom_init - read EEPROM contents
@@ -414,7 +411,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
return -ENOENT;
/* allocate eeprom */
sz = priv->cfg->base_params->eeprom_size;
- IWL_DEBUG_INFO(priv, "NVM size = %d\n", sz);
+ IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
priv->eeprom = kzalloc(sz, GFP_KERNEL);
if (!priv->eeprom) {
ret = -ENOMEM;
@@ -492,7 +489,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
}
}
- IWL_DEBUG_INFO(priv, "NVM Type: %s, version: 0x%x\n",
+ IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
(priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM",
iwl_eeprom_query16(priv, EEPROM_VERSION));
@@ -509,14 +506,12 @@ err:
alloc_err:
return ret;
}
-EXPORT_SYMBOL(iwl_eeprom_init);
void iwl_eeprom_free(struct iwl_priv *priv)
{
kfree(priv->eeprom);
priv->eeprom = NULL;
}
-EXPORT_SYMBOL(iwl_eeprom_free);
static void iwl_init_band_reference(const struct iwl_priv *priv,
int eep_band, int *eeprom_ch_count,
@@ -594,7 +589,7 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
if (!is_channel_valid(ch_info))
return -1;
- IWL_DEBUG_INFO(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
+ IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
" Ad-Hoc %ssupported\n",
ch_info->channel,
is_channel_a_band(ch_info) ?
@@ -634,11 +629,11 @@ int iwl_init_channel_map(struct iwl_priv *priv)
struct iwl_channel_info *ch_info;
if (priv->channel_count) {
- IWL_DEBUG_INFO(priv, "Channel map already initialized.\n");
+ IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
return 0;
}
- IWL_DEBUG_INFO(priv, "Initializing regulatory info from EEPROM\n");
+ IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
priv->channel_count =
ARRAY_SIZE(iwl_eeprom_band_1) +
@@ -647,7 +642,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
ARRAY_SIZE(iwl_eeprom_band_4) +
ARRAY_SIZE(iwl_eeprom_band_5);
- IWL_DEBUG_INFO(priv, "Parsing data for %d channels.\n", priv->channel_count);
+ IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
+ priv->channel_count);
priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
priv->channel_count, GFP_KERNEL);
@@ -686,7 +682,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
IEEE80211_CHAN_NO_HT40;
if (!(is_channel_valid(ch_info))) {
- IWL_DEBUG_INFO(priv, "Ch. %d Flags %x [%sGHz] - "
+ IWL_DEBUG_EEPROM(priv,
+ "Ch. %d Flags %x [%sGHz] - "
"No traffic\n",
ch_info->channel,
ch_info->flags,
@@ -702,7 +699,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
ch_info->min_power = 0;
- IWL_DEBUG_INFO(priv, "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm):"
+ IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
+ "%s%s%s%s%s%s(0x%02x %ddBm):"
" Ad-Hoc %ssupported\n",
ch_info->channel,
is_channel_a_band(ch_info) ?
@@ -776,7 +774,6 @@ int iwl_init_channel_map(struct iwl_priv *priv)
return 0;
}
-EXPORT_SYMBOL(iwl_init_channel_map);
/*
* iwl_free_channel_map - undo allocations in iwl_init_channel_map
@@ -786,7 +783,6 @@ void iwl_free_channel_map(struct iwl_priv *priv)
kfree(priv->channel_info);
priv->channel_count = 0;
}
-EXPORT_SYMBOL(iwl_free_channel_map);
/**
* iwl_get_channel_info - Find driver's private channel info
@@ -815,4 +811,3 @@ const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
return NULL;
}
-EXPORT_SYMBOL(iwl_get_channel_info);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index d9b590625ae4..98aa8af01192 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -110,9 +110,18 @@ enum {
};
/* SKU Capabilities */
+/* 3945 only */
#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
+/* 5000 and up */
+#define EEPROM_SKU_CAP_BAND_POS (4)
+#define EEPROM_SKU_CAP_BAND_SELECTION \
+ (3 << EEPROM_SKU_CAP_BAND_POS)
+#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
+#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
+#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
+
/* *regulatory* channel data format in eeprom, one for each channel.
* There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
struct iwl_eeprom_channel {
@@ -120,6 +129,17 @@ struct iwl_eeprom_channel {
s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
} __packed;
+enum iwl_eeprom_enhanced_txpwr_flags {
+ IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
+ IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
+ IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
+ IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
+ IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
+ IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
+ IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
+ IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
+};
+
/**
* iwl_eeprom_enhanced_txpwr structure
* This structure presents the enhanced regulatory tx power limit layout
@@ -127,21 +147,23 @@ struct iwl_eeprom_channel {
* Enhanced regulatory tx power portion of eeprom image can be broken down
* into individual structures; each one is 8 bytes in size and contain the
* following information
- * @common: (desc + channel) not used by driver, should _NOT_ be "zero"
+ * @flags: entry flags
+ * @channel: channel number
* @chain_a_max_pwr: chain a max power in 1/2 dBm
* @chain_b_max_pwr: chain b max power in 1/2 dBm
* @chain_c_max_pwr: chain c max power in 1/2 dBm
- * @reserved: not used, should be "zero"
+ * @delta_20_in_40: 20-in-40 deltas (hi/lo)
* @mimo2_max_pwr: mimo2 max power in 1/2 dBm
* @mimo3_max_pwr: mimo3 max power in 1/2 dBm
*
*/
struct iwl_eeprom_enhanced_txpwr {
- __le16 common;
+ u8 flags;
+ u8 channel;
s8 chain_a_max;
s8 chain_b_max;
s8 chain_c_max;
- s8 reserved;
+ u8 delta_20_in_40;
s8 mimo2_max;
s8 mimo3_max;
} __packed;
@@ -186,6 +208,8 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_LINK_CALIBRATION (2*0x67)
#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
#define EEPROM_LINK_OTHERS (2*0x69)
+#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
+#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
/* agn regulatory - indirect access */
#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
@@ -207,59 +231,6 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
-/* 6000 and up regulatory tx power - indirect access */
-/* max. elements per section */
-#define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8)
-#define EEPROM_TXPOWER_COMMON_HT40_INDEX (2)
-
-/**
- * Partition the enhanced tx power portion of eeprom image into
- * 10 sections based on band, modulation, frequency and channel
- *
- * Section 1: all CCK channels
- * Section 2: all 2.4 GHz OFDM (Legacy, HT and HT40 ) channels
- * Section 3: all 5.2 GHz OFDM (Legacy, HT and HT40) channels
- * Section 4: 2.4 GHz 20MHz channels: 1, 2, 10, 11. Both Legacy and HT
- * Section 5: 2.4 GHz 40MHz channels: 1, 2, 6, 7, 9, (_above_)
- * Section 6: 5.2 GHz 20MHz channels: 36, 64, 100, both Legacy and HT
- * Section 7: 5.2 GHz 40MHz channels: 36, 60, 100 (_above_)
- * Section 8: 2.4 GHz channel 13, Both Legacy and HT
- * Section 9: 2.4 GHz channel 140, Both Legacy and HT
- * Section 10: 2.4 GHz 40MHz channels: 132, 44 (_above_)
- */
-/* 2.4 GHz band: CCK */
-#define EEPROM_LB_CCK_20_COMMON ((0xA8)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 8 bytes */
-/* 2.4 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */
-#define EEPROM_LB_OFDM_COMMON ((0xB0)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
-/* 5.2 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */
-#define EEPROM_HB_OFDM_COMMON ((0xC8)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
-/* 2.4GHz band channels:
- * 1Legacy, 1HT, 2Legacy, 2HT, 10Legacy, 10HT, 11Legacy, 11HT */
-#define EEPROM_LB_OFDM_20_BAND ((0xE0)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 64 bytes */
-/* 2.4 GHz band HT40 channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1) */
-#define EEPROM_LB_OFDM_HT40_BAND ((0x120)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 40 bytes */
-/* 5.2GHz band channels: 36Legacy, 36HT, 64Legacy, 64HT, 100Legacy, 100HT */
-#define EEPROM_HB_OFDM_20_BAND ((0x148)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 48 bytes */
-/* 5.2 GHz band HT40 channels: (36,+1) (60,+1) (100,+1) */
-#define EEPROM_HB_OFDM_HT40_BAND ((0x178)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
-/* 2.4 GHz band, channnel 13: Legacy, HT */
-#define EEPROM_LB_OFDM_20_CHANNEL_13 ((0x190)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */
-/* 5.2 GHz band, channnel 140: Legacy, HT */
-#define EEPROM_HB_OFDM_20_CHANNEL_140 ((0x1A0)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */
-/* 5.2 GHz band, HT40 channnels (132,+1) (44,+1) */
-#define EEPROM_HB_OFDM_HT40_BAND_1 ((0x1B0)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */
-
-
/* 5050 Specific */
#define EEPROM_5050_TX_POWER_VERSION (4)
#define EEPROM_5050_EEPROM_VERSION (0x21E)
@@ -276,13 +247,26 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_6050_TX_POWER_VERSION (4)
#define EEPROM_6050_EEPROM_VERSION (0x532)
-/* 6x50g2 Specific */
-#define EEPROM_6050G2_TX_POWER_VERSION (6)
-#define EEPROM_6050G2_EEPROM_VERSION (0x553)
+/* 6150 Specific */
+#define EEPROM_6150_TX_POWER_VERSION (6)
+#define EEPROM_6150_EEPROM_VERSION (0x553)
+
+/* 6x05 Specific */
+#define EEPROM_6005_TX_POWER_VERSION (6)
+#define EEPROM_6005_EEPROM_VERSION (0x709)
+
+/* 6x30 Specific */
+#define EEPROM_6030_TX_POWER_VERSION (6)
+#define EEPROM_6030_EEPROM_VERSION (0x709)
+
+/* 2x00 Specific */
+#define EEPROM_2000_TX_POWER_VERSION (6)
+#define EEPROM_2000_EEPROM_VERSION (0x805)
+
+/* 6x35 Specific */
+#define EEPROM_6035_TX_POWER_VERSION (6)
+#define EEPROM_6035_EEPROM_VERSION (0x753)
-/* 6x00g2 Specific */
-#define EEPROM_6000G2_TX_POWER_VERSION (6)
-#define EEPROM_6000G2_EEPROM_VERSION (0x709)
/* OTP */
/* lower blocks contain EEPROM image and calibration data */
@@ -293,6 +277,7 @@ struct iwl_eeprom_enhanced_txpwr {
#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */
#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */
#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */
+#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */
/* 2.4 GHz */
extern const u8 iwl_eeprom_band_1[14];
@@ -389,6 +374,8 @@ struct iwl_eeprom_calib_info {
#define INDIRECT_CALIBRATION 0x00040000
#define INDIRECT_PROCESS_ADJST 0x00050000
#define INDIRECT_OTHERS 0x00060000
+#define INDIRECT_TXP_LIMIT 0x00070000
+#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
#define INDIRECT_ADDRESS 0x00100000
/* General */
@@ -397,11 +384,10 @@ struct iwl_eeprom_calib_info {
#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
#define EEPROM_VERSION (2*0x44) /* 2 bytes */
-#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */
+#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
-#define EEPROM_3945_M_VERSION (2*0x4A) /* 1 bytes */
#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
@@ -504,6 +490,7 @@ struct iwl_eeprom_ops {
int iwl_eeprom_init(struct iwl_priv *priv);
void iwl_eeprom_free(struct iwl_priv *priv);
int iwl_eeprom_check_version(struct iwl_priv *priv);
+int iwl_eeprom_check_sku(struct iwl_priv *priv);
const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
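
With the reworked iwl_eeprom_enhanced_txpwr layout above, the opaque "common" halfword becomes an explicit flags byte plus a channel number. A minimal sketch (the array pointer and length are placeholders; only the flag tests mirror this patch) of walking such entries:

/* Count valid 5.2 GHz entries in an enhanced tx-power section. */
static int example_count_52g_entries(const struct iwl_eeprom_enhanced_txpwr *txp,
				     int n_entries)
{
	int i, n = 0;

	for (i = 0; i < n_entries; i++) {
		if (!(txp[i].flags & IWL_EEPROM_ENH_TXP_FL_VALID))
			continue;
		if (txp[i].flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G)
			n++;	/* channel number is in txp[i].channel */
	}
	return n;
}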
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index c373b53babea..02499f684683 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -108,12 +108,12 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(REPLY_WIPAN_WEPKEY);
IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
+ IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
default:
return "UNKNOWN";
}
}
-EXPORT_SYMBOL(get_cmd_string);
#define HOST_COMPLETE_TIMEOUT (HZ / 2)
@@ -252,7 +252,6 @@ out:
mutex_unlock(&priv->sync_cmd_mutex);
return ret;
}
-EXPORT_SYMBOL(iwl_send_cmd_sync);
int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
@@ -261,7 +260,6 @@ int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
return iwl_send_cmd_sync(priv, cmd);
}
-EXPORT_SYMBOL(iwl_send_cmd);
int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
{
@@ -273,7 +271,6 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
return iwl_send_cmd_sync(priv, &cmd);
}
-EXPORT_SYMBOL(iwl_send_cmd_pdu);
int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
u8 id, u16 len, const void *data,
@@ -292,4 +289,3 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
return iwl_send_cmd_async(priv, &cmd);
}
-EXPORT_SYMBOL(iwl_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 1aaef70deaec..8821f088ba7f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -44,15 +44,6 @@ static inline struct ieee80211_conf *ieee80211_get_hw_conf(
return &hw->conf;
}
-static inline unsigned long elapsed_jiffies(unsigned long start,
- unsigned long end)
-{
- if (end >= start)
- return end - start;
-
- return end + (MAX_JIFFY_OFFSET - start) + 1;
-}
-
/**
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
@@ -104,42 +95,36 @@ static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
* | | | | | | | |
* | | | | | | +-+-------- AC queue (0-3)
* | | | | | |
- * | +-+-+-+-+------------ HW A-MPDU queue
+ * | +-+-+-+-+------------ HW queue ID
* |
- * +---------------------- indicates agg queue
+ * +---------------------- unused
*/
-static inline u8 iwl_virtual_agg_queue_num(u8 ac, u8 hwq)
+static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
{
BUG_ON(ac > 3); /* only have 2 bits */
- BUG_ON(hwq > 31); /* only have 5 bits */
+ BUG_ON(hwq > 31); /* only use 5 bits */
- return 0x80 | (hwq << 2) | ac;
+ txq->swq_id = (hwq << 2) | ac;
}
-static inline void iwl_wake_queue(struct iwl_priv *priv, u8 queue)
+static inline void iwl_wake_queue(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
{
- u8 ac = queue;
- u8 hwq = queue;
-
- if (queue & 0x80) {
- ac = queue & 3;
- hwq = (queue >> 2) & 0x1f;
- }
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
if (test_and_clear_bit(hwq, priv->queue_stopped))
if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
ieee80211_wake_queue(priv->hw, ac);
}
-static inline void iwl_stop_queue(struct iwl_priv *priv, u8 queue)
+static inline void iwl_stop_queue(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
{
- u8 ac = queue;
- u8 hwq = queue;
-
- if (queue & 0x80) {
- ac = queue & 3;
- hwq = (queue >> 2) & 0x1f;
- }
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
if (!test_and_set_bit(hwq, priv->queue_stopped))
if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
@@ -163,6 +148,12 @@ static inline void iwl_disable_interrupts(struct iwl_priv *priv)
IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
}
+static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
+{
+ IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
+ iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+}
+
static inline void iwl_enable_interrupts(struct iwl_priv *priv)
{
IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
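
The iwl-helpers.h hunk above drops the 0x80 aggregation marker and always packs the AC into bits 0-1 and the HW queue into bits 2-6 of swq_id. A small standalone sketch of the encoding and its inverse, mirroring iwl_set_swq_id()/iwl_wake_queue():

static inline u8 example_pack_swq_id(u8 ac, u8 hwq)
{
	return (hwq << 2) | ac;		/* ac: bits 0-1, hwq: bits 2-6 */
}

static inline void example_unpack_swq_id(u8 swq_id, u8 *ac, u8 *hwq)
{
	*ac  = swq_id & 3;
	*hwq = (swq_id >> 2) & 0x1f;
}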
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 86c2b6fed0c6..d7f2a0bb32c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -45,35 +45,22 @@
/* default: IWL_LED_BLINK(0) using blinking index table */
static int led_mode;
module_param(led_mode, int, S_IRUGO);
-MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), "
- "(default 0)");
-
-
-static const struct {
- u16 tpt; /* Mb/s */
- u8 on_time;
- u8 off_time;
-} blink_tbl[] =
-{
- {300, 25, 25},
- {200, 40, 40},
- {100, 55, 55},
- {70, 65, 65},
- {50, 75, 75},
- {20, 85, 85},
- {10, 95, 95},
- {5, 110, 110},
- {1, 130, 130},
- {0, 167, 167},
- /* SOLID_ON */
- {-1, IWL_LED_SOLID, 0}
+MODULE_PARM_DESC(led_mode, "0=system default, "
+ "1=On(RF On)/Off(RF Off), 2=blinking");
+
+static const struct ieee80211_tpt_blink iwl_blink[] = {
+ { .throughput = 0 * 1024 - 1, .blink_time = 334 },
+ { .throughput = 1 * 1024 - 1, .blink_time = 260 },
+ { .throughput = 5 * 1024 - 1, .blink_time = 220 },
+ { .throughput = 10 * 1024 - 1, .blink_time = 190 },
+ { .throughput = 20 * 1024 - 1, .blink_time = 170 },
+ { .throughput = 50 * 1024 - 1, .blink_time = 150 },
+ { .throughput = 70 * 1024 - 1, .blink_time = 130 },
+ { .throughput = 100 * 1024 - 1, .blink_time = 110 },
+ { .throughput = 200 * 1024 - 1, .blink_time = 80 },
+ { .throughput = 300 * 1024 - 1, .blink_time = 50 },
};
-#define IWL_1MB_RATE (128 * 1024)
-#define IWL_LED_THRESHOLD (16)
-#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */
-#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
-
/*
* Adjust led blink rate to compensate on a MAC Clock difference on every HW
* Led blink rate analysis showed an average deviation of 0% on 3945,
@@ -98,128 +85,102 @@ static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
}
/* Set led pattern command */
-static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx)
+static int iwl_led_cmd(struct iwl_priv *priv,
+ unsigned long on,
+ unsigned long off)
{
struct iwl_led_cmd led_cmd = {
.id = IWL_LED_LINK,
.interval = IWL_DEF_LED_INTRVL
};
+ int ret;
- BUG_ON(idx > IWL_MAX_BLINK_TBL);
+ if (!test_bit(STATUS_READY, &priv->status))
+ return -EBUSY;
- IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n",
+ if (priv->blink_on == on && priv->blink_off == off)
+ return 0;
+
+ IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
priv->cfg->base_params->led_compensation);
- led_cmd.on =
- iwl_blink_compensation(priv, blink_tbl[idx].on_time,
+ led_cmd.on = iwl_blink_compensation(priv, on,
priv->cfg->base_params->led_compensation);
- led_cmd.off =
- iwl_blink_compensation(priv, blink_tbl[idx].off_time,
+ led_cmd.off = iwl_blink_compensation(priv, off,
priv->cfg->base_params->led_compensation);
- return priv->cfg->ops->led->cmd(priv, &led_cmd);
+ ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
+ if (!ret) {
+ priv->blink_on = on;
+ priv->blink_off = off;
+ }
+ return ret;
}
-int iwl_led_start(struct iwl_priv *priv)
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
{
- return priv->cfg->ops->led->on(priv);
-}
-EXPORT_SYMBOL(iwl_led_start);
+ struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+ unsigned long on = 0;
-int iwl_led_associate(struct iwl_priv *priv)
-{
- IWL_DEBUG_LED(priv, "Associated\n");
- if (led_mode == IWL_LED_BLINK)
- priv->allow_blinking = 1;
- priv->last_blink_time = jiffies;
+ if (brightness > 0)
+ on = IWL_LED_SOLID;
- return 0;
+ iwl_led_cmd(priv, on, 0);
}
-int iwl_led_disassociate(struct iwl_priv *priv)
+static int iwl_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
{
- priv->allow_blinking = 0;
-
- return 0;
-}
+ struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
-/*
- * calculate blink rate according to last second Tx/Rx activities
- */
-static int iwl_get_blink_rate(struct iwl_priv *priv)
-{
- int i;
- /* count both tx and rx traffic to be able to
- * handle traffic in either direction
- */
- u64 current_tpt = priv->tx_stats.data_bytes +
- priv->rx_stats.data_bytes;
- s64 tpt = current_tpt - priv->led_tpt;
-
- if (tpt < 0) /* wraparound */
- tpt = -tpt;
-
- IWL_DEBUG_LED(priv, "tpt %lld current_tpt %llu\n",
- (long long)tpt,
- (unsigned long long)current_tpt);
- priv->led_tpt = current_tpt;
-
- if (!priv->allow_blinking)
- i = IWL_MAX_BLINK_TBL;
- else
- for (i = 0; i < IWL_MAX_BLINK_TBL; i++)
- if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
- break;
-
- IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i);
- return i;
+ return iwl_led_cmd(priv, *delay_on, *delay_off);
}
-/*
- * this function called from handler. Since setting Led command can
- * happen very frequent we postpone led command to be called from
- * REPLY handler so we know ucode is up
- */
-void iwl_leds_background(struct iwl_priv *priv)
+void iwl_leds_init(struct iwl_priv *priv)
{
- u8 blink_idx;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- priv->last_blink_time = 0;
- return;
- }
- if (iwl_is_rfkill(priv)) {
- priv->last_blink_time = 0;
- return;
+ int mode = led_mode;
+ int ret;
+
+ if (mode == IWL_LED_DEFAULT)
+ mode = priv->cfg->led_mode;
+
+ priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
+ wiphy_name(priv->hw->wiphy));
+ priv->led.brightness_set = iwl_led_brightness_set;
+ priv->led.blink_set = iwl_led_blink_set;
+ priv->led.max_brightness = 1;
+
+ switch (mode) {
+ case IWL_LED_DEFAULT:
+ WARN_ON(1);
+ break;
+ case IWL_LED_BLINK:
+ priv->led.default_trigger =
+ ieee80211_create_tpt_led_trigger(priv->hw,
+ IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+ iwl_blink, ARRAY_SIZE(iwl_blink));
+ break;
+ case IWL_LED_RF_STATE:
+ priv->led.default_trigger =
+ ieee80211_get_radio_led_name(priv->hw);
+ break;
}
- if (!priv->allow_blinking) {
- priv->last_blink_time = 0;
- if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) {
- priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
- iwl_led_pattern(priv, IWL_SOLID_BLINK_IDX);
- }
+ ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
+ if (ret) {
+ kfree(priv->led.name);
return;
}
- if (!priv->last_blink_time ||
- !time_after(jiffies, priv->last_blink_time +
- msecs_to_jiffies(1000)))
- return;
-
- blink_idx = iwl_get_blink_rate(priv);
-
- /* call only if blink rate change */
- if (blink_idx != priv->last_blink_rate)
- iwl_led_pattern(priv, blink_idx);
- priv->last_blink_time = jiffies;
- priv->last_blink_rate = blink_idx;
+ priv->led_registered = true;
}
-EXPORT_SYMBOL(iwl_leds_background);
-void iwl_leds_init(struct iwl_priv *priv)
+void iwl_leds_exit(struct iwl_priv *priv)
{
- priv->last_blink_rate = 0;
- priv->last_blink_time = 0;
- priv->allow_blinking = 0;
+ if (!priv->led_registered)
+ return;
+
+ led_classdev_unregister(&priv->led);
+ kfree(priv->led.name);
}
-EXPORT_SYMBOL(iwl_leds_init);
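
The new iwl_blink[] table above hands throughput thresholds (in Kbps) and blink periods (in ms) to mac80211's throughput LED trigger instead of the driver computing a blink rate itself. A conceptual sketch (not the driver's or mac80211's actual code) of how such a table maps a throughput sample to a blink period:

static int example_blink_time(int tpt_kbps)
{
	int i, blink_ms = iwl_blink[0].blink_time;

	/* keep the blink time of the last entry whose threshold is below tpt */
	for (i = 0; i < ARRAY_SIZE(iwl_blink); i++) {
		if (tpt_kbps <= iwl_blink[i].throughput)
			break;
		blink_ms = iwl_blink[i].blink_time;
	}
	return blink_ms;
}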
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 49a70baa3fb6..101eef12b3bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -31,36 +31,26 @@
struct iwl_priv;
#define IWL_LED_SOLID 11
-#define IWL_LED_NAME_LEN 31
#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
#define IWL_LED_ACTIVITY (0<<1)
#define IWL_LED_LINK (1<<1)
-enum led_type {
- IWL_LED_TRG_TX,
- IWL_LED_TRG_RX,
- IWL_LED_TRG_ASSOC,
- IWL_LED_TRG_RADIO,
- IWL_LED_TRG_MAX,
-};
-
/*
* LED mode
- * IWL_LED_BLINK: adjust led blink rate based on blink table
+ * IWL_LED_DEFAULT: use device default
* IWL_LED_RF_STATE: turn LED on/off based on RF state
* LED ON = RF ON
* LED OFF = RF OFF
+ * IWL_LED_BLINK: adjust led blink rate based on blink table
*/
enum iwl_led_mode {
- IWL_LED_BLINK,
+ IWL_LED_DEFAULT,
IWL_LED_RF_STATE,
+ IWL_LED_BLINK,
};
void iwl_leds_init(struct iwl_priv *priv);
-void iwl_leds_background(struct iwl_priv *priv);
-int iwl_led_start(struct iwl_priv *priv);
-int iwl_led_associate(struct iwl_priv *priv);
-int iwl_led_disassociate(struct iwl_priv *priv);
+void iwl_leds_exit(struct iwl_priv *priv);
#endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 49d7788937a9..576795e2c75b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -75,6 +75,10 @@ struct iwl_power_vec_entry {
#define NOSLP cpu_to_le16(0), 0, 0
#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
+#define ASLP (IWL_POWER_POWER_SAVE_ENA_MSK | \
+ IWL_POWER_POWER_MANAGEMENT_ENA_MSK | \
+ IWL_POWER_ADVANCE_PM_ENA_MSK)
+#define ASLP_TOUT(T) cpu_to_le32(T)
#define TU_TO_USEC 1024
#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC)
#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
@@ -114,6 +118,52 @@ static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
};
+/* advanced power management */
+/* DTIM 0 - 2 */
+static const struct iwl_power_vec_entry apm_range_0[IWL_POWER_NUM] = {
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
+};
+
+
+/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
+/* DTIM 3 - 10 */
+static const struct iwl_power_vec_entry apm_range_1[IWL_POWER_NUM] = {
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 6, 8, 0xFF), 0}, 2}
+};
+
+/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
+/* DTIM 11 - */
+static const struct iwl_power_vec_entry apm_range_2[IWL_POWER_NUM] = {
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+ {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+ SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
+};
+
static void iwl_static_sleep_cmd(struct iwl_priv *priv,
struct iwl_powertable_cmd *cmd,
enum iwl_power_level lvl, int period)
@@ -124,11 +174,19 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
u8 skip;
u32 slp_itrvl;
- table = range_2;
- if (period <= IWL_DTIM_RANGE_1_MAX)
- table = range_1;
- if (period <= IWL_DTIM_RANGE_0_MAX)
- table = range_0;
+ if (priv->cfg->adv_pm) {
+ table = apm_range_2;
+ if (period <= IWL_DTIM_RANGE_1_MAX)
+ table = apm_range_1;
+ if (period <= IWL_DTIM_RANGE_0_MAX)
+ table = apm_range_0;
+ } else {
+ table = range_2;
+ if (period <= IWL_DTIM_RANGE_1_MAX)
+ table = range_1;
+ if (period <= IWL_DTIM_RANGE_0_MAX)
+ table = range_0;
+ }
BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM);
@@ -163,6 +221,19 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
else
cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
+ if (priv->cfg->base_params->shadow_reg_enable)
+ cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
+ else
+ cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
+
+ if (iwl_advanced_bt_coexist(priv)) {
+ if (!priv->cfg->bt_params->bt_sco_disable)
+ cmd->flags |= IWL_POWER_BT_SCO_ENA;
+ else
+ cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
+ }
+
+
slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
@@ -236,6 +307,18 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
if (priv->power_data.pci_pm)
cmd->flags |= IWL_POWER_PCI_PM_MSK;
+ if (priv->cfg->base_params->shadow_reg_enable)
+ cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
+ else
+ cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
+
+ if (iwl_advanced_bt_coexist(priv)) {
+ if (!priv->cfg->bt_params->bt_sco_disable)
+ cmd->flags |= IWL_POWER_BT_SCO_ENA;
+ else
+ cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
+ }
+
cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms);
cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms);
@@ -263,71 +346,93 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
sizeof(struct iwl_powertable_cmd), cmd);
}
-/* priv->mutex must be held */
-int iwl_power_update_mode(struct iwl_priv *priv, bool force)
+static void iwl_power_build_cmd(struct iwl_priv *priv,
+ struct iwl_powertable_cmd *cmd)
{
- int ret = 0;
bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
- bool update_chains;
- struct iwl_powertable_cmd cmd;
int dtimper;
- /* Don't update the RX chain when chain noise calibration is running */
- update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
- priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
-
dtimper = priv->hw->conf.ps_dtim_period ?: 1;
if (priv->cfg->base_params->broken_powersave)
- iwl_power_sleep_cam_cmd(priv, &cmd);
- else if (priv->cfg->base_params->supports_idle &&
- priv->hw->conf.flags & IEEE80211_CONF_IDLE)
- iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20);
+ iwl_power_sleep_cam_cmd(priv, cmd);
+ else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
+ iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
else if (priv->cfg->ops->lib->tt_ops.lower_power_detection &&
priv->cfg->ops->lib->tt_ops.tt_power_mode &&
priv->cfg->ops->lib->tt_ops.lower_power_detection(priv)) {
/* in thermal throttling low power state */
- iwl_static_sleep_cmd(priv, &cmd,
+ iwl_static_sleep_cmd(priv, cmd,
priv->cfg->ops->lib->tt_ops.tt_power_mode(priv), dtimper);
} else if (!enabled)
- iwl_power_sleep_cam_cmd(priv, &cmd);
+ iwl_power_sleep_cam_cmd(priv, cmd);
else if (priv->power_data.debug_sleep_level_override >= 0)
- iwl_static_sleep_cmd(priv, &cmd,
+ iwl_static_sleep_cmd(priv, cmd,
priv->power_data.debug_sleep_level_override,
dtimper);
else if (no_sleep_autoadjust)
- iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_1, dtimper);
+ iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_1, dtimper);
else
- iwl_power_fill_sleep_cmd(priv, &cmd,
+ iwl_power_fill_sleep_cmd(priv, cmd,
priv->hw->conf.dynamic_ps_timeout,
priv->hw->conf.max_sleep_period);
+}
+
+int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+ bool force)
+{
+ int ret;
+ bool update_chains;
+
+ lockdep_assert_held(&priv->mutex);
+
+ /* Don't update the RX chain when chain noise calibration is running */
+ update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
+ priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
+
+ if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+ return 0;
- if (iwl_is_ready_rf(priv) &&
- (memcmp(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd)) || force)) {
- if (cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
- set_bit(STATUS_POWER_PMI, &priv->status);
-
- ret = iwl_set_power(priv, &cmd);
- if (!ret) {
- if (!(cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
- clear_bit(STATUS_POWER_PMI, &priv->status);
-
- if (priv->cfg->ops->lib->update_chain_flags &&
- update_chains)
- priv->cfg->ops->lib->update_chain_flags(priv);
- else if (priv->cfg->ops->lib->update_chain_flags)
- IWL_DEBUG_POWER(priv,
+ if (!iwl_is_ready_rf(priv))
+ return -EIO;
+
+ /* on scan complete, sleep_cmd_next will be used; keep it updated */
+ memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+ if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
+ IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
+ return 0;
+ }
+
+ if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+ set_bit(STATUS_POWER_PMI, &priv->status);
+
+ ret = iwl_set_power(priv, cmd);
+ if (!ret) {
+ if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+ clear_bit(STATUS_POWER_PMI, &priv->status);
+
+ if (priv->cfg->ops->lib->update_chain_flags && update_chains)
+ priv->cfg->ops->lib->update_chain_flags(priv);
+ else if (priv->cfg->ops->lib->update_chain_flags)
+ IWL_DEBUG_POWER(priv,
"Cannot update the power, chain noise "
"calibration running: %d\n",
priv->chain_noise_data.state);
- memcpy(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd));
- } else
- IWL_ERR(priv, "set power fail, ret = %d", ret);
- }
+
+ memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
+ } else
+ IWL_ERR(priv, "set power fail, ret = %d", ret);
return ret;
}
-EXPORT_SYMBOL(iwl_power_update_mode);
+
+int iwl_power_update_mode(struct iwl_priv *priv, bool force)
+{
+ struct iwl_powertable_cmd cmd;
+
+ iwl_power_build_cmd(priv, &cmd);
+ return iwl_power_set_mode(priv, &cmd, force);
+}
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
@@ -341,4 +446,3 @@ void iwl_power_initialize(struct iwl_priv *priv)
memset(&priv->power_data.sleep_cmd, 0,
sizeof(priv->power_data.sleep_cmd));
}
-EXPORT_SYMBOL(iwl_power_initialize);
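
The hunks above split command construction (iwl_power_build_cmd) from committing it (iwl_power_set_mode), skip redundant commands, cache the latest request in sleep_cmd_next, and defer the commit while a scan is pending. Below is a minimal, self-contained sketch of that build/compare/defer pattern; it is not part of the patch and every identifier in it is illustrative.

/*
 * Illustrative sketch only: compare against the last committed command,
 * cache the newest request, and defer the commit while "scanning".
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct power_cmd { int level; int timeout_ms; };

struct power_state {
	struct power_cmd current;	/* last command sent to the device */
	struct power_cmd next;		/* deferred command, applied post-scan */
	bool scanning;
};

static int send_to_device(const struct power_cmd *cmd)
{
	printf("commit: level=%d timeout=%dms\n", cmd->level, cmd->timeout_ms);
	return 0;
}

static int power_set_mode(struct power_state *st,
			  const struct power_cmd *cmd, bool force)
{
	/* nothing changed and no forced update -> nothing to do */
	if (!force && !memcmp(&st->current, cmd, sizeof(*cmd)))
		return 0;

	/* always remember the latest request for the post-scan commit */
	st->next = *cmd;
	if (st->scanning && !force)
		return 0;	/* defer while scanning */

	if (send_to_device(cmd))
		return -1;
	st->current = *cmd;
	return 0;
}

int main(void)
{
	struct power_state st = { .scanning = true };
	struct power_cmd cmd = { .level = 3, .timeout_ms = 50 };

	power_set_mode(&st, &cmd, false);	/* deferred: scan in progress */
	st.scanning = false;
	power_set_mode(&st, &st.next, false);	/* committed after the scan */
	return 0;
}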
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index df81565a7cc4..fe012032c28c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -41,10 +41,13 @@ enum iwl_power_level {
struct iwl_power_mgr {
struct iwl_powertable_cmd sleep_cmd;
+ struct iwl_powertable_cmd sleep_cmd_next;
int debug_sleep_level_override;
bool pci_pm;
};
+int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+ bool force);
int iwl_power_update_mode(struct iwl_priv *priv, bool force);
void iwl_power_initialize(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 5469655646ae..86f5123bccda 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -83,10 +83,10 @@
#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058)
#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C)
+#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001)
#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
-
#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index f436270ca39a..6f9a2fa04763 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -29,6 +29,7 @@
#include <linux/etherdevice.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "iwl-eeprom.h"
@@ -37,7 +38,15 @@
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
-/************************** RX-FUNCTIONS ****************************/
+#include "iwl-agn-calib.h"
+#include "iwl-agn.h"
+
+/******************************************************************************
+ *
+ * RX path functions
+ *
+ ******************************************************************************/
+
/*
* Rx theory of operation
*
@@ -118,7 +127,6 @@ int iwl_rx_queue_space(const struct iwl_rx_queue *q)
s = 0;
return s;
}
-EXPORT_SYMBOL(iwl_rx_queue_space);
/**
* iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
@@ -134,34 +142,42 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
if (q->need_update == 0)
goto exit_unlock;
- /* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+ if (priv->cfg->base_params->shadow_reg_enable) {
+ /* shadow register enabled */
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
+ } else {
+ /* If power-saving is in use, make sure device is awake */
+ if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n",
- reg);
- iwl_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
- }
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(priv,
+ "Rx queue requesting wakeup,"
+ " GP1 = 0x%x\n", reg);
+ iwl_set_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ goto exit_unlock;
+ }
- q->write_actual = (q->write & ~0x7);
- iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
+ q->write_actual = (q->write & ~0x7);
+ iwl_write_direct32(priv, rx_wrt_ptr_reg,
+ q->write_actual);
- /* Else device is assumed to be awake */
- } else {
- /* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
+ /* Else device is assumed to be awake */
+ } else {
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ iwl_write_direct32(priv, rx_wrt_ptr_reg,
+ q->write_actual);
+ }
}
-
q->need_update = 0;
exit_unlock:
spin_unlock_irqrestore(&q->lock, flags);
}
-EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
@@ -202,10 +218,105 @@ err_rb:
err_bd:
return -ENOMEM;
}
-EXPORT_SYMBOL(iwl_rx_queue_alloc);
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+
+static void iwl_rx_reply_alive(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_alive_resp *palive;
+ struct delayed_work *pwork;
+
+ palive = &pkt->u.alive_frame;
+
+ IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
+ "0x%01X 0x%01X\n",
+ palive->is_valid, palive->ver_type,
+ palive->ver_subtype);
+
+ if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+ IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
+ memcpy(&priv->card_alive_init,
+ &pkt->u.alive_frame,
+ sizeof(struct iwl_init_alive_resp));
+ pwork = &priv->init_alive_start;
+ } else {
+ IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+ memcpy(&priv->card_alive, &pkt->u.alive_frame,
+ sizeof(struct iwl_alive_resp));
+ pwork = &priv->alive_start;
+ }
+
+ /* We delay the ALIVE response by 5ms to
+ * give the HW RF Kill time to activate... */
+ if (palive->is_valid == UCODE_VALID_OK)
+ queue_delayed_work(priv->workqueue, pwork,
+ msecs_to_jiffies(5));
+ else {
+ IWL_WARN(priv, "%s uCode did not respond OK.\n",
+ (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
+ "init" : "runtime");
+ /*
+ * If fail to load init uCode,
+ * let's try to load the init uCode again.
+ * We should not get into this situation, but if it
+ * does happen, we should not move on and loading "runtime"
+ * without proper calibrate the device.
+ */
+ if (palive->ver_subtype == INITIALIZE_SUBTYPE)
+ priv->ucode_type = UCODE_NONE;
+ queue_work(priv->workqueue, &priv->restart);
+ }
+}
+
+static void iwl_rx_reply_error(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
-void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
+ "seq 0x%04X ser 0x%08X\n",
+ le32_to_cpu(pkt->u.err_resp.error_type),
+ get_cmd_string(pkt->u.err_resp.cmd_id),
+ pkt->u.err_resp.cmd_id,
+ le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+ le32_to_cpu(pkt->u.err_resp.error_info));
+}
+
+static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
+ /*
+ * MULTI-FIXME
+ * See iwl_mac_channel_switch.
+ */
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
+
+ if (priv->switch_rxon.switch_in_progress) {
+ if (!le32_to_cpu(csa->status) &&
+ (csa->channel == priv->switch_rxon.channel)) {
+ rxon->channel = csa->channel;
+ ctx->staging.channel = csa->channel;
+ IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+ le16_to_cpu(csa->channel));
+ iwl_chswitch_done(priv, true);
+ } else {
+ IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+ le16_to_cpu(csa->channel));
+ iwl_chswitch_done(priv, false);
+ }
+ }
+}
+
+
+static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -220,48 +331,494 @@ void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
memcpy(&priv->measure_report, report, sizeof(*report));
priv->measurement_status |= MEASUREMENT_READY;
}
-EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
-void iwl_recover_from_statistics(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt)
+static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_DEBUG
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
+ IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
+ sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+
+static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
+ "notification for %s:\n", len,
+ get_cmd_string(pkt->hdr.cmd));
+ iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
+}
+
+static void iwl_rx_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
+ u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+ IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
+ "tsf:0x%.8x%.8x rate:%d\n",
+ status & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf),
+ le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+ priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
+ queue_work(priv->workqueue, &priv->beacon_update);
+}
+
+/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
+#define ACK_CNT_RATIO (50)
+#define BA_TIMEOUT_CNT (5)
+#define BA_TIMEOUT_MAX (16)
+
+/**
+ * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
+ *
+ * When the ACK count ratio is low and the aggregated BA timeout retries exceed
+ * BA_TIMEOUT_MAX, reload the firmware and bring the system back to normal
+ * operation state.
+ */
+static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
+{
+ int actual_delta, expected_delta, ba_timeout_delta;
+ struct statistics_tx *cur, *old;
+
+ if (priv->_agn.agg_tids_count)
+ return true;
+
+ if (iwl_bt_statistics(priv)) {
+ cur = &pkt->u.stats_bt.tx;
+ old = &priv->_agn.statistics_bt.tx;
+ } else {
+ cur = &pkt->u.stats.tx;
+ old = &priv->_agn.statistics.tx;
+ }
+
+ actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
+ le32_to_cpu(old->actual_ack_cnt);
+ expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
+ le32_to_cpu(old->expected_ack_cnt);
+
+ /* Values should not be negative, but we do not trust the firmware */
+ if (actual_delta <= 0 || expected_delta <= 0)
+ return true;
+
+ ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
+ le32_to_cpu(old->agg.ba_timeout);
+
+ if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
+ ba_timeout_delta > BA_TIMEOUT_CNT) {
+ IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
+ actual_delta, expected_delta, ba_timeout_delta);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /*
+ * This is ifdef'ed on DEBUGFS because otherwise the
+ * statistics aren't available. If DEBUGFS is set but
+ * DEBUG is not, these will just compile out.
+ */
+ IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
+ priv->_agn.delta_statistics.tx.rx_detected_cnt);
+ IWL_DEBUG_RADIO(priv,
+ "ack_or_ba_timeout_collision delta %d\n",
+ priv->_agn.delta_statistics.tx.ack_or_ba_timeout_collision);
+#endif
+
+ if (ba_timeout_delta >= BA_TIMEOUT_MAX)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * iwl_good_plcp_health - checks for plcp error.
+ *
+ * When the plcp error rate exceeds the threshold, reset the radio
+ * to improve the throughput.
+ */
+static bool iwl_good_plcp_health(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt, unsigned int msecs)
+{
+ int delta;
+ int threshold = priv->cfg->base_params->plcp_delta_threshold;
+
+ if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
+ IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
+ return true;
+ }
+
+ if (iwl_bt_statistics(priv)) {
+ struct statistics_rx_bt *cur, *old;
+
+ cur = &pkt->u.stats_bt.rx;
+ old = &priv->_agn.statistics_bt.rx;
+
+ delta = le32_to_cpu(cur->ofdm.plcp_err) -
+ le32_to_cpu(old->ofdm.plcp_err) +
+ le32_to_cpu(cur->ofdm_ht.plcp_err) -
+ le32_to_cpu(old->ofdm_ht.plcp_err);
+ } else {
+ struct statistics_rx *cur, *old;
+
+ cur = &pkt->u.stats.rx;
+ old = &priv->_agn.statistics.rx;
+
+ delta = le32_to_cpu(cur->ofdm.plcp_err) -
+ le32_to_cpu(old->ofdm.plcp_err) +
+ le32_to_cpu(cur->ofdm_ht.plcp_err) -
+ le32_to_cpu(old->ofdm_ht.plcp_err);
+ }
+
+ /* Can be negative if the firmware reset the statistics */
+ if (delta <= 0)
+ return true;
+
+ if ((delta * 100 / msecs) > threshold) {
+ IWL_DEBUG_RADIO(priv,
+ "plcp health threshold %u delta %d msecs %u\n",
+ threshold, delta, msecs);
+ return false;
+ }
+
+ return true;
+}
+
+static void iwl_recover_from_statistics(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
+ unsigned int msecs;
+ unsigned long stamp;
+
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (iwl_is_any_associated(priv)) {
- if (priv->cfg->ops->lib->check_ack_health) {
- if (!priv->cfg->ops->lib->check_ack_health(
- priv, pkt)) {
- /*
- * low ack count detected
- * restart Firmware
- */
- IWL_ERR(priv, "low ack count detected, "
- "restart firmware\n");
- if (!iwl_force_reset(priv, IWL_FW_RESET, false))
- return;
- }
+
+ stamp = jiffies;
+ msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
+
+ /* Only gather statistics and update time stamp when not associated */
+ if (!iwl_is_any_associated(priv))
+ goto out;
+
+ /* Do not check/recover when we do not have enough statistics data */
+ if (msecs < 99)
+ return;
+
+ if (mod_params->ack_check && !iwl_good_ack_health(priv, pkt)) {
+ IWL_ERR(priv, "low ack count detected, restart firmware\n");
+ if (!iwl_force_reset(priv, IWL_FW_RESET, false))
+ return;
+ }
+
+ if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt, msecs))
+ iwl_force_reset(priv, IWL_RF_RESET, false);
+
+out:
+ if (iwl_bt_statistics(priv))
+ memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
+ sizeof(priv->_agn.statistics_bt));
+ else
+ memcpy(&priv->_agn.statistics, &pkt->u.stats,
+ sizeof(priv->_agn.statistics));
+
+ priv->rx_statistics_jiffies = stamp;
+}
+
+/* Calculate noise level, based on measurements during network silence just
+ * before the arriving beacon. This measurement can be done only if we know
+ * exactly when to expect beacons, and therefore only when we're associated. */
+static void iwl_rx_calc_noise(struct iwl_priv *priv)
+{
+ struct statistics_rx_non_phy *rx_info;
+ int num_active_rx = 0;
+ int total_silence = 0;
+ int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+ int last_rx_noise;
+
+ if (iwl_bt_statistics(priv))
+ rx_info = &(priv->_agn.statistics_bt.rx.general.common);
+ else
+ rx_info = &(priv->_agn.statistics.rx.general);
+ bcn_silence_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+ bcn_silence_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+ bcn_silence_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+ if (bcn_silence_a) {
+ total_silence += bcn_silence_a;
+ num_active_rx++;
+ }
+ if (bcn_silence_b) {
+ total_silence += bcn_silence_b;
+ num_active_rx++;
+ }
+ if (bcn_silence_c) {
+ total_silence += bcn_silence_c;
+ num_active_rx++;
+ }
+
+ /* Average among active antennas */
+ if (num_active_rx)
+ last_rx_noise = (total_silence / num_active_rx) - 107;
+ else
+ last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
+
+ IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
+ bcn_silence_a, bcn_silence_b, bcn_silence_c,
+ last_rx_noise);
+}
+
+/*
+ * based on the assumption of all statistics counter are in DWORD
+ * FIXME: This function is for debugging, do not deal with
+ * the case of counters roll-over.
+ */
+static void iwl_accumulative_statistics(struct iwl_priv *priv,
+ __le32 *stats)
+{
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ int i, size;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+ struct statistics_general_common *general, *accum_general;
+ struct statistics_tx *tx, *accum_tx;
+
+ if (iwl_bt_statistics(priv)) {
+ prev_stats = (__le32 *)&priv->_agn.statistics_bt;
+ accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
+ size = sizeof(struct iwl_bt_notif_statistics);
+ general = &priv->_agn.statistics_bt.general.common;
+ accum_general = &priv->_agn.accum_statistics_bt.general.common;
+ tx = &priv->_agn.statistics_bt.tx;
+ accum_tx = &priv->_agn.accum_statistics_bt.tx;
+ delta = (u32 *)&priv->_agn.delta_statistics_bt;
+ max_delta = (u32 *)&priv->_agn.max_delta_bt;
+ } else {
+ prev_stats = (__le32 *)&priv->_agn.statistics;
+ accum_stats = (u32 *)&priv->_agn.accum_statistics;
+ size = sizeof(struct iwl_notif_statistics);
+ general = &priv->_agn.statistics.general.common;
+ accum_general = &priv->_agn.accum_statistics.general.common;
+ tx = &priv->_agn.statistics.tx;
+ accum_tx = &priv->_agn.accum_statistics.tx;
+ delta = (u32 *)&priv->_agn.delta_statistics;
+ max_delta = (u32 *)&priv->_agn.max_delta;
+ }
+ for (i = sizeof(__le32); i < size;
+ i += sizeof(__le32), stats++, prev_stats++, delta++,
+ max_delta++, accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta = (le32_to_cpu(*stats) -
+ le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
}
- if (priv->cfg->ops->lib->check_plcp_health) {
- if (!priv->cfg->ops->lib->check_plcp_health(
- priv, pkt)) {
- /*
- * high plcp error detected
- * reset Radio
- */
- iwl_force_reset(priv, IWL_RF_RESET, false);
- }
+ }
+
+ /* reset accumulative statistics for "no-counter" type statistics */
+ accum_general->temperature = general->temperature;
+ accum_general->temperature_m = general->temperature_m;
+ accum_general->ttl_timestamp = general->ttl_timestamp;
+ accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
+ accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
+ accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
+#endif
+}
+
+static void iwl_rx_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ const int reg_recalib_period = 60;
+ int change;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ if (iwl_bt_statistics(priv)) {
+ IWL_DEBUG_RX(priv,
+ "Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct iwl_bt_notif_statistics),
+ le32_to_cpu(pkt->len_n_flags) &
+ FH_RSCSR_FRAME_SIZE_MSK);
+
+ change = ((priv->_agn.statistics_bt.general.common.temperature !=
+ pkt->u.stats_bt.general.common.temperature) ||
+ ((priv->_agn.statistics_bt.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats_bt.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+
+ iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
+ } else {
+ IWL_DEBUG_RX(priv,
+ "Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct iwl_notif_statistics),
+ le32_to_cpu(pkt->len_n_flags) &
+ FH_RSCSR_FRAME_SIZE_MSK);
+
+ change = ((priv->_agn.statistics.general.common.temperature !=
+ pkt->u.stats.general.common.temperature) ||
+ ((priv->_agn.statistics.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats.flag &
+ STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+
+ iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+ }
+
+ iwl_recover_from_statistics(priv, pkt);
+
+ set_bit(STATUS_STATISTICS, &priv->status);
+
+ /* Reschedule the statistics timer to occur in
+ * reg_recalib_period seconds to ensure we get a
+ * thermal update even if the uCode doesn't give
+ * us one */
+ mod_timer(&priv->statistics_periodic, jiffies +
+ msecs_to_jiffies(reg_recalib_period * 1000));
+
+ if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
+ (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
+ iwl_rx_calc_noise(priv);
+ queue_work(priv->workqueue, &priv->run_time_calib_work);
+ }
+ if (priv->cfg->ops->lib->temp_ops.temperature && change)
+ priv->cfg->ops->lib->temp_ops.temperature(priv);
+}
+
+static void iwl_rx_reply_statistics(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ memset(&priv->_agn.accum_statistics, 0,
+ sizeof(struct iwl_notif_statistics));
+ memset(&priv->_agn.delta_statistics, 0,
+ sizeof(struct iwl_notif_statistics));
+ memset(&priv->_agn.max_delta, 0,
+ sizeof(struct iwl_notif_statistics));
+ memset(&priv->_agn.accum_statistics_bt, 0,
+ sizeof(struct iwl_bt_notif_statistics));
+ memset(&priv->_agn.delta_statistics_bt, 0,
+ sizeof(struct iwl_bt_notif_statistics));
+ memset(&priv->_agn.max_delta_bt, 0,
+ sizeof(struct iwl_bt_notif_statistics));
+#endif
+ IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+ }
+ iwl_rx_statistics(priv, rxb);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void iwl_rx_card_state_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+ unsigned long status = priv->status;
+
+ IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_CARD_DISABLED) ?
+ "Reached" : "Not reached");
+
+ if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
+ CT_CARD_DISABLED)) {
+
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ iwl_write_direct32(priv, HBUS_TARG_MBX_C,
+ HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+ if (!(flags & RXON_CARD_DISABLED)) {
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+ iwl_write_direct32(priv, HBUS_TARG_MBX_C,
+ HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
}
+ if (flags & CT_CARD_DISABLED)
+ iwl_tt_enter_ct_kill(priv);
}
+ if (!(flags & CT_CARD_DISABLED))
+ iwl_tt_exit_ct_kill(priv);
+
+ if (flags & HW_CARD_DISABLED)
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+
+ if (!(flags & RXON_CARD_DISABLED))
+ iwl_scan_cancel(priv);
+
+ if ((test_bit(STATUS_RF_KILL_HW, &status) !=
+ test_bit(STATUS_RF_KILL_HW, &priv->status)))
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+ test_bit(STATUS_RF_KILL_HW, &priv->status));
+ else
+ wake_up_interruptible(&priv->wait_command_queue);
+}
+
+static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_missed_beacon_notif *missed_beacon;
+
+ missed_beacon = &pkt->u.missed_beacon;
+ if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+ priv->missed_beacon_threshold) {
+ IWL_DEBUG_CALIB(priv,
+ "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+ le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+ le32_to_cpu(missed_beacon->total_missed_becons),
+ le32_to_cpu(missed_beacon->num_recvd_beacons),
+ le32_to_cpu(missed_beacon->num_expected_beacons));
+ if (!test_bit(STATUS_SCANNING, &priv->status))
+ iwl_init_sensitivity(priv);
+ }
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
+ * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
+static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ priv->_agn.last_phy_res_valid = true;
+ memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
+ sizeof(struct iwl_rx_phy_res));
}
-EXPORT_SYMBOL(iwl_recover_from_statistics);
/*
* returns non-zero if packet should be dropped
*/
-int iwl_set_decrypted_flag(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u32 decrypt_res,
- struct ieee80211_rx_status *stats)
+static int iwl_set_decrypted_flag(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ u32 decrypt_res,
+ struct ieee80211_rx_status *stats)
{
u16 fc = le16_to_cpu(hdr->frame_control);
@@ -306,4 +863,264 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
}
return 0;
}
-EXPORT_SYMBOL(iwl_set_decrypted_flag);
+
+static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
+ struct ieee80211_hdr *hdr,
+ u16 len,
+ u32 ampdu_status,
+ struct iwl_rx_mem_buffer *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!priv->is_open)) {
+ IWL_DEBUG_DROP_LIMIT(priv,
+ "Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ /* In case of HW accelerated crypto and bad decryption, drop */
+ if (!priv->cfg->mod_params->sw_crypto &&
+ iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
+ return;
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IWL_ERR(priv, "dev_alloc_skb failed\n");
+ return;
+ }
+
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+ iwl_update_stats(priv, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(priv->hw, skb);
+ priv->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
+{
+ u32 decrypt_out = 0;
+
+ if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+ RX_RES_STATUS_STATION_FOUND)
+ decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
+ RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+ decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+ /* packet was not encrypted */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_NONE)
+ return decrypt_out;
+
+ /* packet was encrypted with unknown alg */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_ERR)
+ return decrypt_out;
+
+ /* decryption was not done in HW */
+ if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+ RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+ return decrypt_out;
+
+ switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ /* alg is CCM: check MIC only */
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+ /* Bad MIC */
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+ /* Bad TTAK */
+ decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+ break;
+ }
+ /* fall through if TTAK OK */
+ default:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+ break;
+ }
+
+ IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
+ decrypt_in, decrypt_out);
+
+ return decrypt_out;
+}
+
+/* Called for REPLY_RX (legacy ABG frames), or
+ * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
+static void iwl_rx_reply_rx(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_phy_res *phy_res;
+ __le32 rx_pkt_status;
+ struct iwl_rx_mpdu_res_start *amsdu;
+ u32 len;
+ u32 ampdu_status;
+ u32 rate_n_flags;
+
+ /**
+ * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
+ * REPLY_RX: physical layer info is in this buffer
+ * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
+ * command and cached in priv->last_phy_res
+ *
+ * Here we set up local variables depending on which command is
+ * received.
+ */
+ if (pkt->hdr.cmd == REPLY_RX) {
+ phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ + phy_res->cfg_phy_cnt);
+
+ len = le16_to_cpu(phy_res->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt + len);
+ ampdu_status = le32_to_cpu(rx_pkt_status);
+ } else {
+ if (!priv->_agn.last_phy_res_valid) {
+ IWL_ERR(priv, "MPDU frame without cached PHY data\n");
+ return;
+ }
+ phy_res = &priv->_agn.last_phy_res;
+ amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+ len = le16_to_cpu(amsdu->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
+ ampdu_status = iwl_translate_rx_status(priv,
+ le32_to_cpu(rx_pkt_status));
+ }
+
+ if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
+ IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
+ phy_res->cfg_phy_cnt);
+ return;
+ }
+
+ if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
+ le32_to_cpu(rx_pkt_status));
+ return;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+ rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+ rx_status.band);
+ rx_status.rate_idx =
+ iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+ rx_status.flag = 0;
+
+ /* TSF isn't reliable. In order to allow smooth user experience,
+ * this W/A doesn't propagate it to mac80211 */
+ /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+
+ priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+ /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+ rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);
+
+ iwl_dbg_log_rx_data_frame(priv, len, header);
+ IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
+ rx_status.signal, (unsigned long long)rx_status.mactime);
+
+ /*
+ * "antenna number"
+ *
+ * It seems that the antenna field in the phy flags value
+ * is actually a bit field. This is undefined by radiotap;
+ * it wants an actual antenna number, but I always get "7"
+ * for most legacy frames I receive, indicating that the
+ * same frame was received on all three RX chains.
+ *
+ * I think this field should be removed in favor of a
+ * new 802.11n radiotap field "RX chains" that is defined
+ * as a bitmask.
+ */
+ rx_status.antenna =
+ (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+ /* set the preamble flag if appropriate */
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ /* Set up the HT phy flags */
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ rx_status.flag |= RX_FLAG_HT;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ rx_status.flag |= RX_FLAG_40MHZ;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
+
+ iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+ rxb, &rx_status);
+}
+
+/**
+ * iwl_setup_rx_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ */
+void iwl_setup_rx_handlers(struct iwl_priv *priv)
+{
+ void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
+
+ handlers = priv->rx_handlers;
+
+ handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
+ handlers[REPLY_ERROR] = iwl_rx_reply_error;
+ handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
+ handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
+ handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
+ handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif;
+ handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
+
+ /*
+ * The same handler is used both for the REPLY to a discrete
+ * statistics request from the host and for the periodic
+ * statistics notifications (after received beacons) from the uCode.
+ */
+ handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics;
+ handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
+
+ iwl_setup_rx_scan_handlers(priv);
+
+ handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
+ handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif;
+
+ /* Rx handlers */
+ handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
+ handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
+
+ /* block ack */
+ handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
+
+ /* Set up hardware specific Rx handlers */
+ priv->cfg->ops->lib->rx_handler_setup(priv);
+}
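
iwl_setup_rx_handlers() above fills an array of callbacks (priv->rx_handlers) indexed by uCode reply ID, so each incoming notification is routed by a single table lookup. Below is a small stand-alone sketch of that dispatch-table idea; it is not driver code and all identifiers in it are made up.

/*
 * Illustrative sketch only: register handlers per reply ID and
 * dispatch incoming buffers through the table.
 */
#include <stdio.h>

#define REPLY_MAX 8

struct rx_buf { int cmd; const char *payload; };

typedef void (*rx_handler_t)(const struct rx_buf *rxb);

static void handle_alive(const struct rx_buf *rxb)
{
	printf("alive: %s\n", rxb->payload);
}

static void handle_stats(const struct rx_buf *rxb)
{
	printf("statistics: %s\n", rxb->payload);
}

static rx_handler_t handlers[REPLY_MAX];

static void setup_rx_handlers(void)
{
	handlers[1] = handle_alive;	/* e.g. an "alive" reply ID */
	handlers[5] = handle_stats;	/* e.g. a statistics notification ID */
}

static void dispatch(const struct rx_buf *rxb)
{
	if (rxb->cmd >= 0 && rxb->cmd < REPLY_MAX && handlers[rxb->cmd])
		handlers[rxb->cmd](rxb);
	else
		printf("unhandled notification 0x%x\n", rxb->cmd);
}

int main(void)
{
	struct rx_buf a = { .cmd = 1, .payload = "uCode is up" };
	struct rx_buf b = { .cmd = 5, .payload = "periodic stats" };
	struct rx_buf c = { .cmd = 7, .payload = "no handler registered" };

	setup_rx_handlers();
	dispatch(&a);
	dispatch(&b);
	dispatch(&c);
	return 0;
}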
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 67da31295781..3a4d9e6b0421 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -101,7 +101,7 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
ieee80211_scan_completed(priv->hw, aborted);
}
- priv->is_internal_short_scan = false;
+ priv->scan_type = IWL_SCAN_NORMAL;
priv->scan_vif = NULL;
priv->scan_request = NULL;
}
@@ -155,7 +155,6 @@ int iwl_scan_cancel(struct iwl_priv *priv)
queue_work(priv->workqueue, &priv->abort_scan);
return 0;
}
-EXPORT_SYMBOL(iwl_scan_cancel);
/**
* iwl_scan_cancel_timeout - Cancel any currently executing HW scan
@@ -180,7 +179,6 @@ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
return test_bit(STATUS_SCAN_HW, &priv->status);
}
-EXPORT_SYMBOL(iwl_scan_cancel_timeout);
/* Service response to REPLY_SCAN_CMD (0x80) */
static void iwl_rx_reply_scan(struct iwl_priv *priv,
@@ -252,14 +250,12 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
(priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
- jiffies_to_msecs(elapsed_jiffies
- (priv->scan_start, jiffies)));
+ jiffies_to_msecs(jiffies - priv->scan_start));
queue_work(priv->workqueue, &priv->scan_completed);
if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ iwl_advanced_bt_coexist(priv) &&
priv->bt_status != scan_notif->bt_status) {
if (scan_notif->bt_status) {
/* BT on */
@@ -290,7 +286,6 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
iwl_rx_scan_complete_notif;
}
-EXPORT_SYMBOL(iwl_setup_rx_scan_handlers);
inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
@@ -303,7 +298,6 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
return IWL_ACTIVE_DWELL_TIME_24 +
IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
}
-EXPORT_SYMBOL(iwl_get_active_dwell_time);
u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
@@ -335,7 +329,6 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
return passive;
}
-EXPORT_SYMBOL(iwl_get_passive_dwell_time);
void iwl_init_scan_params(struct iwl_priv *priv)
{
@@ -345,12 +338,11 @@ void iwl_init_scan_params(struct iwl_priv *priv)
if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
}
-EXPORT_SYMBOL(iwl_init_scan_params);
-static int __must_check iwl_scan_initiate(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool internal,
- enum ieee80211_band band)
+int __must_check iwl_scan_initiate(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum iwl_scan_type scan_type,
+ enum ieee80211_band band)
{
int ret;
@@ -378,17 +370,19 @@ static int __must_check iwl_scan_initiate(struct iwl_priv *priv,
}
IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
- internal ? "internal short " : "");
+ scan_type == IWL_SCAN_NORMAL ? "" :
+ scan_type == IWL_SCAN_OFFCH_TX ? "offchan TX " :
+ "internal short ");
set_bit(STATUS_SCANNING, &priv->status);
- priv->is_internal_short_scan = internal;
+ priv->scan_type = scan_type;
priv->scan_start = jiffies;
priv->scan_band = band;
ret = priv->cfg->ops->utils->request_scan(priv, vif);
if (ret) {
clear_bit(STATUS_SCANNING, &priv->status);
- priv->is_internal_short_scan = false;
+ priv->scan_type = IWL_SCAN_NORMAL;
return ret;
}
@@ -413,7 +407,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
if (test_bit(STATUS_SCANNING, &priv->status) &&
- !priv->is_internal_short_scan) {
+ priv->scan_type != IWL_SCAN_NORMAL) {
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
ret = -EAGAIN;
goto out_unlock;
@@ -427,11 +421,11 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
* If an internal scan is in progress, just set
* up the scan_request as per above.
*/
- if (priv->is_internal_short_scan) {
+ if (priv->scan_type != IWL_SCAN_NORMAL) {
IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
ret = 0;
} else
- ret = iwl_scan_initiate(priv, vif, false,
+ ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
req->channels[0]->band);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -441,7 +435,6 @@ out_unlock:
return ret;
}
-EXPORT_SYMBOL(iwl_mac_hw_scan);
/*
 * internal short scan, this function should only be called while associated.
@@ -461,7 +454,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
mutex_lock(&priv->mutex);
- if (priv->is_internal_short_scan == true) {
+ if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
goto unlock;
}
@@ -471,7 +464,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
goto unlock;
}
- if (iwl_scan_initiate(priv, NULL, true, priv->band))
+ if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
unlock:
mutex_unlock(&priv->mutex);
@@ -538,7 +531,6 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
return (u16)len;
}
-EXPORT_SYMBOL(iwl_fill_probe_req);
static void iwl_bg_abort_scan(struct work_struct *work)
{
@@ -559,8 +551,7 @@ static void iwl_bg_scan_completed(struct work_struct *work)
container_of(work, struct iwl_priv, scan_completed);
bool aborted;
- IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
- priv->is_internal_short_scan ? "internal short " : "");
+ IWL_DEBUG_SCAN(priv, "Completed scan.\n");
cancel_delayed_work(&priv->scan_check);
@@ -575,7 +566,13 @@ static void iwl_bg_scan_completed(struct work_struct *work)
goto out_settings;
}
- if (priv->is_internal_short_scan && !aborted) {
+ if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->_agn.offchan_tx_skb) {
+ ieee80211_tx_status_irqsafe(priv->hw,
+ priv->_agn.offchan_tx_skb);
+ priv->_agn.offchan_tx_skb = NULL;
+ }
+
+ if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
int err;
/* Check if mac80211 requested scan during our internal scan */
@@ -583,7 +580,7 @@ static void iwl_bg_scan_completed(struct work_struct *work)
goto out_complete;
/* If so request a new scan */
- err = iwl_scan_initiate(priv, priv->scan_vif, false,
+ err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
priv->scan_request->channels[0]->band);
if (err) {
IWL_DEBUG_SCAN(priv,
@@ -603,13 +600,16 @@ out_settings:
if (!iwl_is_ready_rf(priv))
goto out;
- /* Since setting the TXPOWER may have been deferred while
- * performing the scan, fire one off */
- iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+ /*
+ * We do not commit power settings while scan is pending,
+ * do it now if the settings changed.
+ */
+ iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
+ iwl_set_tx_power(priv, priv->tx_power_next, false);
priv->cfg->ops->utils->post_scan(priv);
- out:
+out:
mutex_unlock(&priv->mutex);
}
@@ -620,7 +620,6 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
}
-EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
{
@@ -634,4 +633,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
mutex_unlock(&priv->mutex);
}
}
-EXPORT_SYMBOL(iwl_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 7c7f7dcb1b1e..bc90a12408a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -107,7 +107,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
/*
* XXX: The MAC address in the command buffer is often changed from
* the original sent to the device. That is, the MAC address
- * written to the command buffer often is not the same MAC adress
+ * written to the command buffer often is not the same MAC address
* read from the command buffer when the command returns. This
* issue has not yet been resolved and this debugging is left to
* observe the problem.
@@ -169,7 +169,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
return ret;
}
-EXPORT_SYMBOL(iwl_send_add_sta);
static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
struct ieee80211_sta *sta,
@@ -316,7 +315,6 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
return sta_id;
}
-EXPORT_SYMBOL_GPL(iwl_prep_station);
#define STA_WAIT_TIMEOUT (HZ/2)
@@ -379,7 +377,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
*sta_id_r = sta_id;
return ret;
}
-EXPORT_SYMBOL(iwl_add_station_common);
/**
* iwl_sta_ucode_deactivate - deactivate ucode status for a station
@@ -400,7 +397,8 @@ static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
}
static int iwl_send_remove_station(struct iwl_priv *priv,
- const u8 *addr, int sta_id)
+ const u8 *addr, int sta_id,
+ bool temporary)
{
struct iwl_rx_packet *pkt;
int ret;
@@ -436,9 +434,11 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
if (!ret) {
switch (pkt->u.rem_sta.status) {
case REM_STA_SUCCESS_MSK:
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- iwl_sta_ucode_deactivate(priv, sta_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ if (!temporary) {
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ iwl_sta_ucode_deactivate(priv, sta_id);
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ }
IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
break;
default:
@@ -505,12 +505,11 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
spin_unlock_irqrestore(&priv->sta_lock, flags);
- return iwl_send_remove_station(priv, addr, sta_id);
+ return iwl_send_remove_station(priv, addr, sta_id, false);
out_err:
spin_unlock_irqrestore(&priv->sta_lock, flags);
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(iwl_remove_station);
/**
* iwl_clear_ucode_stations - clear ucode station table bits
@@ -545,7 +544,6 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
if (!cleared)
IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
}
-EXPORT_SYMBOL(iwl_clear_ucode_stations);
/**
* iwl_restore_stations() - Restore driver known stations to device
@@ -622,7 +620,48 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
else
IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
}
-EXPORT_SYMBOL(iwl_restore_stations);
+
+void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ unsigned long flags;
+ int sta_id = ctx->ap_sta_id;
+ int ret;
+ struct iwl_addsta_cmd sta_cmd;
+ struct iwl_link_quality_cmd lq;
+ bool active;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return;
+ }
+
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
+ sta_cmd.mode = 0;
+ memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
+
+ active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ if (active) {
+ ret = iwl_send_remove_station(
+ priv, priv->stations[sta_id].sta.sta.addr,
+ sta_id, true);
+ if (ret)
+ IWL_ERR(priv, "failed to remove STA %pM (%d)\n",
+ priv->stations[sta_id].sta.sta.addr, ret);
+ }
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ if (ret)
+ IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
+ priv->stations[sta_id].sta.sta.addr, ret);
+ iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
+}
int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
{
@@ -634,7 +673,6 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
return WEP_INVALID_OFFSET;
}
-EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
{
@@ -654,7 +692,6 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
}
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
-EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_stations);
#ifdef CONFIG_IWLWIFI_DEBUG
static void iwl_dump_lq_cmd(struct iwl_priv *priv,
@@ -736,6 +773,14 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
return -EINVAL;
+
+ spin_lock_irqsave(&priv->sta_lock, flags_spin);
+ if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+
iwl_dump_lq_cmd(priv, lq);
BUG_ON(init && (cmd.flags & CMD_ASYNC));
@@ -756,7 +801,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
}
return ret;
}
-EXPORT_SYMBOL(iwl_send_lq_cmd);
int iwl_mac_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -778,4 +822,3 @@ int iwl_mac_sta_remove(struct ieee80211_hw *hw,
mutex_unlock(&priv->mutex);
return ret;
}
-EXPORT_SYMBOL(iwl_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 06475872eee4..206f1e1a0caf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -63,6 +63,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_link_quality_cmd *lq, u8 flags, bool init);
+void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
/**
* iwl_clear_driver_stations - clear knowledge of all stations from driver
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 7261ee49f282..277c9175dcf6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -49,33 +49,58 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
if (txq->need_update == 0)
return;
- /* if we're trying to save power */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* wake up nic if it's powered down ...
- * uCode will wake up, and interrupt us again, so next
- * time we'll skip this part. */
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
- txq_id, reg);
- iwl_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- return;
- }
-
- iwl_write_direct32(priv, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
-
- /* else not in power-save mode, uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx). */
- } else
+ if (priv->cfg->base_params->shadow_reg_enable) {
+ /* shadow register enabled */
iwl_write32(priv, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
+ } else {
+ /* if we're trying to save power */
+ if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ /* wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part. */
+ reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(priv,
+ "Tx queue %d requesting wakeup,"
+ " GP1 = 0x%x\n", txq_id, reg);
+ iwl_set_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ return;
+ }
+
+ iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+ txq->q.write_ptr | (txq_id << 8));
+ /*
+ * else not in power-save mode,
+ * uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ } else
+ iwl_write32(priv, HBUS_TARG_WRPTR,
+ txq->q.write_ptr | (txq_id << 8));
+ }
txq->need_update = 0;
}
-EXPORT_SYMBOL(iwl_txq_update_write_ptr);
+
+/**
+ * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->write_ptr != q->read_ptr) {
+ priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+}
/**
* iwl_tx_queue_free - Deallocate DMA queue.
@@ -88,17 +113,10 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
struct device *dev = &priv->pci_dev->dev;
int i;
- if (q->n_bd == 0)
- return;
-
- /* first, empty all BD's */
- for (; q->write_ptr != q->read_ptr;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+ iwl_tx_queue_unmap(priv, txq_id);
/* De-alloc array of command/tx buffers */
for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
@@ -122,42 +140,35 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
/* 0-fill queue descriptor structure */
memset(txq, 0, sizeof(*txq));
}
-EXPORT_SYMBOL(iwl_tx_queue_free);
/**
- * iwl_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
+ * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
*/
-void iwl_cmd_queue_free(struct iwl_priv *priv)
+void iwl_cmd_queue_unmap(struct iwl_priv *priv)
{
struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
struct iwl_queue *q = &txq->q;
- struct device *dev = &priv->pci_dev->dev;
int i;
bool huge = false;
if (q->n_bd == 0)
return;
- for (; q->read_ptr != q->write_ptr;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ while (q->read_ptr != q->write_ptr) {
/* we have no way to tell if it is a huge cmd ATM */
i = get_cmd_index(q, q->read_ptr, 0);
- if (txq->meta[i].flags & CMD_SIZE_HUGE) {
+ if (txq->meta[i].flags & CMD_SIZE_HUGE)
huge = true;
- continue;
- }
+ else
+ pci_unmap_single(priv->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(&txq->meta[i], mapping),
- dma_unmap_len(&txq->meta[i], len),
- PCI_DMA_BIDIRECTIONAL);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
+
if (huge) {
i = q->n_window;
pci_unmap_single(priv->pci_dev,
@@ -165,6 +176,23 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
dma_unmap_len(&txq->meta[i], len),
PCI_DMA_BIDIRECTIONAL);
}
+}
+
+/**
+ * iwl_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void iwl_cmd_queue_free(struct iwl_priv *priv)
+{
+ struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+ struct device *dev = &priv->pci_dev->dev;
+ int i;
+
+ iwl_cmd_queue_unmap(priv);
/* De-alloc array of command/tx buffers */
for (i = 0; i <= TFD_CMD_SLOTS; i++)
@@ -184,7 +212,6 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
/* 0-fill queue descriptor structure */
memset(txq, 0, sizeof(*txq));
}
-EXPORT_SYMBOL(iwl_cmd_queue_free);
/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
* DMA services
@@ -224,7 +251,6 @@ int iwl_queue_space(const struct iwl_queue *q)
s = 0;
return s;
}
-EXPORT_SYMBOL(iwl_queue_space);
/**
@@ -254,8 +280,6 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
q->high_mark = 2;
q->write_ptr = q->read_ptr = 0;
- q->last_read_ptr = 0;
- q->repeat_same_read_ptr = 0;
return 0;
}
@@ -350,13 +374,12 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
txq->need_update = 0;
/*
- * Aggregation TX queues will get their ID when aggregation begins;
- * they overwrite the setting done here. The command FIFO doesn't
- * need an swq_id so don't set one to catch errors, all others can
- * be set up to the identity mapping.
+ * For the default queues 0-3, set up the swq_id
+ * already -- all others need to get one later
+ * (if they need one at all).
*/
- if (txq_id != priv->cmd_queue)
- txq->swq_id = txq_id;
+ if (txq_id < 4)
+ iwl_set_swq_id(txq, txq_id, txq_id);
/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
* iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
@@ -378,7 +401,6 @@ out_free_arrays:
return -ENOMEM;
}
-EXPORT_SYMBOL(iwl_tx_queue_init);
void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id)
@@ -398,7 +420,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
/* Tell device where to find queue */
priv->cfg->ops->lib->txq_init(priv, txq);
}
-EXPORT_SYMBOL(iwl_tx_queue_reset);
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -635,4 +656,3 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
}
meta->flags = 0;
}
-EXPORT_SYMBOL(iwl_tx_cmd_complete);
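The iwl-tx.c hunks above split TX-queue teardown into an unmap pass (drain every descriptor between read_ptr and write_ptr) followed by the actual free, and let iwl_txq_update_write_ptr() write HBUS_TARG_WRPTR directly when the hardware exposes shadow registers instead of doing the power-save wakeup handshake. A minimal user-space sketch of the ring-drain pattern -- hypothetical names, not the driver code itself:

#include <stdio.h>

struct ring {
        int n_bd;               /* ring size, a power of two in the driver */
        int read_ptr;
        int write_ptr;
};

/* hypothetical stand-in for iwl_queue_inc_wrap() */
static int inc_wrap(int index, int n_bd)
{
        return ++index == n_bd ? 0 : index;
}

/* drain whatever is still queued, mirroring iwl_tx_queue_unmap() above */
static void ring_unmap(struct ring *q)
{
        if (q->n_bd == 0)
                return;
        while (q->write_ptr != q->read_ptr) {
                printf("freeing descriptor %d\n", q->read_ptr);
                q->read_ptr = inc_wrap(q->read_ptr, q->n_bd);
        }
}

int main(void)
{
        struct ring q = { .n_bd = 8, .read_ptr = 6, .write_ptr = 2 };

        ring_unmap(&q);         /* frees 6, 7, 0, 1 and stops at 2 */
        return 0;
}
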
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index c6c0eff9b5ed..ed57e4402800 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -225,7 +225,8 @@ static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
static int iwm_cfg80211_set_default_key(struct wiphy *wiphy,
struct net_device *ndev,
- u8 key_index)
+ u8 key_index, bool unicast,
+ bool multicast)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
@@ -286,7 +287,8 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
return -EINVAL;
}
- freq = ieee80211_channel_to_frequency(umac_bss->channel);
+ freq = ieee80211_channel_to_frequency(umac_bss->channel,
+ band->band);
channel = ieee80211_get_channel(wiphy, freq);
signal = umac_bss->rssi * 100;
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 330c7d9cf101..50dee6a0a5ca 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -908,7 +908,7 @@ int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
return ret;
}
- iwm->scan_id = iwm->scan_id++ % IWM_SCAN_ID_MAX;
+ iwm->scan_id = (iwm->scan_id + 1) % IWM_SCAN_ID_MAX;
return 0;
}
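The one-line commands.c change above replaces "iwm->scan_id = iwm->scan_id++ % IWM_SCAN_ID_MAX;", which modifies scan_id twice without a sequence point (undefined behavior in C) and never actually wraps, with an explicit increment-then-modulus. A trivial stand-alone illustration of the corrected wrap-around (the IWM_SCAN_ID_MAX value is assumed for the example):

#include <stdio.h>

#define IWM_SCAN_ID_MAX 0xff            /* assumed value, illustration only */

int main(void)
{
        unsigned int scan_id = IWM_SCAN_ID_MAX - 1;

        /* same shape as the patched line: increment, then wrap */
        scan_id = (scan_id + 1) % IWM_SCAN_ID_MAX;
        printf("%u\n", scan_id);        /* prints 0 */
        return 0;
}
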
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 13a69ebf2a94..5091d77e02ce 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -126,6 +126,7 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
if (!ndev) {
dev_err(dev, "no memory for network device instance\n");
+ ret = -ENOMEM;
goto out_priv;
}
@@ -138,6 +139,7 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
GFP_KERNEL);
if (!iwm->umac_profile) {
dev_err(dev, "Couldn't alloc memory for profile\n");
+ ret = -ENOMEM;
goto out_profile;
}
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index a944893ae3ca..9a57cf6a488f 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -543,7 +543,10 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
switch (le32_to_cpu(complete->status)) {
case UMAC_ASSOC_COMPLETE_SUCCESS:
chan = ieee80211_get_channel(wiphy,
- ieee80211_channel_to_frequency(complete->channel));
+ ieee80211_channel_to_frequency(complete->channel,
+ complete->band == UMAC_BAND_2GHZ ?
+ IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ));
if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
/* Associated to an unallowed channel, disassociate. */
__iwm_invalidate_mlme_profile(iwm);
@@ -841,7 +844,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
goto err;
}
- freq = ieee80211_channel_to_frequency(umac_bss->channel);
+ freq = ieee80211_channel_to_frequency(umac_bss->channel, band->band);
channel = ieee80211_get_channel(wiphy, freq);
signal = umac_bss->rssi * 100;
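Throughout this series, ieee80211_channel_to_frequency() gains a band argument, because the same channel number can refer to different frequencies depending on the band. A simplified, self-contained restatement of the mapping for illustration only -- the in-kernel helper also covers the 4.9 GHz public-safety channels:

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

/* simplified mapping, for illustration */
static int chan_to_freq(int chan, enum band band)
{
        if (band == BAND_2GHZ)
                return chan == 14 ? 2484 : 2407 + chan * 5;
        return 5000 + chan * 5;
}

int main(void)
{
        /* the same channel number means different frequencies per band,
         * which is why the band argument became mandatory */
        printf("2.4 GHz ch 1  -> %d MHz\n", chan_to_freq(1, BAND_2GHZ));
        printf("5 GHz   ch 36 -> %d MHz\n", chan_to_freq(36, BAND_5GHZ));
        return 0;
}
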
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 373930afc26b..30ef0351bfc4 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -9,8 +9,6 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <asm/unaligned.h>
@@ -609,7 +607,8 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
/* No channel, no luck */
if (chan_no != -1) {
struct wiphy *wiphy = priv->wdev->wiphy;
- int freq = ieee80211_channel_to_frequency(chan_no);
+ int freq = ieee80211_channel_to_frequency(chan_no,
+ IEEE80211_BAND_2GHZ);
struct ieee80211_channel *channel =
ieee80211_get_channel(wiphy, freq);
@@ -619,7 +618,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
print_ssid(ssid_buf, ssid, ssid_len),
LBS_SCAN_RSSI_TO_MBM(rssi)/100);
- if (channel ||
+ if (channel &&
!(channel->flags & IEEE80211_CHAN_DISABLED))
cfg80211_inform_bss(wiphy, channel,
bssid, le64_to_cpu(*(__le64 *)tsfdesc),
@@ -1424,7 +1423,8 @@ static int lbs_cfg_disconnect(struct wiphy *wiphy, struct net_device *dev,
static int lbs_cfg_set_default_key(struct wiphy *wiphy,
struct net_device *netdev,
- u8 key_index)
+ u8 key_index, bool unicast,
+ bool multicast)
{
struct lbs_private *priv = wiphy_priv(wiphy);
@@ -1598,7 +1598,8 @@ static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev,
lbs_deb_enter(LBS_DEB_CFG80211);
survey->channel = ieee80211_get_channel(wiphy,
- ieee80211_channel_to_frequency(priv->channel));
+ ieee80211_channel_to_frequency(priv->channel,
+ IEEE80211_BAND_2GHZ));
ret = lbs_get_rssi(priv, &signal, &noise);
if (ret == 0) {
@@ -2062,7 +2063,7 @@ static void lbs_cfg_set_regulatory_hint(struct lbs_private *priv)
};
/* Section 5.17.2 */
- static struct region_code_mapping regmap[] = {
+ static const struct region_code_mapping regmap[] = {
{"US ", 0x10}, /* US FCC */
{"CA ", 0x20}, /* Canada */
{"EU ", 0x30}, /* ETSI */
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 70745928f3f8..7e8a658b7670 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -145,9 +145,13 @@ int lbs_update_hw_spec(struct lbs_private *priv)
if (priv->current_addr[0] == 0xff)
memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
- memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN);
- if (priv->mesh_dev)
- memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN);
+ if (!priv->copied_hwaddr) {
+ memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN);
+ if (priv->mesh_dev)
+ memcpy(priv->mesh_dev->dev_addr,
+ priv->current_addr, ETH_ALEN);
+ priv->copied_hwaddr = 1;
+ }
out:
lbs_deb_leave(LBS_DEB_CMD);
@@ -177,6 +181,14 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
struct cmd_ds_host_sleep cmd_config;
int ret;
+ /*
+ * Certain firmware versions do not support EHS_REMOVE_WAKEUP command
+ * and the card will return a failure. Since we need to be
+ * able to reset the mask, in those cases we set a 0 mask instead.
+ */
+ if (criteria == EHS_REMOVE_WAKEUP && !priv->ehs_remove_supported)
+ criteria = 0;
+
cmd_config.hdr.size = cpu_to_le16(sizeof(cmd_config));
cmd_config.criteria = cpu_to_le32(criteria);
cmd_config.gpio = priv->wol_gpio;
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index cb14c38caf3a..bc461eb39660 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -90,6 +90,7 @@ struct lbs_private {
void *card;
u8 fw_ready;
u8 surpriseremoved;
+ u8 setup_fw_on_resume;
int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
void (*reset_card) (struct lbs_private *priv);
int (*enter_deep_sleep) (struct lbs_private *priv);
@@ -101,6 +102,7 @@ struct lbs_private {
u32 fwcapinfo;
u16 regioncode;
u8 current_addr[ETH_ALEN];
+ u8 copied_hwaddr;
/* Command download */
u8 dnld_sent;
@@ -138,6 +140,7 @@ struct lbs_private {
uint32_t wol_criteria;
uint8_t wol_gpio;
uint8_t wol_gap;
+ bool ehs_remove_supported;
/* Transmitting */
int tx_pending_len; /* -1 while building packet */
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 5eac1351a021..6cb6935ee4a3 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -387,7 +387,7 @@ struct lbs_offset_value {
struct mrvl_ie_domain_param_set {
struct mrvl_ie_header header;
- u8 country_code[3];
+ u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
struct ieee80211_country_ie_triplet triplet[MAX_11D_TRIPLETS];
} __packed;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index e5685dc317a8..b4de0ca10feb 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -1170,7 +1170,6 @@ static void if_sdio_remove(struct sdio_func *func)
lbs_deb_sdio("call remove card\n");
lbs_stop_card(card->priv);
lbs_remove_card(card->priv);
- card->priv->surpriseremoved = 1;
flush_workqueue(card->workqueue);
destroy_workqueue(card->workqueue);
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 79bcb4e5d2ca..f6c2cd665f49 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -20,10 +20,8 @@
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/jiffies.h>
-#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
-#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/spi/libertas_spi.h>
#include <linux/spi/spi.h>
@@ -34,6 +32,12 @@
#include "dev.h"
#include "if_spi.h"
+struct if_spi_packet {
+ struct list_head list;
+ u16 blen;
+ u8 buffer[0] __attribute__((aligned(4)));
+};
+
struct if_spi_card {
struct spi_device *spi;
struct lbs_private *priv;
@@ -51,18 +55,36 @@ struct if_spi_card {
unsigned long spu_reg_delay;
/* Handles all SPI communication (except for FW load) */
- struct task_struct *spi_thread;
- int run_thread;
-
- /* Used to wake up the spi_thread */
- struct semaphore spi_ready;
- struct semaphore spi_thread_terminated;
+ struct workqueue_struct *workqueue;
+ struct work_struct packet_work;
u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE];
+
+ /* A buffer of incoming packets from libertas core.
+ * Since we can't sleep in hw_host_to_card, we have to buffer
+ * them. */
+ struct list_head cmd_packet_list;
+ struct list_head data_packet_list;
+
+ /* Protects cmd_packet_list and data_packet_list */
+ spinlock_t buffer_lock;
};
static void free_if_spi_card(struct if_spi_card *card)
{
+ struct list_head *cursor, *next;
+ struct if_spi_packet *packet;
+
+ list_for_each_safe(cursor, next, &card->cmd_packet_list) {
+ packet = container_of(cursor, struct if_spi_packet, list);
+ list_del(&packet->list);
+ kfree(packet);
+ }
+ list_for_each_safe(cursor, next, &card->data_packet_list) {
+ packet = container_of(cursor, struct if_spi_packet, list);
+ list_del(&packet->list);
+ kfree(packet);
+ }
spi_set_drvdata(card->spi, NULL);
kfree(card);
}
@@ -622,7 +644,7 @@ out:
/*
* SPI Transfer Thread
*
- * The SPI thread handles all SPI transfers, so there is no need for a lock.
+ * The SPI worker handles all SPI transfers, so there is no need for a lock.
*/
/* Move a command from the card to the host */
@@ -742,6 +764,40 @@ out:
return err;
}
+/* Move data or a command from the host to the card. */
+static void if_spi_h2c(struct if_spi_card *card,
+ struct if_spi_packet *packet, int type)
+{
+ int err = 0;
+ u16 int_type, port_reg;
+
+ switch (type) {
+ case MVMS_DAT:
+ int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
+ port_reg = IF_SPI_DATA_RDWRPORT_REG;
+ break;
+ case MVMS_CMD:
+ int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
+ port_reg = IF_SPI_CMD_RDWRPORT_REG;
+ break;
+ default:
+ lbs_pr_err("can't transfer buffer of type %d\n", type);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Write the data to the card */
+ err = spu_write(card, port_reg, packet->buffer, packet->blen);
+ if (err)
+ goto out;
+
+out:
+ kfree(packet);
+
+ if (err)
+ lbs_pr_err("%s: error %d\n", __func__, err);
+}
+
/* Inform the host about a card event */
static void if_spi_e2h(struct if_spi_card *card)
{
@@ -766,71 +822,88 @@ out:
lbs_pr_err("%s: error %d\n", __func__, err);
}
-static int lbs_spi_thread(void *data)
+static void if_spi_host_to_card_worker(struct work_struct *work)
{
int err;
- struct if_spi_card *card = data;
+ struct if_spi_card *card;
u16 hiStatus;
+ unsigned long flags;
+ struct if_spi_packet *packet;
- while (1) {
- /* Wait to be woken up by one of two things. First, our ISR
- * could tell us that something happened on the WLAN.
- * Secondly, libertas could call hw_host_to_card with more
- * data, which we might be able to send.
- */
- do {
- err = down_interruptible(&card->spi_ready);
- if (!card->run_thread) {
- up(&card->spi_thread_terminated);
- do_exit(0);
- }
- } while (err == EINTR);
+ card = container_of(work, struct if_spi_card, packet_work);
- /* Read the host interrupt status register to see what we
- * can do. */
- err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
- &hiStatus);
- if (err) {
- lbs_pr_err("I/O error\n");
+ lbs_deb_enter(LBS_DEB_SPI);
+
+ /* Read the host interrupt status register to see what we
+ * can do. */
+ err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
+ &hiStatus);
+ if (err) {
+ lbs_pr_err("I/O error\n");
+ goto err;
+ }
+
+ if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
+ err = if_spi_c2h_cmd(card);
+ if (err)
goto err;
- }
+ }
+ if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
+ err = if_spi_c2h_data(card);
+ if (err)
+ goto err;
+ }
- if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
- err = if_spi_c2h_cmd(card);
- if (err)
- goto err;
- }
- if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
- err = if_spi_c2h_data(card);
- if (err)
- goto err;
+ /* workaround: in PS mode, the card does not set the Command
+ * Download Ready bit, but it sets TX Download Ready. */
+ if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
+ (card->priv->psstate != PS_STATE_FULL_POWER &&
+ (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
+ /* This means two things. First of all,
+ * if there was a previous command sent, the card has
+ * successfully received it.
+ * Secondly, it is now ready to download another
+ * command.
+ */
+ lbs_host_to_card_done(card->priv);
+
+ /* Do we have any command packets from the host to
+ * send? */
+ packet = NULL;
+ spin_lock_irqsave(&card->buffer_lock, flags);
+ if (!list_empty(&card->cmd_packet_list)) {
+ packet = (struct if_spi_packet *)(card->
+ cmd_packet_list.next);
+ list_del(&packet->list);
}
+ spin_unlock_irqrestore(&card->buffer_lock, flags);
- /* workaround: in PS mode, the card does not set the Command
- * Download Ready bit, but it sets TX Download Ready. */
- if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
- (card->priv->psstate != PS_STATE_FULL_POWER &&
- (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
- lbs_host_to_card_done(card->priv);
+ if (packet)
+ if_spi_h2c(card, packet, MVMS_CMD);
+ }
+ if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) {
+ /* Do we have any data packets from the host to
+ * send? */
+ packet = NULL;
+ spin_lock_irqsave(&card->buffer_lock, flags);
+ if (!list_empty(&card->data_packet_list)) {
+ packet = (struct if_spi_packet *)(card->
+ data_packet_list.next);
+ list_del(&packet->list);
}
+ spin_unlock_irqrestore(&card->buffer_lock, flags);
- if (hiStatus & IF_SPI_HIST_CARD_EVENT)
- if_spi_e2h(card);
+ if (packet)
+ if_spi_h2c(card, packet, MVMS_DAT);
+ }
+ if (hiStatus & IF_SPI_HIST_CARD_EVENT)
+ if_spi_e2h(card);
err:
- if (err)
- lbs_pr_err("%s: got error %d\n", __func__, err);
- }
-}
+ if (err)
+ lbs_pr_err("%s: got error %d\n", __func__, err);
-/* Block until lbs_spi_thread thread has terminated */
-static void if_spi_terminate_spi_thread(struct if_spi_card *card)
-{
- /* It would be nice to use kthread_stop here, but that function
- * can't wake threads waiting for a semaphore. */
- card->run_thread = 0;
- up(&card->spi_ready);
- down(&card->spi_thread_terminated);
+ lbs_deb_leave(LBS_DEB_SPI);
}
/*
@@ -842,18 +915,40 @@ static int if_spi_host_to_card(struct lbs_private *priv,
u8 type, u8 *buf, u16 nb)
{
int err = 0;
+ unsigned long flags;
struct if_spi_card *card = priv->card;
+ struct if_spi_packet *packet;
+ u16 blen;
lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
- nb = ALIGN(nb, 4);
+ if (nb == 0) {
+ lbs_pr_err("%s: invalid size requested: %d\n", __func__, nb);
+ err = -EINVAL;
+ goto out;
+ }
+ blen = ALIGN(nb, 4);
+ packet = kzalloc(sizeof(struct if_spi_packet) + blen, GFP_ATOMIC);
+ if (!packet) {
+ err = -ENOMEM;
+ goto out;
+ }
+ packet->blen = blen;
+ memcpy(packet->buffer, buf, nb);
+ memset(packet->buffer + nb, 0, blen - nb);
switch (type) {
case MVMS_CMD:
- err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, buf, nb);
+ priv->dnld_sent = DNLD_CMD_SENT;
+ spin_lock_irqsave(&card->buffer_lock, flags);
+ list_add_tail(&packet->list, &card->cmd_packet_list);
+ spin_unlock_irqrestore(&card->buffer_lock, flags);
break;
case MVMS_DAT:
- err = spu_write(card, IF_SPI_DATA_RDWRPORT_REG, buf, nb);
+ priv->dnld_sent = DNLD_DATA_SENT;
+ spin_lock_irqsave(&card->buffer_lock, flags);
+ list_add_tail(&packet->list, &card->data_packet_list);
+ spin_unlock_irqrestore(&card->buffer_lock, flags);
break;
default:
lbs_pr_err("can't transfer buffer of type %d", type);
@@ -861,6 +956,9 @@ static int if_spi_host_to_card(struct lbs_private *priv,
break;
}
+ /* Queue spi xfer work */
+ queue_work(card->workqueue, &card->packet_work);
+out:
lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err);
return err;
}
@@ -869,13 +967,14 @@ static int if_spi_host_to_card(struct lbs_private *priv,
* Host Interrupts
*
* Service incoming interrupts from the WLAN device. We can't sleep here, so
- * don't try to talk on the SPI bus, just wake up the SPI thread.
+ * don't try to talk on the SPI bus, just queue the SPI xfer work.
*/
static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
{
struct if_spi_card *card = dev_id;
- up(&card->spi_ready);
+ queue_work(card->workqueue, &card->packet_work);
+
return IRQ_HANDLED;
}
@@ -883,56 +982,26 @@ static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
* SPI callbacks
*/
-static int __devinit if_spi_probe(struct spi_device *spi)
+static int if_spi_init_card(struct if_spi_card *card)
{
- struct if_spi_card *card;
- struct lbs_private *priv = NULL;
- struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
- int err = 0, i;
+ struct spi_device *spi = card->spi;
+ int err, i;
u32 scratch;
- struct sched_param param = { .sched_priority = 1 };
const struct firmware *helper = NULL;
const struct firmware *mainfw = NULL;
lbs_deb_enter(LBS_DEB_SPI);
- if (!pdata) {
- err = -EINVAL;
- goto out;
- }
-
- if (pdata->setup) {
- err = pdata->setup(spi);
- if (err)
- goto out;
- }
-
- /* Allocate card structure to represent this specific device */
- card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
- if (!card) {
- err = -ENOMEM;
- goto out;
- }
- spi_set_drvdata(spi, card);
- card->pdata = pdata;
- card->spi = spi;
- card->prev_xfer_time = jiffies;
-
- sema_init(&card->spi_ready, 0);
- sema_init(&card->spi_thread_terminated, 0);
-
- /* Initialize the SPI Interface Unit */
- err = spu_init(card, pdata->use_dummy_writes);
+ err = spu_init(card, card->pdata->use_dummy_writes);
if (err)
- goto free_card;
+ goto out;
err = spu_get_chip_revision(card, &card->card_id, &card->card_rev);
if (err)
- goto free_card;
+ goto out;
- /* Firmware load */
err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch);
if (err)
- goto free_card;
+ goto out;
if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC)
lbs_deb_spi("Firmware is already loaded for "
"Marvell WLAN 802.11 adapter\n");
@@ -946,7 +1015,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
lbs_pr_err("Unsupported chip_id: 0x%02x\n",
card->card_id);
err = -ENODEV;
- goto free_card;
+ goto out;
}
err = lbs_get_firmware(&card->spi->dev, NULL, NULL,
@@ -954,7 +1023,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
&mainfw);
if (err) {
lbs_pr_err("failed to find firmware (%d)\n", err);
- goto free_card;
+ goto out;
}
lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter "
@@ -966,15 +1035,68 @@ static int __devinit if_spi_probe(struct spi_device *spi)
spi->max_speed_hz);
err = if_spi_prog_helper_firmware(card, helper);
if (err)
- goto free_card;
+ goto out;
err = if_spi_prog_main_firmware(card, mainfw);
if (err)
- goto free_card;
+ goto out;
lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n");
}
err = spu_set_interrupt_mode(card, 0, 1);
if (err)
+ goto out;
+
+out:
+ if (helper)
+ release_firmware(helper);
+ if (mainfw)
+ release_firmware(mainfw);
+
+ lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
+
+ return err;
+}
+
+static int __devinit if_spi_probe(struct spi_device *spi)
+{
+ struct if_spi_card *card;
+ struct lbs_private *priv = NULL;
+ struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
+ int err = 0;
+
+ lbs_deb_enter(LBS_DEB_SPI);
+
+ if (!pdata) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (pdata->setup) {
+ err = pdata->setup(spi);
+ if (err)
+ goto out;
+ }
+
+ /* Allocate card structure to represent this specific device */
+ card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
+ if (!card) {
+ err = -ENOMEM;
+ goto teardown;
+ }
+ spi_set_drvdata(spi, card);
+ card->pdata = pdata;
+ card->spi = spi;
+ card->prev_xfer_time = jiffies;
+
+ INIT_LIST_HEAD(&card->cmd_packet_list);
+ INIT_LIST_HEAD(&card->data_packet_list);
+ spin_lock_init(&card->buffer_lock);
+
+ /* Initialize the SPI Interface Unit */
+
+ /* Firmware load */
+ err = if_spi_init_card(card);
+ if (err)
goto free_card;
/* Register our card with libertas.
@@ -993,27 +1115,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
priv->fw_ready = 1;
/* Initialize interrupt handling stuff. */
- card->run_thread = 1;
- card->spi_thread = kthread_run(lbs_spi_thread, card, "lbs_spi_thread");
- if (IS_ERR(card->spi_thread)) {
- card->run_thread = 0;
- err = PTR_ERR(card->spi_thread);
- lbs_pr_err("error creating SPI thread: err=%d\n", err);
- goto remove_card;
- }
- if (sched_setscheduler(card->spi_thread, SCHED_FIFO, &param))
- lbs_pr_err("Error setting scheduler, using default.\n");
+ card->workqueue = create_workqueue("libertas_spi");
+ INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
err = request_irq(spi->irq, if_spi_host_interrupt,
IRQF_TRIGGER_FALLING, "libertas_spi", card);
if (err) {
lbs_pr_err("can't get host irq line-- request_irq failed\n");
- goto terminate_thread;
+ goto terminate_workqueue;
}
- /* poke the IRQ handler so that we don't miss the first interrupt */
- up(&card->spi_ready);
-
/* Start the card.
* This will call register_netdev, and we'll start
* getting interrupts... */
@@ -1028,18 +1139,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
release_irq:
free_irq(spi->irq, card);
-terminate_thread:
- if_spi_terminate_spi_thread(card);
-remove_card:
+terminate_workqueue:
+ flush_workqueue(card->workqueue);
+ destroy_workqueue(card->workqueue);
lbs_remove_card(priv); /* will call free_netdev */
free_card:
free_if_spi_card(card);
+teardown:
+ if (pdata->teardown)
+ pdata->teardown(spi);
out:
- if (helper)
- release_firmware(helper);
- if (mainfw)
- release_firmware(mainfw);
-
lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
return err;
}
@@ -1055,9 +1164,9 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
lbs_stop_card(priv);
lbs_remove_card(priv); /* will call free_netdev */
- priv->surpriseremoved = 1;
free_irq(spi->irq, card);
- if_spi_terminate_spi_thread(card);
+ flush_workqueue(card->workqueue);
+ destroy_workqueue(card->workqueue);
if (card->pdata->teardown)
card->pdata->teardown(spi);
free_if_spi_card(card);
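The if_spi.c diff above replaces the dedicated SPI kthread (woken through semaphores) with a workqueue: if_spi_host_to_card() may not sleep, so it copies the frame into an if_spi_packet, appends it to a spinlock-protected list and queues the work item that later drains the lists on the SPI bus. A rough user-space sketch of that queue-then-kick pattern, with made-up names and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct packet {
        struct packet *next;
        size_t len;
        unsigned char buf[64];
};

static struct packet *head, *tail;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void enqueue(const void *data, size_t len)   /* cf. if_spi_host_to_card() */
{
        struct packet *p = calloc(1, sizeof(*p));

        p->len = len;
        memcpy(p->buf, data, len);

        pthread_mutex_lock(&lock);
        if (tail)
                tail->next = p;
        else
                head = p;
        tail = p;
        pthread_mutex_unlock(&lock);
        /* in the driver: queue_work(card->workqueue, &card->packet_work); */
}

static void worker(void)                /* cf. if_spi_host_to_card_worker() */
{
        for (;;) {
                pthread_mutex_lock(&lock);
                struct packet *p = head;
                if (p) {
                        head = p->next;
                        if (!head)
                                tail = NULL;
                }
                pthread_mutex_unlock(&lock);
                if (!p)
                        break;
                printf("writing %zu bytes to the bus\n", p->len);
                free(p);
        }
}

int main(void)
{
        enqueue("cmd", 3);
        enqueue("data", 4);
        worker();
        return 0;
}
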
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index efaf85032208..6524c70363d9 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -345,6 +345,13 @@ static int if_usb_probe(struct usb_interface *intf,
if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2))
lbs_pr_err("cannot register lbs_flash_boot2 attribute\n");
+ /*
+ * EHS_REMOVE_WAKEUP is not supported on all versions of the firmware.
+ */
+ priv->wol_criteria = EHS_REMOVE_WAKEUP;
+ if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
+ priv->ehs_remove_supported = false;
+
return 0;
err_start_card:
@@ -1090,12 +1097,6 @@ static int if_usb_suspend(struct usb_interface *intf, pm_message_t message)
if (priv->psstate != PS_STATE_FULL_POWER)
return -1;
- if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
- lbs_pr_info("Suspend attempt without "
- "configuring wake params!\n");
- return -ENOSYS;
- }
-
ret = lbs_suspend(priv);
if (ret)
goto out;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 46b88b118c99..ca8149cd5bd9 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -539,6 +539,43 @@ static int lbs_thread(void *data)
return 0;
}
+/**
+ * @brief This function gets the HW spec from the firmware and sets
+ * some basic parameters.
+ *
+ * @param priv A pointer to struct lbs_private structure
+ * @return 0 or -1
+ */
+static int lbs_setup_firmware(struct lbs_private *priv)
+{
+ int ret = -1;
+ s16 curlevel = 0, minlevel = 0, maxlevel = 0;
+
+ lbs_deb_enter(LBS_DEB_FW);
+
+ /* Read MAC address from firmware */
+ memset(priv->current_addr, 0xff, ETH_ALEN);
+ ret = lbs_update_hw_spec(priv);
+ if (ret)
+ goto done;
+
+ /* Read power levels if available */
+ ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
+ if (ret == 0) {
+ priv->txpower_cur = curlevel;
+ priv->txpower_min = minlevel;
+ priv->txpower_max = maxlevel;
+ }
+
+ /* Send cmd to FW to enable 11D function */
+ ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
+
+ lbs_set_mac_control(priv);
+done:
+ lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
+ return ret;
+}
+
int lbs_suspend(struct lbs_private *priv)
{
int ret;
@@ -584,47 +621,13 @@ int lbs_resume(struct lbs_private *priv)
lbs_pr_err("deep sleep activation failed: %d\n", ret);
}
- lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
- return ret;
-}
-EXPORT_SYMBOL_GPL(lbs_resume);
-
-/**
- * @brief This function gets the HW spec from the firmware and sets
- * some basic parameters.
- *
- * @param priv A pointer to struct lbs_private structure
- * @return 0 or -1
- */
-static int lbs_setup_firmware(struct lbs_private *priv)
-{
- int ret = -1;
- s16 curlevel = 0, minlevel = 0, maxlevel = 0;
-
- lbs_deb_enter(LBS_DEB_FW);
-
- /* Read MAC address from firmware */
- memset(priv->current_addr, 0xff, ETH_ALEN);
- ret = lbs_update_hw_spec(priv);
- if (ret)
- goto done;
-
- /* Read power levels if available */
- ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
- if (ret == 0) {
- priv->txpower_cur = curlevel;
- priv->txpower_min = minlevel;
- priv->txpower_max = maxlevel;
- }
-
- /* Send cmd to FW to enable 11D function */
- ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
+ if (priv->setup_fw_on_resume)
+ ret = lbs_setup_firmware(priv);
- lbs_set_mac_control(priv);
-done:
lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
return ret;
}
+EXPORT_SYMBOL_GPL(lbs_resume);
/**
* This function handles the timeout of command sending.
@@ -851,9 +854,10 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
priv->work_thread = create_singlethread_workqueue("lbs_worker");
INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
- priv->wol_criteria = 0xffffffff;
+ priv->wol_criteria = EHS_REMOVE_WAKEUP;
priv->wol_gpio = 0xff;
priv->wol_gap = 20;
+ priv->ehs_remove_supported = true;
goto done;
@@ -915,8 +919,6 @@ void lbs_remove_card(struct lbs_private *priv)
lbs_free_adapter(priv);
lbs_cfg_free(priv);
-
- priv->dev = NULL;
free_netdev(dev);
lbs_deb_leave(LBS_DEB_MAIN);
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index acf3bf63ee33..9d097b9c8005 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -918,7 +918,6 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mrvl_mesh_defaults defs;
- int maxlen;
int ret;
ret = mesh_get_default_parameters(dev, &defs);
@@ -931,13 +930,11 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN;
}
- /* SSID not null terminated: reserve room for \0 + \n */
- maxlen = defs.meshie.val.mesh_id_len + 2;
- maxlen = (PAGE_SIZE > maxlen) ? maxlen : PAGE_SIZE;
+ memcpy(buf, defs.meshie.val.mesh_id, defs.meshie.val.mesh_id_len);
+ buf[defs.meshie.val.mesh_id_len] = '\n';
+ buf[defs.meshie.val.mesh_id_len + 1] = '\0';
- defs.meshie.val.mesh_id[defs.meshie.val.mesh_id_len] = '\0';
-
- return snprintf(buf, maxlen, "%s\n", defs.meshie.val.mesh_id);
+ return defs.meshie.val.mesh_id_len + 1;
}
/**
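The mesh_id_get() change above treats the mesh ID as a counted byte string rather than a C string: it is copied by length, a newline and terminating NUL are appended, and mesh_id_len + 1 is returned to sysfs, so IDs with embedded NULs or without a terminator are shown correctly. A stand-alone sketch of that copy-by-length pattern (names are illustrative):

#include <stdio.h>
#include <string.h>

static int show_mesh_id(char *buf, const unsigned char *id, int len)
{
        memcpy(buf, id, len);
        buf[len] = '\n';
        buf[len + 1] = '\0';
        return len + 1;                 /* byte count returned to the reader */
}

int main(void)
{
        char page[64];
        const unsigned char id[] = { 'm', 'e', 's', 'h', 0, '!' };
        int n = show_mesh_id(page, id, sizeof(id));

        fwrite(page, 1, n, stdout);     /* embedded NUL survives the copy */
        return 0;
}
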
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index a4d0bca9ef2c..a2b1df21d286 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -55,7 +55,9 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
struct rxpd *p_rx_pd;
int hdrchop;
struct ethhdr *p_ethhdr;
- const u8 rfc1042_eth_hdr[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+ static const u8 rfc1042_eth_hdr[] = {
+ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
+ };
lbs_deb_enter(LBS_DEB_RX);
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 9278b3c8ee30..d4005081f1df 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -225,7 +225,7 @@ static void lbtf_free_adapter(struct lbtf_private *priv)
lbtf_deb_leave(LBTF_DEB_MAIN);
}
-static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct lbtf_private *priv = hw->priv;
@@ -236,7 +236,6 @@ static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* there are no buffered multicast frames to send
*/
ieee80211_stop_queues(priv->hw);
- return NETDEV_TX_OK;
}
static void lbtf_tx_work(struct work_struct *work)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7eaaa3bab547..56f439d58013 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -309,6 +309,8 @@ struct mac80211_hwsim_data {
*/
u64 group;
struct dentry *debugfs_group;
+
+ int power_level;
};
@@ -497,7 +499,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
rx_status.band = data->channel->band;
rx_status.rate_idx = info->control.rates[0].idx;
/* TODO: simulate real signal strength (and optional packet loss) */
- rx_status.signal = -50;
+ rx_status.signal = data->power_level - 50;
if (data->ps != PS_DISABLED)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
@@ -539,7 +541,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
}
-static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
bool ack;
struct ieee80211_tx_info *txi;
@@ -549,7 +551,7 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (skb->len < 10) {
/* Should not happen; just a sanity check for addr1 use */
dev_kfree_skb(skb);
- return NETDEV_TX_OK;
+ return;
}
ack = mac80211_hwsim_tx_frame(hw, skb);
@@ -569,7 +571,6 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack)
txi->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(hw, skb);
- return NETDEV_TX_OK;
}
@@ -698,6 +699,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
data->channel = conf->channel;
+ data->power_level = conf->power_level;
if (!data->started || !data->beacon_int)
del_timer(&data->beacon_timer);
else
@@ -940,7 +942,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
switch (action) {
case IEEE80211_AMPDU_TX_START:
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index f152a25be59f..36952274950e 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -29,6 +29,12 @@
#define MWL8K_NAME KBUILD_MODNAME
#define MWL8K_VERSION "0.12"
+/* Module parameters */
+static unsigned ap_mode_default;
+module_param(ap_mode_default, bool, 0);
+MODULE_PARM_DESC(ap_mode_default,
+ "Set to 1 to make ap mode the default instead of sta mode");
+
/* Register definitions */
#define MWL8K_HIU_GEN_PTR 0x00000c10
#define MWL8K_MODE_STA 0x0000005a
@@ -92,8 +98,10 @@ struct rxd_ops {
struct mwl8k_device_info {
char *part_name;
char *helper_image;
- char *fw_image;
+ char *fw_image_sta;
+ char *fw_image_ap;
struct rxd_ops *ap_rxd_ops;
+ u32 fw_api_ap;
};
struct mwl8k_rx_queue {
@@ -136,8 +144,8 @@ struct mwl8k_priv {
void __iomem *regs;
/* firmware */
- struct firmware *fw_helper;
- struct firmware *fw_ucode;
+ const struct firmware *fw_helper;
+ const struct firmware *fw_ucode;
/* hardware/firmware parameters */
bool ap_fw;
@@ -210,8 +218,23 @@ struct mwl8k_priv {
/* Most recently reported noise in dBm */
s8 noise;
+
+ /*
+ * preserve the queue configurations so they can be restored if/when
+ * the firmware image is swapped.
+ */
+ struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_QUEUES];
+
+ /* async firmware loading state */
+ unsigned fw_state;
+ char *fw_pref;
+ char *fw_alt;
+ struct completion firmware_loading_complete;
};
+#define MAX_WEP_KEY_LEN 13
+#define NUM_WEP_KEYS 4
+
/* Per interface specific private data */
struct mwl8k_vif {
struct list_head list;
@@ -222,8 +245,21 @@ struct mwl8k_vif {
/* Non AMPDU sequence number assigned by driver. */
u16 seqno;
+
+ /* Saved WEP keys */
+ struct {
+ u8 enabled;
+ u8 key[sizeof(struct ieee80211_key_conf) + MAX_WEP_KEY_LEN];
+ } wep_key_conf[NUM_WEP_KEYS];
+
+ /* BSSID */
+ u8 bssid[ETH_ALEN];
+
+ /* A flag to indicate if HW crypto is enabled for this bssid */
+ bool is_hw_crypto_enabled;
};
#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
+#define IEEE80211_KEY_CONF(_u8) ((struct ieee80211_key_conf *)(_u8))
struct mwl8k_sta {
/* Index into station database. Returned by UPDATE_STADB. */
@@ -285,8 +321,9 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
};
/* Set or get info from Firmware */
-#define MWL8K_CMD_SET 0x0001
#define MWL8K_CMD_GET 0x0000
+#define MWL8K_CMD_SET 0x0001
+#define MWL8K_CMD_SET_LIST 0x0002
/* Firmware command codes */
#define MWL8K_CMD_CODE_DNLD 0x0001
@@ -296,6 +333,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
#define MWL8K_CMD_GET_STAT 0x0014
#define MWL8K_CMD_RADIO_CONTROL 0x001c
#define MWL8K_CMD_RF_TX_POWER 0x001e
+#define MWL8K_CMD_TX_POWER 0x001f
#define MWL8K_CMD_RF_ANTENNA 0x0020
#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */
#define MWL8K_CMD_SET_PRE_SCAN 0x0107
@@ -315,6 +353,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
+#define MWL8K_CMD_UPDATE_ENCRYPTION 0x1122 /* per-vif */
#define MWL8K_CMD_UPDATE_STADB 0x1123
static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
@@ -333,6 +372,7 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(GET_STAT);
MWL8K_CMDNAME(RADIO_CONTROL);
MWL8K_CMDNAME(RF_TX_POWER);
+ MWL8K_CMDNAME(TX_POWER);
MWL8K_CMDNAME(RF_ANTENNA);
MWL8K_CMDNAME(SET_BEACON);
MWL8K_CMDNAME(SET_PRE_SCAN);
@@ -352,6 +392,7 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(SET_RATEADAPT_MODE);
MWL8K_CMDNAME(BSS_START);
MWL8K_CMDNAME(SET_NEW_STN);
+ MWL8K_CMDNAME(UPDATE_ENCRYPTION);
MWL8K_CMDNAME(UPDATE_STADB);
default:
snprintf(buf, bufsize, "0x%x", cmd);
@@ -372,7 +413,7 @@ static void mwl8k_hw_reset(struct mwl8k_priv *priv)
}
/* Release fw image */
-static void mwl8k_release_fw(struct firmware **fw)
+static void mwl8k_release_fw(const struct firmware **fw)
{
if (*fw == NULL)
return;
@@ -386,37 +427,68 @@ static void mwl8k_release_firmware(struct mwl8k_priv *priv)
mwl8k_release_fw(&priv->fw_helper);
}
+/* states for asynchronous f/w loading */
+static void mwl8k_fw_state_machine(const struct firmware *fw, void *context);
+enum {
+ FW_STATE_INIT = 0,
+ FW_STATE_LOADING_PREF,
+ FW_STATE_LOADING_ALT,
+ FW_STATE_ERROR,
+};
+
/* Request fw image */
static int mwl8k_request_fw(struct mwl8k_priv *priv,
- const char *fname, struct firmware **fw)
+ const char *fname, const struct firmware **fw,
+ bool nowait)
{
/* release current image */
if (*fw != NULL)
mwl8k_release_fw(fw);
- return request_firmware((const struct firmware **)fw,
- fname, &priv->pdev->dev);
+ if (nowait)
+ return request_firmware_nowait(THIS_MODULE, 1, fname,
+ &priv->pdev->dev, GFP_KERNEL,
+ priv, mwl8k_fw_state_machine);
+ else
+ return request_firmware(fw, fname, &priv->pdev->dev);
}
-static int mwl8k_request_firmware(struct mwl8k_priv *priv)
+static int mwl8k_request_firmware(struct mwl8k_priv *priv, char *fw_image,
+ bool nowait)
{
struct mwl8k_device_info *di = priv->device_info;
int rc;
if (di->helper_image != NULL) {
- rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw_helper);
- if (rc) {
- printk(KERN_ERR "%s: Error requesting helper "
- "firmware file %s\n", pci_name(priv->pdev),
- di->helper_image);
+ if (nowait)
+ rc = mwl8k_request_fw(priv, di->helper_image,
+ &priv->fw_helper, true);
+ else
+ rc = mwl8k_request_fw(priv, di->helper_image,
+ &priv->fw_helper, false);
+ if (rc)
+ printk(KERN_ERR "%s: Error requesting helper fw %s\n",
+ pci_name(priv->pdev), di->helper_image);
+
+ if (rc || nowait)
return rc;
- }
}
- rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw_ucode);
+ if (nowait) {
+ /*
+ * if we get here, no helper image is needed. Skip the
+ * FW_STATE_INIT state.
+ */
+ priv->fw_state = FW_STATE_LOADING_PREF;
+ rc = mwl8k_request_fw(priv, fw_image,
+ &priv->fw_ucode,
+ true);
+ } else
+ rc = mwl8k_request_fw(priv, fw_image,
+ &priv->fw_ucode, false);
if (rc) {
printk(KERN_ERR "%s: Error requesting firmware file %s\n",
- pci_name(priv->pdev), di->fw_image);
+ pci_name(priv->pdev), fw_image);
mwl8k_release_fw(&priv->fw_helper);
return rc;
}
@@ -577,12 +649,12 @@ static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
static int mwl8k_load_firmware(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
- struct firmware *fw = priv->fw_ucode;
+ const struct firmware *fw = priv->fw_ucode;
int rc;
int loops;
if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
- struct firmware *helper = priv->fw_helper;
+ const struct firmware *helper = priv->fw_helper;
if (helper == NULL) {
printk(KERN_ERR "%s: helper image needed but none "
@@ -661,10 +733,12 @@ static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
skb_pull(skb, sizeof(*tr) - hdrlen);
}
-static inline void mwl8k_add_dma_header(struct sk_buff *skb)
+static void
+mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad)
{
struct ieee80211_hdr *wh;
int hdrlen;
+ int reqd_hdrlen;
struct mwl8k_dma_data *tr;
/*
@@ -676,11 +750,13 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
wh = (struct ieee80211_hdr *)skb->data;
hdrlen = ieee80211_hdrlen(wh->frame_control);
- if (hdrlen != sizeof(*tr))
- skb_push(skb, sizeof(*tr) - hdrlen);
+ reqd_hdrlen = sizeof(*tr);
+
+ if (hdrlen != reqd_hdrlen)
+ skb_push(skb, reqd_hdrlen - hdrlen);
if (ieee80211_is_data_qos(wh->frame_control))
- hdrlen -= 2;
+ hdrlen -= IEEE80211_QOS_CTL_LEN;
tr = (struct mwl8k_dma_data *)skb->data;
if (wh != &tr->wh)
@@ -693,9 +769,52 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
* payload". That is, everything except for the 802.11 header.
* This includes all crypto material including the MIC.
*/
- tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr));
+ tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad);
}
+static void mwl8k_encapsulate_tx_frame(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *wh;
+ struct ieee80211_tx_info *tx_info;
+ struct ieee80211_key_conf *key_conf;
+ int data_pad;
+
+ wh = (struct ieee80211_hdr *)skb->data;
+
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ key_conf = NULL;
+ if (ieee80211_is_data(wh->frame_control))
+ key_conf = tx_info->control.hw_key;
+
+ /*
+ * Make sure the packet header is in the DMA header format (4-address
+ * without QoS). The necessary crypto padding between the header and
+ * the payload has already been provided by mac80211, but it does not
+ * add tail padding when HW crypto is enabled.
+ *
+ * We have the following trailer padding requirements:
+ * - WEP: 4 trailer bytes (ICV)
+ * - TKIP: 12 trailer bytes (8 MIC + 4 ICV)
+ * - CCMP: 8 trailer bytes (MIC)
+ */
+ data_pad = 0;
+ if (key_conf != NULL) {
+ switch (key_conf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ data_pad = 4;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ data_pad = 12;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ data_pad = 8;
+ break;
+ }
+ }
+ mwl8k_add_dma_header(skb, data_pad);
+}
/*
* Packet reception for 88w8366 AP firmware.
@@ -724,6 +843,13 @@ struct mwl8k_rxd_8366_ap {
#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
+/* 8366 AP rx_status bits */
+#define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK 0x80
+#define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF
+#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02
+#define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04
+#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08
+
static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
{
struct mwl8k_rxd_8366_ap *rxd = _rxd;
@@ -780,10 +906,16 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
} else {
status->band = IEEE80211_BAND_2GHZ;
}
- status->freq = ieee80211_channel_to_frequency(rxd->channel);
+ status->freq = ieee80211_channel_to_frequency(rxd->channel,
+ status->band);
*qos = rxd->qos_control;
+ if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) &&
+ (rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) &&
+ (rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR))
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
return le16_to_cpu(rxd->pkt_len);
}
@@ -822,6 +954,11 @@ struct mwl8k_rxd_sta {
#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001
#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02
+#define MWL8K_STA_RX_CTRL_DECRYPT_ERROR 0x04
+/* ICV=0 or MIC=1 */
+#define MWL8K_STA_RX_CTRL_DEC_ERR_TYPE 0x08
+/* Key is uploaded only in failure case */
+#define MWL8K_STA_RX_CTRL_KEY_INDEX 0x30
static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
{
@@ -877,9 +1014,13 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
} else {
status->band = IEEE80211_BAND_2GHZ;
}
- status->freq = ieee80211_channel_to_frequency(rxd->channel);
+ status->freq = ieee80211_channel_to_frequency(rxd->channel,
+ status->band);
*qos = rxd->qos_control;
+ if ((rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DECRYPT_ERROR) &&
+ (rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DEC_ERR_TYPE))
+ status->flag |= RX_FLAG_MMIC_ERROR;
return le16_to_cpu(rxd->pkt_len);
}
@@ -915,13 +1056,12 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
}
memset(rxq->rxd, 0, size);
- rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL);
+ rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL);
if (rxq->buf == NULL) {
wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
return -ENOMEM;
}
- memset(rxq->buf, 0, MWL8K_RX_DESCS * sizeof(*rxq->buf));
for (i = 0; i < MWL8K_RX_DESCS; i++) {
int desc_size;
@@ -1038,9 +1178,25 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &priv->finalize_join_worker);
}
+static inline struct mwl8k_vif *mwl8k_find_vif_bss(struct list_head *vif_list,
+ u8 *bssid)
+{
+ struct mwl8k_vif *mwl8k_vif;
+
+ list_for_each_entry(mwl8k_vif,
+ vif_list, list) {
+ if (memcmp(bssid, mwl8k_vif->bssid,
+ ETH_ALEN) == 0)
+ return mwl8k_vif;
+ }
+
+ return NULL;
+}
+
static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
{
struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif = NULL;
struct mwl8k_rx_queue *rxq = priv->rxq + index;
int processed;
@@ -1050,6 +1206,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
void *rxd;
int pkt_len;
struct ieee80211_rx_status status;
+ struct ieee80211_hdr *wh;
__le16 qos;
skb = rxq->buf[rxq->head].skb;
@@ -1076,8 +1233,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
rxq->rxd_count--;
- skb_put(skb, pkt_len);
- mwl8k_remove_dma_header(skb, qos);
+ wh = &((struct mwl8k_dma_data *)skb->data)->wh;
/*
* Check for a pending join operation. Save a
@@ -1087,6 +1243,46 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
if (mwl8k_capture_bssid(priv, (void *)skb->data))
mwl8k_save_beacon(hw, skb);
+ if (ieee80211_has_protected(wh->frame_control)) {
+
+ /* Check if hw crypto has been enabled for
+ * this bss. If yes, set the status flags
+ * accordingly
+ */
+ mwl8k_vif = mwl8k_find_vif_bss(&priv->vif_list,
+ wh->addr1);
+
+ if (mwl8k_vif != NULL &&
+ mwl8k_vif->is_hw_crypto_enabled == true) {
+ /*
+ * When MMIC ERROR is encountered
+ * by the firmware, payload is
+ * dropped and only 32 bytes of
+ * mwl8k Firmware header is sent
+ * to the host.
+ *
+ * We need to add four bytes of
+ * key information. In it
+ * MAC80211 expects keyidx set to
+ * 0 for triggering Counter
+ * Measure of MMIC failure.
+ */
+ if (status.flag & RX_FLAG_MMIC_ERROR) {
+ struct mwl8k_dma_data *tr;
+ tr = (struct mwl8k_dma_data *)skb->data;
+ memset((void *)&(tr->data), 0, 4);
+ pkt_len += 4;
+ }
+
+ if (!ieee80211_is_auth(wh->frame_control))
+ status.flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_MMIC_STRIPPED;
+ }
+ }
+
+ skb_put(skb, pkt_len);
+ mwl8k_remove_dma_header(skb, qos);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
ieee80211_rx_irqsafe(hw, skb);
@@ -1150,13 +1346,12 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
}
memset(txq->txd, 0, size);
- txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
+ txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL);
if (txq->skb == NULL) {
wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
return -ENOMEM;
}
- memset(txq->skb, 0, MWL8K_TX_DESCS * sizeof(*txq->skb));
for (i = 0; i < MWL8K_TX_DESCS; i++) {
struct mwl8k_tx_desc *tx_desc;
@@ -1338,6 +1533,13 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
+
+ /* Rate control is happening in the firmware.
+ * Ensure no tx rate is being reported.
+ */
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 1;
+
if (MWL8K_TXD_SUCCESS(status))
info->flags |= IEEE80211_TX_STAT_ACK;
@@ -1369,7 +1571,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
txq->txd = NULL;
}
-static int
+static void
mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
{
struct mwl8k_priv *priv = hw->priv;
@@ -1389,7 +1591,11 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
else
qos = 0;
- mwl8k_add_dma_header(skb);
+ if (priv->ap_fw)
+ mwl8k_encapsulate_tx_frame(skb);
+ else
+ mwl8k_add_dma_header(skb, 0);
+
wh = &((struct mwl8k_dma_data *)skb->data)->wh;
tx_info = IEEE80211_SKB_CB(skb);
@@ -1427,7 +1633,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
wiphy_debug(hw->wiphy,
"failed to dma map skb, dropping TX frame.\n");
dev_kfree_skb(skb);
- return NETDEV_TX_OK;
+ return;
}
spin_lock_bh(&priv->tx_lock);
@@ -1464,8 +1670,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
mwl8k_tx_start(priv);
spin_unlock_bh(&priv->tx_lock);
-
- return NETDEV_TX_OK;
}
@@ -1811,6 +2015,7 @@ struct mwl8k_cmd_get_hw_spec_ap {
__le32 wcbbase1;
__le32 wcbbase2;
__le32 wcbbase3;
+ __le32 fw_api_version;
} __packed;
static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
@@ -1818,6 +2023,7 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_get_hw_spec_ap *cmd;
int rc;
+ u32 api_version;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
@@ -1834,6 +2040,16 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
if (!rc) {
int off;
+ api_version = le32_to_cpu(cmd->fw_api_version);
+ if (priv->device_info->fw_api_ap != api_version) {
+ printk(KERN_ERR "%s: Unsupported fw API version for %s."
+ " Expected %d got %d.\n", MWL8K_NAME,
+ priv->device_info->part_name,
+ priv->device_info->fw_api_ap,
+ api_version);
+ rc = -EINVAL;
+ goto done;
+ }
SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
@@ -1861,6 +2077,7 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
iowrite32(priv->txq[3].txd_dma, priv->sram + off);
}
+done:
kfree(cmd);
return rc;
}
@@ -1907,8 +2124,18 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
- for (i = 0; i < MWL8K_TX_QUEUES; i++)
- cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
+
+ /*
+ * Mac80211 stack has Q0 as highest priority and Q3 as lowest in
+ * that order. Firmware has Q3 as highest priority and Q0 as lowest
+ * in that order. Map Q3 of mac80211 to Q0 of firmware so that the
+ * priority is interpreted the right way in firmware.
+ */
+ for (i = 0; i < MWL8K_TX_QUEUES; i++) {
+ int j = MWL8K_TX_QUEUES - 1 - i;
+ cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[j].txd_dma);
+ }
+
cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
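The comment above describes the queue-priority mismatch the hw-spec command now compensates for: mac80211 numbers its queues highest-priority first while the firmware numbers them lowest first, so queue pointer i is filled from driver queue MWL8K_TX_QUEUES - 1 - i. A trivial stand-alone printout of the resulting mapping (queue count assumed to be 4 for this driver):

#include <stdio.h>

#define MWL8K_TX_QUEUES 4               /* assumed queue count, illustration only */

int main(void)
{
        int i;

        for (i = 0; i < MWL8K_TX_QUEUES; i++)
                printf("mac80211 q%d -> firmware q%d\n",
                       i, MWL8K_TX_QUEUES - 1 - i);
        return 0;
}
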
@@ -2084,7 +2311,7 @@ mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
/*
* CMD_RF_TX_POWER.
*/
-#define MWL8K_TX_POWER_LEVEL_TOTAL 8
+#define MWL8K_RF_TX_POWER_LEVEL_TOTAL 8
struct mwl8k_cmd_rf_tx_power {
struct mwl8k_cmd_pkt header;
@@ -2092,7 +2319,7 @@ struct mwl8k_cmd_rf_tx_power {
__le16 support_level;
__le16 current_level;
__le16 reserved;
- __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
+ __le16 power_level_list[MWL8K_RF_TX_POWER_LEVEL_TOTAL];
} __packed;
static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
@@ -2116,6 +2343,65 @@ static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
}
/*
+ * CMD_TX_POWER.
+ */
+#define MWL8K_TX_POWER_LEVEL_TOTAL 12
+
+struct mwl8k_cmd_tx_power {
+ struct mwl8k_cmd_pkt header;
+ __le16 action;
+ __le16 band;
+ __le16 channel;
+ __le16 bw;
+ __le16 sub_ch;
+ __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
+} __attribute__((packed));
+
+static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
+ struct ieee80211_conf *conf,
+ unsigned short pwr)
+{
+ struct ieee80211_channel *channel = conf->channel;
+ struct mwl8k_cmd_tx_power *cmd;
+ int rc;
+ int i;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_TX_POWER);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET_LIST);
+
+ if (channel->band == IEEE80211_BAND_2GHZ)
+ cmd->band = cpu_to_le16(0x1);
+ else if (channel->band == IEEE80211_BAND_5GHZ)
+ cmd->band = cpu_to_le16(0x4);
+
+ cmd->channel = channel->hw_value;
+
+ if (conf->channel_type == NL80211_CHAN_NO_HT ||
+ conf->channel_type == NL80211_CHAN_HT20) {
+ cmd->bw = cpu_to_le16(0x2);
+ } else {
+ cmd->bw = cpu_to_le16(0x4);
+ if (conf->channel_type == NL80211_CHAN_HT40MINUS)
+ cmd->sub_ch = cpu_to_le16(0x3);
+ else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
+ cmd->sub_ch = cpu_to_le16(0x1);
+ }
+
+ for (i = 0; i < MWL8K_TX_POWER_LEVEL_TOTAL; i++)
+ cmd->power_level_list[i] = cpu_to_le16(pwr);
+
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
* CMD_RF_ANTENNA.
*/
struct mwl8k_cmd_rf_antenna {
@@ -2973,6 +3259,274 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
}
/*
+ * CMD_UPDATE_ENCRYPTION.
+ */
+
+#define MAX_ENCR_KEY_LENGTH 16
+#define MIC_KEY_LENGTH 8
+
+struct mwl8k_cmd_update_encryption {
+ struct mwl8k_cmd_pkt header;
+
+ __le32 action;
+ __le32 reserved;
+ __u8 mac_addr[6];
+ __u8 encr_type;
+
+} __attribute__((packed));
+
+struct mwl8k_cmd_set_key {
+ struct mwl8k_cmd_pkt header;
+
+ __le32 action;
+ __le32 reserved;
+ __le16 length;
+ __le16 key_type_id;
+ __le32 key_info;
+ __le32 key_id;
+ __le16 key_len;
+ __u8 key_material[MAX_ENCR_KEY_LENGTH];
+ __u8 tkip_tx_mic_key[MIC_KEY_LENGTH];
+ __u8 tkip_rx_mic_key[MIC_KEY_LENGTH];
+ __le16 tkip_rsc_low;
+ __le32 tkip_rsc_high;
+ __le16 tkip_tsc_low;
+ __le32 tkip_tsc_high;
+ __u8 mac_addr[6];
+} __attribute__((packed));
+
+enum {
+ MWL8K_ENCR_ENABLE,
+ MWL8K_ENCR_SET_KEY,
+ MWL8K_ENCR_REMOVE_KEY,
+ MWL8K_ENCR_SET_GROUP_KEY,
+};
+
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_WEP 0
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_DISABLE 1
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_TKIP 4
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED 7
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_AES 8
+
+enum {
+ MWL8K_ALG_WEP,
+ MWL8K_ALG_TKIP,
+ MWL8K_ALG_CCMP,
+};
+
+#define MWL8K_KEY_FLAG_TXGROUPKEY 0x00000004
+#define MWL8K_KEY_FLAG_PAIRWISE 0x00000008
+#define MWL8K_KEY_FLAG_TSC_VALID 0x00000040
+#define MWL8K_KEY_FLAG_WEP_TXKEY 0x01000000
+#define MWL8K_KEY_FLAG_MICKEY_VALID 0x02000000
+
+static int mwl8k_cmd_update_encryption_enable(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 *addr,
+ u8 encr_type)
+{
+ struct mwl8k_cmd_update_encryption *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_ENCR_ENABLE);
+ memcpy(cmd->mac_addr, addr, ETH_ALEN);
+ cmd->encr_type = encr_type;
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_encryption_set_cmd_info(struct mwl8k_cmd_set_key *cmd,
+ u8 *addr,
+ struct ieee80211_key_conf *key)
+{
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->length = cpu_to_le16(sizeof(*cmd) -
+ offsetof(struct mwl8k_cmd_set_key, length));
+ cmd->key_id = cpu_to_le32(key->keyidx);
+ cmd->key_len = cpu_to_le16(key->keylen);
+ memcpy(cmd->mac_addr, addr, ETH_ALEN);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ cmd->key_type_id = cpu_to_le16(MWL8K_ALG_WEP);
+ if (key->keyidx == 0)
+ cmd->key_info = cpu_to_le32(MWL8K_KEY_FLAG_WEP_TXKEY);
+
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ cmd->key_type_id = cpu_to_le16(MWL8K_ALG_TKIP);
+ cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
+ : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
+ cmd->key_info |= cpu_to_le32(MWL8K_KEY_FLAG_MICKEY_VALID
+ | MWL8K_KEY_FLAG_TSC_VALID);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ cmd->key_type_id = cpu_to_le16(MWL8K_ALG_CCMP);
+ cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
+ : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 *addr,
+ struct ieee80211_key_conf *key)
+{
+ struct mwl8k_cmd_set_key *cmd;
+ int rc;
+ int keymlen;
+ u32 action;
+ u8 idx;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
+ if (rc < 0)
+ goto done;
+
+ idx = key->keyidx;
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ action = MWL8K_ENCR_SET_KEY;
+ else
+ action = MWL8K_ENCR_SET_GROUP_KEY;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ if (!mwl8k_vif->wep_key_conf[idx].enabled) {
+ memcpy(mwl8k_vif->wep_key_conf[idx].key, key,
+ sizeof(*key) + key->keylen);
+ mwl8k_vif->wep_key_conf[idx].enabled = 1;
+ }
+
+ keymlen = 0;
+ action = MWL8K_ENCR_SET_KEY;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ keymlen = MAX_ENCR_KEY_LENGTH + 2 * MIC_KEY_LENGTH;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ keymlen = key->keylen;
+ break;
+ default:
+ rc = -ENOTSUPP;
+ goto done;
+ }
+
+ memcpy(cmd->key_material, key->key, keymlen);
+ cmd->action = cpu_to_le32(action);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+done:
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 *addr,
+ struct ieee80211_key_conf *key)
+{
+ struct mwl8k_cmd_set_key *cmd;
+ int rc;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
+ if (rc < 0)
+ goto done;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	    key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
+
+ cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+done:
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd_param,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ int rc = 0;
+ u8 encr_type;
+ u8 *addr;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ if (sta == NULL)
+ addr = hw->wiphy->perm_addr;
+ else
+ addr = sta->addr;
+
+ if (cmd_param == SET_KEY) {
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ rc = mwl8k_cmd_encryption_set_key(hw, vif, addr, key);
+ if (rc)
+ goto out;
+
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40)
+ || (key->cipher == WLAN_CIPHER_SUITE_WEP104))
+ encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_WEP;
+ else
+ encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED;
+
+ rc = mwl8k_cmd_update_encryption_enable(hw, vif, addr,
+ encr_type);
+ if (rc)
+ goto out;
+
+ mwl8k_vif->is_hw_crypto_enabled = true;
+
+ } else {
+ rc = mwl8k_cmd_encryption_remove_key(hw, vif, addr, key);
+
+ if (rc)
+ goto out;
+
+ mwl8k_vif->is_hw_crypto_enabled = false;
+
+ }
+out:
+ return rc;
+}
+
+/*
* CMD_UPDATE_STADB.
*/
struct ewc_ht_info {
@@ -3184,22 +3738,19 @@ static void mwl8k_rx_poll(unsigned long data)
/*
* Core driver operations.
*/
-static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct mwl8k_priv *priv = hw->priv;
int index = skb_get_queue_mapping(skb);
- int rc;
if (!priv->radio_on) {
wiphy_debug(hw->wiphy,
"dropped TX frame since radio disabled\n");
dev_kfree_skb(skb);
- return NETDEV_TX_OK;
+ return;
}
- rc = mwl8k_txq_xmit(hw, index, skb);
-
- return rc;
+ mwl8k_txq_xmit(hw, index, skb);
}
static int mwl8k_start(struct ieee80211_hw *hw)
@@ -3283,13 +3834,16 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
}
+static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image);
+
static int mwl8k_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mwl8k_vif;
u32 macids_supported;
- int macid;
+ int macid, rc;
+ struct mwl8k_device_info *di;
/*
* Reject interface creation if sniffer mode is active, as
@@ -3302,12 +3856,28 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
return -EINVAL;
}
-
+ di = priv->device_info;
switch (vif->type) {
case NL80211_IFTYPE_AP:
+ if (!priv->ap_fw && di->fw_image_ap) {
+ /* we must load the ap fw to meet this request */
+ if (!list_empty(&priv->vif_list))
+ return -EBUSY;
+ rc = mwl8k_reload_firmware(hw, di->fw_image_ap);
+ if (rc)
+ return rc;
+ }
macids_supported = priv->ap_macids_supported;
break;
case NL80211_IFTYPE_STATION:
+ if (priv->ap_fw && di->fw_image_sta) {
+ /* we must load the sta fw to meet this request */
+ if (!list_empty(&priv->vif_list))
+ return -EBUSY;
+ rc = mwl8k_reload_firmware(hw, di->fw_image_sta);
+ if (rc)
+ return rc;
+ }
macids_supported = priv->sta_macids_supported;
break;
default:
@@ -3324,6 +3894,8 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
mwl8k_vif->vif = vif;
mwl8k_vif->macid = macid;
mwl8k_vif->seqno = 0;
+ memcpy(mwl8k_vif->bssid, vif->addr, ETH_ALEN);
+ mwl8k_vif->is_hw_crypto_enabled = false;
/* Set the mac address. */
mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
@@ -3377,15 +3949,23 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
if (conf->power_level > 18)
conf->power_level = 18;
- rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
- if (rc)
- goto out;
if (priv->ap_fw) {
- rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7);
- if (!rc)
- rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
+ rc = mwl8k_cmd_tx_power(hw, conf, conf->power_level);
+ if (rc)
+ goto out;
+
+ rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3);
+ if (rc)
+			wiphy_warn(hw->wiphy, "failed to set # of RX antennas\n");
+ rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
+ if (rc)
+			wiphy_warn(hw->wiphy, "failed to set # of TX antennas\n");
+
} else {
+ rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
+ if (rc)
+ goto out;
rc = mwl8k_cmd_mimo_config(hw, 0x7, 0x7);
}
@@ -3717,18 +4297,27 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
{
struct mwl8k_priv *priv = hw->priv;
int ret;
+ int i;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ struct ieee80211_key_conf *key;
if (!priv->ap_fw) {
ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
if (ret >= 0) {
MWL8K_STA(sta)->peer_id = ret;
- return 0;
+ ret = 0;
}
- return ret;
+ } else {
+ ret = mwl8k_cmd_set_new_stn_add(hw, vif, sta);
}
- return mwl8k_cmd_set_new_stn_add(hw, vif, sta);
+ for (i = 0; i < NUM_WEP_KEYS; i++) {
+ key = IEEE80211_KEY_CONF(mwl8k_vif->wep_key_conf[i].key);
+ if (mwl8k_vif->wep_key_conf[i].enabled)
+ mwl8k_set_key(hw, SET_KEY, vif, sta, key);
+ }
+ return ret;
}
static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3739,15 +4328,20 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
rc = mwl8k_fw_lock(hw);
if (!rc) {
+ BUG_ON(queue > MWL8K_TX_QUEUES - 1);
+ memcpy(&priv->wmm_params[queue], params, sizeof(*params));
+
if (!priv->wmm_enabled)
rc = mwl8k_cmd_set_wmm_mode(hw, 1);
- if (!rc)
- rc = mwl8k_cmd_set_edca_params(hw, queue,
+ if (!rc) {
+ int q = MWL8K_TX_QUEUES - 1 - queue;
+ rc = mwl8k_cmd_set_edca_params(hw, q,
params->cw_min,
params->cw_max,
params->aifs,
params->txop);
+ }
mwl8k_fw_unlock(hw);
}
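The reversed index mirrors the reversed tx_queue_ptrs registration in SET_HW_SPEC earlier in this patch: mac80211 numbers its queues from highest priority down (0 = voice), and the AP firmware apparently expects the opposite ordering. A minimal sketch of the mapping, assuming MWL8K_TX_QUEUES is 4 as in the rest of the driver; the helper name is made up for illustration, the patch computes the index inline:

/*
 * mac80211 queue 0 (VO)  ->  firmware EDCA queue 3
 * mac80211 queue 1 (VI)  ->  firmware EDCA queue 2
 * mac80211 queue 2 (BE)  ->  firmware EDCA queue 1
 * mac80211 queue 3 (BK)  ->  firmware EDCA queue 0
 */
static inline int mwl8k_mac80211_to_fw_queue(int queue)
{
	return MWL8K_TX_QUEUES - 1 - queue;
}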
@@ -3780,7 +4374,8 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
static int
mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
switch (action) {
case IEEE80211_AMPDU_RX_START:
@@ -3803,6 +4398,7 @@ static const struct ieee80211_ops mwl8k_ops = {
.bss_info_changed = mwl8k_bss_info_changed,
.prepare_multicast = mwl8k_prepare_multicast,
.configure_filter = mwl8k_configure_filter,
+ .set_key = mwl8k_set_key,
.set_rts_threshold = mwl8k_set_rts_threshold,
.sta_add = mwl8k_sta_add,
.sta_remove = mwl8k_sta_remove,
@@ -3838,21 +4434,27 @@ enum {
MWL8366,
};
+#define MWL8K_8366_AP_FW_API 1
+#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw"
+#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api)
+
static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
[MWL8363] = {
.part_name = "88w8363",
.helper_image = "mwl8k/helper_8363.fw",
- .fw_image = "mwl8k/fmimage_8363.fw",
+ .fw_image_sta = "mwl8k/fmimage_8363.fw",
},
[MWL8687] = {
.part_name = "88w8687",
.helper_image = "mwl8k/helper_8687.fw",
- .fw_image = "mwl8k/fmimage_8687.fw",
+ .fw_image_sta = "mwl8k/fmimage_8687.fw",
},
[MWL8366] = {
.part_name = "88w8366",
.helper_image = "mwl8k/helper_8366.fw",
- .fw_image = "mwl8k/fmimage_8366.fw",
+ .fw_image_sta = "mwl8k/fmimage_8366.fw",
+ .fw_image_ap = MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API),
+ .fw_api_ap = MWL8K_8366_AP_FW_API,
.ap_rxd_ops = &rxd_8366_ap_ops,
},
};
@@ -3863,6 +4465,7 @@ MODULE_FIRMWARE("mwl8k/helper_8687.fw");
MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
MODULE_FIRMWARE("mwl8k/helper_8366.fw");
MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
+MODULE_FIRMWARE(MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API));
static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
{ PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, },
@@ -3876,94 +4479,133 @@ static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
};
MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
-static int __devinit mwl8k_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int mwl8k_request_alt_fw(struct mwl8k_priv *priv)
{
- static int printed_version = 0;
- struct ieee80211_hw *hw;
- struct mwl8k_priv *priv;
int rc;
- int i;
-
- if (!printed_version) {
- printk(KERN_INFO "%s version %s\n", MWL8K_DESC, MWL8K_VERSION);
- printed_version = 1;
- }
-
-
- rc = pci_enable_device(pdev);
+ printk(KERN_ERR "%s: Error requesting preferred fw %s.\n"
+ "Trying alternative firmware %s\n", pci_name(priv->pdev),
+ priv->fw_pref, priv->fw_alt);
+ rc = mwl8k_request_fw(priv, priv->fw_alt, &priv->fw_ucode, true);
if (rc) {
- printk(KERN_ERR "%s: Cannot enable new PCI device\n",
- MWL8K_NAME);
+ printk(KERN_ERR "%s: Error requesting alt fw %s\n",
+ pci_name(priv->pdev), priv->fw_alt);
return rc;
}
+ return 0;
+}
- rc = pci_request_regions(pdev, MWL8K_NAME);
- if (rc) {
- printk(KERN_ERR "%s: Cannot obtain PCI resources\n",
- MWL8K_NAME);
- goto err_disable_device;
- }
-
- pci_set_master(pdev);
-
-
- hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
- if (hw == NULL) {
- printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
- rc = -ENOMEM;
- goto err_free_reg;
- }
+static int mwl8k_firmware_load_success(struct mwl8k_priv *priv);
+static void mwl8k_fw_state_machine(const struct firmware *fw, void *context)
+{
+ struct mwl8k_priv *priv = context;
+ struct mwl8k_device_info *di = priv->device_info;
+ int rc;
- SET_IEEE80211_DEV(hw, &pdev->dev);
- pci_set_drvdata(pdev, hw);
+ switch (priv->fw_state) {
+ case FW_STATE_INIT:
+ if (!fw) {
+ printk(KERN_ERR "%s: Error requesting helper fw %s\n",
+ pci_name(priv->pdev), di->helper_image);
+ goto fail;
+ }
+ priv->fw_helper = fw;
+ rc = mwl8k_request_fw(priv, priv->fw_pref, &priv->fw_ucode,
+ true);
+ if (rc && priv->fw_alt) {
+ rc = mwl8k_request_alt_fw(priv);
+ if (rc)
+ goto fail;
+ priv->fw_state = FW_STATE_LOADING_ALT;
+ } else if (rc)
+ goto fail;
+ else
+ priv->fw_state = FW_STATE_LOADING_PREF;
+ break;
- priv = hw->priv;
- priv->hw = hw;
- priv->pdev = pdev;
- priv->device_info = &mwl8k_info_tbl[id->driver_data];
+ case FW_STATE_LOADING_PREF:
+ if (!fw) {
+ if (priv->fw_alt) {
+ rc = mwl8k_request_alt_fw(priv);
+ if (rc)
+ goto fail;
+ priv->fw_state = FW_STATE_LOADING_ALT;
+ } else
+ goto fail;
+ } else {
+ priv->fw_ucode = fw;
+ rc = mwl8k_firmware_load_success(priv);
+ if (rc)
+ goto fail;
+ else
+ complete(&priv->firmware_loading_complete);
+ }
+ break;
+ case FW_STATE_LOADING_ALT:
+ if (!fw) {
+ printk(KERN_ERR "%s: Error requesting alt fw %s\n",
+ pci_name(priv->pdev), di->helper_image);
+ goto fail;
+ }
+ priv->fw_ucode = fw;
+ rc = mwl8k_firmware_load_success(priv);
+ if (rc)
+ goto fail;
+ else
+ complete(&priv->firmware_loading_complete);
+ break;
- priv->sram = pci_iomap(pdev, 0, 0x10000);
- if (priv->sram == NULL) {
- wiphy_err(hw->wiphy, "Cannot map device SRAM\n");
- goto err_iounmap;
+ default:
+ printk(KERN_ERR "%s: Unexpected firmware loading state: %d\n",
+ MWL8K_NAME, priv->fw_state);
+		BUG();
}
- /*
- * If BAR0 is a 32 bit BAR, the register BAR will be BAR1.
- * If BAR0 is a 64 bit BAR, the register BAR will be BAR2.
- */
- priv->regs = pci_iomap(pdev, 1, 0x10000);
- if (priv->regs == NULL) {
- priv->regs = pci_iomap(pdev, 2, 0x10000);
- if (priv->regs == NULL) {
- wiphy_err(hw->wiphy, "Cannot map device registers\n");
- goto err_iounmap;
- }
- }
+ return;
+fail:
+ priv->fw_state = FW_STATE_ERROR;
+ complete(&priv->firmware_loading_complete);
+ device_release_driver(&priv->pdev->dev);
+ mwl8k_release_firmware(priv);
+}
+
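A rough sketch of the state flow driven by the asynchronous firmware-loading callback above, as implied by the code rather than by any documented contract:

/*
 * FW_STATE_INIT          helper image arrived; request the preferred
 *                        ucode asynchronously, falling back to the
 *                        alternative image if the request fails
 * FW_STATE_LOADING_PREF  preferred ucode arrived -> finish probing,
 *                        otherwise retry with the alternative image
 * FW_STATE_LOADING_ALT   alternative ucode arrived -> finish probing
 * any failure            -> FW_STATE_ERROR, complete
 *                           firmware_loading_complete and detach the
 *                           driver from the device
 */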
+static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image,
+ bool nowait)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ int rc;
/* Reset firmware and hardware */
mwl8k_hw_reset(priv);
/* Ask userland hotplug daemon for the device firmware */
- rc = mwl8k_request_firmware(priv);
+ rc = mwl8k_request_firmware(priv, fw_image, nowait);
if (rc) {
wiphy_err(hw->wiphy, "Firmware files not found\n");
- goto err_stop_firmware;
+ return rc;
}
+ if (nowait)
+ return rc;
+
/* Load firmware into hardware */
rc = mwl8k_load_firmware(hw);
- if (rc) {
+ if (rc)
wiphy_err(hw->wiphy, "Cannot start firmware\n");
- goto err_stop_firmware;
- }
/* Reclaim memory once firmware is successfully loaded */
mwl8k_release_firmware(priv);
+ return rc;
+}
+
+/* initialize hw after successfully loading a firmware image */
+static int mwl8k_probe_hw(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ int rc = 0;
+ int i;
if (priv->ap_fw) {
priv->rxd_ops = priv->device_info->ap_rxd_ops;
@@ -3980,58 +4622,11 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
priv->wmm_enabled = false;
priv->pending_tx_pkts = 0;
-
- /*
- * Extra headroom is the size of the required DMA header
- * minus the size of the smallest 802.11 frame (CTS frame).
- */
- hw->extra_tx_headroom =
- sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts);
-
- hw->channel_change_time = 10;
-
- hw->queues = MWL8K_TX_QUEUES;
-
- /* Set rssi values to dBm */
- hw->flags |= IEEE80211_HW_SIGNAL_DBM;
- hw->vif_data_size = sizeof(struct mwl8k_vif);
- hw->sta_data_size = sizeof(struct mwl8k_sta);
-
- priv->macids_used = 0;
- INIT_LIST_HEAD(&priv->vif_list);
-
- /* Set default radio state and preamble */
- priv->radio_on = 0;
- priv->radio_short_preamble = 0;
-
- /* Finalize join worker */
- INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
-
- /* TX reclaim and RX tasklets. */
- tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
- tasklet_disable(&priv->poll_tx_task);
- tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
- tasklet_disable(&priv->poll_rx_task);
-
- /* Power management cookie */
- priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
- if (priv->cookie == NULL)
- goto err_stop_firmware;
-
rc = mwl8k_rxq_init(hw, 0);
if (rc)
- goto err_free_cookie;
+ goto err_stop_firmware;
rxq_refill(hw, 0, INT_MAX);
- mutex_init(&priv->fw_mutex);
- priv->fw_mutex_owner = NULL;
- priv->fw_mutex_depth = 0;
- priv->hostcmd_wait = NULL;
-
- spin_lock_init(&priv->tx_lock);
-
- priv->tx_wait = NULL;
-
for (i = 0; i < MWL8K_TX_QUEUES; i++) {
rc = mwl8k_txq_init(hw, i);
if (rc)
@@ -4071,13 +4666,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
goto err_free_irq;
}
- hw->wiphy->interface_modes = 0;
- if (priv->ap_macids_supported)
- hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
- if (priv->sta_macids_supported)
- hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
-
-
/* Turn radio off */
rc = mwl8k_cmd_radio_disable(hw);
if (rc) {
@@ -4096,12 +4684,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
- rc = ieee80211_register_hw(hw);
- if (rc) {
- wiphy_err(hw->wiphy, "Cannot register device\n");
- goto err_free_queues;
- }
-
wiphy_info(hw->wiphy, "%s v%d, %pm, %s firmware %u.%u.%u.%u\n",
priv->device_info->part_name,
priv->hw_rev, hw->wiphy->perm_addr,
@@ -4120,14 +4702,238 @@ err_free_queues:
mwl8k_txq_deinit(hw, i);
mwl8k_rxq_deinit(hw, 0);
+err_stop_firmware:
+ mwl8k_hw_reset(priv);
+
+ return rc;
+}
+
+/*
+ * invoke mwl8k_reload_firmware to change the firmware image after the device
+ * has already been registered
+ */
+static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
+{
+ int i, rc = 0;
+ struct mwl8k_priv *priv = hw->priv;
+
+ mwl8k_stop(hw);
+ mwl8k_rxq_deinit(hw, 0);
+
+ for (i = 0; i < MWL8K_TX_QUEUES; i++)
+ mwl8k_txq_deinit(hw, i);
+
+ rc = mwl8k_init_firmware(hw, fw_image, false);
+ if (rc)
+ goto fail;
+
+ rc = mwl8k_probe_hw(hw);
+ if (rc)
+ goto fail;
+
+ rc = mwl8k_start(hw);
+ if (rc)
+ goto fail;
+
+ rc = mwl8k_config(hw, ~0);
+ if (rc)
+ goto fail;
+
+ for (i = 0; i < MWL8K_TX_QUEUES; i++) {
+ rc = mwl8k_conf_tx(hw, i, &priv->wmm_params[i]);
+ if (rc)
+ goto fail;
+ }
+
+ return rc;
+
+fail:
+ printk(KERN_WARNING "mwl8k: Failed to reload firmware image.\n");
+ return rc;
+}
+
+static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
+{
+ struct ieee80211_hw *hw = priv->hw;
+ int i, rc;
+
+ rc = mwl8k_load_firmware(hw);
+ mwl8k_release_firmware(priv);
+ if (rc) {
+ wiphy_err(hw->wiphy, "Cannot start firmware\n");
+ return rc;
+ }
+
+ /*
+ * Extra headroom is the size of the required DMA header
+ * minus the size of the smallest 802.11 frame (CTS frame).
+ */
+ hw->extra_tx_headroom =
+ sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts);
+
+ hw->channel_change_time = 10;
+
+ hw->queues = MWL8K_TX_QUEUES;
+
+ /* Set rssi values to dBm */
+ hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL;
+ hw->vif_data_size = sizeof(struct mwl8k_vif);
+ hw->sta_data_size = sizeof(struct mwl8k_sta);
+
+ priv->macids_used = 0;
+ INIT_LIST_HEAD(&priv->vif_list);
+
+ /* Set default radio state and preamble */
+ priv->radio_on = 0;
+ priv->radio_short_preamble = 0;
+
+ /* Finalize join worker */
+ INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
+
+ /* TX reclaim and RX tasklets. */
+ tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
+ tasklet_disable(&priv->poll_rx_task);
+
+ /* Power management cookie */
+ priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
+ if (priv->cookie == NULL)
+ return -ENOMEM;
+
+ mutex_init(&priv->fw_mutex);
+ priv->fw_mutex_owner = NULL;
+ priv->fw_mutex_depth = 0;
+ priv->hostcmd_wait = NULL;
+
+ spin_lock_init(&priv->tx_lock);
+
+ priv->tx_wait = NULL;
+
+ rc = mwl8k_probe_hw(hw);
+ if (rc)
+ goto err_free_cookie;
+
+ hw->wiphy->interface_modes = 0;
+ if (priv->ap_macids_supported || priv->device_info->fw_image_ap)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
+ if (priv->sta_macids_supported || priv->device_info->fw_image_sta)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
+
+ rc = ieee80211_register_hw(hw);
+ if (rc) {
+ wiphy_err(hw->wiphy, "Cannot register device\n");
+ goto err_unprobe_hw;
+ }
+
+ return 0;
+
+err_unprobe_hw:
+ for (i = 0; i < MWL8K_TX_QUEUES; i++)
+ mwl8k_txq_deinit(hw, i);
+ mwl8k_rxq_deinit(hw, 0);
+
err_free_cookie:
if (priv->cookie != NULL)
pci_free_consistent(priv->pdev, 4,
priv->cookie, priv->cookie_dma);
+ return rc;
+}
+static int __devinit mwl8k_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ static int printed_version;
+ struct ieee80211_hw *hw;
+ struct mwl8k_priv *priv;
+ struct mwl8k_device_info *di;
+ int rc;
+
+ if (!printed_version) {
+ printk(KERN_INFO "%s version %s\n", MWL8K_DESC, MWL8K_VERSION);
+ printed_version = 1;
+ }
+
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ printk(KERN_ERR "%s: Cannot enable new PCI device\n",
+ MWL8K_NAME);
+ return rc;
+ }
+
+ rc = pci_request_regions(pdev, MWL8K_NAME);
+ if (rc) {
+ printk(KERN_ERR "%s: Cannot obtain PCI resources\n",
+ MWL8K_NAME);
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+
+ hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
+ if (hw == NULL) {
+ printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
+ rc = -ENOMEM;
+ goto err_free_reg;
+ }
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+ pci_set_drvdata(pdev, hw);
+
+ priv = hw->priv;
+ priv->hw = hw;
+ priv->pdev = pdev;
+ priv->device_info = &mwl8k_info_tbl[id->driver_data];
+
+
+ priv->sram = pci_iomap(pdev, 0, 0x10000);
+ if (priv->sram == NULL) {
+ wiphy_err(hw->wiphy, "Cannot map device SRAM\n");
+ goto err_iounmap;
+ }
+
+ /*
+ * If BAR0 is a 32 bit BAR, the register BAR will be BAR1.
+ * If BAR0 is a 64 bit BAR, the register BAR will be BAR2.
+ */
+ priv->regs = pci_iomap(pdev, 1, 0x10000);
+ if (priv->regs == NULL) {
+ priv->regs = pci_iomap(pdev, 2, 0x10000);
+ if (priv->regs == NULL) {
+ wiphy_err(hw->wiphy, "Cannot map device registers\n");
+ goto err_iounmap;
+ }
+ }
+
+ /*
+ * Choose the initial fw image depending on user input. If a second
+ * image is available, make it the alternative image that will be
+ * loaded if the first one fails.
+ */
+ init_completion(&priv->firmware_loading_complete);
+ di = priv->device_info;
+ if (ap_mode_default && di->fw_image_ap) {
+ priv->fw_pref = di->fw_image_ap;
+ priv->fw_alt = di->fw_image_sta;
+ } else if (!ap_mode_default && di->fw_image_sta) {
+ priv->fw_pref = di->fw_image_sta;
+ priv->fw_alt = di->fw_image_ap;
+ } else if (ap_mode_default && !di->fw_image_ap && di->fw_image_sta) {
+		printk(KERN_WARNING "AP fw is unavailable. Using STA fw.\n");
+ priv->fw_pref = di->fw_image_sta;
+ } else if (!ap_mode_default && !di->fw_image_sta && di->fw_image_ap) {
+		printk(KERN_WARNING "STA fw is unavailable. Using AP fw.\n");
+ priv->fw_pref = di->fw_image_ap;
+ }
+ rc = mwl8k_init_firmware(hw, priv->fw_pref, true);
+ if (rc)
+ goto err_stop_firmware;
+ return rc;
+
err_stop_firmware:
mwl8k_hw_reset(priv);
- mwl8k_release_firmware(priv);
err_iounmap:
if (priv->regs != NULL)
@@ -4163,6 +4969,13 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
return;
priv = hw->priv;
+ wait_for_completion(&priv->firmware_loading_complete);
+
+ if (priv->fw_state == FW_STATE_ERROR) {
+ mwl8k_hw_reset(priv);
+ goto unmap;
+ }
+
ieee80211_stop_queues(hw);
ieee80211_unregister_hw(hw);
@@ -4185,6 +4998,7 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma);
+unmap:
pci_iounmap(pdev, priv->regs);
pci_iounmap(pdev, priv->sram);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index e8e2d0f4763d..f3d396e7544b 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -1392,10 +1392,9 @@ static void orinoco_process_scan_results(struct work_struct *work)
orinoco_add_hostscan_results(priv, buf, len);
kfree(buf);
- } else if (priv->scan_request) {
+ } else {
/* Either abort or complete the scan */
- cfg80211_scan_done(priv->scan_request, (len < 0));
- priv->scan_request = NULL;
+ orinoco_scan_done(priv, (len < 0));
}
spin_lock_irqsave(&priv->scan_lock, flags);
@@ -1684,6 +1683,8 @@ static int __orinoco_down(struct orinoco_private *priv)
hermes_write_regn(hw, EVACK, 0xffff);
}
+ orinoco_scan_done(priv, true);
+
/* firmware will have to reassociate */
netif_carrier_off(dev);
priv->last_linkstatus = 0xffff;
@@ -1762,10 +1763,7 @@ void orinoco_reset(struct work_struct *work)
orinoco_unlock(priv, &flags);
/* Scanning support: Notify scan cancellation */
- if (priv->scan_request) {
- cfg80211_scan_done(priv->scan_request, 1);
- priv->scan_request = NULL;
- }
+ orinoco_scan_done(priv, true);
if (priv->hard_reset) {
err = (*priv->hard_reset)(priv);
@@ -1813,6 +1811,12 @@ static int __orinoco_commit(struct orinoco_private *priv)
struct net_device *dev = priv->ndev;
int err = 0;
+ /* If we've called commit, we are reconfiguring or bringing the
+ * interface up. Maintaining countermeasures across this would
+ * be confusing, so note that we've disabled them. The port will
+ * be enabled later in orinoco_commit or __orinoco_up. */
+ priv->tkip_cm_active = 0;
+
err = orinoco_hw_program_rids(priv);
/* FIXME: what about netif_tx_lock */
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 71b3d68b9403..32954c4b243a 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -151,20 +151,20 @@ orinoco_cs_config(struct pcmcia_device *link)
goto failed;
}
- ret = pcmcia_request_irq(link, orinoco_interrupt);
- if (ret)
- goto failed;
-
- /* We initialize the hermes structure before completing PCMCIA
- * configuration just in case the interrupt handler gets
- * called. */
mem = ioport_map(link->resource[0]->start,
resource_size(link->resource[0]));
if (!mem)
goto failed;
+ /* We initialize the hermes structure before completing PCMCIA
+ * configuration just in case the interrupt handler gets
+ * called. */
hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
+ ret = pcmcia_request_irq(link, orinoco_interrupt);
+ if (ret)
+ goto failed;
+
ret = pcmcia_enable_device(link);
if (ret)
goto failed;
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index 4300d9db7d8c..e99ca1c1e0d8 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -111,6 +111,11 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel));
channel = ieee80211_get_channel(wiphy, freq);
+ if (!channel) {
+		printk(KERN_DEBUG "Invalid channel designation %04X(%04X)\n",
+		       le16_to_cpu(bss->a.channel), freq);
+ return; /* Then ignore it for now */
+ }
timestamp = 0;
capability = le16_to_cpu(bss->a.capabilities);
beacon_interval = le16_to_cpu(bss->a.beacon_interv);
@@ -229,3 +234,11 @@ void orinoco_add_hostscan_results(struct orinoco_private *priv,
priv->scan_request = NULL;
}
}
+
+void orinoco_scan_done(struct orinoco_private *priv, bool abort)
+{
+ if (priv->scan_request) {
+ cfg80211_scan_done(priv->scan_request, abort);
+ priv->scan_request = NULL;
+ }
+}
diff --git a/drivers/net/wireless/orinoco/scan.h b/drivers/net/wireless/orinoco/scan.h
index 2dc4e046dbdb..27281fb0a6dc 100644
--- a/drivers/net/wireless/orinoco/scan.h
+++ b/drivers/net/wireless/orinoco/scan.h
@@ -16,5 +16,6 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
void orinoco_add_hostscan_results(struct orinoco_private *dev,
unsigned char *buf,
size_t len);
+void orinoco_scan_done(struct orinoco_private *priv, bool abort);
#endif /* _ORINOCO_SCAN_H_ */
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index fb859a5ad2eb..db34c282e59b 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -214,21 +214,21 @@ spectrum_cs_config(struct pcmcia_device *link)
goto failed;
}
- ret = pcmcia_request_irq(link, orinoco_interrupt);
- if (ret)
- goto failed;
-
- /* We initialize the hermes structure before completing PCMCIA
- * configuration just in case the interrupt handler gets
- * called. */
mem = ioport_map(link->resource[0]->start,
resource_size(link->resource[0]));
if (!mem)
goto failed;
+ /* We initialize the hermes structure before completing PCMCIA
+ * configuration just in case the interrupt handler gets
+ * called. */
hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
hw->eeprom_pda = true;
+ ret = pcmcia_request_irq(link, orinoco_interrupt);
+ if (ret)
+ goto failed;
+
ret = pcmcia_enable_device(link);
if (ret)
goto failed;
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index 93505f93bf97..e793679e2e19 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -893,6 +893,14 @@ static int orinoco_ioctl_set_auth(struct net_device *dev,
*/
break;
+ case IW_AUTH_MFP:
+ /* Management Frame Protection not supported.
+ * Only fail if set to required.
+ */
+ if (param->value == IW_AUTH_MFP_REQUIRED)
+ ret = -EINVAL;
+ break;
+
case IW_AUTH_KEY_MGMT:
/* wl_lkm implies value 2 == PSK for Hermes I
* which ties in with WEXT
@@ -911,10 +919,10 @@ static int orinoco_ioctl_set_auth(struct net_device *dev,
*/
if (param->value) {
priv->tkip_cm_active = 1;
- ret = hermes_enable_port(hw, 0);
+ ret = hermes_disable_port(hw, 0);
} else {
priv->tkip_cm_active = 0;
- ret = hermes_disable_port(hw, 0);
+ ret = hermes_enable_port(hw, 0);
}
break;
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index 25f965ffc889..0ec55b50798e 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -43,9 +43,8 @@ config P54_SPI
tristate "Prism54 SPI (stlc45xx) support"
depends on P54_COMMON && SPI_MASTER && GENERIC_HARDIRQS
---help---
- This driver is for stlc4550 or stlc4560 based wireless chips.
- This driver is experimental, untested and will probably only work on
- Nokia's N800/N810 Portable Internet Tablet.
+ This driver is for stlc4550 or stlc4560 based wireless chips
+	  such as the ones found in Nokia's N800/N810 Portable Internet Tablet.
If you choose to build a module, it'll be called p54spi.
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 35b09aa0529b..13d750da9301 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -55,6 +55,17 @@ static struct ieee80211_rate p54_arates[] = {
{ .bitrate = 540, .hw_value = 11, },
};
+static struct p54_rssi_db_entry p54_rssi_default = {
+ /*
+ * The defaults are taken from usb-logs of the
+ * vendor driver. So, they should be safe to
+ * use in case we can't get a match from the
+ * rssi <-> dBm conversion database.
+ */
+ .mul = 130,
+ .add = -398,
+};
+
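These defaults feed the rssi to dBm conversion used later in txrx.c, dBm = ((rssi * mul) / 64 + add) / 4 in integer arithmetic. A worked example with the default pair above, assuming a raw hardware rssi of 40:

/*
 * (40 * 130) / 64   = 81
 * (81 + (-398)) / 4 = -79      => about -79 dBm
 */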
#define CHAN_HAS_CAL BIT(0)
#define CHAN_HAS_LIMIT BIT(1)
#define CHAN_HAS_CURVE BIT(2)
@@ -87,13 +98,27 @@ static int p54_get_band_from_freq(u16 freq)
return -1;
}
+static int same_band(u16 freq, u16 freq2)
+{
+ return p54_get_band_from_freq(freq) == p54_get_band_from_freq(freq2);
+}
+
static int p54_compare_channels(const void *_a,
const void *_b)
{
const struct p54_channel_entry *a = _a;
const struct p54_channel_entry *b = _b;
- return a->index - b->index;
+ return a->freq - b->freq;
+}
+
+static int p54_compare_rssichan(const void *_a,
+ const void *_b)
+{
+ const struct p54_rssi_db_entry *a = _a;
+ const struct p54_rssi_db_entry *b = _b;
+
+ return a->freq - b->freq;
}
static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
@@ -145,25 +170,26 @@ static int p54_generate_band(struct ieee80211_hw *dev,
for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
(i < list->entries); i++) {
+ struct p54_channel_entry *chan = &list->channels[i];
- if (list->channels[i].band != band)
+ if (chan->band != band)
continue;
- if (list->channels[i].data != CHAN_HAS_ALL) {
- wiphy_err(dev->wiphy,
- "%s%s%s is/are missing for channel:%d [%d MHz].\n",
- (list->channels[i].data & CHAN_HAS_CAL ? "" :
+ if (chan->data != CHAN_HAS_ALL) {
+ wiphy_err(dev->wiphy, "%s%s%s is/are missing for "
+ "channel:%d [%d MHz].\n",
+ (chan->data & CHAN_HAS_CAL ? "" :
" [iqauto calibration data]"),
- (list->channels[i].data & CHAN_HAS_LIMIT ? "" :
+ (chan->data & CHAN_HAS_LIMIT ? "" :
" [output power limits]"),
- (list->channels[i].data & CHAN_HAS_CURVE ? "" :
+ (chan->data & CHAN_HAS_CURVE ? "" :
" [curve data]"),
- list->channels[i].index, list->channels[i].freq);
+ chan->index, chan->freq);
continue;
}
- tmp->channels[j].band = list->channels[i].band;
- tmp->channels[j].center_freq = list->channels[i].freq;
+ tmp->channels[j].band = chan->band;
+ tmp->channels[j].center_freq = chan->freq;
j++;
}
@@ -291,7 +317,7 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
}
}
- /* sort the list by the channel index */
+ /* sort the channel list by frequency */
sort(list->channels, list->entries, sizeof(struct p54_channel_entry),
p54_compare_channels, NULL);
@@ -410,33 +436,121 @@ static int p54_convert_rev1(struct ieee80211_hw *dev,
static const char *p54_rf_chips[] = { "INVALID-0", "Duette3", "Duette2",
"Frisbee", "Xbow", "Longbow", "INVALID-6", "INVALID-7" };
-static void p54_parse_rssical(struct ieee80211_hw *dev, void *data, int len,
- u16 type)
+static int p54_parse_rssical(struct ieee80211_hw *dev,
+ u8 *data, int len, u16 type)
{
struct p54_common *priv = dev->priv;
- int offset = (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) ? 2 : 0;
- int entry_size = sizeof(struct pda_rssi_cal_entry) + offset;
- int num_entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2;
- int i;
+ struct p54_rssi_db_entry *entry;
+ size_t db_len, entries;
+ int offset = 0, i;
+
+ if (type != PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
+ entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2;
+ if (len != sizeof(struct pda_rssi_cal_entry) * entries) {
+ wiphy_err(dev->wiphy, "rssical size mismatch.\n");
+ goto err_data;
+ }
+ } else {
+ /*
+ * Some devices (Dell 1450 USB, Xbow 5GHz card, etc...)
+ * have an empty two byte header.
+ */
+ if (*((__le16 *)&data[offset]) == cpu_to_le16(0))
+ offset += 2;
- if (len != (entry_size * num_entries)) {
- wiphy_err(dev->wiphy,
- "unknown rssi calibration data packing type:(%x) len:%d.\n",
- type, len);
+ entries = (len - offset) /
+ sizeof(struct pda_rssi_cal_ext_entry);
- print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE,
- data, len);
+ if ((len - offset) % sizeof(struct pda_rssi_cal_ext_entry) ||
+ entries <= 0) {
+ wiphy_err(dev->wiphy, "invalid rssi database.\n");
+ goto err_data;
+ }
+ }
- wiphy_err(dev->wiphy, "please report this issue.\n");
- return;
+ db_len = sizeof(*entry) * entries;
+ priv->rssi_db = kzalloc(db_len + sizeof(*priv->rssi_db), GFP_KERNEL);
+ if (!priv->rssi_db)
+ return -ENOMEM;
+
+ priv->rssi_db->offset = 0;
+ priv->rssi_db->entries = entries;
+ priv->rssi_db->entry_size = sizeof(*entry);
+ priv->rssi_db->len = db_len;
+
+ entry = (void *)((unsigned long)priv->rssi_db->data + priv->rssi_db->offset);
+ if (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
+ struct pda_rssi_cal_ext_entry *cal = (void *) &data[offset];
+
+ for (i = 0; i < entries; i++) {
+ entry[i].freq = le16_to_cpu(cal[i].freq);
+ entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
+ entry[i].add = (s16) le16_to_cpu(cal[i].add);
+ }
+ } else {
+ struct pda_rssi_cal_entry *cal = (void *) &data[offset];
+
+ for (i = 0; i < entries; i++) {
+ u16 freq;
+ switch (i) {
+ case IEEE80211_BAND_2GHZ:
+ freq = 2437;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ freq = 5240;
+ break;
+ }
+
+ entry[i].freq = freq;
+ entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
+ entry[i].add = (s16) le16_to_cpu(cal[i].add);
+ }
}
- for (i = 0; i < num_entries; i++) {
- struct pda_rssi_cal_entry *cal = data +
- (offset + i * entry_size);
- priv->rssical_db[i].mul = (s16) le16_to_cpu(cal->mul);
- priv->rssical_db[i].add = (s16) le16_to_cpu(cal->add);
+ /* sort the list by channel frequency */
+ sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL);
+ return 0;
+
+err_data:
+ wiphy_err(dev->wiphy,
+ "rssi calibration data packing type:(%x) len:%d.\n",
+ type, len);
+
+ print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, data, len);
+
+ wiphy_err(dev->wiphy, "please report this issue.\n");
+ return -EINVAL;
+}
+
+struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *priv, const u16 freq)
+{
+ struct p54_rssi_db_entry *entry;
+ int i, found = -1;
+
+ if (!priv->rssi_db)
+ return &p54_rssi_default;
+
+ entry = (void *)(priv->rssi_db->data + priv->rssi_db->offset);
+ for (i = 0; i < priv->rssi_db->entries; i++) {
+ if (!same_band(freq, entry[i].freq))
+ continue;
+
+ if (found == -1) {
+ found = i;
+ continue;
+ }
+
+ /* nearest match */
+ if (abs(freq - entry[i].freq) <
+ abs(freq - entry[found].freq)) {
+ found = i;
+ continue;
+ } else {
+ break;
+ }
}
+
+ return found < 0 ? &p54_rssi_default : &entry[found];
}
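Because the database is sorted by frequency at parse time, the loop above can stop as soon as the distance to the requested frequency starts growing again. A hypothetical call site (channel 6, 2437 MHz); the fallback to p54_rssi_default covers EEPROMs that carry no rssi database at all:

struct p54_rssi_db_entry *cal = p54_rssi_find(priv, 2437);
/* cal->mul / cal->add now describe the nearest same-band entry */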
static void p54_parse_default_country(struct ieee80211_hw *dev,
@@ -627,21 +741,30 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
case PDR_RSSI_LINEAR_APPROXIMATION:
case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND:
case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED:
- p54_parse_rssical(dev, entry->data, data_len,
- le16_to_cpu(entry->code));
+ err = p54_parse_rssical(dev, entry->data, data_len,
+ le16_to_cpu(entry->code));
+ if (err)
+ goto err;
break;
- case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM: {
- __le16 *src = (void *) entry->data;
- s16 *dst = (void *) &priv->rssical_db;
+ case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2: {
+ struct pda_custom_wrapper *pda = (void *) entry->data;
+ __le16 *src;
+ u16 *dst;
int i;
- if (data_len != sizeof(priv->rssical_db)) {
- err = -EINVAL;
- goto err;
- }
- for (i = 0; i < sizeof(priv->rssical_db) /
- sizeof(*src); i++)
+ if (priv->rssi_db || data_len < sizeof(*pda))
+ break;
+
+ priv->rssi_db = p54_convert_db(pda, data_len);
+ if (!priv->rssi_db)
+ break;
+
+ src = (void *) priv->rssi_db->data;
+ dst = (void *) priv->rssi_db->data;
+
+ for (i = 0; i < priv->rssi_db->entries; i++)
*(dst++) = (s16) le16_to_cpu(*(src++));
+
}
break;
case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: {
@@ -717,6 +840,8 @@ good_eeprom:
SET_IEEE80211_PERM_ADDR(dev, perm_addr);
}
+ priv->cur_rssi = &p54_rssi_default;
+
wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n",
dev->wiphy->perm_addr, priv->version,
p54_rf_chips[priv->rxhw]);
@@ -727,9 +852,11 @@ err:
kfree(priv->iq_autocal);
kfree(priv->output_limit);
kfree(priv->curve_data);
+ kfree(priv->rssi_db);
priv->iq_autocal = NULL;
priv->output_limit = NULL;
priv->curve_data = NULL;
+ priv->rssi_db = NULL;
wiphy_err(dev->wiphy, "eeprom parse failed!\n");
return err;
diff --git a/drivers/net/wireless/p54/eeprom.h b/drivers/net/wireless/p54/eeprom.h
index 9051aef11249..afde72b84606 100644
--- a/drivers/net/wireless/p54/eeprom.h
+++ b/drivers/net/wireless/p54/eeprom.h
@@ -81,6 +81,12 @@ struct pda_pa_curve_data {
u8 data[0];
} __packed;
+struct pda_rssi_cal_ext_entry {
+ __le16 freq;
+ __le16 mul;
+ __le16 add;
+} __packed;
+
struct pda_rssi_cal_entry {
__le16 mul;
__le16 add;
@@ -179,6 +185,7 @@ struct pda_custom_wrapper {
/* used by our modificated eeprom image */
#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM 0xDEAD
+#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2 0xCAFF
#define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM 0xBEEF
#define PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM 0xB05D
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 92b9b1f05fd5..2fab7d20ffc2 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -397,9 +397,9 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
union p54_scan_body_union *body;
struct p54_scan_tail_rate *rate;
struct pda_rssi_cal_entry *rssi;
+ struct p54_rssi_db_entry *rssi_data;
unsigned int i;
void *entry;
- int band = priv->hw->conf.channel->band;
__le16 freq = cpu_to_le16(priv->hw->conf.channel->center_freq);
skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) +
@@ -503,13 +503,14 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
}
rssi = (struct pda_rssi_cal_entry *) skb_put(skb, sizeof(*rssi));
- rssi->mul = cpu_to_le16(priv->rssical_db[band].mul);
- rssi->add = cpu_to_le16(priv->rssical_db[band].add);
+ rssi_data = p54_rssi_find(priv, le16_to_cpu(freq));
+ rssi->mul = cpu_to_le16(rssi_data->mul);
+ rssi->add = cpu_to_le16(rssi_data->add);
if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
/* Longbow frontend needs ever more */
rssi = (void *) skb_put(skb, sizeof(*rssi));
- rssi->mul = cpu_to_le16(priv->rssical_db[band].longbow_unkn);
- rssi->add = cpu_to_le16(priv->rssical_db[band].longbow_unk2);
+ rssi->mul = cpu_to_le16(rssi_data->longbow_unkn);
+ rssi->add = cpu_to_le16(rssi_data->longbow_unk2);
}
if (priv->fw_var >= 0x509) {
@@ -523,6 +524,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
p54_tx(priv, skb);
+ priv->cur_rssi = rssi_data;
return 0;
err:
@@ -557,6 +559,7 @@ int p54_set_edcf(struct p54_common *priv)
{
struct sk_buff *skb;
struct p54_edcf *edcf;
+ u8 rtd;
skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf),
P54_CONTROL_TYPE_DCFINIT, GFP_ATOMIC);
@@ -573,9 +576,15 @@ int p54_set_edcf(struct p54_common *priv)
edcf->sifs = 0x0a;
edcf->eofpad = 0x06;
}
+ /*
+ * calculate the extra round trip delay according to the
+ * formula from 802.11-2007 17.3.8.6.
+ */
+ rtd = 3 * priv->coverage_class;
+ edcf->slottime += rtd;
+ edcf->round_trip_delay = cpu_to_le16(rtd);
/* (see prism54/isl_oid.h for further details) */
edcf->frameburst = cpu_to_le16(0);
- edcf->round_trip_delay = cpu_to_le16(0);
edcf->flags = 0;
memset(edcf->mapping, 0, sizeof(edcf->mapping));
memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue));
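Per the 802.11-2007 17.3.8.6 rule referenced above, each coverage class step adds 3 us of air propagation allowance, which the patch folds into the slot time and also reports to the firmware. A worked example, assuming coverage_class = 5 and a 9 us base slot time (both values picked for illustration):

/*
 * rtd      = 3 * 5  = 15 us
 * slottime = 9 + 15 = 24 us
 * edcf->round_trip_delay = cpu_to_le16(15)
 */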
diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h
index 04b63ec80fa4..eb581abc1079 100644
--- a/drivers/net/wireless/p54/lmac.h
+++ b/drivers/net/wireless/p54/lmac.h
@@ -526,7 +526,7 @@ int p54_init_leds(struct p54_common *priv);
void p54_unregister_leds(struct p54_common *priv);
/* xmit functions */
-int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
+void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb);
int p54_tx_cancel(struct p54_common *priv, __le32 req_id);
void p54_tx(struct p54_common *priv, struct sk_buff *skb);
@@ -551,6 +551,7 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot,
/* eeprom */
int p54_download_eeprom(struct p54_common *priv, void *buf,
u16 offset, u16 len);
+struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *p, const u16 freq);
/* utility */
u8 *p54_find_ie(struct sk_buff *skb, u8 ie);
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 622d27b6d8f2..356e6bb443a6 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -157,7 +157,7 @@ static int p54_beacon_update(struct p54_common *priv,
* to cancel the old beacon template by hand, instead the firmware
* will release the previous one through the feedback mechanism.
*/
- WARN_ON(p54_tx_80211(priv->hw, beacon));
+ p54_tx_80211(priv->hw, beacon);
priv->tsf_high32 = 0;
priv->tsf_low32 = 0;
@@ -524,6 +524,59 @@ static int p54_get_survey(struct ieee80211_hw *dev, int idx,
return 0;
}
+static unsigned int p54_flush_count(struct p54_common *priv)
+{
+ unsigned int total = 0, i;
+
+ BUILD_BUG_ON(P54_QUEUE_NUM > ARRAY_SIZE(priv->tx_stats));
+
+ /*
+ * Because the firmware has the sole control over any frames
+ * in the P54_QUEUE_BEACON or P54_QUEUE_SCAN queues, they
+ * don't really count as pending or active.
+ */
+ for (i = P54_QUEUE_MGMT; i < P54_QUEUE_NUM; i++)
+ total += priv->tx_stats[i].len;
+ return total;
+}
+
+static void p54_flush(struct ieee80211_hw *dev, bool drop)
+{
+ struct p54_common *priv = dev->priv;
+ unsigned int total, i;
+
+ /*
+ * Currently, it wouldn't really matter if we wait for one second
+	 * or 15 minutes. But once someone gets around to completing the
+	 * TODOs [cancel stuck frames / reset device] in p54_work, it will
+ * suddenly make sense to wait that long.
+ */
+ i = P54_STATISTICS_UPDATE * 2 / 20;
+
+ /*
+ * In this case no locking is required because as we speak the
+ * queues have already been stopped and no new frames can sneak
+ * up from behind.
+ */
+	while ((total = p54_flush_count(priv)) && i--) {
+ /* waste time */
+ msleep(20);
+ }
+
+ WARN(total, "tx flush timeout, unresponsive firmware");
+}
+
+static void p54_set_coverage_class(struct ieee80211_hw *dev, u8 coverage_class)
+{
+ struct p54_common *priv = dev->priv;
+
+ mutex_lock(&priv->conf_mutex);
+ /* support all coverage class values as in 802.11-2007 Table 7-27 */
+ priv->coverage_class = clamp_t(u8, coverage_class, 0, 31);
+ p54_set_edcf(priv);
+ mutex_unlock(&priv->conf_mutex);
+}
+
static const struct ieee80211_ops p54_ops = {
.tx = p54_tx_80211,
.start = p54_start,
@@ -536,11 +589,13 @@ static const struct ieee80211_ops p54_ops = {
.sta_remove = p54_sta_add_remove,
.set_key = p54_set_key,
.config = p54_config,
+ .flush = p54_flush,
.bss_info_changed = p54_bss_info_changed,
.configure_filter = p54_configure_filter,
.conf_tx = p54_conf_tx,
.get_stats = p54_get_stats,
.get_survey = p54_get_survey,
+ .set_coverage_class = p54_set_coverage_class,
};
struct ieee80211_hw *p54_init_common(size_t priv_data_len)
@@ -611,7 +666,7 @@ EXPORT_SYMBOL_GPL(p54_init_common);
int p54_register_common(struct ieee80211_hw *dev, struct device *pdev)
{
- struct p54_common *priv = dev->priv;
+ struct p54_common __maybe_unused *priv = dev->priv;
int err;
err = ieee80211_register_hw(dev);
@@ -642,10 +697,12 @@ void p54_free_common(struct ieee80211_hw *dev)
kfree(priv->iq_autocal);
kfree(priv->output_limit);
kfree(priv->curve_data);
+ kfree(priv->rssi_db);
kfree(priv->used_rxkeys);
priv->iq_autocal = NULL;
priv->output_limit = NULL;
priv->curve_data = NULL;
+ priv->rssi_db = NULL;
priv->used_rxkeys = NULL;
ieee80211_free_hw(dev);
}
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 43a3b2ead81a..50730fc23fe5 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -116,7 +116,8 @@ struct p54_edcf_queue_param {
__le16 txop;
} __packed;
-struct p54_rssi_linear_approximation {
+struct p54_rssi_db_entry {
+ u16 freq;
s16 mul;
s16 add;
s16 longbow_unkn;
@@ -197,13 +198,14 @@ struct p54_common {
u8 rx_diversity_mask;
u8 tx_diversity_mask;
unsigned int output_power;
+ struct p54_rssi_db_entry *cur_rssi;
int noise;
/* calibration, output power limit and rssi<->dBm conversation data */
struct pda_iq_autocal_entry *iq_autocal;
unsigned int iq_autocal_len;
struct p54_cal_database *curve_data;
struct p54_cal_database *output_limit;
- struct p54_rssi_linear_approximation rssical_db[IEEE80211_NUM_BANDS];
+ struct p54_cal_database *rssi_db;
struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS];
/* BBP/MAC state */
@@ -215,6 +217,7 @@ struct p54_common {
u32 tsf_low32, tsf_high32;
u32 basic_rate_mask;
u16 aid;
+ u8 coverage_class;
bool powersave_override;
__le32 beacon_req_id;
struct completion beacon_comp;
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1eacba4daa5b..0494d7b102d4 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
while (i != idx) {
u16 len;
struct sk_buff *skb;
+ dma_addr_t dma_addr;
desc = &ring[i];
len = le16_to_cpu(desc->len);
skb = rx_buf[i];
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
len = priv->common.rx_mtu;
}
+ dma_addr = le32_to_cpu(desc->host_addr);
+ pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
skb_put(skb, len);
if (p54_rx(dev, skb)) {
- pci_unmap_single(priv->pdev,
- le32_to_cpu(desc->host_addr),
- priv->common.rx_mtu + 32,
- PCI_DMA_FROMDEVICE);
+ pci_unmap_single(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
rx_buf[i] = NULL;
- desc->host_addr = 0;
+ desc->host_addr = cpu_to_le32(0);
} else {
skb_trim(skb, 0);
+ pci_dma_sync_single_for_device(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
}
diff --git a/drivers/net/wireless/p54/p54spi_eeprom.h b/drivers/net/wireless/p54/p54spi_eeprom.h
index d592cbd34d78..0b7bfb0adcf2 100644
--- a/drivers/net/wireless/p54/p54spi_eeprom.h
+++ b/drivers/net/wireless/p54/p54spi_eeprom.h
@@ -65,9 +65,10 @@ static unsigned char p54spi_eeprom[] = {
0x03, 0x00, 0x00, 0x11, /* PDR_ANTENNA_GAIN */
0x08, 0x08, 0x08, 0x08,
-0x09, 0x00, 0xad, 0xde, /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM */
- 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x0a, 0x00, 0xff, 0xca, /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2 */
+ 0x01, 0x00, 0x0a, 0x00,
+ 0x00, 0x00, 0x0a, 0x00,
+ 0x85, 0x09, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00,
/* struct pda_custom_wrapper */
0x10, 0x06, 0x5d, 0xb0, /* PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM */
@@ -671,7 +672,7 @@ static unsigned char p54spi_eeprom[] = {
0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01,
0x02, 0x00, 0x00, 0x00, /* PDR_END */
- 0x67, 0x99,
+ 0xb6, 0x04,
};
#endif /* P54SPI_EEPROM_H */
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index d5bc21e5a02c..9b344a921e74 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -43,6 +43,7 @@ MODULE_FIRMWARE("isl3887usb");
static struct usb_device_id p54u_table[] __devinitdata = {
/* Version 1 devices (pci chip + net2280) */
+ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */
{USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
{USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
{USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */
@@ -56,9 +57,13 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */
{USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */
{USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */
+ {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */
{USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */
{USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */
+ {USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */
+ {USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */
{USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */
+ {USB_DEVICE(0x182d, 0x096b)}, /* Sitecom WL-107 */
{USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */
{USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */
{USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */
@@ -93,7 +98,9 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
{USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
{USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
+ {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
{USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
+ {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */
{USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
{USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */
{USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */
@@ -183,7 +190,7 @@ static void p54u_rx_cb(struct urb *urb)
static void p54u_tx_cb(struct urb *urb)
{
struct sk_buff *skb = urb->context;
- struct ieee80211_hw *dev = (struct ieee80211_hw *)
+ struct ieee80211_hw *dev =
usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
p54_free_skb(dev, skb);
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 76b2318a7dc7..7834c26c2954 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -273,11 +273,9 @@ void p54_tx(struct p54_common *priv, struct sk_buff *skb)
static int p54_rssi_to_dbm(struct p54_common *priv, int rssi)
{
- int band = priv->hw->conf.channel->band;
-
if (priv->rxhw != 5) {
- return ((rssi * priv->rssical_db[band].mul) / 64 +
- priv->rssical_db[band].add) / 4;
+ return ((rssi * priv->cur_rssi->mul) / 64 +
+ priv->cur_rssi->add) / 4;
} else {
/*
* TODO: find the correct formula
@@ -369,7 +367,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
priv->tsf_low32 = tsf32;
- rx_status->flag |= RX_FLAG_TSFT;
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
header_len += hdr->align[0];
@@ -618,7 +616,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
else
*burst_possible = false;
- if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)
@@ -698,7 +696,7 @@ static u8 p54_convert_algo(u32 cipher)
}
}
-int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
+void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct p54_common *priv = dev->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -719,12 +717,8 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
&hdr_flags, &aid, &burst_allowed);
if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
- if (!IS_QOS_QUEUE(queue)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- } else {
- return NETDEV_TX_BUSY;
- }
+ dev_kfree_skb_any(skb);
+ return;
}
padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
@@ -867,5 +861,4 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
p54info->extra_len = extra_len;
p54_tx(priv, skb);
- return NETDEV_TX_OK;
}
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 2c8cc954d1b6..ec2c75d77cea 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -630,7 +630,7 @@ islpci_alloc_memory(islpci_private *priv)
printk(KERN_DEBUG "islpci_alloc_memory\n");
#endif
- /* remap the PCI device base address to accessable */
+ /* remap the PCI device base address to accessible */
if (!(priv->device_base =
ioremap(pci_resource_start(priv->pdev, 0),
ISL38XX_PCI_MEM_SIZE))) {
@@ -709,7 +709,7 @@ islpci_alloc_memory(islpci_private *priv)
PCI_DMA_FROMDEVICE);
if (!priv->pci_map_rx_address[counter]) {
/* error mapping the buffer to device
- accessable memory address */
+ accessible memory address */
printk(KERN_ERR "failed to map skb DMA'able\n");
goto out_free;
}
@@ -773,7 +773,7 @@ islpci_free_memory(islpci_private *priv)
priv->data_low_rx[counter] = NULL;
}
- /* Free the acces control list and the WPA list */
+ /* Free the access control list and the WPA list */
prism54_acl_clean(&priv->acl);
prism54_wpa_bss_ie_clean(priv);
mgt_clean(priv);
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 2fc52bc2d7dd..d44f8e20cce0 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -450,7 +450,7 @@ islpci_eth_receive(islpci_private *priv)
MAX_FRAGMENT_SIZE_RX + 2,
PCI_DMA_FROMDEVICE);
if (unlikely(!priv->pci_map_rx_address[index])) {
- /* error mapping the buffer to device accessable memory address */
+ /* error mapping the buffer to device accessible memory address */
DEBUG(SHOW_ERROR_MESSAGES,
"Error mapping DMA address\n");
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 97007d9e2c1f..0764d1a30d13 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1776,11 +1776,8 @@ static void ray_update_multi_list(struct net_device *dev, int all)
/* Copy the kernel's list of MC addresses to card */
netdev_for_each_mc_addr(ha, dev) {
memcpy_toio(p, ha->addr, ETH_ALEN);
- dev_dbg(&link->dev,
- "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n",
- ha->addr[0], ha->addr[1],
- ha->addr[2], ha->addr[3],
- ha->addr[4], ha->addr[5]);
+ dev_dbg(&link->dev, "ray_update_multi add addr %pm\n",
+ ha->addr);
p += ETH_ALEN;
i++;
}
@@ -2015,11 +2012,8 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
memcpy_fromio(&local->bss_id,
prcs->var.rejoin_net_complete.
bssid, ADDRLEN);
- dev_dbg(&link->dev,
- "ray_cs new BSSID = %02x%02x%02x%02x%02x%02x\n",
- local->bss_id[0], local->bss_id[1],
- local->bss_id[2], local->bss_id[3],
- local->bss_id[4], local->bss_id[5]);
+ dev_dbg(&link->dev, "ray_cs new BSSID = %pm\n",
+ local->bss_id);
if (!sniffer)
authenticate(local);
}
@@ -2286,8 +2280,8 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
struct ethhdr *peth;
UCHAR srcaddr[ADDRLEN];
UCHAR destaddr[ADDRLEN];
- static UCHAR org_bridge[3] = { 0, 0, 0xf8 };
- static UCHAR org_1042[3] = { 0, 0, 0 };
+ static const UCHAR org_bridge[3] = { 0, 0, 0xf8 };
+ static const UCHAR org_1042[3] = { 0, 0, 0 };
memcpy(destaddr, ieee80211_get_DA(pmac), ADDRLEN);
memcpy(srcaddr, ieee80211_get_SA(pmac), ADDRLEN);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 71b5971da597..518542b4bf9e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -129,6 +129,7 @@ MODULE_PARM_DESC(workaround_interval,
#define OID_802_11_RTS_THRESHOLD cpu_to_le32(0x0d01020a)
#define OID_802_11_SUPPORTED_RATES cpu_to_le32(0x0d01020e)
#define OID_802_11_CONFIGURATION cpu_to_le32(0x0d010211)
+#define OID_802_11_POWER_MODE cpu_to_le32(0x0d010216)
#define OID_802_11_BSSID_LIST cpu_to_le32(0x0d010217)
@@ -156,6 +157,12 @@ MODULE_PARM_DESC(workaround_interval,
#define RNDIS_STATUS_ADAPTER_NOT_OPEN cpu_to_le32(0xc0010012)
+/* Known device types */
+#define RNDIS_UNKNOWN 0
+#define RNDIS_BCM4320A 1
+#define RNDIS_BCM4320B 2
+
+
/* NDIS data structures. Taken from wpa_supplicant driver_ndis.c
* slightly modified for datatype endianess, etc
*/
@@ -233,6 +240,12 @@ enum ndis_80211_addwep_bits {
NDIS_80211_ADDWEP_TRANSMIT_KEY = cpu_to_le32(1 << 31)
};
+enum ndis_80211_power_mode {
+ NDIS_80211_POWER_MODE_CAM,
+ NDIS_80211_POWER_MODE_MAX_PSP,
+ NDIS_80211_POWER_MODE_FAST_PSP,
+};
+
struct ndis_80211_auth_request {
__le32 length;
u8 bssid[6];
@@ -472,12 +485,16 @@ struct rndis_wlan_private {
struct mutex command_lock;
unsigned long work_pending;
int last_qual;
+ s32 cqm_rssi_thold;
+ u32 cqm_rssi_hyst;
+ int last_cqm_event_rssi;
struct ieee80211_supported_band band;
struct ieee80211_channel channels[ARRAY_SIZE(rndis_channels)];
struct ieee80211_rate rates[ARRAY_SIZE(rndis_rates)];
u32 cipher_suites[ARRAY_SIZE(rndis_cipher_suites)];
+ int device_type;
int caps;
int multicast_size;
@@ -493,10 +510,10 @@ struct rndis_wlan_private {
/* hardware state */
bool radio_on;
+ int power_mode;
int infra_mode;
bool connected;
u8 bssid[ETH_ALEN];
- struct ndis_80211_ssid essid;
__le32 current_command_oid;
/* encryption stuff */
@@ -547,7 +564,7 @@ static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool pairwise, const u8 *mac_addr);
static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index);
+ u8 key_index, bool unicast, bool multicast);
static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_info *sinfo);
@@ -563,7 +580,14 @@ static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev);
-static struct cfg80211_ops rndis_config_ops = {
+static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, int timeout);
+
+static int rndis_set_cqm_rssi_config(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_thold, u32 rssi_hyst);
+
+static const struct cfg80211_ops rndis_config_ops = {
.change_virtual_intf = rndis_change_virtual_intf,
.scan = rndis_scan,
.set_wiphy_params = rndis_set_wiphy_params,
@@ -582,6 +606,8 @@ static struct cfg80211_ops rndis_config_ops = {
.set_pmksa = rndis_set_pmksa,
.del_pmksa = rndis_del_pmksa,
.flush_pmksa = rndis_flush_pmksa,
+ .set_power_mgmt = rndis_set_power_mgmt,
+ .set_cqm_rssi_config = rndis_set_cqm_rssi_config,
};
static void *rndis_wiphy_privid = &rndis_wiphy_privid;
@@ -680,6 +706,7 @@ static const char *oid_to_string(__le32 oid)
OID_STR(OID_802_11_ADD_KEY);
OID_STR(OID_802_11_REMOVE_KEY);
OID_STR(OID_802_11_ASSOCIATION_INFORMATION);
+ OID_STR(OID_802_11_CAPABILITY);
OID_STR(OID_802_11_PMKID);
OID_STR(OID_802_11_NETWORK_TYPES_SUPPORTED);
OID_STR(OID_802_11_NETWORK_TYPE_IN_USE);
@@ -690,6 +717,7 @@ static const char *oid_to_string(__le32 oid)
OID_STR(OID_802_11_RTS_THRESHOLD);
OID_STR(OID_802_11_SUPPORTED_RATES);
OID_STR(OID_802_11_CONFIGURATION);
+ OID_STR(OID_802_11_POWER_MODE);
OID_STR(OID_802_11_BSSID_LIST);
#undef OID_STR
}
@@ -810,7 +838,8 @@ exit_unlock:
return ret;
}
-static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len)
+static int rndis_set_oid(struct usbnet *dev, __le32 oid, const void *data,
+ int len)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
union {
@@ -994,7 +1023,18 @@ static int level_to_qual(int level)
*/
static int set_infra_mode(struct usbnet *usbdev, int mode);
static void restore_keys(struct usbnet *usbdev);
-static int rndis_check_bssid_list(struct usbnet *usbdev);
+static int rndis_check_bssid_list(struct usbnet *usbdev, u8 *match_bssid,
+ bool *matched);
+
+static int rndis_start_bssid_list_scan(struct usbnet *usbdev)
+{
+ __le32 tmp;
+
+ /* Note: OID_802_11_BSSID_LIST_SCAN clears internal BSS list. */
+ tmp = cpu_to_le32(1);
+ return rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
+ sizeof(tmp));
+}
static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
{
@@ -1007,7 +1047,6 @@ static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
return ret;
}
if (ret == 0) {
- memcpy(&priv->essid, ssid, sizeof(priv->essid));
priv->radio_on = true;
netdev_dbg(usbdev->net, "%s(): radio_on = true\n", __func__);
}
@@ -1015,7 +1054,7 @@ static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
return ret;
}
-static int set_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
+static int set_bssid(struct usbnet *usbdev, const u8 *bssid)
{
int ret;
@@ -1031,7 +1070,9 @@ static int set_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
static int clear_bssid(struct usbnet *usbdev)
{
- u8 broadcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ static const u8 broadcast_mac[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
return set_bssid(usbdev, broadcast_mac);
}
@@ -1904,14 +1945,14 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
struct usbnet *usbdev = netdev_priv(dev);
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
int ret;
- __le32 tmp;
+ int delay = SCAN_DELAY_JIFFIES;
netdev_dbg(usbdev->net, "cfg80211.scan\n");
/* Get current bssid list from device before new scan, as new scan
* clears internal bssid list.
*/
- rndis_check_bssid_list(usbdev);
+ rndis_check_bssid_list(usbdev, NULL, NULL);
if (!request)
return -EINVAL;
@@ -1921,13 +1962,13 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
priv->scan_request = request;
- tmp = cpu_to_le32(1);
- ret = rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
- sizeof(tmp));
+ ret = rndis_start_bssid_list_scan(usbdev);
if (ret == 0) {
+ if (priv->device_type == RNDIS_BCM4320A)
+ delay = HZ;
+
/* Wait before retrieving scan results from device */
- queue_delayed_work(priv->workqueue, &priv->scan_work,
- SCAN_DELAY_JIFFIES);
+ queue_delayed_work(priv->workqueue, &priv->scan_work, delay);
}
return ret;
@@ -1946,8 +1987,8 @@ static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
int ie_len, bssid_len;
u8 *ie;
- netdev_dbg(usbdev->net, " found bssid: '%.32s' [%pM]\n",
- bssid->ssid.essid, bssid->mac);
+ netdev_dbg(usbdev->net, " found bssid: '%.32s' [%pM], len: %d\n",
+ bssid->ssid.essid, bssid->mac, le32_to_cpu(bssid->length));
/* parse bssid structure */
bssid_len = le32_to_cpu(bssid->length);
@@ -1981,49 +2022,98 @@ static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
GFP_KERNEL);
}
-static int rndis_check_bssid_list(struct usbnet *usbdev)
+static struct ndis_80211_bssid_ex *next_bssid_list_item(
+ struct ndis_80211_bssid_ex *bssid,
+ int *bssid_len, void *buf, int len)
+{
+ void *buf_end, *bssid_end;
+
+ buf_end = (char *)buf + len;
+ bssid_end = (char *)bssid + *bssid_len;
+
+ if ((int)(buf_end - bssid_end) < sizeof(bssid->length)) {
+ *bssid_len = 0;
+ return NULL;
+ } else {
+ bssid = (void *)((char *)bssid + *bssid_len);
+ *bssid_len = le32_to_cpu(bssid->length);
+ return bssid;
+ }
+}
+
+static bool check_bssid_list_item(struct ndis_80211_bssid_ex *bssid,
+ int bssid_len, void *buf, int len)
+{
+ void *buf_end, *bssid_end;
+
+ if (!bssid || bssid_len <= 0 || bssid_len > len)
+ return false;
+
+ buf_end = (char *)buf + len;
+ bssid_end = (char *)bssid + bssid_len;
+
+ return (int)(buf_end - bssid_end) >= 0 && (int)(bssid_end - buf) >= 0;
+}
+
+static int rndis_check_bssid_list(struct usbnet *usbdev, u8 *match_bssid,
+ bool *matched)
{
void *buf = NULL;
struct ndis_80211_bssid_list_ex *bssid_list;
struct ndis_80211_bssid_ex *bssid;
- int ret = -EINVAL, len, count, bssid_len;
- bool resized = false;
+ int ret = -EINVAL, len, count, bssid_len, real_count, new_len;
- netdev_dbg(usbdev->net, "check_bssid_list\n");
+ netdev_dbg(usbdev->net, "%s()\n", __func__);
len = CONTROL_BUFFER_SIZE;
resize_buf:
- buf = kmalloc(len, GFP_KERNEL);
+ buf = kzalloc(len, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto out;
}
- ret = rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &len);
- if (ret != 0)
+ /* BSSID-list might have got bigger last time we checked, keep
+ * resizing until it won't get any bigger.
+ */
+ new_len = len;
+ ret = rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &new_len);
+ if (ret != 0 || new_len < sizeof(struct ndis_80211_bssid_list_ex))
goto out;
- if (!resized && len > CONTROL_BUFFER_SIZE) {
- resized = true;
+ if (new_len > len) {
+ len = new_len;
kfree(buf);
goto resize_buf;
}
+ len = new_len;
+
bssid_list = buf;
- bssid = bssid_list->bssid;
- bssid_len = le32_to_cpu(bssid->length);
count = le32_to_cpu(bssid_list->num_items);
- netdev_dbg(usbdev->net, "check_bssid_list: %d BSSIDs found (buflen: %d)\n",
- count, len);
+ real_count = 0;
+ netdev_dbg(usbdev->net, "%s(): buflen: %d\n", __func__, len);
- while (count && ((void *)bssid + bssid_len) <= (buf + len)) {
- rndis_bss_info_update(usbdev, bssid);
+ bssid_len = 0;
+ bssid = next_bssid_list_item(bssid_list->bssid, &bssid_len, buf, len);
- bssid = (void *)bssid + bssid_len;
- bssid_len = le32_to_cpu(bssid->length);
- count--;
+ /* Device returns incorrect 'num_items'. Workaround by ignoring the
+ * received 'num_items' and walking through full bssid buffer instead.
+ */
+ while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
+ if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
+ matched) {
+ if (!compare_ether_addr(bssid->mac, match_bssid))
+ *matched = true;
+ }
+
+ real_count++;
+ bssid = next_bssid_list_item(bssid, &bssid_len, buf, len);
}
+ netdev_dbg(usbdev->net, "%s(): num_items from device: %d, really found:"
+ " %d\n", __func__, count, real_count);
+
out:
kfree(buf);
return ret;
@@ -2041,7 +2131,7 @@ static void rndis_get_scan_results(struct work_struct *work)
if (!priv->scan_request)
return;
- ret = rndis_check_bssid_list(usbdev);
+ ret = rndis_check_bssid_list(usbdev, NULL, NULL);
cfg80211_scan_done(priv->scan_request, ret < 0);
@@ -2355,7 +2445,7 @@ static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
}
static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index)
+ u8 key_index, bool unicast, bool multicast)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
@@ -2365,6 +2455,9 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
priv->encr_tx_key_index = key_index;
+ if (is_wpa_key(priv, key_index))
+ return 0;
+
key = priv->encr_keys[key_index];
return add_wep_key(usbdev, key.material, key.len, key_index);
@@ -2495,6 +2588,139 @@ static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
return rndis_set_oid(usbdev, OID_802_11_PMKID, &pmkid, sizeof(pmkid));
}
+static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, int timeout)
+{
+ struct rndis_wlan_private *priv = wiphy_priv(wiphy);
+ struct usbnet *usbdev = priv->usbdev;
+ int power_mode;
+ __le32 mode;
+ int ret;
+
+ if (priv->device_type != RNDIS_BCM4320B)
+ return -ENOTSUPP;
+
+ netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
+ enabled ? "enabled" : "disabled",
+ timeout);
+
+ if (enabled)
+ power_mode = NDIS_80211_POWER_MODE_FAST_PSP;
+ else
+ power_mode = NDIS_80211_POWER_MODE_CAM;
+
+ if (power_mode == priv->power_mode)
+ return 0;
+
+ priv->power_mode = power_mode;
+
+ mode = cpu_to_le32(power_mode);
+ ret = rndis_set_oid(usbdev, OID_802_11_POWER_MODE, &mode, sizeof(mode));
+
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_POWER_MODE -> %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int rndis_set_cqm_rssi_config(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_thold, u32 rssi_hyst)
+{
+ struct rndis_wlan_private *priv = wiphy_priv(wiphy);
+
+ priv->cqm_rssi_thold = rssi_thold;
+ priv->cqm_rssi_hyst = rssi_hyst;
+ priv->last_cqm_event_rssi = 0;
+
+ return 0;
+}
+
+static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
+ struct ndis_80211_assoc_info *info)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ struct ieee80211_channel *channel;
+ struct ndis_80211_conf config;
+ struct ndis_80211_ssid ssid;
+ s32 signal;
+ u64 timestamp;
+ u16 capability;
+ u16 beacon_interval;
+ __le32 rssi;
+ u8 ie_buf[34];
+ int len, ret, ie_len;
+
+ /* Get signal quality, in case of error use rssi=0 and ignore error. */
+ len = sizeof(rssi);
+ rssi = 0;
+ ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len);
+ signal = level_to_qual(le32_to_cpu(rssi));
+
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_RSSI -> %d, "
+ "rssi:%d, qual: %d\n", __func__, ret, le32_to_cpu(rssi),
+ level_to_qual(le32_to_cpu(rssi)));
+
+ /* Get AP capabilities */
+ if (info) {
+ capability = le16_to_cpu(info->resp_ie.capa);
+ } else {
+ /* Set at least ESS/IBSS capability */
+ capability = (priv->infra_mode == NDIS_80211_INFRA_INFRA) ?
+ WLAN_CAPABILITY_ESS : WLAN_CAPABILITY_IBSS;
+ }
+
+ /* Get channel and beacon interval */
+ len = sizeof(config);
+ ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
+ __func__, ret);
+ if (ret >= 0) {
+ beacon_interval = le16_to_cpu(config.beacon_period);
+ channel = ieee80211_get_channel(priv->wdev.wiphy,
+ KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
+ if (!channel) {
+ netdev_warn(usbdev->net, "%s(): could not get channel."
+ "\n", __func__);
+ return;
+ }
+ } else {
+ netdev_warn(usbdev->net, "%s(): could not get configuration.\n",
+ __func__);
+ return;
+ }
+
+ /* Get SSID, in case of error, use zero length SSID and ignore error. */
+ len = sizeof(ssid);
+ memset(&ssid, 0, sizeof(ssid));
+ ret = rndis_query_oid(usbdev, OID_802_11_SSID, &ssid, &len);
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_SSID -> %d, len: %d, ssid: "
+ "'%.32s'\n", __func__, ret,
+ le32_to_cpu(ssid.length), ssid.essid);
+
+ if (le32_to_cpu(ssid.length) > 32)
+ ssid.length = cpu_to_le32(32);
+
+ ie_buf[0] = WLAN_EID_SSID;
+ ie_buf[1] = le32_to_cpu(ssid.length);
+ memcpy(&ie_buf[2], ssid.essid, le32_to_cpu(ssid.length));
+
+ ie_len = le32_to_cpu(ssid.length) + 2;
+
+ /* no tsf */
+ timestamp = 0;
+
+ netdev_dbg(usbdev->net, "%s(): channel:%d(freq), bssid:[%pM], tsf:%d, "
+ "capa:%x, beacon int:%d, resp_ie(len:%d, essid:'%.32s'), "
+ "signal:%d\n", __func__, (channel ? channel->center_freq : -1),
+ bssid, (u32)timestamp, capability, beacon_interval, ie_len,
+ ssid.essid, signal);
+
+ cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
+ timestamp, capability, beacon_interval, ie_buf, ie_len,
+ signal, GFP_KERNEL);
+}
+
/*
* workers, indication handlers, device poller
*/
@@ -2507,6 +2733,7 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
u8 *req_ie, *resp_ie;
int ret, offset;
bool roamed = false;
+ bool match_bss;
if (priv->infra_mode == NDIS_80211_INFRA_INFRA && priv->connected) {
/* received media connect indication while connected, either
@@ -2558,6 +2785,13 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
resp_ie_len =
CONTROL_BUFFER_SIZE - offset;
}
+ } else {
+ /* Since rndis_wlan_craft_connected_bss() might use info
+ * later and expects info to contain valid data if
+ * non-null, free info and set NULL here.
+ */
+ kfree(info);
+ info = NULL;
}
} else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC))
return;
@@ -2569,13 +2803,26 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
netdev_dbg(usbdev->net, "link up work: [%pM]%s\n",
bssid, roamed ? " roamed" : "");
- /* Internal bss list in device always contains at least the currently
+ /* Internal bss list in device should contain at least the currently
* connected bss and we can get it to cfg80211 with
* rndis_check_bssid_list().
- * NOTE: This is true for Broadcom chip, but not mentioned in RNDIS
- * spec.
+ *
+ * NDIS spec says: "If the device is associated, but the associated
+ * BSSID is not in its BSSID scan list, then the driver must add an
+ * entry for the BSSID at the end of the data that it returns in
+ * response to query of OID_802_11_BSSID_LIST."
+ *
+ * NOTE: Seems to be true for BCM4320b variant, but not BCM4320a.
*/
- rndis_check_bssid_list(usbdev);
+ match_bss = false;
+ rndis_check_bssid_list(usbdev, bssid, &match_bss);
+
+ if (!is_zero_ether_addr(bssid) && !match_bss) {
+ /* Couldn't get bss from device, we need to manually craft bss
+ * for cfg80211.
+ */
+ rndis_wlan_craft_connected_bss(usbdev, bssid, info);
+ }
if (priv->infra_mode == NDIS_80211_INFRA_INFRA) {
if (!roamed)
@@ -2918,6 +3165,32 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
return retval;
}
+static void rndis_do_cqm(struct usbnet *usbdev, s32 rssi)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ enum nl80211_cqm_rssi_threshold_event event;
+ int thold, hyst, last_event;
+
+ if (priv->cqm_rssi_thold >= 0 || rssi >= 0)
+ return;
+ if (priv->infra_mode != NDIS_80211_INFRA_INFRA)
+ return;
+
+ last_event = priv->last_cqm_event_rssi;
+ thold = priv->cqm_rssi_thold;
+ hyst = priv->cqm_rssi_hyst;
+
+ if (rssi < thold && (last_event == 0 || rssi < last_event - hyst))
+ event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+ else if (rssi > thold && (last_event == 0 || rssi > last_event + hyst))
+ event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+ else
+ return;
+
+ priv->last_cqm_event_rssi = rssi;
+ cfg80211_cqm_rssi_notify(usbdev->net, event, GFP_KERNEL);
+}
+
#define DEVICE_POLLER_JIFFIES (HZ)
static void rndis_device_poller(struct work_struct *work)
{
@@ -2934,13 +3207,28 @@ static void rndis_device_poller(struct work_struct *work)
* also polls device with rndis_command() and catches for media link
* indications.
*/
- if (!is_associated(usbdev))
+ if (!is_associated(usbdev)) {
+ /* Workaround bad scanning in BCM4320a devices with active
+ * background scanning when not associated.
+ */
+ if (priv->device_type == RNDIS_BCM4320A && priv->radio_on &&
+ !priv->scan_request) {
+ /* Get previous scan results */
+ rndis_check_bssid_list(usbdev, NULL, NULL);
+
+ /* Initiate new scan */
+ rndis_start_bssid_list_scan(usbdev);
+ }
+
goto end;
+ }
len = sizeof(rssi);
ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len);
- if (ret == 0)
+ if (ret == 0) {
priv->last_qual = level_to_qual(le32_to_cpu(rssi));
+ rndis_do_cqm(usbdev, le32_to_cpu(rssi));
+ }
netdev_dbg(usbdev->net, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n",
ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi)));
@@ -2992,10 +3280,12 @@ end:
/*
* driver/device initialization
*/
-static void rndis_copy_module_params(struct usbnet *usbdev)
+static void rndis_copy_module_params(struct usbnet *usbdev, int device_type)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ priv->device_type = device_type;
+
priv->param_country[0] = modparam_country[0];
priv->param_country[1] = modparam_country[1];
priv->param_country[2] = 0;
@@ -3038,12 +3328,25 @@ static void rndis_copy_module_params(struct usbnet *usbdev)
priv->param_workaround_interval = modparam_workaround_interval;
}
+static int unknown_early_init(struct usbnet *usbdev)
+{
+ /* copy module parameters for unknown so that iwconfig reports txpower
+ * and workaround parameter is copied to private structure correctly.
+ */
+ rndis_copy_module_params(usbdev, RNDIS_UNKNOWN);
+
+ /* This is an unknown device, so do not try to set configuration parameters.
+ */
+
+ return 0;
+}
+
static int bcm4320a_early_init(struct usbnet *usbdev)
{
/* copy module parameters for bcm4320a so that iwconfig reports txpower
* and workaround parameter is copied to private structure correctly.
*/
- rndis_copy_module_params(usbdev);
+ rndis_copy_module_params(usbdev, RNDIS_BCM4320A);
/* bcm4320a doesn't handle configuration parameters well. Try
* set any and you get partially zeroed mac and broken device.
@@ -3057,7 +3360,7 @@ static int bcm4320b_early_init(struct usbnet *usbdev)
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
char buf[8];
- rndis_copy_module_params(usbdev);
+ rndis_copy_module_params(usbdev, RNDIS_BCM4320B);
/* Early initialization settings, setting these won't have effect
* if called after generic_rndis_bind().
@@ -3187,13 +3490,15 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
set_default_iw_params(usbdev);
+ priv->power_mode = -1;
+
/* set default rts/frag */
rndis_set_wiphy_params(wiphy,
WIPHY_PARAM_FRAG_THRESHOLD | WIPHY_PARAM_RTS_THRESHOLD);
- /* turn radio on */
- priv->radio_on = true;
- disassociate(usbdev, true);
+ /* turn radio off on init */
+ priv->radio_on = false;
+ disassociate(usbdev, false);
netif_carrier_off(usbdev->net);
return 0;
@@ -3320,7 +3625,7 @@ static const struct driver_info rndis_wlan_info = {
.tx_fixup = rndis_tx_fixup,
.reset = rndis_wlan_reset,
.stop = rndis_wlan_stop,
- .early_init = bcm4320a_early_init,
+ .early_init = unknown_early_init,
.indication = rndis_wlan_indication,
};
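
The set_cqm_rssi_config()/rndis_do_cqm() additions above boil down to a hysteresis filter around a user-supplied RSSI threshold: report a LOW event when the signal is below the threshold and more than hyst below the last reported value (or no event has been reported yet), and a HIGH event on the way back up. A minimal standalone sketch of that decision logic, assuming plain C with illustrative names (cqm_check() and enum cqm_event are not part of the driver):

/*
 * Standalone sketch of the CQM RSSI hysteresis decision above. In the driver
 * the resulting event is passed to cfg80211_cqm_rssi_notify(); here it is
 * simply returned to the caller.
 */
enum cqm_event { CQM_NONE, CQM_LOW, CQM_HIGH };

static enum cqm_event cqm_check(int rssi, int thold, int hyst, int *last_event)
{
	if (rssi < thold && (*last_event == 0 || rssi < *last_event - hyst)) {
		*last_event = rssi;	/* remember where we reported */
		return CQM_LOW;
	}
	if (rssi > thold && (*last_event == 0 || rssi > *last_event + hyst)) {
		*last_event = rssi;
		return CQM_HIGH;
	}
	return CQM_NONE;		/* still within the hysteresis band */
}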
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 4396d4b9bfb9..f630552427b7 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -53,51 +53,41 @@ config RT61PCI
When compiled as a module, this driver will be called rt61pci.
-config RT2800PCI_PCI
- boolean
- depends on PCI
- default y
-
-config RT2800PCI_SOC
- boolean
- depends on RALINK_RT288X || RALINK_RT305X
- default y
-
config RT2800PCI
- tristate "Ralink rt28xx/rt30xx/rt35xx (PCI/PCIe/PCMCIA) support (EXPERIMENTAL)"
- depends on (RT2800PCI_PCI || RT2800PCI_SOC) && EXPERIMENTAL
+ tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
+ depends on PCI || RALINK_RT288X || RALINK_RT305X
select RT2800_LIB
- select RT2X00_LIB_PCI if RT2800PCI_PCI
- select RT2X00_LIB_SOC if RT2800PCI_SOC
+ select RT2X00_LIB_PCI if PCI
+ select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X
select RT2X00_LIB_HT
select RT2X00_LIB_FIRMWARE
select RT2X00_LIB_CRYPTO
select CRC_CCITT
select EEPROM_93CX6
---help---
- This adds support for rt2800/rt3000/rt3500 wireless chipset family.
- Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890 & RT3052
-
- This driver is non-functional at the moment and is intended for
- developers.
+ This adds support for rt27xx/rt28xx/rt30xx wireless chipset family.
+ Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890, RT3052,
+ RT3090, RT3091 & RT3092
When compiled as a module, this driver will be called "rt2800pci.ko".
if RT2800PCI
-config RT2800PCI_RT30XX
- bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices"
- default y
+config RT2800PCI_RT33XX
+ bool "rt2800pci - Include support for rt33xx devices (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ default n
---help---
- This adds support for rt30xx wireless chipset family to the
+ This adds support for rt33xx wireless chipset family to the
rt2800pci driver.
- Supported chips: RT3090, RT3091 & RT3092
+ Supported chips: RT3390
Support for these devices is non-functional at the moment and is
intended for testers and developers.
config RT2800PCI_RT35XX
- bool "rt2800pci - Include support for rt35xx (PCI/PCIe/PCMCIA) devices"
+ bool "rt2800pci - Include support for rt35xx devices (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
default n
---help---
This adds support for rt35xx wireless chipset family to the
@@ -107,6 +97,18 @@ config RT2800PCI_RT35XX
Support for these devices is non-functional at the moment and is
intended for testers and developers.
+config RT2800PCI_RT53XX
+ bool "rt2800pci - Include support for rt53xx devices (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ default n
+ ---help---
+ This adds support for rt53xx wireless chipset family to the
+ rt2800pci driver.
+ Supported chips: RT5390
+
+ Support for these devices is non-functional at the moment and is
+ intended for testers and developers.
+
endif
config RT2500USB
@@ -134,8 +136,8 @@ config RT73USB
When compiled as a module, this driver will be called rt73usb.
config RT2800USB
- tristate "Ralink rt2800 (USB) support (EXPERIMENTAL)"
- depends on USB && EXPERIMENTAL
+ tristate "Ralink rt27xx/rt28xx/rt30xx (USB) support"
+ depends on USB
select RT2800_LIB
select RT2X00_LIB_USB
select RT2X00_LIB_HT
@@ -143,30 +145,28 @@ config RT2800USB
select RT2X00_LIB_CRYPTO
select CRC_CCITT
---help---
- This adds experimental support for rt2800 wireless chipset family.
- Supported chips: RT2770, RT2870 & RT3070.
-
- Known issues:
- - support for RT2870 chips doesn't work with 802.11n APs yet
- - support for RT3070 chips is non-functional at the moment
+ This adds support for rt27xx/rt28xx/rt30xx wireless chipset family.
+ Supported chips: RT2770, RT2870 & RT3070, RT3071 & RT3072
When compiled as a module, this driver will be called "rt2800usb.ko".
if RT2800USB
-config RT2800USB_RT30XX
- bool "rt2800usb - Include support for rt30xx (USB) devices"
- default y
+config RT2800USB_RT33XX
+ bool "rt2800usb - Include support for rt33xx devices (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ default n
---help---
- This adds support for rt30xx wireless chipset family to the
+ This adds support for rt33xx wireless chipset family to the
rt2800usb driver.
- Supported chips: RT3070, RT3071 & RT3072
+ Supported chips: RT3370
Support for these devices is non-functional at the moment and is
intended for testers and developers.
config RT2800USB_RT35XX
- bool "rt2800usb - Include support for rt35xx (USB) devices"
+ bool "rt2800usb - Include support for rt35xx devices (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
default n
---help---
This adds support for rt35xx wireless chipset family to the
@@ -180,9 +180,9 @@ config RT2800USB_UNKNOWN
bool "rt2800usb - Include support for unknown (USB) devices"
default n
---help---
- This adds support for rt2800 family devices that are known to
- have a rt2800 family chipset, but for which the exact chipset
- is unknown.
+ This adds support for rt2800usb devices that are known to
+ have a rt28xx family compatible chipset, but for which the exact
+ chipset is unknown.
Support status for these devices is unknown, and enabling these
devices may or may not work.
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 4f420a9ec5dc..329f3283697b 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -46,7 +46,7 @@
* These indirect registers work with busy bits,
* and we will try maximal REGISTER_BUSY_COUNT times to access
* the register while taking a REGISTER_BUSY_DELAY us delay
- * between each attampt. When the busy bit is still set at that time,
+ * between each attempt. When the busy bit is still set at that time,
* the access attempt is considered to have failed,
* and we will print an error.
*/
@@ -305,9 +305,7 @@ static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev,
* Enable synchronisation.
*/
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
@@ -633,6 +631,98 @@ static void rt2400pci_link_tuner(struct rt2x00_dev *rt2x00dev,
}
/*
+ * Queue handlers.
+ */
+static void rt2400pci_start_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
+ rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0);
+ rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
+ break;
+ case QID_BEACON:
+ /*
+ * Allow the tbtt tasklet to be scheduled.
+ */
+ tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
+ rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
+ rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
+ rt2x00_set_field32(&reg, CSR14_TBCN, 1);
+ rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rt2400pci_kick_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
+ rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1);
+ rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
+ break;
+ case QID_AC_VI:
+ rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
+ rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1);
+ rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
+ break;
+ case QID_ATIM:
+ rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
+ rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1);
+ rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rt2400pci_stop_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_ATIM:
+ rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
+ rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
+ rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
+ break;
+ case QID_RX:
+ rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
+ rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1);
+ rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
+ break;
+ case QID_BEACON:
+ rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
+ rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
+ rt2x00_set_field32(&reg, CSR14_TBCN, 0);
+ rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+
+ /*
+ * Wait for possibly running tbtt tasklets.
+ */
+ tasklet_disable(&rt2x00dev->tbtt_tasklet);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
* Initialization functions.
*/
static bool rt2400pci_get_entry_state(struct queue_entry *entry)
@@ -689,7 +779,7 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
- rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit);
+ rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
@@ -705,13 +795,13 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
entry_priv->desc_dma);
rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
- entry_priv = rt2x00dev->bcn[1].entries[0].priv_data;
+ entry_priv = rt2x00dev->atim->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
entry_priv->desc_dma);
rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
- entry_priv = rt2x00dev->bcn[0].entries[0].priv_data;
+ entry_priv = rt2x00dev->bcn->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
entry_priv->desc_dma);
@@ -878,24 +968,12 @@ static int rt2400pci_init_bbp(struct rt2x00_dev *rt2x00dev)
/*
* Device state switch handlers.
*/
-static void rt2400pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
- enum dev_state state)
-{
- u32 reg;
-
- rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
- rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX,
- (state == STATE_RADIO_RX_OFF) ||
- (state == STATE_RADIO_RX_OFF_LINK));
- rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
-}
-
static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_OFF) ||
- (state == STATE_RADIO_IRQ_OFF_ISR);
+ int mask = (state == STATE_RADIO_IRQ_OFF);
u32 reg;
+ unsigned long flags;
/*
* When interrupts are being enabled, the interrupt registers
@@ -904,12 +982,20 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
if (state == STATE_RADIO_IRQ_ON) {
rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
rt2x00pci_register_write(rt2x00dev, CSR7, reg);
+
+ /*
+ * Enable tasklets.
+ */
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ tasklet_enable(&rt2x00dev->rxdone_tasklet);
}
/*
* Only toggle the interrupts bits we are going to use.
* Non-checked interrupt bits are disabled by default.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -917,6 +1003,17 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Ensure that all tasklets are finished before
+ * disabling the interrupts.
+ */
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ }
}
static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -988,16 +1085,8 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
case STATE_RADIO_OFF:
rt2400pci_disable_radio(rt2x00dev);
break;
- case STATE_RADIO_RX_ON:
- case STATE_RADIO_RX_ON_LINK:
- case STATE_RADIO_RX_OFF:
- case STATE_RADIO_RX_OFF_LINK:
- rt2400pci_toggle_rx(rt2x00dev, state);
- break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
rt2400pci_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
@@ -1042,19 +1131,21 @@ static void rt2400pci_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 3, &word);
- rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal);
+ rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1);
- rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service);
+ rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_REGNUM, 6);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1);
rt2x00_desc_write(txd, 3, word);
rt2x00_desc_read(txd, 4, &word);
- rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, txdesc->length_low);
+ rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW,
+ txdesc->u.plcp.length_low);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_BUSY, 1);
- rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, txdesc->length_high);
+ rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH,
+ txdesc->u.plcp.length_high);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_REGNUM, 7);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1);
rt2x00_desc_write(txd, 4, word);
@@ -1075,7 +1166,7 @@ static void rt2400pci_write_tx_desc(struct queue_entry *entry,
test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_RTS,
test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_desc_write(txd, 0, word);
@@ -1119,38 +1210,10 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
/*
* Enable beaconing again.
*/
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
-static void rt2400pci_kick_tx_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- u32 reg;
-
- rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
- rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue->qid == QID_AC_BE));
- rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue->qid == QID_AC_BK));
- rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue->qid == QID_ATIM));
- rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
-}
-
-static void rt2400pci_kill_tx_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- u32 reg;
-
- if (queue->qid == QID_BEACON) {
- rt2x00pci_register_write(rt2x00dev, CSR14, 0);
- } else {
- rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
- rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
- rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
- }
-}
-
/*
* RX control handlers
*/
@@ -1215,7 +1278,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue_idx)
{
- struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+ struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
struct queue_entry_priv_pci *entry_priv;
struct queue_entry *entry;
struct txdone_entry_desc txdesc;
@@ -1251,57 +1314,68 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
}
}
-static irqreturn_t rt2400pci_interrupt_thread(int irq, void *dev_instance)
+static void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg = rt2x00dev->irqvalue[0];
+ u32 reg;
/*
- * Handle interrupts, walk through all bits
- * and run the tasks, the bits are checked in order of
- * priority.
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
*/
+ spin_lock_irq(&rt2x00dev->irqmask_lock);
- /*
- * 1 - Beacon timer expired interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
- rt2x00lib_beacondone(rt2x00dev);
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, irq_field, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
- /*
- * 2 - Rx ring done interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_RXDONE))
- rt2x00pci_rxdone(rt2x00dev);
+ spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
- /*
- * 3 - Atim ring transmit done interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
- rt2400pci_txdone(rt2x00dev, QID_ATIM);
+static void rt2400pci_txstatus_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ u32 reg;
/*
- * 4 - Priority ring transmit done interrupt.
+ * Handle all tx queues.
*/
- if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
- rt2400pci_txdone(rt2x00dev, QID_AC_BE);
+ rt2400pci_txdone(rt2x00dev, QID_ATIM);
+ rt2400pci_txdone(rt2x00dev, QID_AC_VO);
+ rt2400pci_txdone(rt2x00dev, QID_AC_VI);
/*
- * 5 - Tx ring transmit done interrupt.
+ * Enable all TXDONE interrupts again.
*/
- if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
- rt2400pci_txdone(rt2x00dev, QID_AC_BK);
+ spin_lock_irq(&rt2x00dev->irqmask_lock);
- /* Enable interrupts again. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_ON_ISR);
- return IRQ_HANDLED;
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
+
+static void rt2400pci_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_beacondone(rt2x00dev);
+ rt2400pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
+}
+
+static void rt2400pci_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00pci_rxdone(rt2x00dev);
+ rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
}
static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg;
+ u32 reg, mask;
/*
* Get the interrupt sources & saved to local variable.
@@ -1316,14 +1390,44 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return IRQ_HANDLED;
- /* Store irqvalues for use in the interrupt thread. */
- rt2x00dev->irqvalue[0] = reg;
+ mask = reg;
+
+ /*
+ * Schedule tasklets for interrupt handling.
+ */
+ if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
+
+ if (rt2x00_get_field32(reg, CSR7_RXDONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
- /* Disable interrupts, will be enabled again in the interrupt thread. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_OFF_ISR);
+ if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
+ rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
+ rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+ /*
+ * Mask out all txdone interrupts.
+ */
+ rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
+ rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
+ rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
+ }
- return IRQ_WAKE_THREAD;
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now,
+ * the tasklet will reenable the appropriate interrupts.
+ */
+ spin_lock(&rt2x00dev->irqmask_lock);
+
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ reg |= mask;
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock(&rt2x00dev->irqmask_lock);
+
+ return IRQ_HANDLED;
}
/*
@@ -1536,6 +1640,7 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
*/
__set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
/*
* Set the rssi offset.
@@ -1612,11 +1717,14 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
.get_tsf = rt2400pci_get_tsf,
.tx_last_beacon = rt2400pci_tx_last_beacon,
.rfkill_poll = rt2x00mac_rfkill_poll,
+ .flush = rt2x00mac_flush,
};
static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
.irq_handler = rt2400pci_interrupt,
- .irq_handler_thread = rt2400pci_interrupt_thread,
+ .txstatus_tasklet = rt2400pci_txstatus_tasklet,
+ .tbtt_tasklet = rt2400pci_tbtt_tasklet,
+ .rxdone_tasklet = rt2400pci_rxdone_tasklet,
.probe_hw = rt2400pci_probe_hw,
.initialize = rt2x00pci_initialize,
.uninitialize = rt2x00pci_uninitialize,
@@ -1627,10 +1735,11 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
.link_stats = rt2400pci_link_stats,
.reset_tuner = rt2400pci_reset_tuner,
.link_tuner = rt2400pci_link_tuner,
+ .start_queue = rt2400pci_start_queue,
+ .kick_queue = rt2400pci_kick_queue,
+ .stop_queue = rt2400pci_stop_queue,
.write_tx_desc = rt2400pci_write_tx_desc,
.write_beacon = rt2400pci_write_beacon,
- .kick_tx_queue = rt2400pci_kick_tx_queue,
- .kill_tx_queue = rt2400pci_kill_tx_queue,
.fill_rxdone = rt2400pci_fill_rxdone,
.config_filter = rt2400pci_config_filter,
.config_intf = rt2400pci_config_intf,
@@ -1640,28 +1749,28 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
};
static const struct data_queue_desc rt2400pci_queue_rx = {
- .entry_num = RX_ENTRIES,
+ .entry_num = 24,
.data_size = DATA_FRAME_SIZE,
.desc_size = RXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
};
static const struct data_queue_desc rt2400pci_queue_tx = {
- .entry_num = TX_ENTRIES,
+ .entry_num = 24,
.data_size = DATA_FRAME_SIZE,
.desc_size = TXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
};
static const struct data_queue_desc rt2400pci_queue_bcn = {
- .entry_num = BEACON_ENTRIES,
+ .entry_num = 1,
.data_size = MGMT_FRAME_SIZE,
.desc_size = TXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
};
static const struct data_queue_desc rt2400pci_queue_atim = {
- .entry_num = ATIM_ENTRIES,
+ .entry_num = 8,
.data_size = DATA_FRAME_SIZE,
.desc_size = TXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index c048b18f4133..d3a4a68cc439 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -809,8 +809,8 @@
/*
* DMA descriptor defines.
*/
-#define TXD_DESC_SIZE ( 8 * sizeof(__le32) )
-#define RXD_DESC_SIZE ( 8 * sizeof(__le32) )
+#define TXD_DESC_SIZE (8 * sizeof(__le32))
+#define RXD_DESC_SIZE (8 * sizeof(__le32))
/*
* TX descriptor format for TX, PRIO, ATIM and Beacon Ring.
@@ -948,6 +948,6 @@
((__CLAMP_TX(__txpower) - MAX_TXPOWER) + MIN_TXPOWER)
#define TXPOWER_TO_DEV(__txpower) \
- MAX_TXPOWER - (__CLAMP_TX(__txpower) - MIN_TXPOWER)
+ (MAX_TXPOWER - (__CLAMP_TX(__txpower) - MIN_TXPOWER))
#endif /* RT2400PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 97feb7aef809..58277878889e 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -293,7 +293,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
struct rt2x00intf_conf *conf,
const unsigned int flags)
{
- struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
+ struct data_queue *queue = rt2x00dev->bcn;
unsigned int bcn_preload;
u32 reg;
@@ -311,9 +311,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
* Enable synchronisation.
*/
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
@@ -723,6 +721,98 @@ dynamic_cca_tune:
}
/*
+ * Queue handlers.
+ */
+static void rt2500pci_start_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
+ rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0);
+ rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
+ break;
+ case QID_BEACON:
+ /*
+ * Allow the tbtt tasklet to be scheduled.
+ */
+ tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
+ rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
+ rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
+ rt2x00_set_field32(&reg, CSR14_TBCN, 1);
+ rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rt2500pci_kick_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
+ rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1);
+ rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
+ break;
+ case QID_AC_VI:
+ rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
+ rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1);
+ rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
+ break;
+ case QID_ATIM:
+ rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
+ rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1);
+ rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rt2500pci_stop_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_ATIM:
+ rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
+ rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
+ rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
+ break;
+ case QID_RX:
+ rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
+ rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1);
+ rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
+ break;
+ case QID_BEACON:
+ rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
+ rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
+ rt2x00_set_field32(&reg, CSR14_TBCN, 0);
+ rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+
+ /*
+ * Wait for possibly running tbtt tasklets.
+ */
+ tasklet_disable(&rt2x00dev->tbtt_tasklet);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
* Initialization functions.
*/
static bool rt2500pci_get_entry_state(struct queue_entry *entry)
@@ -775,7 +865,7 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
- rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit);
+ rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
@@ -791,13 +881,13 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
entry_priv->desc_dma);
rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
- entry_priv = rt2x00dev->bcn[1].entries[0].priv_data;
+ entry_priv = rt2x00dev->atim->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
entry_priv->desc_dma);
rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
- entry_priv = rt2x00dev->bcn[0].entries[0].priv_data;
+ entry_priv = rt2x00dev->bcn->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
entry_priv->desc_dma);
@@ -1033,24 +1123,12 @@ static int rt2500pci_init_bbp(struct rt2x00_dev *rt2x00dev)
/*
* Device state switch handlers.
*/
-static void rt2500pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
- enum dev_state state)
-{
- u32 reg;
-
- rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
- rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX,
- (state == STATE_RADIO_RX_OFF) ||
- (state == STATE_RADIO_RX_OFF_LINK));
- rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
-}
-
static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_OFF) ||
- (state == STATE_RADIO_IRQ_OFF_ISR);
+ int mask = (state == STATE_RADIO_IRQ_OFF);
u32 reg;
+ unsigned long flags;
/*
* When interrupts are being enabled, the interrupt registers
@@ -1059,12 +1137,20 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
if (state == STATE_RADIO_IRQ_ON) {
rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
rt2x00pci_register_write(rt2x00dev, CSR7, reg);
+
+ /*
+ * Enable tasklets.
+ */
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ tasklet_enable(&rt2x00dev->rxdone_tasklet);
}
/*
* Only toggle the interrupts bits we are going to use.
* Non-checked interrupt bits are disabled by default.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -1072,6 +1158,16 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Ensure that all tasklets are finished.
+ */
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ }
}
static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1143,16 +1239,8 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
case STATE_RADIO_OFF:
rt2500pci_disable_radio(rt2x00dev);
break;
- case STATE_RADIO_RX_ON:
- case STATE_RADIO_RX_ON_LINK:
- case STATE_RADIO_RX_OFF:
- case STATE_RADIO_RX_OFF_LINK:
- rt2500pci_toggle_rx(rt2x00dev, state);
- break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
rt2500pci_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
@@ -1193,16 +1281,18 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_read(txd, 2, &word);
rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER);
- rt2x00_set_field32(&word, TXD_W2_AIFS, txdesc->aifs);
- rt2x00_set_field32(&word, TXD_W2_CWMIN, txdesc->cw_min);
- rt2x00_set_field32(&word, TXD_W2_CWMAX, txdesc->cw_max);
+ rt2x00_set_field32(&word, TXD_W2_AIFS, entry->queue->aifs);
+ rt2x00_set_field32(&word, TXD_W2_CWMIN, entry->queue->cw_min);
+ rt2x00_set_field32(&word, TXD_W2_CWMAX, entry->queue->cw_max);
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 3, &word);
- rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal);
- rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service);
- rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, txdesc->length_low);
- rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, txdesc->length_high);
+ rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal);
+ rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service);
+ rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW,
+ txdesc->u.plcp.length_low);
+ rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH,
+ txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 3, word);
rt2x00_desc_read(txd, 10, &word);
@@ -1227,7 +1317,7 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
rt2x00_set_field32(&word, TXD_W0_OFDM,
(txdesc->rate_mode == RATE_MODE_OFDM));
rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1);
- rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
@@ -1273,38 +1363,10 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
/*
* Enable beaconing again.
*/
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
-static void rt2500pci_kick_tx_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- u32 reg;
-
- rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
- rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue->qid == QID_AC_BE));
- rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue->qid == QID_AC_BK));
- rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue->qid == QID_ATIM));
- rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
-}
-
-static void rt2500pci_kill_tx_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- u32 reg;
-
- if (queue->qid == QID_BEACON) {
- rt2x00pci_register_write(rt2x00dev, CSR14, 0);
- } else {
- rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
- rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
- rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
- }
-}
-
/*
* RX control handlers
*/
@@ -1348,7 +1410,7 @@ static void rt2500pci_fill_rxdone(struct queue_entry *entry,
static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue_idx)
{
- struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+ struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
struct queue_entry_priv_pci *entry_priv;
struct queue_entry *entry;
struct txdone_entry_desc txdesc;
@@ -1384,58 +1446,68 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
}
}
-static irqreturn_t rt2500pci_interrupt_thread(int irq, void *dev_instance)
+static void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg = rt2x00dev->irqvalue[0];
+ u32 reg;
/*
- * Handle interrupts, walk through all bits
- * and run the tasks, the bits are checked in order of
- * priority.
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
*/
+ spin_lock_irq(&rt2x00dev->irqmask_lock);
- /*
- * 1 - Beacon timer expired interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
- rt2x00lib_beacondone(rt2x00dev);
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, irq_field, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
- /*
- * 2 - Rx ring done interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_RXDONE))
- rt2x00pci_rxdone(rt2x00dev);
+ spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
- /*
- * 3 - Atim ring transmit done interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
- rt2500pci_txdone(rt2x00dev, QID_ATIM);
+static void rt2500pci_txstatus_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ u32 reg;
/*
- * 4 - Priority ring transmit done interrupt.
+ * Handle all tx queues.
*/
- if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
- rt2500pci_txdone(rt2x00dev, QID_AC_BE);
+ rt2500pci_txdone(rt2x00dev, QID_ATIM);
+ rt2500pci_txdone(rt2x00dev, QID_AC_VO);
+ rt2500pci_txdone(rt2x00dev, QID_AC_VI);
/*
- * 5 - Tx ring transmit done interrupt.
+ * Enable all TXDONE interrupts again.
*/
- if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
- rt2500pci_txdone(rt2x00dev, QID_AC_BK);
+ spin_lock_irq(&rt2x00dev->irqmask_lock);
- /* Enable interrupts again. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_ON_ISR);
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
- return IRQ_HANDLED;
+ spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
+
+static void rt2500pci_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_beacondone(rt2x00dev);
+ rt2500pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
+}
+
+static void rt2500pci_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00pci_rxdone(rt2x00dev);
+ rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
}
static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg;
+ u32 reg, mask;
/*
 * Get the interrupt sources & save them to a local variable.
@@ -1450,14 +1522,42 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return IRQ_HANDLED;
- /* Store irqvalues for use in the interrupt thread. */
- rt2x00dev->irqvalue[0] = reg;
+ mask = reg;
- /* Disable interrupts, will be enabled again in the interrupt thread. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_OFF_ISR);
+ /*
+ * Schedule tasklets for interrupt handling.
+ */
+ if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
+
+ if (rt2x00_get_field32(reg, CSR7_RXDONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+ if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
+ rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
+ rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+ /*
+ * Mask out all txdone interrupts.
+ */
+ rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
+ rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
+ rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
+ }
- return IRQ_WAKE_THREAD;
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now,
+ * the tasklet will reenable the appropriate interrupts.
+ */
+ spin_lock(&rt2x00dev->irqmask_lock);
+
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ reg |= mask;
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock(&rt2x00dev->irqmask_lock);
+
+ return IRQ_HANDLED;
}
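
The reworked handler above defers all heavy lifting to tasklets: the hard IRQ reads CSR7, schedules the matching tasklet, masks the corresponding CSR8 bits under irqmask_lock, and each tasklet re-enables its sources once it has drained the work. A minimal sketch of that pattern follows; the register offsets and symbols (IRQ_CAUSE, IRQ_MASK, SRC_RX, struct my_dev) are hypothetical stand-ins, not rt2x00 definitions, and the tasklet_init()/request_irq() wiring is omitted.

	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/spinlock.h>
	#include <linux/bitops.h>

	#define IRQ_CAUSE	0x00	/* read-to-clear cause register (assumed) */
	#define IRQ_MASK	0x04	/* bit set = source masked (assumed) */
	#define SRC_RX		BIT(0)

	struct my_dev {
		void __iomem *regs;
		spinlock_t irqmask_lock;
		struct tasklet_struct rx_tasklet;
	};

	static irqreturn_t my_isr(int irq, void *data)
	{
		struct my_dev *dev = data;
		u32 cause = readl(dev->regs + IRQ_CAUSE);

		if (!cause)
			return IRQ_NONE;

		if (cause & SRC_RX)
			tasklet_schedule(&dev->rx_tasklet);

		/* Mask the sources we just deferred; the tasklet unmasks them. */
		spin_lock(&dev->irqmask_lock);
		writel(readl(dev->regs + IRQ_MASK) | (cause & SRC_RX),
		       dev->regs + IRQ_MASK);
		spin_unlock(&dev->irqmask_lock);

		return IRQ_HANDLED;
	}

	static void my_rx_tasklet(unsigned long data)
	{
		struct my_dev *dev = (struct my_dev *)data;

		/* ... process completed RX descriptors here ... */

		/* Re-enable the RX interrupt now that the backlog is drained. */
		spin_lock_irq(&dev->irqmask_lock);
		writel(readl(dev->regs + IRQ_MASK) & ~SRC_RX,
		       dev->regs + IRQ_MASK);
		spin_unlock_irq(&dev->irqmask_lock);
	}
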
/*
@@ -1858,6 +1958,7 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
*/
__set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
/*
* Set the rssi offset.
@@ -1909,11 +2010,14 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
.get_tsf = rt2500pci_get_tsf,
.tx_last_beacon = rt2500pci_tx_last_beacon,
.rfkill_poll = rt2x00mac_rfkill_poll,
+ .flush = rt2x00mac_flush,
};
static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
.irq_handler = rt2500pci_interrupt,
- .irq_handler_thread = rt2500pci_interrupt_thread,
+ .txstatus_tasklet = rt2500pci_txstatus_tasklet,
+ .tbtt_tasklet = rt2500pci_tbtt_tasklet,
+ .rxdone_tasklet = rt2500pci_rxdone_tasklet,
.probe_hw = rt2500pci_probe_hw,
.initialize = rt2x00pci_initialize,
.uninitialize = rt2x00pci_uninitialize,
@@ -1924,10 +2028,11 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
.link_stats = rt2500pci_link_stats,
.reset_tuner = rt2500pci_reset_tuner,
.link_tuner = rt2500pci_link_tuner,
+ .start_queue = rt2500pci_start_queue,
+ .kick_queue = rt2500pci_kick_queue,
+ .stop_queue = rt2500pci_stop_queue,
.write_tx_desc = rt2500pci_write_tx_desc,
.write_beacon = rt2500pci_write_beacon,
- .kick_tx_queue = rt2500pci_kick_tx_queue,
- .kill_tx_queue = rt2500pci_kill_tx_queue,
.fill_rxdone = rt2500pci_fill_rxdone,
.config_filter = rt2500pci_config_filter,
.config_intf = rt2500pci_config_intf,
@@ -1937,28 +2042,28 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
};
static const struct data_queue_desc rt2500pci_queue_rx = {
- .entry_num = RX_ENTRIES,
+ .entry_num = 32,
.data_size = DATA_FRAME_SIZE,
.desc_size = RXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
};
static const struct data_queue_desc rt2500pci_queue_tx = {
- .entry_num = TX_ENTRIES,
+ .entry_num = 32,
.data_size = DATA_FRAME_SIZE,
.desc_size = TXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
};
static const struct data_queue_desc rt2500pci_queue_bcn = {
- .entry_num = BEACON_ENTRIES,
+ .entry_num = 1,
.data_size = MGMT_FRAME_SIZE,
.desc_size = TXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
};
static const struct data_queue_desc rt2500pci_queue_atim = {
- .entry_num = ATIM_ENTRIES,
+ .entry_num = 8,
.data_size = DATA_FRAME_SIZE,
.desc_size = TXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index d708031361ac..2aad7ba8a100 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -1088,8 +1088,8 @@
/*
* DMA descriptor defines.
*/
-#define TXD_DESC_SIZE ( 11 * sizeof(__le32) )
-#define RXD_DESC_SIZE ( 11 * sizeof(__le32) )
+#define TXD_DESC_SIZE (11 * sizeof(__le32))
+#define RXD_DESC_SIZE (11 * sizeof(__le32))
/*
* TX descriptor format for TX, PRIO, ATIM and Beacon Ring.
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 93e44c7f3a74..979fe6596a2d 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -39,7 +39,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 0;
+static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -478,9 +478,7 @@ static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
- rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync);
- rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
}
@@ -739,6 +737,55 @@ static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev,
}
/*
+ * Queue handlers.
+ */
+static void rt2500usb_start_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u16 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
+ rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 0);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
+ break;
+ case QID_BEACON:
+ rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
+ rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
+ rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
+ rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rt2500usb_stop_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u16 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
+ rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
+ break;
+ case QID_BEACON:
+ rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
+ rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0);
+ rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0);
+ rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
+ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
* Initialization functions.
*/
static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
@@ -931,18 +978,6 @@ static int rt2500usb_init_bbp(struct rt2x00_dev *rt2x00dev)
/*
* Device state switch handlers.
*/
-static void rt2500usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
- enum dev_state state)
-{
- u16 reg;
-
- rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
- rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX,
- (state == STATE_RADIO_RX_OFF) ||
- (state == STATE_RADIO_RX_OFF_LINK));
- rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
-}
-
static int rt2500usb_enable_radio(struct rt2x00_dev *rt2x00dev)
{
/*
@@ -1018,16 +1053,8 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
case STATE_RADIO_OFF:
rt2500usb_disable_radio(rt2x00dev);
break;
- case STATE_RADIO_RX_ON:
- case STATE_RADIO_RX_ON_LINK:
- case STATE_RADIO_RX_OFF:
- case STATE_RADIO_RX_OFF_LINK:
- rt2500usb_toggle_rx(rt2x00dev, state);
- break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
/* No support, but no error either */
break;
case STATE_DEEP_SLEEP:
@@ -1073,7 +1100,7 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry,
(txdesc->rate_mode == RATE_MODE_OFDM));
rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
- rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
@@ -1081,16 +1108,18 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
- rt2x00_set_field32(&word, TXD_W1_AIFS, txdesc->aifs);
- rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
- rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
+ rt2x00_set_field32(&word, TXD_W1_AIFS, entry->queue->aifs);
+ rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
+ rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
- rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
- rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
- rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
- rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
+ txdesc->u.plcp.length_low);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
+ txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 2, word);
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
@@ -1206,14 +1235,6 @@ static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
return length;
}
-static void rt2500usb_kill_tx_queue(struct data_queue *queue)
-{
- if (queue->qid == QID_BEACON)
- rt2500usb_register_write(queue->rt2x00dev, TXRX_CSR19, 0);
-
- rt2x00usb_kill_tx_queue(queue);
-}
-
/*
* RX control handlers
*/
@@ -1776,6 +1797,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
__set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags);
}
__set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
/*
* Set the rssi offset.
@@ -1801,6 +1823,7 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2x00mac_conf_tx,
.rfkill_poll = rt2x00mac_rfkill_poll,
+ .flush = rt2x00mac_flush,
};
static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
@@ -1813,11 +1836,13 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
.link_stats = rt2500usb_link_stats,
.reset_tuner = rt2500usb_reset_tuner,
.watchdog = rt2x00usb_watchdog,
+ .start_queue = rt2500usb_start_queue,
+ .kick_queue = rt2x00usb_kick_queue,
+ .stop_queue = rt2500usb_stop_queue,
+ .flush_queue = rt2x00usb_flush_queue,
.write_tx_desc = rt2500usb_write_tx_desc,
.write_beacon = rt2500usb_write_beacon,
.get_tx_data_len = rt2500usb_get_tx_data_len,
- .kick_tx_queue = rt2x00usb_kick_tx_queue,
- .kill_tx_queue = rt2500usb_kill_tx_queue,
.fill_rxdone = rt2500usb_fill_rxdone,
.config_shared_key = rt2500usb_config_key,
.config_pairwise_key = rt2500usb_config_key,
@@ -1829,28 +1854,28 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
};
static const struct data_queue_desc rt2500usb_queue_rx = {
- .entry_num = RX_ENTRIES,
+ .entry_num = 32,
.data_size = DATA_FRAME_SIZE,
.desc_size = RXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb),
};
static const struct data_queue_desc rt2500usb_queue_tx = {
- .entry_num = TX_ENTRIES,
+ .entry_num = 32,
.data_size = DATA_FRAME_SIZE,
.desc_size = TXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb),
};
static const struct data_queue_desc rt2500usb_queue_bcn = {
- .entry_num = BEACON_ENTRIES,
+ .entry_num = 1,
.data_size = MGMT_FRAME_SIZE,
.desc_size = TXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb_bcn),
};
static const struct data_queue_desc rt2500usb_queue_atim = {
- .entry_num = ATIM_ENTRIES,
+ .entry_num = 8,
.data_size = DATA_FRAME_SIZE,
.desc_size = TXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb),
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index eb8b6cab9925..70b9abbdeb9e 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -46,7 +46,12 @@
* RF2020 2.4G B/G
* RF3021 2.4G 1T2R
* RF3022 2.4G 2T2R
- * RF3052 2.4G 2T2R
+ * RF3052 2.4G/5G 2T2R
+ * RF2853 2.4G/5G 3T3R
+ * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
+ * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
+ * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF5390 2.4G 1T1R
*/
#define RF2820 0x0001
#define RF2850 0x0002
@@ -57,7 +62,11 @@
#define RF3021 0x0007
#define RF3022 0x0008
#define RF3052 0x0009
+#define RF2853 0x000a
#define RF3320 0x000b
+#define RF3322 0x000c
+#define RF3853 0x000d
+#define RF5390 0x5390
/*
* Chipset revisions.
@@ -70,6 +79,7 @@
#define REV_RT3071E 0x0211
#define REV_RT3090E 0x0211
#define REV_RT3390E 0x0211
+#define REV_RT5390F 0x0502
/*
* Signal information.
@@ -114,6 +124,13 @@
#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
/*
+ * AUX_CTRL: Aux/PCI-E related configuration
+ */
+#define AUX_CTRL 0x10c
+#define AUX_CTRL_WAKE_PCIE_EN FIELD32(0x00000002)
+#define AUX_CTRL_FORCE_PCIE_CLK FIELD32(0x00000400)
+
+/*
* OPT_14: Unknown register used by rt3xxx devices.
*/
#define OPT_14_CSR 0x0114
@@ -206,10 +223,10 @@
/*
* WMM_AIFSN_CFG: Aifsn for each EDCA AC
- * AIFSN0: AC_BE
- * AIFSN1: AC_BK
- * AIFSN2: AC_VI
- * AIFSN3: AC_VO
+ * AIFSN0: AC_VO
+ * AIFSN1: AC_VI
+ * AIFSN2: AC_BE
+ * AIFSN3: AC_BK
*/
#define WMM_AIFSN_CFG 0x0214
#define WMM_AIFSN_CFG_AIFSN0 FIELD32(0x0000000f)
@@ -219,10 +236,10 @@
/*
* WMM_CWMIN_CSR: CWmin for each EDCA AC
- * CWMIN0: AC_BE
- * CWMIN1: AC_BK
- * CWMIN2: AC_VI
- * CWMIN3: AC_VO
+ * CWMIN0: AC_VO
+ * CWMIN1: AC_VI
+ * CWMIN2: AC_BE
+ * CWMIN3: AC_BK
*/
#define WMM_CWMIN_CFG 0x0218
#define WMM_CWMIN_CFG_CWMIN0 FIELD32(0x0000000f)
@@ -232,10 +249,10 @@
/*
* WMM_CWMAX_CSR: CWmax for each EDCA AC
- * CWMAX0: AC_BE
- * CWMAX1: AC_BK
- * CWMAX2: AC_VI
- * CWMAX3: AC_VO
+ * CWMAX0: AC_VO
+ * CWMAX1: AC_VI
+ * CWMAX2: AC_BE
+ * CWMAX3: AC_BK
*/
#define WMM_CWMAX_CFG 0x021c
#define WMM_CWMAX_CFG_CWMAX0 FIELD32(0x0000000f)
@@ -244,18 +261,18 @@
#define WMM_CWMAX_CFG_CWMAX3 FIELD32(0x0000f000)
/*
- * AC_TXOP0: AC_BK/AC_BE TXOP register
- * AC0TXOP: AC_BK in unit of 32us
- * AC1TXOP: AC_BE in unit of 32us
+ * AC_TXOP0: AC_VO/AC_VI TXOP register
+ * AC0TXOP: AC_VO in unit of 32us
+ * AC1TXOP: AC_VI in unit of 32us
*/
#define WMM_TXOP0_CFG 0x0220
#define WMM_TXOP0_CFG_AC0TXOP FIELD32(0x0000ffff)
#define WMM_TXOP0_CFG_AC1TXOP FIELD32(0xffff0000)
/*
- * AC_TXOP1: AC_VO/AC_VI TXOP register
- * AC2TXOP: AC_VI in unit of 32us
- * AC3TXOP: AC_VO in unit of 32us
+ * AC_TXOP1: AC_BE/AC_BK TXOP register
+ * AC2TXOP: AC_BE in unit of 32us
+ * AC3TXOP: AC_BK in unit of 32us
*/
#define WMM_TXOP1_CFG 0x0224
#define WMM_TXOP1_CFG_AC2TXOP FIELD32(0x0000ffff)
@@ -263,6 +280,7 @@
/*
* GPIO_CTRL_CFG:
+ * GPIOD: GPIO direction, 0: Output, 1: Input
*/
#define GPIO_CTRL_CFG 0x0228
#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001)
@@ -273,7 +291,14 @@
#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020)
#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040)
#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080)
-#define GPIO_CTRL_CFG_BIT8 FIELD32(0x00000100)
+#define GPIO_CTRL_CFG_GPIOD_BIT0 FIELD32(0x00000100)
+#define GPIO_CTRL_CFG_GPIOD_BIT1 FIELD32(0x00000200)
+#define GPIO_CTRL_CFG_GPIOD_BIT2 FIELD32(0x00000400)
+#define GPIO_CTRL_CFG_GPIOD_BIT3 FIELD32(0x00000800)
+#define GPIO_CTRL_CFG_GPIOD_BIT4 FIELD32(0x00001000)
+#define GPIO_CTRL_CFG_GPIOD_BIT5 FIELD32(0x00002000)
+#define GPIO_CTRL_CFG_GPIOD_BIT6 FIELD32(0x00004000)
+#define GPIO_CTRL_CFG_GPIOD_BIT7 FIELD32(0x00008000)
/*
* MCU_CMD_CFG
@@ -281,7 +306,7 @@
#define MCU_CMD_CFG 0x022c
/*
- * AC_BK register offsets
+ * AC_VO register offsets
*/
#define TX_BASE_PTR0 0x0230
#define TX_MAX_CNT0 0x0234
@@ -289,7 +314,7 @@
#define TX_DTX_IDX0 0x023c
/*
- * AC_BE register offsets
+ * AC_VI register offsets
*/
#define TX_BASE_PTR1 0x0240
#define TX_MAX_CNT1 0x0244
@@ -297,7 +322,7 @@
#define TX_DTX_IDX1 0x024c
/*
- * AC_VI register offsets
+ * AC_BE register offsets
*/
#define TX_BASE_PTR2 0x0250
#define TX_MAX_CNT2 0x0254
@@ -305,7 +330,7 @@
#define TX_DTX_IDX2 0x025c
/*
- * AC_VO register offsets
+ * AC_BK register offsets
*/
#define TX_BASE_PTR3 0x0260
#define TX_MAX_CNT3 0x0264
@@ -365,8 +390,12 @@
/*
* US_CYC_CNT
+ * BT_MODE_EN: Bluetooth mode enable
+ * CLOCK CYCLE: Clock cycle count in 1us.
+ * PCI:0x21, PCIE:0x7d, USB:0x1e
*/
#define US_CYC_CNT 0x02a4
+#define US_CYC_CNT_BT_MODE_EN FIELD32(0x00000100)
#define US_CYC_CNT_CLOCK_CYCLE FIELD32(0x000000ff)
/*
@@ -412,10 +441,22 @@
#define BCN_OFFSET1_BCN7 FIELD32(0xff000000)
/*
- * PBF registers
- * Most are for debug. Driver doesn't touch PBF register.
+ * TXRXQ_PCNT: PBF register
+ * PCNT_TX0Q: Page count for TX hardware queue 0
+ * PCNT_TX1Q: Page count for TX hardware queue 1
+ * PCNT_TX2Q: Page count for TX hardware queue 2
+ * PCNT_RX0Q: Page count for RX hardware queue
*/
#define TXRXQ_PCNT 0x0438
+#define TXRXQ_PCNT_TX0Q FIELD32(0x000000ff)
+#define TXRXQ_PCNT_TX1Q FIELD32(0x0000ff00)
+#define TXRXQ_PCNT_TX2Q FIELD32(0x00ff0000)
+#define TXRXQ_PCNT_RX0Q FIELD32(0xff000000)
+
+/*
+ * PBF register
+ * Debug. Driver doesn't touch PBF register.
+ */
#define PBF_DBG 0x043c
/*
@@ -423,7 +464,7 @@
*/
#define RF_CSR_CFG 0x0500
#define RF_CSR_CFG_DATA FIELD32(0x000000ff)
-#define RF_CSR_CFG_REGNUM FIELD32(0x00001f00)
+#define RF_CSR_CFG_REGNUM FIELD32(0x00003f00)
#define RF_CSR_CFG_WRITE FIELD32(0x00010000)
#define RF_CSR_CFG_BUSY FIELD32(0x00020000)
@@ -686,8 +727,18 @@
/*
* CH_TIME_CFG: count as channel busy
+ * EIFS_BUSY: Count EIFS as channel busy
+ * NAV_BUSY: Count NAV as channel busy
+ * RX_BUSY: Count RX as channel busy
+ * TX_BUSY: Count TX as channel busy
+ * TMR_EN: Enable channel statistics timer
*/
#define CH_TIME_CFG 0x110c
+#define CH_TIME_CFG_EIFS_BUSY FIELD32(0x00000010)
+#define CH_TIME_CFG_NAV_BUSY FIELD32(0x00000008)
+#define CH_TIME_CFG_RX_BUSY FIELD32(0x00000004)
+#define CH_TIME_CFG_TX_BUSY FIELD32(0x00000002)
+#define CH_TIME_CFG_TMR_EN FIELD32(0x00000001)
/*
* PBF_LIFE_TIMER: TX/RX MPDU timestamp timer (free run) Unit: 1us
@@ -960,8 +1011,31 @@
/*
* TXOP_CTRL_CFG:
+ * TIMEOUT_TRUN_EN: Enable/Disable TXOP timeout truncation
+ * AC_TRUN_EN: Enable/Disable truncation for AC change
+ * TXRATEGRP_TRUN_EN: Enable/Disable truncation for TX rate group change
+ * USER_MODE_TRUN_EN: Enable/Disable truncation for user TXOP mode
+ * MIMO_PS_TRUN_EN: Enable/Disable truncation for MIMO PS RTS/CTS
+ * RESERVED_TRUN_EN: Reserved
+ * LSIG_TXOP_EN: Enable/Disable L-SIG TXOP protection
+ * EXT_CCA_EN: Enable/Disable extension channel CCA reference (Defer 40MHz
+ * transmissions if extension CCA is clear).
+ * EXT_CCA_DLY: Extension CCA signal delay time (unit: us)
+ * EXT_CWMIN: CwMin for extension channel backoff
+ * 0: Disabled
+ *
*/
#define TXOP_CTRL_CFG 0x1340
+#define TXOP_CTRL_CFG_TIMEOUT_TRUN_EN FIELD32(0x00000001)
+#define TXOP_CTRL_CFG_AC_TRUN_EN FIELD32(0x00000002)
+#define TXOP_CTRL_CFG_TXRATEGRP_TRUN_EN FIELD32(0x00000004)
+#define TXOP_CTRL_CFG_USER_MODE_TRUN_EN FIELD32(0x00000008)
+#define TXOP_CTRL_CFG_MIMO_PS_TRUN_EN FIELD32(0x00000010)
+#define TXOP_CTRL_CFG_RESERVED_TRUN_EN FIELD32(0x00000020)
+#define TXOP_CTRL_CFG_LSIG_TXOP_EN FIELD32(0x00000040)
+#define TXOP_CTRL_CFG_EXT_CCA_EN FIELD32(0x00000080)
+#define TXOP_CTRL_CFG_EXT_CCA_DLY FIELD32(0x0000ff00)
+#define TXOP_CTRL_CFG_EXT_CWMIN FIELD32(0x000f0000)
/*
* TX_RTS_CFG:
@@ -1080,8 +1154,8 @@
* PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
* PROTECT_CTRL: Protection control frame type for CCK TX
* 0:none, 1:RTS/CTS, 2:CTS-to-self
- * PROTECT_NAV: TXOP protection type for CCK TX
- * 0:none, 1:ShortNAVprotect, 2:LongNAVProtect
+ * PROTECT_NAV_SHORT: TXOP protection type for CCK TX with short NAV
+ * PROTECT_NAV_LONG: TXOP protection type for CCK TX with long NAV
* TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
* TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow
* TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow
@@ -1093,7 +1167,8 @@
#define CCK_PROT_CFG 0x1364
#define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
#define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
-#define CCK_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define CCK_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define CCK_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
#define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
#define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1108,7 +1183,8 @@
#define OFDM_PROT_CFG 0x1368
#define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
#define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
-#define OFDM_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define OFDM_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define OFDM_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1123,7 +1199,8 @@
#define MM20_PROT_CFG 0x136c
#define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
#define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
-#define MM20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define MM20_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define MM20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
#define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
#define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1138,7 +1215,8 @@
#define MM40_PROT_CFG 0x1370
#define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
#define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
-#define MM40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define MM40_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define MM40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
#define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
#define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1153,7 +1231,8 @@
#define GF20_PROT_CFG 0x1374
#define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
#define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
-#define GF20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define GF20_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define GF20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
#define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
#define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1168,7 +1247,8 @@
#define GF40_PROT_CFG 0x1378
#define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
#define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
-#define GF40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
+#define GF40_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
+#define GF40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
#define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
#define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1485,17 +1565,17 @@
#define SHARED_KEY_MODE_BASE 0x7000
#define MAC_WCID_ENTRY(__idx) \
- ( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) )
+ (MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)))
#define PAIRWISE_KEY_ENTRY(__idx) \
- ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
+ (PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)))
#define MAC_IVEIV_ENTRY(__idx) \
- ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
+ (MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)))
#define MAC_WCID_ATTR_ENTRY(__idx) \
- ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
+ (MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)))
#define SHARED_KEY_ENTRY(__idx) \
- ( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
+ (SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)))
#define SHARED_KEY_MODE_ENTRY(__idx) \
- ( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) )
+ (SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)))
struct mac_wcid_entry {
u8 mac[6];
@@ -1635,9 +1715,9 @@ struct mac_iveiv_entry {
#define HW_BEACON_BASE7 0x5bc0
#define HW_BEACON_OFFSET(__index) \
- ( ((__index) < 4) ? ( HW_BEACON_BASE0 + (__index * 0x0200) ) : \
- (((__index) < 6) ? ( HW_BEACON_BASE4 + ((__index - 4) * 0x0200) ) : \
- (HW_BEACON_BASE6 - ((__index - 6) * 0x0200))) )
+ (((__index) < 4) ? (HW_BEACON_BASE0 + (__index * 0x0200)) : \
+ (((__index) < 6) ? (HW_BEACON_BASE4 + ((__index - 4) * 0x0200)) : \
+ (HW_BEACON_BASE6 - ((__index - 6) * 0x0200))))
/*
* BBP registers.
@@ -1645,11 +1725,14 @@ struct mac_iveiv_entry {
*/
/*
- * BBP 1: TX Antenna & Power
- * POWER: 0 - normal, 1 - drop tx power by 6dBm, 2 - drop tx power by 12dBm,
- * 3 - increase tx power by 6dBm
+ * BBP 1: TX Antenna & Power Control
+ * POWER_CTRL:
+ * 0 - normal,
+ * 1 - drop tx power by 6dBm,
+ * 2 - drop tx power by 12dBm,
+ * 3 - increase tx power by 6dBm
*/
-#define BBP1_TX_POWER FIELD8(0x07)
+#define BBP1_TX_POWER_CTRL FIELD8(0x07)
#define BBP1_TX_ANTENNA FIELD8(0x18)
/*
@@ -1663,6 +1746,13 @@ struct mac_iveiv_entry {
*/
#define BBP4_TX_BF FIELD8(0x01)
#define BBP4_BANDWIDTH FIELD8(0x18)
+#define BBP4_MAC_IF_CTRL FIELD8(0x40)
+
+/*
+ * BBP 109
+ */
+#define BBP109_TX0_POWER FIELD8(0x0f)
+#define BBP109_TX1_POWER FIELD8(0xf0)
/*
* BBP 138: Unknown
@@ -1673,6 +1763,11 @@ struct mac_iveiv_entry {
#define BBP138_TX_DAC2 FIELD8(0x40)
/*
+ * BBP 152: Rx Ant
+ */
+#define BBP152_RX_DEFAULT_ANT FIELD8(0x80)
+
+/*
* RFCSR registers
* The wordsize of the RFCSR is 8 bits.
*/
@@ -1681,12 +1776,18 @@ struct mac_iveiv_entry {
* RFCSR 1:
*/
#define RFCSR1_RF_BLOCK_EN FIELD8(0x01)
+#define RFCSR1_PLL_PD FIELD8(0x02)
#define RFCSR1_RX0_PD FIELD8(0x04)
#define RFCSR1_TX0_PD FIELD8(0x08)
#define RFCSR1_RX1_PD FIELD8(0x10)
#define RFCSR1_TX1_PD FIELD8(0x20)
/*
+ * RFCSR 2:
+ */
+#define RFCSR2_RESCAL_EN FIELD8(0x80)
+
+/*
* RFCSR 6:
*/
#define RFCSR6_R1 FIELD8(0x03)
@@ -1698,6 +1799,11 @@ struct mac_iveiv_entry {
#define RFCSR7_RF_TUNING FIELD8(0x01)
/*
+ * RFCSR 11:
+ */
+#define RFCSR11_R FIELD8(0x03)
+
+/*
* RFCSR 12:
*/
#define RFCSR12_TX_POWER FIELD8(0x1f)
@@ -1718,6 +1824,7 @@ struct mac_iveiv_entry {
#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
#define RFCSR17_TX_LO1_EN FIELD8(0x08)
#define RFCSR17_R FIELD8(0x20)
+#define RFCSR17_CODE FIELD8(0x7f)
/*
* RFCSR 20:
@@ -1750,9 +1857,33 @@ struct mac_iveiv_entry {
/*
* RFCSR 30:
*/
+#define RFCSR30_TX_H20M FIELD8(0x02)
+#define RFCSR30_RX_H20M FIELD8(0x04)
+#define RFCSR30_RX_VCM FIELD8(0x18)
#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
/*
+ * RFCSR 31:
+ */
+#define RFCSR31_RX_AGC_FC FIELD8(0x1f)
+#define RFCSR31_RX_H20M FIELD8(0x20)
+
+/*
+ * RFCSR 38:
+ */
+#define RFCSR38_RX_LO1_EN FIELD8(0x20)
+
+/*
+ * RFCSR 39:
+ */
+#define RFCSR39_RX_LO2_EN FIELD8(0x80)
+
+/*
+ * RFCSR 49:
+ */
+#define RFCSR49_TX FIELD8(0x3f)
+
+/*
* RF registers
*/
@@ -1785,6 +1916,11 @@ struct mac_iveiv_entry {
*/
/*
+ * Chip ID
+ */
+#define EEPROM_CHIP_ID 0x0000
+
+/*
* EEPROM Version
*/
#define EEPROM_VERSION 0x0001
@@ -1805,32 +1941,51 @@ struct mac_iveiv_entry {
#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00)
/*
- * EEPROM ANTENNA config
+ * EEPROM NIC Configuration 0
* RXPATH: 1: 1R, 2: 2R, 3: 3R
- * TXPATH: 1: 1T, 2: 2T
- */
-#define EEPROM_ANTENNA 0x001a
-#define EEPROM_ANTENNA_RXPATH FIELD16(0x000f)
-#define EEPROM_ANTENNA_TXPATH FIELD16(0x00f0)
-#define EEPROM_ANTENNA_RF_TYPE FIELD16(0x0f00)
-
-/*
- * EEPROM NIC config
- * CARDBUS_ACCEL: 0 - enable, 1 - disable
- */
-#define EEPROM_NIC 0x001b
-#define EEPROM_NIC_HW_RADIO FIELD16(0x0001)
-#define EEPROM_NIC_DYNAMIC_TX_AGC FIELD16(0x0002)
-#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0004)
-#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0008)
-#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0010)
-#define EEPROM_NIC_BW40M_SB_BG FIELD16(0x0020)
-#define EEPROM_NIC_BW40M_SB_A FIELD16(0x0040)
-#define EEPROM_NIC_WPS_PBC FIELD16(0x0080)
-#define EEPROM_NIC_BW40M_BG FIELD16(0x0100)
-#define EEPROM_NIC_BW40M_A FIELD16(0x0200)
-#define EEPROM_NIC_ANT_DIVERSITY FIELD16(0x0800)
-#define EEPROM_NIC_DAC_TEST FIELD16(0x8000)
+ * TXPATH: 1: 1T, 2: 2T, 3: 3T
+ * RF_TYPE: RFIC type
+ */
+#define EEPROM_NIC_CONF0 0x001a
+#define EEPROM_NIC_CONF0_RXPATH FIELD16(0x000f)
+#define EEPROM_NIC_CONF0_TXPATH FIELD16(0x00f0)
+#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
+
+/*
+ * EEPROM NIC Configuration 1
+ * HW_RADIO: 0: disable, 1: enable
+ * EXTERNAL_TX_ALC: 0: disable, 1: enable
+ * EXTERNAL_LNA_2G: 0: disable, 1: enable
+ * EXTERNAL_LNA_5G: 0: disable, 1: enable
+ * CARDBUS_ACCEL: 0: enable, 1: disable
+ * BW40M_SB_2G: 0: disable, 1: enable
+ * BW40M_SB_5G: 0: disable, 1: enable
+ * WPS_PBC: 0: disable, 1: enable
+ * BW40M_2G: 0: enable, 1: disable
+ * BW40M_5G: 0: enable, 1: disable
+ * BROADBAND_EXT_LNA: 0: disable, 1: enable
+ * ANT_DIVERSITY: 00: Disable, 01: Diversity,
+ * 10: Main antenna, 11: Aux antenna
+ * INTERNAL_TX_ALC: 0: disable, 1: enable
+ * BT_COEXIST: 0: disable, 1: enable
+ * DAC_TEST: 0: disable, 1: enable
+ */
+#define EEPROM_NIC_CONF1 0x001b
+#define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001)
+#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G FIELD16(0x0008)
+#define EEPROM_NIC_CONF1_CARDBUS_ACCEL FIELD16(0x0010)
+#define EEPROM_NIC_CONF1_BW40M_SB_2G FIELD16(0x0020)
+#define EEPROM_NIC_CONF1_BW40M_SB_5G FIELD16(0x0040)
+#define EEPROM_NIC_CONF1_WPS_PBC FIELD16(0x0080)
+#define EEPROM_NIC_CONF1_BW40M_2G FIELD16(0x0100)
+#define EEPROM_NIC_CONF1_BW40M_5G FIELD16(0x0200)
+#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA FIELD16(0x0400)
+#define EEPROM_NIC_CONF1_ANT_DIVERSITY FIELD16(0x1800)
+#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000)
+#define EEPROM_NIC_CONF1_BT_COEXIST FIELD16(0x4000)
+#define EEPROM_NIC_CONF1_DAC_TEST FIELD16(0x8000)
/*
* EEPROM frequency
@@ -1852,9 +2007,9 @@ struct mac_iveiv_entry {
* POLARITY_GPIO_4: Polarity GPIO4 setting.
* LED_MODE: Led mode.
*/
-#define EEPROM_LED1 0x001e
-#define EEPROM_LED2 0x001f
-#define EEPROM_LED3 0x0020
+#define EEPROM_LED_AG_CONF 0x001e
+#define EEPROM_LED_ACT_CONF 0x001f
+#define EEPROM_LED_POLARITY 0x0020
#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001)
#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002)
#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004)
@@ -1866,6 +2021,17 @@ struct mac_iveiv_entry {
#define EEPROM_LED_LED_MODE FIELD16(0x1f00)
/*
+ * EEPROM NIC Configuration 2
+ * RX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
+ * TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
+ * CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved
+ */
+#define EEPROM_NIC_CONF2 0x0021
+#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
+#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
+#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
+
+/*
* EEPROM LNA
*/
#define EEPROM_LNA 0x0022
@@ -1907,23 +2073,26 @@ struct mac_iveiv_entry {
#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
/*
- * EEPROM Maximum TX power values
+ * EEPROM EIRP Maximum TX power values (unit: dBm)
*/
-#define EEPROM_MAX_TX_POWER 0x0027
-#define EEPROM_MAX_TX_POWER_24GHZ FIELD16(0x00ff)
-#define EEPROM_MAX_TX_POWER_5GHZ FIELD16(0xff00)
+#define EEPROM_EIRP_MAX_TX_POWER 0x0027
+#define EEPROM_EIRP_MAX_TX_POWER_2GHZ FIELD16(0x00ff)
+#define EEPROM_EIRP_MAX_TX_POWER_5GHZ FIELD16(0xff00)
/*
* EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
- * This is delta in 40MHZ.
- * VALUE: Tx Power dalta value (MAX=4)
+ * This is delta in 40MHZ.
+ * VALUE: Tx Power delta value, MAX=4 (unit: dBm)
* TYPE: 1: Plus the delta value, 0: minus the delta value
- * TXPOWER: Enable:
+ * ENABLE: enable TX power compensation for 40MHz BW
*/
#define EEPROM_TXPOWER_DELTA 0x0028
-#define EEPROM_TXPOWER_DELTA_VALUE FIELD16(0x003f)
-#define EEPROM_TXPOWER_DELTA_TYPE FIELD16(0x0040)
-#define EEPROM_TXPOWER_DELTA_TXPOWER FIELD16(0x0080)
+#define EEPROM_TXPOWER_DELTA_VALUE_2G FIELD16(0x003f)
+#define EEPROM_TXPOWER_DELTA_TYPE_2G FIELD16(0x0040)
+#define EEPROM_TXPOWER_DELTA_ENABLE_2G FIELD16(0x0080)
+#define EEPROM_TXPOWER_DELTA_VALUE_5G FIELD16(0x3f00)
+#define EEPROM_TXPOWER_DELTA_TYPE_5G FIELD16(0x4000)
+#define EEPROM_TXPOWER_DELTA_ENABLE_5G FIELD16(0x8000)
/*
* EEPROM TXPOWER 802.11BG
@@ -1971,11 +2140,12 @@ struct mac_iveiv_entry {
#define MCU_CURRENT 0x36
#define MCU_LED 0x50
#define MCU_LED_STRENGTH 0x51
-#define MCU_LED_1 0x52
-#define MCU_LED_2 0x53
-#define MCU_LED_3 0x54
+#define MCU_LED_AG_CONF 0x52
+#define MCU_LED_ACT_CONF 0x53
+#define MCU_LED_LED_POLARITY 0x54
#define MCU_RADAR 0x60
#define MCU_BOOT_SIGNAL 0x72
+#define MCU_ANT_SELECT 0x73
#define MCU_BBP_SIGNAL 0x80
#define MCU_POWER_SAVE 0x83
@@ -1987,8 +2157,8 @@ struct mac_iveiv_entry {
/*
* DMA descriptor defines.
*/
-#define TXWI_DESC_SIZE ( 4 * sizeof(__le32) )
-#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
+#define TXWI_DESC_SIZE (4 * sizeof(__le32))
+#define RXWI_DESC_SIZE (4 * sizeof(__le32))
/*
* TX WI structure
@@ -2120,4 +2290,9 @@ struct mac_iveiv_entry {
#define TXPOWER_A_TO_DEV(__txpower) \
clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
+/*
+ * Board's maximum TX power limitation
+ */
+#define EIRP_MAX_TX_POWER_LIMIT 0x50
+
#endif /* RT2800_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 5f00e00789d8..2ee6cebb9b25 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -277,13 +277,17 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
unsigned int i;
u32 reg;
+ /*
+ * Some devices are really slow to respond here. Wait a whole second
+ * before timing out.
+ */
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
!rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
return 0;
- msleep(1);
+ msleep(10);
}
ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
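
The comment added above rests on simple arithmetic: REGISTER_BUSY_COUNT iterations with a 10 ms sleep give roughly a full second before the wait is abandoned. A generic poll-until-idle helper in the same spirit is sketched below; poll_until_idle(), POLL_BUSY_COUNT and the read_status callback are placeholders, not rt2x00 symbols.

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	#define POLL_BUSY_COUNT	100	/* 100 * 10ms = ~1s total wait */

	static int poll_until_idle(u32 (*read_status)(void *ctx), void *ctx,
				   u32 busy_mask)
	{
		unsigned int i;

		for (i = 0; i < POLL_BUSY_COUNT; i++) {
			/* Done as soon as none of the busy bits are set. */
			if (!(read_status(ctx) & busy_mask))
				return 0;
			msleep(10);
		}
		return -EBUSY;	/* device never became idle */
	}
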
@@ -396,8 +400,15 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
if (rt2800_wait_csr_ready(rt2x00dev))
return -EBUSY;
- if (rt2x00_is_pci(rt2x00dev))
+ if (rt2x00_is_pci(rt2x00dev)) {
+ if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
+ rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
+ rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
+ rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
+ }
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
+ }
/*
* Disable DMA, will be reenabled later when enabling
@@ -461,14 +472,15 @@ void rt2800_write_tx_data(struct queue_entry *entry,
test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXWI_W0_AMPDU,
test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
- rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->txop);
- rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
+ rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY,
+ txdesc->u.ht.mpdu_density);
+ rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->u.ht.txop);
+ rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->u.ht.mcs);
rt2x00_set_field32(&word, TXWI_W0_BW,
test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
+ rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->u.ht.stbc);
rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
rt2x00_desc_write(txwi, 0, word);
@@ -477,13 +489,13 @@ void rt2800_write_tx_data(struct queue_entry *entry,
test_bit(ENTRY_TXD_ACK, &txdesc->flags));
rt2x00_set_field32(&word, TXWI_W1_NSEQ,
test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
- rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
+ rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->u.ht.ba_size);
rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
txdesc->key_idx : 0xff);
rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
txdesc->length);
- rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, txdesc->qid);
+ rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, entry->queue->qid);
rt2x00_set_field32(&word, TXWI_W1_PACKETID_ENTRY, (entry->entry_idx % 3) + 1);
rt2x00_desc_write(txwi, 1, word);
@@ -670,7 +682,7 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status)
 * confuse the rate control algorithm by providing clearly wrong
* data.
*/
- if (aggr == 1 && ampdu == 0 && real_mcs != mcs) {
+ if (unlikely(aggr == 1 && ampdu == 0 && real_mcs != mcs)) {
skbdesc->tx_rate_idx = real_mcs;
mcs = real_mcs;
}
@@ -727,7 +739,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
* that the TX_STA_FIFO stack has a size of 16. We stick to our
* tx ring size for now.
*/
- for (i = 0; i < TX_ENTRIES; i++) {
+ for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
break;
@@ -740,7 +752,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
if (pid >= QID_RX)
continue;
- queue = rt2x00queue_get_queue(rt2x00dev, pid);
+ queue = rt2x00queue_get_tx_queue(rt2x00dev, pid);
if (unlikely(!queue))
continue;
@@ -768,13 +780,15 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
unsigned int beacon_base;
- u32 reg;
+ unsigned int padding_len;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ orig_reg = reg;
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
@@ -802,17 +816,24 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
/*
- * Write entire beacon with TXWI to register.
+ * Write entire beacon with TXWI and padding to register.
*/
+ padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
+ if (padding_len && skb_pad(entry->skb, padding_len)) {
+ ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
+ /* skb freed by skb_pad() on failure */
+ entry->skb = NULL;
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
+ return;
+ }
+
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
- rt2800_register_multiwrite(rt2x00dev, beacon_base,
- entry->skb->data, entry->skb->len);
+ rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
+ entry->skb->len + padding_len);
/*
* Enable beaconing again.
*/
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
@@ -824,8 +845,8 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
}
EXPORT_SYMBOL_GPL(rt2800_write_beacon);
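
The padding introduced above rounds the beacon frame up to a 4-byte multiple before it is copied into the on-chip buffer, which is addressed in 32-bit words. A standalone sketch of that computation with example lengths; this is plain userspace C, and roundup4() is a stand-in for the kernel's roundup() macro.

	#include <stdio.h>

	static unsigned int roundup4(unsigned int len)
	{
		/* Round up to the next multiple of 4 bytes. */
		return (len + 3) & ~3u;
	}

	int main(void)
	{
		unsigned int lens[] = { 68, 70, 71, 72 };
		unsigned int i;

		for (i = 0; i < 4; i++) {
			unsigned int pad = roundup4(lens[i]) - lens[i];
			printf("len %u -> pad %u, written %u\n",
			       lens[i], pad, lens[i] + pad);
		}
		return 0;
	}
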
-static void inline rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
- unsigned int beacon_base)
+static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
+ unsigned int beacon_base)
{
int i;
@@ -838,6 +859,33 @@ static void inline rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, beacon_base + i, 0);
}
+void rt2800_clear_beacon(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ u32 reg;
+
+ /*
+ * Disable beaconing while we are reloading the beacon data,
+ * otherwise we might be sending out invalid data.
+ */
+ rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ /*
+ * Clear beacon.
+ */
+ rt2800_clear_beacon_register(rt2x00dev,
+ HW_BEACON_OFFSET(entry->entry_idx));
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+}
+EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
+
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
const struct rt2x00debug rt2800_rt2x00debug = {
.owner = THIS_MODULE,
@@ -998,7 +1046,7 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
memset(&wcid_entry, 0, sizeof(wcid_entry));
if (crypto->cmd == SET_KEY)
- memcpy(&wcid_entry, crypto->address, ETH_ALEN);
+ memcpy(wcid_entry.mac, crypto->address, ETH_ALEN);
rt2800_register_multiwrite(rt2x00dev, offset,
&wcid_entry, sizeof(wcid_entry));
}
@@ -1053,27 +1101,44 @@ int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
}
EXPORT_SYMBOL_GPL(rt2800_config_shared_key);
+static inline int rt2800_find_pairwise_keyslot(struct rt2x00_dev *rt2x00dev)
+{
+ int idx;
+ u32 offset, reg;
+
+ /*
+ * Search for the first free pairwise key entry and return the
+ * corresponding index.
+ *
+ * Make sure the WCID starts _after_ the last possible shared key
+ * entry (>32).
+ *
+ * Since parts of the pairwise key table might be shared with
+ * the beacon frame buffers 6 & 7 we should only write into the
+ * first 222 entries.
+ */
+ for (idx = 33; idx <= 222; idx++) {
+ offset = MAC_WCID_ATTR_ENTRY(idx);
+ rt2800_register_read(rt2x00dev, offset, &reg);
+ if (!reg)
+ return idx;
+ }
+ return -1;
+}
+
int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key)
{
struct hw_key_entry key_entry;
u32 offset;
+ int idx;
if (crypto->cmd == SET_KEY) {
- /*
- * 1 pairwise key is possible per AID, this means that the AID
- * equals our hw_key_idx. Make sure the WCID starts _after_ the
- * last possible shared key entry.
- *
- * Since parts of the pairwise key table might be shared with
- * the beacon frame buffers 6 & 7 we should only write into the
- * first 222 entries.
- */
- if (crypto->aid > (222 - 32))
+ idx = rt2800_find_pairwise_keyslot(rt2x00dev);
+ if (idx < 0)
return -ENOSPC;
-
- key->hw_key_idx = 32 + crypto->aid;
+ key->hw_key_idx = idx;
memcpy(key_entry.key, crypto->key,
sizeof(key_entry.key));
@@ -1144,35 +1209,28 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
struct rt2x00intf_conf *conf, const unsigned int flags)
{
u32 reg;
+ bool update_bssid = false;
if (flags & CONFIG_UPDATE_TYPE) {
/*
- * Clear current synchronisation setup.
- */
- rt2800_clear_beacon(rt2x00dev,
- HW_BEACON_OFFSET(intf->beacon->entry_idx));
- /*
* Enable synchronisation.
*/
rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE,
- (conf->sync == TSF_SYNC_ADHOC ||
- conf->sync == TSF_SYNC_AP_NONE));
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
- /*
- * Enable pre tbtt interrupt for beaconing modes
- */
- rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
- rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER,
- (conf->sync == TSF_SYNC_AP_NONE));
- rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
-
}
if (flags & CONFIG_UPDATE_MAC) {
+ if (flags & CONFIG_UPDATE_TYPE &&
+ conf->sync == TSF_SYNC_AP_NONE) {
+ /*
+ * The BSSID register has to be set to our own mac
+ * address in AP mode.
+ */
+ memcpy(conf->bssid, conf->mac, sizeof(conf->mac));
+ update_bssid = true;
+ }
+
if (!is_zero_ether_addr((const u8 *)conf->mac)) {
reg = le32_to_cpu(conf->mac[1]);
rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
@@ -1183,7 +1241,7 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
conf->mac, sizeof(conf->mac));
}
- if (flags & CONFIG_UPDATE_BSSID) {
+ if ((flags & CONFIG_UPDATE_BSSID) || update_bssid) {
if (!is_zero_ether_addr((const u8 *)conf->bssid)) {
reg = le32_to_cpu(conf->bssid[1]);
rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3);
@@ -1343,10 +1401,32 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
}
EXPORT_SYMBOL_GPL(rt2800_config_erp);
+static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
+ enum antenna ant)
+{
+ u32 reg;
+ u8 eesk_pin = (ant == ANTENNA_A) ? 1 : 0;
+ u8 gpio_bit3 = (ant == ANTENNA_A) ? 0 : 1;
+
+ if (rt2x00_is_pci(rt2x00dev)) {
+ rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
+ rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK, eesk_pin);
+ rt2800_register_write(rt2x00dev, E2PROM_CSR, reg);
+ } else if (rt2x00_is_usb(rt2x00dev))
+ rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff,
+ eesk_pin, 0);
+
+ rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, gpio_bit3);
+ rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+}
+
void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
{
u8 r1;
u8 r3;
+ u16 eeprom;
rt2800_bbp_read(rt2x00dev, 1, &r1);
rt2800_bbp_read(rt2x00dev, 3, &r3);
@@ -1354,7 +1434,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
/*
* Configure the TX antenna.
*/
- switch ((int)ant->tx) {
+ switch (ant->tx_chain_num) {
case 1:
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
break;
@@ -1369,8 +1449,18 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
/*
* Configure the RX antenna.
*/
- switch ((int)ant->rx) {
+ switch (ant->rx_chain_num) {
case 1:
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2x00_eeprom_read(rt2x00dev,
+ EEPROM_NIC_CONF1, &eeprom);
+ if (rt2x00_get_field16(eeprom,
+ EEPROM_NIC_CONF1_ANT_DIVERSITY))
+ rt2800_set_ant_diversity(rt2x00dev,
+ rt2x00dev->default_ant.rx);
+ }
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
break;
case 2:
@@ -1416,13 +1506,13 @@ static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
{
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
- if (rt2x00dev->default_ant.tx == 1)
+ if (rt2x00dev->default_ant.tx_chain_num == 1)
rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);
- if (rt2x00dev->default_ant.rx == 1) {
+ if (rt2x00dev->default_ant.rx_chain_num == 1) {
rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
- } else if (rt2x00dev->default_ant.rx == 2)
+ } else if (rt2x00dev->default_ant.rx_chain_num == 2)
rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
if (rf->channel > 14) {
@@ -1508,6 +1598,105 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
}
+
+#define RT5390_POWER_BOUND 0x27
+#define RT5390_FREQ_OFFSET_BOUND 0x5f
+
+static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
+{
+ u8 rfcsr;
+ u16 eeprom;
+
+ rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
+ rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
+ rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+ if (info->default_power1 > RT5390_POWER_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
+ rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE,
+ RT5390_FREQ_OFFSET_BOUND);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ if (rf->channel <= 14) {
+ int idx = rf->channel-1;
+
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
+ /* r55/r59 value array of channel 1~14 */
+ static const char r55_bt_rev[] = {0x83, 0x83,
+ 0x83, 0x73, 0x73, 0x63, 0x53, 0x53,
+ 0x53, 0x43, 0x43, 0x43, 0x43, 0x43};
+ static const char r59_bt_rev[] = {0x0e, 0x0e,
+ 0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09,
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x07};
+
+ rt2800_rfcsr_write(rt2x00dev, 55,
+ r55_bt_rev[idx]);
+ rt2800_rfcsr_write(rt2x00dev, 59,
+ r59_bt_rev[idx]);
+ } else {
+ static const char r59_bt[] = {0x8b, 0x8b, 0x8b,
+ 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89,
+ 0x88, 0x88, 0x86, 0x85, 0x84};
+
+ rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]);
+ }
+ } else {
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
+ static const char r55_nonbt_rev[] = {0x23, 0x23,
+ 0x23, 0x23, 0x13, 0x13, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03};
+ static const char r59_nonbt_rev[] = {0x07, 0x07,
+ 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+ 0x07, 0x07, 0x06, 0x05, 0x04, 0x04};
+
+ rt2800_rfcsr_write(rt2x00dev, 55,
+ r55_nonbt_rev[idx]);
+ rt2800_rfcsr_write(rt2x00dev, 59,
+ r59_nonbt_rev[idx]);
+ } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+ static const char r59_non_bt[] = {0x8f, 0x8f,
+ 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
+ 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
+
+ rt2800_rfcsr_write(rt2x00dev, 59,
+ r59_non_bt[idx]);
+ }
+ }
+ }
+
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+ rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+}
+
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
@@ -1529,8 +1718,11 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_rf(rt2x00dev, RF3020) ||
rt2x00_rf(rt2x00dev, RF3021) ||
rt2x00_rf(rt2x00dev, RF3022) ||
- rt2x00_rf(rt2x00dev, RF3052))
+ rt2x00_rf(rt2x00dev, RF3052) ||
+ rt2x00_rf(rt2x00dev, RF3320))
rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
+ else if (rt2x00_rf(rt2x00dev, RF5390))
+ rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
else
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
@@ -1543,12 +1735,15 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 86, 0);
if (rf->channel <= 14) {
- if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
- rt2800_bbp_write(rt2x00dev, 82, 0x62);
- rt2800_bbp_write(rt2x00dev, 75, 0x46);
- } else {
- rt2800_bbp_write(rt2x00dev, 82, 0x84);
- rt2800_bbp_write(rt2x00dev, 75, 0x50);
+ if (!rt2x00_rt(rt2x00dev, RT5390)) {
+ if (test_bit(CONFIG_EXTERNAL_LNA_BG,
+ &rt2x00dev->flags)) {
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
+ rt2800_bbp_write(rt2x00dev, 75, 0x46);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 82, 0x84);
+ rt2800_bbp_write(rt2x00dev, 75, 0x50);
+ }
}
} else {
rt2800_bbp_write(rt2x00dev, 82, 0xf2);
@@ -1568,13 +1763,13 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
tx_pin = 0;
/* Turn on unused PA or LNA when not using 1T or 1R */
- if (rt2x00dev->default_ant.tx != 1) {
+ if (rt2x00dev->default_ant.tx_chain_num == 2) {
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
}
/* Turn on unused PA or LNA when not using 1T or 1R */
- if (rt2x00dev->default_ant.rx != 1) {
+ if (rt2x00dev->default_ant.rx_chain_num == 2) {
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
}
@@ -1609,32 +1804,125 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
}
msleep(1);
+
+ /*
+ * Clear channel statistic counters
+ */
+ rt2800_register_read(rt2x00dev, CH_IDLE_STA, &reg);
+ rt2800_register_read(rt2x00dev, CH_BUSY_STA, &reg);
+ rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
+}
+
+static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
+ enum ieee80211_band band)
+{
+ u16 eeprom;
+ u8 comp_en;
+ u8 comp_type;
+ int comp_value;
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
+
+ if (eeprom == 0xffff)
+ return 0;
+
+ if (band == IEEE80211_BAND_2GHZ) {
+ comp_en = rt2x00_get_field16(eeprom,
+ EEPROM_TXPOWER_DELTA_ENABLE_2G);
+ if (comp_en) {
+ comp_type = rt2x00_get_field16(eeprom,
+ EEPROM_TXPOWER_DELTA_TYPE_2G);
+ comp_value = rt2x00_get_field16(eeprom,
+ EEPROM_TXPOWER_DELTA_VALUE_2G);
+ if (!comp_type)
+ comp_value = -comp_value;
+ }
+ } else {
+ comp_en = rt2x00_get_field16(eeprom,
+ EEPROM_TXPOWER_DELTA_ENABLE_5G);
+ if (comp_en) {
+ comp_type = rt2x00_get_field16(eeprom,
+ EEPROM_TXPOWER_DELTA_TYPE_5G);
+ comp_value = rt2x00_get_field16(eeprom,
+ EEPROM_TXPOWER_DELTA_VALUE_5G);
+ if (!comp_type)
+ comp_value = -comp_value;
+ }
+ }
+
+ return comp_value;
+}
+
+static u8 rt2800_compesate_txpower(struct rt2x00_dev *rt2x00dev,
+ int is_rate_b,
+ enum ieee80211_band band,
+ int power_level,
+ u8 txpower)
+{
+ u32 reg;
+ u16 eeprom;
+ u8 criterion;
+ u8 eirp_txpower;
+ u8 eirp_txpower_criterion;
+ u8 reg_limit;
+ int bw_comp = 0;
+
+ if (!((band == IEEE80211_BAND_5GHZ) && is_rate_b))
+ return txpower;
+
+ if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
+ bw_comp = rt2800_get_txpower_bw_comp(rt2x00dev, band);
+
+ if (test_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags)) {
+ /*
+	 * Check if the eirp txpower exceeds the txpower_limit.
+	 * We use OFDM 6M as the criterion; its eirp txpower
+	 * is stored at EEPROM_EIRP_MAX_TX_POWER.
+	 * .11b data rates need an additional 4dBm
+	 * when calculating the eirp txpower.
+ */
+ rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
+ criterion = rt2x00_get_field32(reg, TX_PWR_CFG_0_6MBS);
+
+ rt2x00_eeprom_read(rt2x00dev,
+ EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+
+ if (band == IEEE80211_BAND_2GHZ)
+ eirp_txpower_criterion = rt2x00_get_field16(eeprom,
+ EEPROM_EIRP_MAX_TX_POWER_2GHZ);
+ else
+ eirp_txpower_criterion = rt2x00_get_field16(eeprom,
+ EEPROM_EIRP_MAX_TX_POWER_5GHZ);
+
+ eirp_txpower = eirp_txpower_criterion + (txpower - criterion) +
+ (is_rate_b ? 4 : 0) + bw_comp;
+
+ reg_limit = (eirp_txpower > power_level) ?
+ (eirp_txpower - power_level) : 0;
+ } else
+ reg_limit = 0;
+
+ return txpower + bw_comp - reg_limit;
}
static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
- const int max_txpower)
+ struct ieee80211_conf *conf)
{
u8 txpower;
- u8 max_value = (u8)max_txpower;
u16 eeprom;
- int i;
+ int i, is_rate_b;
u32 reg;
u8 r1;
u32 offset;
+ enum ieee80211_band band = conf->channel->band;
+ int power_level = conf->power_level;
/*
- * set to normal tx power mode: +/- 0dBm
+ * set to normal bbp tx power control mode: +/- 0dBm
*/
rt2800_bbp_read(rt2x00dev, 1, &r1);
- rt2x00_set_field8(&r1, BBP1_TX_POWER, 0);
+ rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, 0);
rt2800_bbp_write(rt2x00dev, 1, r1);
-
- /*
- * The eeprom contains the tx power values for each rate. These
- * values map to 100% tx power. Each 16bit word contains four tx
- * power values and the order is the same as used in the TX_PWR_CFG
- * registers.
- */
offset = TX_PWR_CFG_0;
for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
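
For illustration, a standalone sketch of the arithmetic rt2800_compesate_txpower() applies per rate (not part of the patch; all numbers are invented and stay in raw register units): the per-rate EEPROM value is referenced against the OFDM 6M criterion, .11b rates get the extra 4dBm, the HT40 bandwidth delta is added, and the result is backed off by however much the derived EIRP exceeds the configured power_level.

#include <stdio.h>

/*
 * Simplified model of the per-rate EIRP clamping.  Values are arbitrary
 * "power register" units, mirroring the driver's use of raw EEPROM fields
 * rather than calibrated dBm.
 */
static int compensate_txpower(int is_rate_b, int txpower, int criterion,
			      int eirp_criterion, int power_level, int bw_comp)
{
	/* EIRP of this rate, derived from the OFDM 6M reference point. */
	int eirp = eirp_criterion + (txpower - criterion) +
		   (is_rate_b ? 4 : 0) + bw_comp;
	/* Back off by however much the EIRP exceeds the regulatory limit. */
	int reg_limit = (eirp > power_level) ? (eirp - power_level) : 0;

	return txpower + bw_comp - reg_limit;
}

int main(void)
{
	/* Invented example: CCK rate, HT40 delta of +1, limit of 17. */
	int out = compensate_txpower(1, 10, 8, 15, 17, 1);

	/* eirp = 15 + (10 - 8) + 4 + 1 = 22, exceeds 17 by 5, so 10 + 1 - 5 = 6 */
	printf("compensated txpower = %d\n", out);
	return 0;
}
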
@@ -1648,73 +1936,99 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i,
&eeprom);
- /* TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS,
+ is_rate_b = i ? 0 : 1;
+ /*
+ * TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS,
* TX_PWR_CFG_2: MCS4, TX_PWR_CFG_3: MCS12,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE0);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0, txpower);
- /* TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS,
+ /*
+ * TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS,
* TX_PWR_CFG_2: MCS5, TX_PWR_CFG_3: MCS13,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE1);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1, txpower);
- /* TX_PWR_CFG_0: 55MBS, TX_PWR_CFG_1: 48MBS,
+ /*
+ * TX_PWR_CFG_0: 5.5MBS, TX_PWR_CFG_1: 48MBS,
* TX_PWR_CFG_2: MCS6, TX_PWR_CFG_3: MCS14,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE2);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2, txpower);
- /* TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS,
+ /*
+ * TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS,
* TX_PWR_CFG_2: MCS7, TX_PWR_CFG_3: MCS15,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE3);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower);
/* read the next four txpower values */
rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1,
&eeprom);
- /* TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0,
+ is_rate_b = 0;
+ /*
+ * TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0,
* TX_PWR_CFG_2: MCS8, TX_PWR_CFG_3: unknown,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE0);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4, txpower);
- /* TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1,
+ /*
+ * TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1,
* TX_PWR_CFG_2: MCS9, TX_PWR_CFG_3: unknown,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE1);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5, txpower);
- /* TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2,
+ /*
+ * TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2,
* TX_PWR_CFG_2: MCS10, TX_PWR_CFG_3: unknown,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE2);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6, txpower);
- /* TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3,
+ /*
+ * TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3,
* TX_PWR_CFG_2: MCS11, TX_PWR_CFG_3: unknown,
- * TX_PWR_CFG_4: unknown */
+ * TX_PWR_CFG_4: unknown
+ */
txpower = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE3);
- rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7,
- min(txpower, max_value));
+ txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
+ power_level, txpower);
+ rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7, txpower);
rt2800_register_write(rt2x00dev, offset, reg);
@@ -1773,11 +2087,13 @@ void rt2800_config(struct rt2x00_dev *rt2x00dev,
/* Always recalculate LNA gain before changing configuration */
rt2800_config_lna_gain(rt2x00dev, libconf);
- if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
+ if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
rt2800_config_channel(rt2x00dev, libconf->conf,
&libconf->rf, &libconf->channel);
+ rt2800_config_txpower(rt2x00dev, libconf->conf);
+ }
if (flags & IEEE80211_CONF_CHANGE_POWER)
- rt2800_config_txpower(rt2x00dev, libconf->conf->power_level);
+ rt2800_config_txpower(rt2x00dev, libconf->conf);
if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
rt2800_config_retry_limit(rt2x00dev, libconf);
if (flags & IEEE80211_CONF_CHANGE_PS)
@@ -1806,7 +2122,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
- rt2x00_rt(rt2x00dev, RT3390))
+ rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT5390))
return 0x1c + (2 * rt2x00dev->lna_gain);
else
return 0x2e + rt2x00dev->lna_gain;
@@ -1914,8 +2231,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
- if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST))
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
rt2800_register_write(rt2x00dev, TX_SW_CFG2,
0x0000002c);
else
@@ -1938,6 +2255,10 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f);
+ } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -2006,7 +2327,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
- rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2019,7 +2340,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
- rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2032,7 +2353,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
- rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2045,7 +2366,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
- rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2058,7 +2379,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
- rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2071,7 +2392,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
- rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1);
+ rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV_SHORT, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2097,7 +2418,23 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
}
- rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f);
+ /*
+ * The legacy driver also sets TXOP_CTRL_CFG_RESERVED_TRUN_EN to 1
+ * although it is reserved.
+ */
+ rt2800_register_read(rt2x00dev, TXOP_CTRL_CFG, &reg);
+ rt2x00_set_field32(&reg, TXOP_CTRL_CFG_TIMEOUT_TRUN_EN, 1);
+ rt2x00_set_field32(&reg, TXOP_CTRL_CFG_AC_TRUN_EN, 1);
+ rt2x00_set_field32(&reg, TXOP_CTRL_CFG_TXRATEGRP_TRUN_EN, 1);
+ rt2x00_set_field32(&reg, TXOP_CTRL_CFG_USER_MODE_TRUN_EN, 1);
+ rt2x00_set_field32(&reg, TXOP_CTRL_CFG_MIMO_PS_TRUN_EN, 1);
+ rt2x00_set_field32(&reg, TXOP_CTRL_CFG_RESERVED_TRUN_EN, 1);
+ rt2x00_set_field32(&reg, TXOP_CTRL_CFG_LSIG_TXOP_EN, 0);
+ rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CCA_EN, 0);
+ rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CCA_DLY, 88);
+ rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CWMIN, 0);
+ rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, reg);
+
rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);
rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
@@ -2134,30 +2471,34 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
SHARED_KEY_MODE_ENTRY(i), 0);
for (i = 0; i < 256; i++) {
- u32 wcid[2] = { 0xffffffff, 0x00ffffff };
+ static const u32 wcid[2] = { 0xffffffff, 0x00ffffff };
rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
wcid, sizeof(wcid));
- rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1);
+ rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 0);
rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
}
/*
* Clear all beacons
*/
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE0);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE1);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE2);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE3);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE4);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE5);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE6);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE7);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE0);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE1);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE2);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE3);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE4);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE5);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE6);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE7);
if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30);
rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
+ } else if (rt2x00_is_pcie(rt2x00dev)) {
+ rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
+ rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125);
+ rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
}
rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
@@ -2227,6 +2568,17 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, INT_TIMER_CFG_PRE_TBTT_TIMER, 6 << 4);
rt2800_register_write(rt2x00dev, INT_TIMER_CFG, reg);
+ /*
+ * Set up channel statistics timer
+ */
+ rt2800_register_read(rt2x00dev, CH_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, CH_TIME_CFG_EIFS_BUSY, 1);
+ rt2x00_set_field32(&reg, CH_TIME_CFG_NAV_BUSY, 1);
+ rt2x00_set_field32(&reg, CH_TIME_CFG_RX_BUSY, 1);
+ rt2x00_set_field32(&reg, CH_TIME_CFG_TX_BUSY, 1);
+ rt2x00_set_field32(&reg, CH_TIME_CFG_TMR_EN, 1);
+ rt2800_register_write(rt2x00dev, CH_TIME_CFG, reg);
+
return 0;
}
@@ -2282,15 +2634,31 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_wait_bbp_ready(rt2x00dev)))
return -EACCES;
- if (rt2800_is_305x_soc(rt2x00dev))
+ if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_bbp_read(rt2x00dev, 4, &value);
+ rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
+ rt2800_bbp_write(rt2x00dev, 4, value);
+ }
+
+ if (rt2800_is_305x_soc(rt2x00dev) ||
+ rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 31, 0x08);
rt2800_bbp_write(rt2x00dev, 65, 0x2c);
rt2800_bbp_write(rt2x00dev, 66, 0x38);
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
rt2800_bbp_write(rt2x00dev, 69, 0x16);
rt2800_bbp_write(rt2x00dev, 73, 0x12);
+ } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 73, 0x13);
+ rt2800_bbp_write(rt2x00dev, 75, 0x46);
+ rt2800_bbp_write(rt2x00dev, 76, 0x28);
+ rt2800_bbp_write(rt2x00dev, 77, 0x59);
} else {
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x10);
@@ -2301,7 +2669,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
- rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_bbp_write(rt2x00dev, 79, 0x13);
rt2800_bbp_write(rt2x00dev, 80, 0x05);
rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -2313,46 +2682,109 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
}
rt2800_bbp_write(rt2x00dev, 82, 0x62);
- rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 83, 0x7a);
+ else
+ rt2800_bbp_write(rt2x00dev, 83, 0x6a);
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
rt2800_bbp_write(rt2x00dev, 84, 0x19);
+ else if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 84, 0x9a);
else
rt2800_bbp_write(rt2x00dev, 84, 0x99);
- rt2800_bbp_write(rt2x00dev, 86, 0x00);
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 86, 0x38);
+ else
+ rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
rt2800_bbp_write(rt2x00dev, 91, 0x04);
- rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 92, 0x02);
+ else
+ rt2800_bbp_write(rt2x00dev, 92, 0x00);
if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
rt2800_is_305x_soc(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 103, 0xc0);
else
rt2800_bbp_write(rt2x00dev, 103, 0x00);
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 104, 0x92);
+
if (rt2800_is_305x_soc(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 105, 0x01);
+ else if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 105, 0x3c);
else
rt2800_bbp_write(rt2x00dev, 105, 0x05);
- rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 106, 0x03);
+ else
+ rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+ if (rt2x00_rt(rt2x00dev, RT5390))
+ rt2800_bbp_write(rt2x00dev, 128, 0x12);
if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
- rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_bbp_read(rt2x00dev, 138, &value);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
- if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1)
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
value |= 0x20;
- if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1)
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
value &= ~0x02;
rt2800_bbp_write(rt2x00dev, 138, value);
}
+ if (rt2x00_rt(rt2x00dev, RT5390)) {
+ int ant, div_mode;
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ div_mode = rt2x00_get_field16(eeprom,
+ EEPROM_NIC_CONF1_ANT_DIVERSITY);
+ ant = (div_mode == 3) ? 1 : 0;
+
+ /* check if this is a Bluetooth combo card */
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
+ u32 reg;
+
+ rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0);
+ if (ant == 0)
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1);
+ else if (ant == 1)
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1);
+ rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+ }
+
+ rt2800_bbp_read(rt2x00dev, 152, &value);
+ if (ant == 0)
+ rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
+ else
+ rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
+ rt2800_bbp_write(rt2x00dev, 152, value);
+
+ /* Init frequency calibration */
+ rt2800_bbp_write(rt2x00dev, 142, 1);
+ rt2800_bbp_write(rt2x00dev, 143, 57);
+ }
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
@@ -2383,6 +2815,10 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
rt2800_bbp_write(rt2x00dev, 4, bbp);
+ rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR31_RX_H20M, bw40);
+ rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
+
rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
@@ -2438,18 +2874,28 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
!rt2x00_rt(rt2x00dev, RT3071) &&
!rt2x00_rt(rt2x00dev, RT3090) &&
!rt2x00_rt(rt2x00dev, RT3390) &&
+ !rt2x00_rt(rt2x00dev, RT5390) &&
!rt2800_is_305x_soc(rt2x00dev))
return 0;
/*
* Init RF calibration.
*/
- rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
- msleep(1);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+ if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
+ rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+ msleep(1);
+ rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+ } else {
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+ msleep(1);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+ }
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
@@ -2457,7 +2903,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
- rt2800_rfcsr_write(rt2x00dev, 7, 0x70);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x60);
rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
@@ -2540,6 +2986,87 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
return 0;
+ } else if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0xc6);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
+
+ rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
+
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
+
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x4b);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x7b);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
+
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
+
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
}
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -2549,23 +3076,27 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
} else if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090)) {
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
+
rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
- rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
-
rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
- if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST))
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
else
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
}
rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+
+ rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+ rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
+ rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
} else if (rt2x00_rt(rt2x00dev, RT3390)) {
rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
@@ -2589,21 +3120,23 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
}
- /*
- * Set back to initial state
- */
- rt2800_bbp_write(rt2x00dev, 24, 0);
+ if (!rt2x00_rt(rt2x00dev, RT5390)) {
+ /*
+ * Set back to initial state
+ */
+ rt2800_bbp_write(rt2x00dev, 24, 0);
- rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
- rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
+ rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
+ rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
- /*
- * set BBP back to BW20
- */
- rt2800_bbp_read(rt2x00dev, 4, &bbp);
- rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
- rt2800_bbp_write(rt2x00dev, 4, bbp);
+ /*
+ * Set BBP back to BW20
+ */
+ rt2800_bbp_read(rt2x00dev, 4, &bbp);
+ rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
+ rt2800_bbp_write(rt2x00dev, 4, bbp);
+ }
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
@@ -2615,28 +3148,33 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
- rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
- if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
- rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
- rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
- if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
- rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
- }
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
- if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
- rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
- rt2x00_get_field16(eeprom,
- EEPROM_TXMIXER_GAIN_BG_VAL));
- rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+ if (!rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
+ if (!test_bit(CONFIG_EXTERNAL_LNA_BG,
+ &rt2x00dev->flags))
+ rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
+ }
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
+ rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
+ rt2x00_get_field16(eeprom,
+ EEPROM_TXMIXER_GAIN_BG_VAL));
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+ }
if (rt2x00_rt(rt2x00dev, RT3090)) {
rt2800_bbp_read(rt2x00dev, 138, &bbp);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
- if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1)
+ /* Turn off unused DAC1 and ADC1 to reduce power consumption */
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
- if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1)
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
rt2x00_set_field8(&bbp, BBP138_TX_DAC1, 1);
rt2800_bbp_write(rt2x00dev, 138, bbp);
@@ -2666,10 +3204,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
}
- if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) {
+ if (rt2x00_rt(rt2x00dev, RT3070)) {
rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
- if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
- rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E))
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F))
rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
else
rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
@@ -2679,6 +3216,20 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
}
+ if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+ }
+
return 0;
}
@@ -2735,16 +3286,16 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Initialize LED control
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
- rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word);
+ rt2800_mcu_request(rt2x00dev, MCU_LED_AG_CONF, 0xff,
word & 0xff, (word >> 8) & 0xff);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
- rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word);
+ rt2800_mcu_request(rt2x00dev, MCU_LED_ACT_CONF, 0xff,
word & 0xff, (word >> 8) & 0xff);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
- rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word);
+ rt2800_mcu_request(rt2x00dev, MCU_LED_LED_POLARITY, 0xff,
word & 0xff, (word >> 8) & 0xff);
return 0;
@@ -2757,10 +3308,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
/* Wait for DMA, ignore error */
@@ -2770,9 +3318,6 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-
- rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
- rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
}
EXPORT_SYMBOL_GPL(rt2800_disable_radio);
@@ -2838,38 +3383,41 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word);
if (word == 0xffff) {
- rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
- rt2x00_set_field16(&word, EEPROM_ANTENNA_TXPATH, 1);
- rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF0_TXPATH, 1);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RF_TYPE, RF2820);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
} else if (rt2x00_rt(rt2x00dev, RT2860) ||
rt2x00_rt(rt2x00dev, RT2872)) {
/*
* There is a max of 2 RX streams for RT28x0 series
*/
- if (rt2x00_get_field16(word, EEPROM_ANTENNA_RXPATH) > 2)
- rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
+ if (rt2x00_get_field16(word, EEPROM_NIC_CONF0_RXPATH) > 2)
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word);
if (word == 0xffff) {
- rt2x00_set_field16(&word, EEPROM_NIC_HW_RADIO, 0);
- rt2x00_set_field16(&word, EEPROM_NIC_DYNAMIC_TX_AGC, 0);
- rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0);
- rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0);
- rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
- rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_BG, 0);
- rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_A, 0);
- rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0);
- rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0);
- rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0);
- rt2x00_set_field16(&word, EEPROM_NIC_ANT_DIVERSITY, 0);
- rt2x00_set_field16(&word, EEPROM_NIC_DAC_TEST, 0);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_HW_RADIO, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_LNA_2G, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_LNA_5G, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_CARDBUS_ACCEL, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_SB_2G, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_SB_5G, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_WPS_PBC, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_2G, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_5G, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BROADBAND_EXT_LNA, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_ANT_DIVERSITY, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_INTERNAL_TX_ALC, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BT_COEXIST, 0);
+ rt2x00_set_field16(&word, EEPROM_NIC_CONF1_DAC_TEST, 0);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word);
EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
}
@@ -2884,9 +3432,9 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
LED_MODE_TXRX_ACTIVITY);
rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221);
+ rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8);
EEPROM(rt2x00dev, "Led Mode: 0x%04x\n", word);
}
@@ -2930,13 +3478,6 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
default_lna_gain);
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &word);
- if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_24GHZ) == 0xff)
- rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_24GHZ, MAX_G_TXPOWER);
- if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_5GHZ) == 0xff)
- rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_5GHZ, MAX_A_TXPOWER);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_MAX_TX_POWER, word);
-
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
@@ -2950,13 +3491,18 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Read EEPROM word for configuration.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
/*
- * Identify RF chipset.
+ * Identify RF chipset by EEPROM value
+ * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
+ * RT53xx: defined in "EEPROM_CHIP_ID" field
*/
- value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+ if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390)
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
+ else
+ value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
@@ -2968,7 +3514,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
!rt2x00_rt(rt2x00dev, RT3071) &&
!rt2x00_rt(rt2x00dev, RT3090) &&
!rt2x00_rt(rt2x00dev, RT3390) &&
- !rt2x00_rt(rt2x00dev, RT3572)) {
+ !rt2x00_rt(rt2x00dev, RT3572) &&
+ !rt2x00_rt(rt2x00dev, RT5390)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
@@ -2981,7 +3528,9 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
!rt2x00_rf(rt2x00dev, RF2020) &&
!rt2x00_rf(rt2x00dev, RF3021) &&
!rt2x00_rf(rt2x00dev, RF3022) &&
- !rt2x00_rf(rt2x00dev, RF3052)) {
+ !rt2x00_rf(rt2x00dev, RF3052) &&
+ !rt2x00_rf(rt2x00dev, RF3320) &&
+ !rt2x00_rf(rt2x00dev, RF5390)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -2989,10 +3538,34 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Identify default antenna configuration.
*/
- rt2x00dev->default_ant.tx =
- rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH);
- rt2x00dev->default_ant.rx =
- rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH);
+ rt2x00dev->default_ant.tx_chain_num =
+ rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH);
+ rt2x00dev->default_ant.rx_chain_num =
+ rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390)) {
+ value = rt2x00_get_field16(eeprom,
+ EEPROM_NIC_CONF1_ANT_DIVERSITY);
+ switch (value) {
+ case 0:
+ case 1:
+ case 2:
+ rt2x00dev->default_ant.tx = ANTENNA_A;
+ rt2x00dev->default_ant.rx = ANTENNA_A;
+ break;
+ case 3:
+ rt2x00dev->default_ant.tx = ANTENNA_A;
+ rt2x00dev->default_ant.rx = ANTENNA_B;
+ break;
+ }
+ } else {
+ rt2x00dev->default_ant.tx = ANTENNA_A;
+ rt2x00dev->default_ant.rx = ANTENNA_A;
+ }
/*
* Read frequency offset and RF programming sequence.
@@ -3003,17 +3576,17 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Read external LNA informations.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
- if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A))
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_5G))
__set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
- if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_2G))
__set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);
/*
* Detect if this device has an hardware controlled radio.
*/
- if (rt2x00_get_field16(eeprom, EEPROM_NIC_HW_RADIO))
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_HW_RADIO))
__set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
/*
@@ -3027,6 +3600,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg);
#endif /* CONFIG_RT2X00_LIB_LEDS */
+ /*
+	 * Check if the EIRP tx power limit feature is supported.
+ */
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+
+ if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
+ EIRP_MAX_TX_POWER_LIMIT)
+ __set_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags);
+
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
@@ -3179,7 +3761,6 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
char *default_power1;
char *default_power2;
unsigned int i;
- unsigned short max_power;
u16 eeprom;
/*
@@ -3225,7 +3806,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00dev->hw->max_report_rates = 7;
rt2x00dev->hw->max_rate_tries = 1;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
/*
* Initialize hw_mode information.
@@ -3245,7 +3826,9 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
} else if (rt2x00_rf(rt2x00dev, RF3020) ||
rt2x00_rf(rt2x00dev, RF2020) ||
rt2x00_rf(rt2x00dev, RF3021) ||
- rt2x00_rf(rt2x00dev, RF3022)) {
+ rt2x00_rf(rt2x00dev, RF3022) ||
+ rt2x00_rf(rt2x00dev, RF3320) ||
+ rt2x00_rf(rt2x00dev, RF5390)) {
spec->num_channels = 14;
spec->channels = rf_vals_3x;
} else if (rt2x00_rf(rt2x00dev, RF3052)) {
@@ -3268,11 +3851,11 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40;
- if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) >= 2)
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) >= 2)
spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
spec->ht.cap |=
- rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) <<
+ rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) <<
IEEE80211_HT_CAP_RX_STBC_SHIFT;
spec->ht.ampdu_factor = 3;
@@ -3280,10 +3863,10 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->ht.mcs.tx_params =
IEEE80211_HT_MCS_TX_DEFINED |
IEEE80211_HT_MCS_TX_RX_DIFF |
- ((rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) - 1) <<
+ ((rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- switch (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH)) {
+ switch (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH)) {
case 3:
spec->ht.mcs.rx_mask[2] = 0xff;
case 2:
@@ -3303,26 +3886,21 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels_info = info;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &eeprom);
- max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_24GHZ);
default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
for (i = 0; i < 14; i++) {
- info[i].max_power = max_power;
- info[i].default_power1 = TXPOWER_G_FROM_DEV(default_power1[i]);
- info[i].default_power2 = TXPOWER_G_FROM_DEV(default_power2[i]);
+ info[i].default_power1 = default_power1[i];
+ info[i].default_power2 = default_power2[i];
}
if (spec->num_channels > 14) {
- max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_5GHZ);
default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
for (i = 14; i < spec->num_channels; i++) {
- info[i].max_power = max_power;
- info[i].default_power1 = TXPOWER_A_FROM_DEV(default_power1[i]);
- info[i].default_power2 = TXPOWER_A_FROM_DEV(default_power2[i]);
+ info[i].default_power1 = default_power1[i];
+ info[i].default_power2 = default_power2[i];
}
}
@@ -3414,7 +3992,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
if (queue_idx >= 4)
return 0;
- queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+ queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
/* Update WMM TXOP register */
offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
@@ -3472,7 +4050,8 @@ EXPORT_SYMBOL_GPL(rt2800_get_tsf);
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
int ret = 0;
@@ -3502,6 +4081,37 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(rt2800_ampdu_action);
+int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ u32 idle, busy, busy_ext;
+
+ if (idx != 0)
+ return -ENOENT;
+
+ survey->channel = conf->channel;
+
+ rt2800_register_read(rt2x00dev, CH_IDLE_STA, &idle);
+ rt2800_register_read(rt2x00dev, CH_BUSY_STA, &busy);
+ rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &busy_ext);
+
+ if (idle || busy) {
+ survey->filled = SURVEY_INFO_CHANNEL_TIME |
+ SURVEY_INFO_CHANNEL_TIME_BUSY |
+ SURVEY_INFO_CHANNEL_TIME_EXT_BUSY;
+
+ survey->channel_time = (idle + busy) / 1000;
+ survey->channel_time_busy = busy / 1000;
+ survey->channel_time_ext_busy = busy_ext / 1000;
+ }
+
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(rt2800_get_survey);
+
MODULE_AUTHOR(DRV_PROJECT ", Bartlomiej Zolnierkiewicz");
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT2800 library");
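
rt2800_get_survey() above simply forwards the hardware channel-time counters (armed via CH_TIME_CFG earlier in the patch) to mac80211, dividing by 1000 since the counters appear to tick in microseconds while survey_info wants milliseconds. A small consumer-side sketch of what such a report yields (numbers invented):

#include <stdint.h>
#include <stdio.h>

/* Derive a channel utilisation percentage from survey-style counters. */
static unsigned int busy_percent(uint64_t channel_time_ms, uint64_t busy_ms)
{
	if (!channel_time_ms)
		return 0;
	return (unsigned int)((busy_ms * 100) / channel_time_ms);
}

int main(void)
{
	/* Invented sample: 120000us idle + 80000us busy -> 200ms total. */
	uint64_t idle_us = 120000, busy_us = 80000;
	uint64_t total_ms = (idle_us + busy_us) / 1000;
	uint64_t busy_ms = busy_us / 1000;

	printf("busy %u%% of %llums\n", busy_percent(total_ms, busy_ms),
	       (unsigned long long)total_ms);
	return 0;
}
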
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 81cbc92e7857..0c92d86a36f4 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -156,6 +156,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
void rt2800_txdone_entry(struct queue_entry *entry, u32 status);
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
+void rt2800_clear_beacon(struct queue_entry *entry);
extern const struct rt2x00debug rt2800_rt2x00debug;
@@ -198,6 +199,9 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
u64 rt2800_get_tsf(struct ieee80211_hw *hw);
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size);
+int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey);
#endif /* RT2800LIB_H */
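
The header now exports rt2800_get_survey() and the widened rt2800_ampdu_action() prototype (the extra buf_size argument tracks a mac80211 interface change). Bus drivers are expected to point their ieee80211_ops at these shared handlers; a hedged sketch of that wiring follows (the struct name and the elided members are illustrative, while the four rt2800_* handlers are the ones declared in this header):

/* Illustrative fragment of how a bus-specific driver exposes the shared
 * rt2800 handlers through its ieee80211_ops table. */
#include <net/mac80211.h>
#include "rt2x00.h"
#include "rt2800lib.h"

static const struct ieee80211_ops example_rt2800_mac80211_ops = {
	/* .tx, .start, .stop, .config and the other mandatory members elided */
	.conf_tx	= rt2800_conf_tx,
	.get_tsf	= rt2800_get_tsf,
	.ampdu_action	= rt2800_ampdu_action,	/* now takes buf_size */
	.get_survey	= rt2800_get_survey,	/* added by this patch */
};
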
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index b26739535986..808073aa9dcc 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -84,20 +84,22 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
}
-#ifdef CONFIG_RT2800PCI_SOC
+#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
- u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */
+ void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
+
+ iounmap(base_addr);
}
#else
static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
}
-#endif /* CONFIG_RT2800PCI_SOC */
+#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
-#ifdef CONFIG_RT2800PCI_PCI
+#ifdef CONFIG_PCI
static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
{
struct rt2x00_dev *rt2x00dev = eeprom->data;
@@ -181,7 +183,99 @@ static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
{
}
-#endif /* CONFIG_RT2800PCI_PCI */
+#endif /* CONFIG_PCI */
+
+/*
+ * Queue handlers.
+ */
+static void rt2800pci_start_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+ break;
+ case QID_BEACON:
+ /*
+ * Allow beacon tasklets to be scheduled for periodic
+ * beacon updates.
+ */
+ tasklet_enable(&rt2x00dev->tbtt_tasklet);
+ tasklet_enable(&rt2x00dev->pretbtt_tasklet);
+
+ rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
+ rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
+ break;
+ default:
+ break;
+	}
+}
+
+static void rt2800pci_kick_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ struct queue_entry *entry;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ rt2800_register_write(rt2x00dev, TX_CTX_IDX(queue->qid), entry->entry_idx);
+ break;
+ case QID_MGMT:
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ rt2800_register_write(rt2x00dev, TX_CTX_IDX(5), entry->entry_idx);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rt2800pci_stop_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+ break;
+ case QID_BEACON:
+ rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
+ rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
+
+ /*
+ * Wait for tbtt tasklets to finish.
+ */
+ tasklet_disable(&rt2x00dev->tbtt_tasklet);
+ tasklet_disable(&rt2x00dev->pretbtt_tasklet);
+ break;
+ default:
+ break;
+ }
+}
/*
* Firmware functions
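
The new start/kick/stop handlers above presumably get hooked into the rt2x00 core through the driver's callback table rather than being called directly; a hedged fragment of what that registration might look like (the rt2x00lib_ops member names are assumptions inferred from the handler names, not shown in this excerpt):

/* Illustrative fragment; member names are assumed and the remaining
 * callbacks (IRQ, firmware, device state, descriptor handling) are elided. */
static const struct rt2x00lib_ops example_rt2800pci_rt2x00_ops = {
	.start_queue	= rt2800pci_start_queue,
	.kick_queue	= rt2800pci_kick_queue,
	.stop_queue	= rt2800pci_stop_queue,
};
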
@@ -321,24 +415,12 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
/*
* Device state switch handlers.
*/
-static void rt2800pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
- enum dev_state state)
-{
- u32 reg;
-
- rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
- (state == STATE_RADIO_RX_ON) ||
- (state == STATE_RADIO_RX_ON_LINK));
- rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-}
-
static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_ON) ||
- (state == STATE_RADIO_IRQ_ON_ISR);
+ int mask = (state == STATE_RADIO_IRQ_ON);
u32 reg;
+ unsigned long flags;
/*
* When interrupts are being enabled, the interrupt registers
@@ -347,8 +429,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
if (state == STATE_RADIO_IRQ_ON) {
rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
+ /*
+ * Enable tasklets. The beacon related tasklets are
+ * enabled when the beacon queue is started.
+ */
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ tasklet_enable(&rt2x00dev->rxdone_tasklet);
+ tasklet_enable(&rt2x00dev->autowake_tasklet);
}
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
@@ -369,6 +460,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Ensure that all tasklets are finished before
+ * disabling the interrupts.
+ */
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ tasklet_disable(&rt2x00dev->autowake_tasklet);
+ }
}
static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
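
rt2800pci_toggle_irq() above enables the non-beacon tasklets when interrupts are switched on and disables them again, waiting for them to finish, when interrupts go off. For those tasklet_enable()/tasklet_disable() calls to pair up, the tasklets must start out disabled; that setup is outside this excerpt, but the usual pattern is roughly the following (the init function and example handler are illustrative, not part of this patch):

/* Illustrative sketch of tasklet setup matching the enable/disable pairing
 * used above; the placement and names are assumptions. */
#include <linux/interrupt.h>
#include "rt2x00.h"

static void example_rxdone_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	/* ... process completed RX descriptors for rt2x00dev ... */
	(void)rt2x00dev;
}

static void example_init_tasklets(struct rt2x00_dev *rt2x00dev)
{
	tasklet_init(&rt2x00dev->rxdone_tasklet,
		     example_rxdone_tasklet, (unsigned long)rt2x00dev);
	/*
	 * Start disabled: tasklet_enable() in the IRQ-on path brings the
	 * disable count back to zero, and the IRQ-off path disables again.
	 */
	tasklet_disable(&rt2x00dev->rxdone_tasklet);
}
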
@@ -391,6 +493,13 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+ if (rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
+ rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
+ rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
+ rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
+ }
+
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
@@ -414,39 +523,23 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
{
- u32 reg;
-
- rt2800_disable_radio(rt2x00dev);
-
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280);
-
- rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
- rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+ if (rt2x00_is_soc(rt2x00dev)) {
+ rt2800_disable_radio(rt2x00dev);
+ rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
+ }
}
static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- /*
- * Always put the device to sleep (even when we intend to wakeup!)
- * if the device is booting and wasn't asleep it will return
- * failure when attempting to wakeup.
- */
- rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);
-
if (state == STATE_AWAKE) {
- rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0);
+ rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02);
rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
+ } else if (state == STATE_SLEEP) {
+ rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff);
+ rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff);
+ rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01);
}
return 0;
@@ -476,16 +569,8 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
rt2800pci_disable_radio(rt2x00dev);
rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
break;
- case STATE_RADIO_RX_ON:
- case STATE_RADIO_RX_ON_LINK:
- case STATE_RADIO_RX_OFF:
- case STATE_RADIO_RX_OFF_LINK:
- rt2800pci_toggle_rx(rt2x00dev, state);
- break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
rt2800pci_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
@@ -567,41 +652,6 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
}
/*
- * TX data initialization
- */
-static void rt2800pci_kick_tx_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
- unsigned int qidx;
-
- if (queue->qid == QID_MGMT)
- qidx = 5;
- else
- qidx = queue->qid;
-
- rt2800_register_write(rt2x00dev, TX_CTX_IDX(qidx), entry->entry_idx);
-}
-
-static void rt2800pci_kill_tx_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- u32 reg;
-
- if (queue->qid == QID_BEACON) {
- rt2800_register_write(rt2x00dev, BCN_TIME_CFG, 0);
- return;
- }
-
- rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, (queue->qid == QID_AC_BE));
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, (queue->qid == QID_AC_BK));
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, (queue->qid == QID_AC_VI));
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, (queue->qid == QID_AC_VO));
- rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-}
-
-/*
* RX control handlers
*/
static void rt2800pci_fill_rxdone(struct queue_entry *entry,
@@ -632,6 +682,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+ /*
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -668,43 +724,36 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
u32 status;
u8 qid;
- while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo)) {
- /* Now remove the tx status from the FIFO */
- if (kfifo_out(&rt2x00dev->txstatus_fifo, &status,
- sizeof(status)) != sizeof(status)) {
- WARN_ON(1);
- break;
- }
-
+ while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
- if (qid >= QID_RX) {
+ if (unlikely(qid >= QID_RX)) {
/*
* Unknown queue, this shouldn't happen. Just drop
* this tx status.
*/
WARNING(rt2x00dev, "Got TX status report with "
- "unexpected pid %u, dropping", qid);
+ "unexpected pid %u, dropping\n", qid);
break;
}
- queue = rt2x00queue_get_queue(rt2x00dev, qid);
+ queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
if (unlikely(queue == NULL)) {
/*
* The queue is NULL, this shouldn't happen. Stop
* processing here and drop the tx status
*/
WARNING(rt2x00dev, "Got TX status for an unavailable "
- "queue %u, dropping", qid);
+ "queue %u, dropping\n", qid);
break;
}
- if (rt2x00queue_empty(queue)) {
+ if (unlikely(rt2x00queue_empty(queue))) {
/*
* The queue is empty. Stop processing here
* and drop the tx status.
*/
WARNING(rt2x00dev, "Got TX status for an empty "
- "queue %u, dropping", qid);
+ "queue %u, dropping\n", qid);
break;
}
@@ -713,45 +762,59 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
}
}
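rt2800pci_txdone() above drains the tx status FIFO with kfifo_get() while the interrupt handler fills it with kfifo_put(); with exactly one producer and one consumer the kfifo needs no locking. A standalone sketch of that single-producer/single-consumer ring idea (userspace, called sequentially here; a truly concurrent version also needs the memory barriers the kernel kfifo provides):

    #include <stdio.h>

    #define RING_SIZE 64                        /* must be a power of two */

    struct spsc_ring {
        unsigned int buf[RING_SIZE];
        unsigned int head;                      /* written only by the producer */
        unsigned int tail;                      /* written only by the consumer */
    };

    static int ring_put(struct spsc_ring *r, unsigned int val)
    {
        if (r->head - r->tail >= RING_SIZE)
            return 0;                           /* full: the caller drops the status word */
        r->buf[r->head & (RING_SIZE - 1)] = val;
        r->head++;
        return 1;
    }

    static int ring_get(struct spsc_ring *r, unsigned int *val)
    {
        if (r->head == r->tail)
            return 0;                           /* empty */
        *val = r->buf[r->tail & (RING_SIZE - 1)];
        r->tail++;
        return 1;
    }

    int main(void)
    {
        struct spsc_ring ring = { {0}, 0, 0 };
        unsigned int status;

        ring_put(&ring, 0x1234);                /* producer side: interrupt handler */
        ring_put(&ring, 0x5678);
        while (ring_get(&ring, &status))        /* consumer side: tx status tasklet */
            printf("tx status 0x%04x\n", status);
        return 0;
    }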
-static void rt2800pci_txstatus_tasklet(unsigned long data)
+static void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
{
- rt2800pci_txdone((struct rt2x00_dev *)data);
-}
-
-static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance)
-{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg = rt2x00dev->irqvalue[0];
+ u32 reg;
/*
- * 1 - Pre TBTT interrupt.
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
*/
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
- rt2x00lib_pretbtt(rt2x00dev);
+ spin_lock_irq(&rt2x00dev->irqmask_lock);
+ rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ rt2x00_set_field32(&reg, irq_field, 1);
+ rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
- /*
- * 2 - Beacondone interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
- rt2x00lib_beacondone(rt2x00dev);
+static void rt2800pci_txstatus_tasklet(unsigned long data)
+{
+ rt2800pci_txdone((struct rt2x00_dev *)data);
/*
- * 3 - Rx ring done interrupt.
+ * No need to enable the tx status interrupt here as we always
+ * leave it enabled to minimize the possibility of a tx status
+ * register overflow. See comment in interrupt handler.
*/
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
- rt2x00pci_rxdone(rt2x00dev);
+}
- /*
- * 4 - Auto wakeup interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
- rt2800pci_wakeup(rt2x00dev);
+static void rt2800pci_pretbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_pretbtt(rt2x00dev);
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
+}
- /* Enable interrupts again. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_ON_ISR);
+static void rt2800pci_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_beacondone(rt2x00dev);
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
+}
- return IRQ_HANDLED;
+static void rt2800pci_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00pci_rxdone(rt2x00dev);
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
+}
+
+static void rt2800pci_autowake_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2800pci_wakeup(rt2x00dev);
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
}
static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
@@ -772,25 +835,18 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
*
* Furthermore we don't disable the TX_FIFO_STATUS
* interrupt here but leave it enabled so that the TX_STA_FIFO
- * can also be read while the interrupt thread gets executed.
+ * can also be read while the tx status tasklet gets executed.
*
* Since we have only one producer and one consumer we don't
* need to lock the kfifo.
*/
- for (i = 0; i < TX_ENTRIES; i++) {
+ for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
rt2800_register_read(rt2x00dev, TX_STA_FIFO, &status);
if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
break;
- if (kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
- WARNING(rt2x00dev, "TX status FIFO overrun,"
- " drop tx status report.\n");
- break;
- }
-
- if (kfifo_in(&rt2x00dev->txstatus_fifo, &status,
- sizeof(status)) != sizeof(status)) {
+ if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
WARNING(rt2x00dev, "TX status FIFO overrun,"
"drop tx status report.\n");
break;
@@ -804,8 +860,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg;
- irqreturn_t ret = IRQ_HANDLED;
+ u32 reg, mask;
/* Read status and ACK all interrupts */
rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
@@ -817,38 +872,44 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return IRQ_HANDLED;
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
- rt2800pci_txstatus_interrupt(rt2x00dev);
+ /*
+ * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+ * for interrupts and interrupt masks we can just use the value of
+ * INT_SOURCE_CSR to create the interrupt mask.
+ */
+ mask = ~reg;
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT) ||
- rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT) ||
- rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE) ||
- rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) {
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
+ rt2800pci_txstatus_interrupt(rt2x00dev);
/*
- * All other interrupts are handled in the interrupt thread.
- * Store irqvalue for use in the interrupt thread.
+ * Never disable the TX_FIFO_STATUS interrupt.
*/
- rt2x00dev->irqvalue[0] = reg;
+ rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+ }
- /*
- * Disable interrupts, will be enabled again in the
- * interrupt thread.
- */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_OFF_ISR);
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
+ tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
- /*
- * Leave the TX_FIFO_STATUS interrupt enabled to not lose any
- * tx status reports.
- */
- rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
- rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
- ret = IRQ_WAKE_THREAD;
- }
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
- return ret;
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
+ tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now,
+ * the tasklet will reenable the appropriate interrupts.
+ */
+ spin_lock(&rt2x00dev->irqmask_lock);
+ rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg &= mask;
+ rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock(&rt2x00dev->irqmask_lock);
+
+ return IRQ_HANDLED;
}
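The reworked handler above derives the new interrupt mask directly from INT_SOURCE_CSR: every source that fired gets masked and its tasklet re-enables it later, except TX_FIFO_STATUS, which always stays enabled so no tx status report is lost. A standalone sketch of that mask arithmetic (bit assignments are illustrative, not the real register layout):

    #include <stdio.h>

    /* Illustrative bit assignments, not the actual INT_SOURCE_CSR layout. */
    #define INT_TX_FIFO_STATUS  (1u << 0)
    #define INT_PRE_TBTT        (1u << 1)
    #define INT_TBTT            (1u << 2)
    #define INT_RX_DONE         (1u << 3)
    #define INT_AUTO_WAKEUP     (1u << 4)

    int main(void)
    {
        unsigned int source = INT_RX_DONE | INT_TX_FIFO_STATUS; /* interrupts that fired */
        unsigned int cur_mask = 0x1f;                           /* currently enabled interrupts */
        unsigned int mask;

        /* Disable everything that fired... */
        mask = ~source;
        /* ...but never disable the tx status interrupt. */
        mask |= INT_TX_FIFO_STATUS;

        cur_mask &= mask;   /* new INT_MASK_CSR value to write back */
        printf("new mask: 0x%02x (RX done now masked, TX FIFO status still enabled)\n",
               cur_mask);
        return 0;
    }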
/*
@@ -912,9 +973,11 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_TASKLET_CONTEXT, &rt2x00dev->flags);
if (!modparam_nohwcrypt)
__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
__set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags);
/*
* Set the rssi offset.
@@ -943,6 +1006,8 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
.get_tsf = rt2800_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
.ampdu_action = rt2800_ampdu_action,
+ .flush = rt2x00mac_flush,
+ .get_survey = rt2800_get_survey,
};
static const struct rt2800_ops rt2800pci_rt2800_ops = {
@@ -960,8 +1025,11 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.irq_handler = rt2800pci_interrupt,
- .irq_handler_thread = rt2800pci_interrupt_thread,
- .txstatus_tasklet = rt2800pci_txstatus_tasklet,
+ .txstatus_tasklet = rt2800pci_txstatus_tasklet,
+ .pretbtt_tasklet = rt2800pci_pretbtt_tasklet,
+ .tbtt_tasklet = rt2800pci_tbtt_tasklet,
+ .rxdone_tasklet = rt2800pci_rxdone_tasklet,
+ .autowake_tasklet = rt2800pci_autowake_tasklet,
.probe_hw = rt2800pci_probe_hw,
.get_firmware_name = rt2800pci_get_firmware_name,
.check_firmware = rt2800_check_firmware,
@@ -975,11 +1043,13 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.link_stats = rt2800_link_stats,
.reset_tuner = rt2800_reset_tuner,
.link_tuner = rt2800_link_tuner,
+ .start_queue = rt2800pci_start_queue,
+ .kick_queue = rt2800pci_kick_queue,
+ .stop_queue = rt2800pci_stop_queue,
.write_tx_desc = rt2800pci_write_tx_desc,
.write_tx_data = rt2800_write_tx_data,
.write_beacon = rt2800_write_beacon,
- .kick_tx_queue = rt2800pci_kick_tx_queue,
- .kill_tx_queue = rt2800pci_kill_tx_queue,
+ .clear_beacon = rt2800_clear_beacon,
.fill_rxdone = rt2800pci_fill_rxdone,
.config_shared_key = rt2800_config_shared_key,
.config_pairwise_key = rt2800_config_pairwise_key,
@@ -991,21 +1061,21 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
};
static const struct data_queue_desc rt2800pci_queue_rx = {
- .entry_num = RX_ENTRIES,
+ .entry_num = 128,
.data_size = AGGREGATION_SIZE,
.desc_size = RXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
};
static const struct data_queue_desc rt2800pci_queue_tx = {
- .entry_num = TX_ENTRIES,
+ .entry_num = 64,
.data_size = AGGREGATION_SIZE,
.desc_size = TXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
};
static const struct data_queue_desc rt2800pci_queue_bcn = {
- .entry_num = 8 * BEACON_ENTRIES,
+ .entry_num = 8,
.data_size = 0, /* No DMA required for beacons */
.desc_size = TXWI_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
@@ -1033,12 +1103,15 @@ static const struct rt2x00_ops rt2800pci_ops = {
/*
* RT2800pci module information.
*/
-#ifdef CONFIG_RT2800PCI_PCI
+#ifdef CONFIG_PCI
static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1046,35 +1119,38 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
-#ifdef CONFIG_RT2800PCI_RT30XX
- { PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
+#ifdef CONFIG_RT2800PCI_RT33XX
+ { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
#ifdef CONFIG_RT2800PCI_RT35XX
+ { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
+#ifdef CONFIG_RT2800PCI_RT53XX
+ { PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) },
+#endif
{ 0, }
};
-#endif /* CONFIG_RT2800PCI_PCI */
+#endif /* CONFIG_PCI */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
-#ifdef CONFIG_RT2800PCI_PCI
+#ifdef CONFIG_PCI
MODULE_FIRMWARE(FIRMWARE_RT2860);
MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
-#endif /* CONFIG_RT2800PCI_PCI */
+#endif /* CONFIG_PCI */
MODULE_LICENSE("GPL");
-#ifdef CONFIG_RT2800PCI_SOC
+#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
static int rt2800soc_probe(struct platform_device *pdev)
{
return rt2x00soc_probe(pdev, &rt2800pci_ops);
@@ -1091,9 +1167,9 @@ static struct platform_driver rt2800soc_driver = {
.suspend = rt2x00soc_suspend,
.resume = rt2x00soc_resume,
};
-#endif /* CONFIG_RT2800PCI_SOC */
+#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
-#ifdef CONFIG_RT2800PCI_PCI
+#ifdef CONFIG_PCI
static struct pci_driver rt2800pci_driver = {
.name = KBUILD_MODNAME,
.id_table = rt2800pci_device_table,
@@ -1102,21 +1178,21 @@ static struct pci_driver rt2800pci_driver = {
.suspend = rt2x00pci_suspend,
.resume = rt2x00pci_resume,
};
-#endif /* CONFIG_RT2800PCI_PCI */
+#endif /* CONFIG_PCI */
static int __init rt2800pci_init(void)
{
int ret = 0;
-#ifdef CONFIG_RT2800PCI_SOC
+#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
ret = platform_driver_register(&rt2800soc_driver);
if (ret)
return ret;
#endif
-#ifdef CONFIG_RT2800PCI_PCI
+#ifdef CONFIG_PCI
ret = pci_register_driver(&rt2800pci_driver);
if (ret) {
-#ifdef CONFIG_RT2800PCI_SOC
+#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
platform_driver_unregister(&rt2800soc_driver);
#endif
return ret;
@@ -1128,10 +1204,10 @@ static int __init rt2800pci_init(void)
static void __exit rt2800pci_exit(void)
{
-#ifdef CONFIG_RT2800PCI_PCI
+#ifdef CONFIG_PCI
pci_unregister_driver(&rt2800pci_driver);
#endif
-#ifdef CONFIG_RT2800PCI_SOC
+#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
platform_driver_unregister(&rt2800soc_driver);
#endif
}
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index 5a8dda9b5b5a..70e050d904c8 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -38,10 +38,10 @@
* Queue register offset macros
*/
#define TX_QUEUE_REG_OFFSET 0x10
-#define TX_BASE_PTR(__x) TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET)
-#define TX_MAX_CNT(__x) TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET)
-#define TX_CTX_IDX(__x) TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET)
-#define TX_DTX_IDX(__x) TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET)
+#define TX_BASE_PTR(__x) (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_MAX_CNT(__x) (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_CTX_IDX(__x) (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_DTX_IDX(__x) (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
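The hunk above wraps the queue register macros in parentheses; without them the macro body can bind against operators at the expansion site and evaluate in the wrong order. A small standalone demonstration with a made-up base address and stride:

    #include <stdio.h>

    #define BASE    0x1000
    #define OFFSET  0x10

    /* Unparenthesized: '2 *' below multiplies only BASE before the offset is added. */
    #define REG_BAD(x)   BASE + ((x) * OFFSET)
    /* Parenthesized: the whole expression is one term, as intended. */
    #define REG_GOOD(x)  (BASE + ((x) * OFFSET))

    int main(void)
    {
        printf("bad:  0x%x\n", 2 * REG_BAD(3));  /* 2 * 0x1000 + 0x30   = 0x2030 */
        printf("good: 0x%x\n", 2 * REG_GOOD(3)); /* 2 * (0x1000 + 0x30) = 0x2060 */
        return 0;
    }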
/*
* 8051 firmware image.
@@ -52,8 +52,8 @@
/*
* DMA descriptor defines.
*/
-#define TXD_DESC_SIZE ( 4 * sizeof(__le32) )
-#define RXD_DESC_SIZE ( 4 * sizeof(__le32) )
+#define TXD_DESC_SIZE (4 * sizeof(__le32))
+#define RXD_DESC_SIZE (4 * sizeof(__le32))
/*
* TX descriptor format for TX, PRIO and Beacon Ring.
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 3dff56ec195a..f1a92144996f 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -45,11 +45,60 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 0;
+static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
/*
+ * Queue handlers.
+ */
+static void rt2800usb_start_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+ break;
+ case QID_BEACON:
+ rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rt2800usb_stop_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+ rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+ break;
+ case QID_BEACON:
+ rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
* Firmware functions
*/
static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
@@ -107,18 +156,6 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
/*
* Device state switch handlers.
*/
-static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
- enum dev_state state)
-{
- u32 reg;
-
- rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
- rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
- (state == STATE_RADIO_RX_ON) ||
- (state == STATE_RADIO_RX_ON_LINK));
- rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-}
-
static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -165,7 +202,8 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
* this limit so reduce the number to prevent errors.
*/
rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT,
- ((RX_ENTRIES * DATA_FRAME_SIZE) / 1024) - 3);
+ ((rt2x00dev->ops->rx->entry_num * DATA_FRAME_SIZE)
+ / 1024) - 3);
rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1);
rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg);
@@ -183,9 +221,9 @@ static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
if (state == STATE_AWAKE)
- rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0);
+ rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 2);
else
- rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);
+ rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
return 0;
}
@@ -214,16 +252,8 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
rt2800usb_disable_radio(rt2x00dev);
rt2800usb_set_state(rt2x00dev, STATE_SLEEP);
break;
- case STATE_RADIO_RX_ON:
- case STATE_RADIO_RX_ON_LINK:
- case STATE_RADIO_RX_OFF:
- case STATE_RADIO_RX_OFF_LINK:
- rt2800usb_toggle_rx(rt2x00dev, state);
- break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
/* No support, but no error either */
break;
case STATE_DEEP_SLEEP:
@@ -245,6 +275,49 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
}
/*
+ * Watchdog handlers
+ */
+static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u32 reg;
+
+ rt2800_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
+ if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) {
+ WARNING(rt2x00dev, "TX HW queue 0 timed out,"
+ " invoke forced kick\n");
+
+ rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40012);
+
+ for (i = 0; i < 10; i++) {
+ udelay(10);
+ if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q))
+ break;
+ }
+
+ rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
+ }
+
+ rt2800_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
+ if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) {
+ WARNING(rt2x00dev, "TX HW queue 1 timed out,"
+ " invoke forced kick\n");
+
+ rt2800_register_write(rt2x00dev, PBF_CFG, 0xf4000a);
+
+ for (i = 0; i < 10; i++) {
+ udelay(10);
+ if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q))
+ break;
+ }
+
+ rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
+ }
+
+ rt2x00usb_watchdog(rt2x00dev);
+}
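The watchdog above force-kicks a stuck TX queue and then polls briefly for the queue counter to drain before restoring PBF_CFG. A standalone sketch of that bounded-poll pattern (the register read is faked and drains after a few passes; the real code reads TXRXQ_PCNT and waits 10 microseconds per iteration, and re-reading the counter on every pass is assumed here):

    #include <stdio.h>

    /* Stand-in for reading the TX0Q field of TXRXQ_PCNT: drains after a few polls. */
    static unsigned int read_tx0q_count(void)
    {
        static unsigned int pending = 3;
        return pending ? pending-- : 0;
    }

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < 10; i++) {
            /* udelay(10) in the driver; omitted in this sketch. */
            if (!read_tx0q_count()) {
                printf("queue drained after %u polls\n", i + 1);
                return 0;
            }
        }
        printf("queue still stuck after %u polls\n", i);
        return 0;
    }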
+
+/*
* TX descriptor initialization
*/
static __le32 *rt2800usb_get_txwi(struct queue_entry *entry)
@@ -266,8 +339,14 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
* Initialize TXINFO descriptor
*/
rt2x00_desc_read(txi, 0, &word);
+
+ /*
+ * The size of TXINFO_W0_USB_DMA_TX_PKT_LEN is
+ * TXWI + 802.11 header + L2 pad + payload + pad,
+ * so the TXINFO size and the USB end pad must be subtracted.
+ */
rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
- entry->skb->len - TXINFO_DESC_SIZE);
+ entry->skb->len - TXINFO_DESC_SIZE - 4);
rt2x00_set_field32(&word, TXINFO_W0_WIV,
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@@ -285,22 +364,37 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
}
-/*
- * TX data initialization
- */
-static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
+static void rt2800usb_write_tx_data(struct queue_entry *entry,
+ struct txentry_desc *txdesc)
{
- int length;
+ unsigned int len;
+ int err;
+
+ rt2800_write_tx_data(entry, txdesc);
/*
- * The length _must_ include 4 bytes padding,
- * it should always be multiple of 4,
- * but it must _not_ be a multiple of the USB packet size.
+ * A pad of 1-3 bytes is added after each 802.11 payload.
+ * A USB end pad of 4 bytes is added at the end of each USB bulk out packet.
+ * The TX frame format is:
+ * | TXINFO | TXWI | 802.11 header | L2 pad | payload | pad | USB end pad |
+ * |<------------- tx_pkt_len ------------->|
*/
- length = roundup(entry->skb->len + 4, 4);
- length += (4 * !(length % entry->queue->usb_maxpacket));
+ len = roundup(entry->skb->len, 4) + 4;
+ err = skb_padto(entry->skb, len);
+ if (unlikely(err)) {
+ WARNING(entry->queue->rt2x00dev, "TX SKB padding error, out of memory\n");
+ return;
+ }
- return length;
+ entry->skb->len = len;
+}
+
+/*
+ * TX data initialization
+ */
+static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
+{
+ return entry->skb->len;
}
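With the new rt2800usb_write_tx_data() the frame is already padded to a 4-byte boundary and extended by the 4-byte USB end pad, so rt2800usb_get_tx_data_len() can simply return skb->len. A standalone sketch of the length computation (the frame lengths are arbitrary examples):

    #include <stdio.h>

    /* Round x up to the next multiple of y, as roundup() does in the kernel. */
    #define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))

    static unsigned int usb_tx_len(unsigned int skb_len)
    {
        /* Up to 3 bytes of alignment pad, then the fixed 4-byte USB end pad. */
        return ROUNDUP(skb_len, 4) + 4;
    }

    int main(void)
    {
        unsigned int lens[] = { 60, 61, 62, 63, 64 };
        unsigned int i;

        for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
            printf("skb len %u -> bulk-out len %u\n", lens[i], usb_tx_len(lens[i]));
        return 0;
    }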
/*
@@ -335,14 +429,6 @@ static void rt2800usb_work_txdone(struct work_struct *work)
}
}
-static void rt2800usb_kill_tx_queue(struct data_queue *queue)
-{
- if (queue->qid == QID_BEACON)
- rt2x00usb_register_write(queue->rt2x00dev, BCN_TIME_CFG, 0);
-
- rt2x00usb_kill_tx_queue(queue);
-}
-
/*
* RX control handlers
*/
@@ -398,6 +484,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+ /*
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -473,6 +565,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
__set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
__set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);
+ __set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags);
/*
* Set the rssi offset.
@@ -507,6 +600,8 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
.get_tsf = rt2800_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
.ampdu_action = rt2800_ampdu_action,
+ .flush = rt2x00mac_flush,
+ .get_survey = rt2800_get_survey,
};
static const struct rt2800_ops rt2800usb_rt2800_ops = {
@@ -535,13 +630,16 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.link_stats = rt2800_link_stats,
.reset_tuner = rt2800_reset_tuner,
.link_tuner = rt2800_link_tuner,
- .watchdog = rt2x00usb_watchdog,
+ .watchdog = rt2800usb_watchdog,
+ .start_queue = rt2800usb_start_queue,
+ .kick_queue = rt2x00usb_kick_queue,
+ .stop_queue = rt2800usb_stop_queue,
+ .flush_queue = rt2x00usb_flush_queue,
.write_tx_desc = rt2800usb_write_tx_desc,
- .write_tx_data = rt2800_write_tx_data,
+ .write_tx_data = rt2800usb_write_tx_data,
.write_beacon = rt2800_write_beacon,
+ .clear_beacon = rt2800_clear_beacon,
.get_tx_data_len = rt2800usb_get_tx_data_len,
- .kick_tx_queue = rt2x00usb_kick_tx_queue,
- .kill_tx_queue = rt2800usb_kill_tx_queue,
.fill_rxdone = rt2800usb_fill_rxdone,
.config_shared_key = rt2800_config_shared_key,
.config_pairwise_key = rt2800_config_pairwise_key,
@@ -553,21 +651,21 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
};
static const struct data_queue_desc rt2800usb_queue_rx = {
- .entry_num = RX_ENTRIES,
+ .entry_num = 128,
.data_size = AGGREGATION_SIZE,
.desc_size = RXINFO_DESC_SIZE + RXWI_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb),
};
static const struct data_queue_desc rt2800usb_queue_tx = {
- .entry_num = TX_ENTRIES,
+ .entry_num = 64,
.data_size = AGGREGATION_SIZE,
.desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb),
};
static const struct data_queue_desc rt2800usb_queue_bcn = {
- .entry_num = 8 * BEACON_ENTRIES,
+ .entry_num = 8,
.data_size = MGMT_FRAME_SIZE,
.desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb),
@@ -599,11 +697,19 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Abocom */
{ USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* AirTies */
+ { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Allwin */
{ USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Amit */
{ USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Askey */
@@ -612,8 +718,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AzureWave */
{ USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3307), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3321), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Belkin */
{ USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -624,6 +735,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -632,17 +744,36 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
/* D-Link */
{ USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Draytek */
+ { USB_DEVICE(0x07fa, 0x7712), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Edimax */
+ { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Encore */
+ { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
/* EnGenius */
{ USB_DEVICE(0x1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gigabyte */
{ USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Hawking */
{ USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -651,6 +782,10 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0e66, 0x0013), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0e66, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0e66, 0x0018), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* I-O DATA */
+ { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Linksys */
{ USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -658,17 +793,44 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0789, 0x0166), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Motorola */
{ USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
/* MSI */
+ { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Para */
+ { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Pegatron */
+ { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Philips */
{ USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Planex */
+ { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Quanta */
+ { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Ralink */
+ { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Samsung */
{ USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Siemens */
@@ -681,13 +843,22 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
/* SMC */
{ USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa703), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sparklan */
{ USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -701,101 +872,16 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Zinwell */
{ USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Zyxel */
{ USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
-#ifdef CONFIG_RT2800USB_RT30XX
- /* Abocom */
- { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* AirTies */
- { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Allwin */
- { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* ASUS */
- { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* AzureWave */
- { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3307), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3321), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Conceptronic */
- { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Corega */
- { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* D-Link */
- { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Draytek */
- { USB_DEVICE(0x07fa, 0x7712), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Edimax */
- { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Encore */
- { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* EnGenius */
- { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Gigabyte */
- { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* I-O DATA */
- { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Logitec */
- { USB_DEVICE(0x0789, 0x0166), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* MSI */
- { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x822b), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x822c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x871b), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x871c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Para */
- { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Pegatron */
- { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Planex */
- { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Quanta */
- { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
+#ifdef CONFIG_RT2800USB_RT33XX
/* Ralink */
- { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3370), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sitecom */
- { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* SMC */
- { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0xa703), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Zinwell */
- { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0050), USB_DEVICE_DATA(&rt2800usb_ops) },
#endif
#ifdef CONFIG_RT2800USB_RT35XX
/* Allwin */
@@ -809,12 +895,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* I-O DATA */
{ USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Ralink */
- { USB_DEVICE(0x148f, 0x3370), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0050), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Zinwell */
{ USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
#endif
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 0722badccf86..671ea3592610 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -40,8 +40,8 @@
/*
* DMA descriptor defines.
*/
-#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
-#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
+#define TXINFO_DESC_SIZE (1 * sizeof(__le32))
+#define RXINFO_DESC_SIZE (1 * sizeof(__le32))
/*
* TX Info structure
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 94fe589acfaa..a3940d7300a4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -66,7 +66,7 @@
#ifdef CONFIG_RT2X00_DEBUG
#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \
- DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, ##__args);
+ DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, ##__args)
#else
#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \
do { } while (0)
@@ -189,6 +189,7 @@ struct rt2x00_chip {
#define RT3572 0x3572
#define RT3593 0x3593 /* PCIe */
#define RT3883 0x3883 /* WSOC */
+#define RT5390 0x5390 /* 2.4GHz */
u16 rf;
u16 rev;
@@ -225,6 +226,8 @@ struct channel_info {
struct antenna_setup {
enum antenna rx;
enum antenna tx;
+ u8 rx_chain_num;
+ u8 tx_chain_num;
};
/*
@@ -347,6 +350,10 @@ struct link {
struct delayed_work watchdog_work;
};
+enum rt2x00_delayed_flags {
+ DELAYED_UPDATE_BEACON,
+};
+
/*
* Interface structure
* Per interface configuration details, this structure
@@ -354,22 +361,6 @@ struct link {
*/
struct rt2x00_intf {
/*
- * All fields within the rt2x00_intf structure
- * must be protected with a spinlock.
- */
- spinlock_t lock;
-
- /*
- * MAC of the device.
- */
- u8 mac[ETH_ALEN];
-
- /*
- * BBSID of the AP to associate with.
- */
- u8 bssid[ETH_ALEN];
-
- /*
* beacon->skb must be protected with the mutex.
*/
struct mutex beacon_skb_mutex;
@@ -380,12 +371,12 @@ struct rt2x00_intf {
* dedicated beacon entry.
*/
struct queue_entry *beacon;
+ bool enable_beacon;
/*
* Actions that needed rescheduling.
*/
- unsigned int delayed_flags;
-#define DELAYED_UPDATE_BEACON 0x00000001
+ unsigned long delayed_flags;
/*
* Software sequence counter, this is only required
@@ -476,7 +467,6 @@ struct rt2x00lib_crypto {
const u8 *address;
u32 bssidx;
- u32 aid;
u8 key[16];
u8 tx_mic[8];
@@ -524,14 +514,13 @@ struct rt2x00lib_ops {
irq_handler_t irq_handler;
/*
- * Threaded Interrupt handlers.
- */
- irq_handler_t irq_handler_thread;
-
- /*
* TX status tasklet handler.
*/
void (*txstatus_tasklet) (unsigned long data);
+ void (*pretbtt_tasklet) (unsigned long data);
+ void (*tbtt_tasklet) (unsigned long data);
+ void (*rxdone_tasklet) (unsigned long data);
+ void (*autowake_tasklet) (unsigned long data);
/*
* Device init handlers.
@@ -567,7 +556,15 @@ struct rt2x00lib_ops {
struct link_qual *qual);
void (*link_tuner) (struct rt2x00_dev *rt2x00dev,
struct link_qual *qual, const u32 count);
+
+ /*
+ * Data queue handlers.
+ */
void (*watchdog) (struct rt2x00_dev *rt2x00dev);
+ void (*start_queue) (struct data_queue *queue);
+ void (*kick_queue) (struct data_queue *queue);
+ void (*stop_queue) (struct data_queue *queue);
+ void (*flush_queue) (struct data_queue *queue);
/*
* TX control handlers
@@ -578,9 +575,8 @@ struct rt2x00lib_ops {
struct txentry_desc *txdesc);
void (*write_beacon) (struct queue_entry *entry,
struct txentry_desc *txdesc);
+ void (*clear_beacon) (struct queue_entry *entry);
int (*get_tx_data_len) (struct queue_entry *entry);
- void (*kick_tx_queue) (struct data_queue *queue);
- void (*kill_tx_queue) (struct data_queue *queue);
/*
* RX control handlers
@@ -664,12 +660,16 @@ enum rt2x00_flags {
DRIVER_REQUIRE_COPY_IV,
DRIVER_REQUIRE_L2PAD,
DRIVER_REQUIRE_TXSTATUS_FIFO,
+ DRIVER_REQUIRE_TASKLET_CONTEXT,
+ DRIVER_REQUIRE_SW_SEQNO,
+ DRIVER_REQUIRE_HT_TX_DESC,
/*
* Driver features
*/
CONFIG_SUPPORT_HW_BUTTON,
CONFIG_SUPPORT_HW_CRYPTO,
+ CONFIG_SUPPORT_POWER_LIMIT,
DRIVER_SUPPORT_CONTROL_FILTERS,
DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL,
DRIVER_SUPPORT_PRE_TBTT_INTERRUPT,
@@ -794,10 +794,12 @@ struct rt2x00_dev {
* - Open ap interface count.
* - Open sta interface count.
* - Association count.
+ * - Beaconing enabled count.
*/
unsigned int intf_ap_count;
unsigned int intf_sta_count;
unsigned int intf_associated;
+ unsigned int intf_beaconing;
/*
* Link quality
@@ -863,6 +865,13 @@ struct rt2x00_dev {
*/
struct ieee80211_low_level_stats low_level_stats;
+ /**
+ * Work queue for all work which should not be placed
+ * on the mac80211 workqueue (because of dependencies
+ * between various work structures).
+ */
+ struct workqueue_struct *workqueue;
+
/*
* Scheduled work.
* NOTE: intf_work will use ieee80211_iterate_active_interfaces()
@@ -878,14 +887,13 @@ struct rt2x00_dev {
struct work_struct txdone_work;
/*
- * Data queue arrays for RX, TX and Beacon.
- * The Beacon array also contains the Atim queue
- * if that is supported by the device.
+ * Data queue arrays for RX, TX, Beacon and ATIM.
*/
unsigned int data_queues;
struct data_queue *rx;
struct data_queue *tx;
struct data_queue *bcn;
+ struct data_queue *atim;
/*
* Firmware image.
@@ -893,20 +901,23 @@ struct rt2x00_dev {
const struct firmware *fw;
/*
- * Interrupt values, stored between interrupt service routine
- * and interrupt thread routine.
- */
- u32 irqvalue[2];
-
- /*
* FIFO for storing tx status reports between isr and tasklet.
*/
- struct kfifo txstatus_fifo;
+ DECLARE_KFIFO_PTR(txstatus_fifo, u32);
/*
* Tasklet for processing tx status reports (rt2800pci).
*/
struct tasklet_struct txstatus_tasklet;
+ struct tasklet_struct pretbtt_tasklet;
+ struct tasklet_struct tbtt_tasklet;
+ struct tasklet_struct rxdone_tasklet;
+ struct tasklet_struct autowake_tasklet;
+
+ /*
+ * Protect the interrupt mask register.
+ */
+ spinlock_t irqmask_lock;
};
/*
@@ -915,7 +926,7 @@ struct rt2x00_dev {
* in those cases REGISTER_BUSY_COUNT attempts should be
* taken with a REGISTER_BUSY_DELAY interval.
*/
-#define REGISTER_BUSY_COUNT 5
+#define REGISTER_BUSY_COUNT 100
#define REGISTER_BUSY_DELAY 100
/*
@@ -1052,12 +1063,24 @@ void rt2x00queue_map_txskb(struct queue_entry *entry);
void rt2x00queue_unmap_skb(struct queue_entry *entry);
/**
- * rt2x00queue_get_queue - Convert queue index to queue pointer
+ * rt2x00queue_get_tx_queue - Convert tx queue index to queue pointer
* @rt2x00dev: Pointer to &struct rt2x00_dev.
* @queue: rt2x00 queue index (see &enum data_queue_qid).
+ *
+ * Returns NULL for non-tx queues.
*/
-struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue);
+static inline struct data_queue *
+rt2x00queue_get_tx_queue(struct rt2x00_dev *rt2x00dev,
+ const enum data_queue_qid queue)
+{
+ if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
+ return &rt2x00dev->tx[queue];
+
+ if (queue == QID_ATIM)
+ return rt2x00dev->atim;
+
+ return NULL;
+}
/**
* rt2x00queue_get_entry - Get queue entry where the given index points to.
@@ -1067,6 +1090,78 @@ struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
enum queue_index index);
+/**
+ * rt2x00queue_pause_queue - Pause a data queue
+ * @queue: Pointer to &struct data_queue.
+ *
+ * This function will pause the data queue locally, preventing
+ * new frames to be added to the queue (while the hardware is
+ * still allowed to run).
+ */
+void rt2x00queue_pause_queue(struct data_queue *queue);
+
+/**
+ * rt2x00queue_unpause_queue - unpause a data queue
+ * @queue: Pointer to &struct data_queue.
+ *
+ * This function will unpause the data queue locally, allowing
+ * new frames to be added to the queue again.
+ */
+void rt2x00queue_unpause_queue(struct data_queue *queue);
+
+/**
+ * rt2x00queue_start_queue - Start a data queue
+ * @queue: Pointer to &struct data_queue.
+ *
+ * This function will start handling all pending frames in the queue.
+ */
+void rt2x00queue_start_queue(struct data_queue *queue);
+
+/**
+ * rt2x00queue_stop_queue - Halt a data queue
+ * @queue: Pointer to &struct data_queue.
+ *
+ * This function will stop all pending frames in the queue.
+ */
+void rt2x00queue_stop_queue(struct data_queue *queue);
+
+/**
+ * rt2x00queue_flush_queue - Flush a data queue
+ * @queue: Pointer to &struct data_queue.
+ * @drop: True to drop all pending frames.
+ *
+ * This function will flush the queue. After this call
+ * the queue is guaranteed to be empty.
+ */
+void rt2x00queue_flush_queue(struct data_queue *queue, bool drop);
+
+/**
+ * rt2x00queue_start_queues - Start all data queues
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ *
+ * This function will loop through all available queues to start them
+ */
+void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev);
+
+/**
+ * rt2x00queue_stop_queues - Halt all data queues
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ *
+ * This function will loop through all available queues to stop
+ * any pending frames.
+ */
+void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev);
+
+/**
+ * rt2x00queue_flush_queues - Flush all data queues
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @drop: True to drop all pending frames.
+ *
+ * This function will loop through all available queues to flush
+ * any pending frames.
+ */
+void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop);
+
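The *_queues() variants declared above simply walk every allocated data queue and apply the per-queue operation. A minimal standalone sketch of that iteration (the queue structure here is made up, not the driver's struct data_queue):

    #include <stdio.h>

    enum queue_id { QID_AC_BE, QID_AC_BK, QID_AC_VI, QID_AC_VO, QID_RX };

    struct queue {
        enum queue_id qid;
        int running;
    };

    static void queue_start(struct queue *q)
    {
        q->running = 1;
        printf("queue %d started\n", q->qid);
    }

    int main(void)
    {
        struct queue queues[] = {
            { QID_AC_BE, 0 }, { QID_AC_BK, 0 },
            { QID_AC_VI, 0 }, { QID_AC_VO, 0 }, { QID_RX, 0 },
        };
        unsigned int i;

        /* start_queues(): loop over every queue and start it. */
        for (i = 0; i < sizeof(queues) / sizeof(queues[0]); i++)
            queue_start(&queues[i]);
        return 0;
    }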
/*
* Debugfs handlers.
*/
@@ -1092,6 +1187,7 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
*/
void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev);
void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev);
+void rt2x00lib_dmastart(struct queue_entry *entry);
void rt2x00lib_dmadone(struct queue_entry *entry);
void rt2x00lib_txdone(struct queue_entry *entry,
struct txdone_entry_desc *txdesc);
@@ -1101,7 +1197,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry);
/*
* mac80211 handlers.
*/
-int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
int rt2x00mac_start(struct ieee80211_hw *hw);
void rt2x00mac_stop(struct ieee80211_hw *hw);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
@@ -1133,6 +1229,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params);
void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
+void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop);
/*
* Driver allocation handlers.
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 54ffb5aeb34e..e7f67d5eda52 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -62,13 +62,13 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
* This will prevent the device being confused when it wants
* to ACK frames or consideres itself associated.
*/
- memset(&conf.mac, 0, sizeof(conf.mac));
+ memset(conf.mac, 0, sizeof(conf.mac));
if (mac)
- memcpy(&conf.mac, mac, ETH_ALEN);
+ memcpy(conf.mac, mac, ETH_ALEN);
- memset(&conf.bssid, 0, sizeof(conf.bssid));
+ memset(conf.bssid, 0, sizeof(conf.bssid));
if (bssid)
- memcpy(&conf.bssid, bssid, ETH_ALEN);
+ memcpy(conf.bssid, bssid, ETH_ALEN);
flags |= CONFIG_UPDATE_TYPE;
if (mac || (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count))
@@ -133,7 +133,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
*/
if (!(ant->flags & ANTENNA_RX_DIVERSITY))
config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx);
- else if(config.rx == ANTENNA_SW_DIVERSITY)
+ else if (config.rx == ANTENNA_SW_DIVERSITY)
config.rx = active->rx;
if (!(ant->flags & ANTENNA_TX_DIVERSITY))
@@ -146,7 +146,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
* else the changes will be ignored by the device.
*/
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK);
+ rt2x00queue_stop_queue(rt2x00dev->rx);
/*
* Write new antenna setup to device and reset the link tuner.
@@ -160,7 +160,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
memcpy(active, &config, sizeof(config));
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK);
+ rt2x00queue_start_queue(rt2x00dev->rx);
}
void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index fcdb6b0dc40f..c92db3264741 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -162,11 +162,11 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
struct timeval timestamp;
u32 data_len;
- do_gettimeofday(&timestamp);
-
- if (!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags))
+ if (likely(!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags)))
return;
+ do_gettimeofday(&timestamp);
+
if (skb_queue_len(&intf->frame_dump_skbqueue) > 20) {
DEBUG(rt2x00dev, "txrx dump queue length exceeded.\n");
return;
@@ -339,18 +339,19 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
return -ENOMEM;
temp = data +
- sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdma done\tdone\n");
+ sprintf(data, "qid\tflags\t\tcount\tlimit\tlength\tindex\tdma done\tdone\n");
queue_for_each(intf->rt2x00dev, queue) {
- spin_lock_irqsave(&queue->lock, irqflags);
+ spin_lock_irqsave(&queue->index_lock, irqflags);
- temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid,
+ temp += sprintf(temp, "%d\t0x%.8x\t%d\t%d\t%d\t%d\t%d\t\t%d\n",
+ queue->qid, (unsigned int)queue->flags,
queue->count, queue->limit, queue->length,
queue->index[Q_INDEX],
queue->index[Q_INDEX_DMA_DONE],
queue->index[Q_INDEX_DONE]);
- spin_unlock_irqrestore(&queue->lock, irqflags);
+ spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
size = strlen(data);
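
The queue_stats hunk adds a flags column to the debugfs output. A small user-space sketch of the resulting row layout; the sample numbers and flag bits below are made up for illustration:

/* Sketch of the new debugfs queue_stats row layout with the added flags
 * column; plain snprintf stands in for the kernel buffer handling. */
#include <stdio.h>

int main(void)
{
	char row[128];
	unsigned long flags = 0x3;	/* e.g. QUEUE_STARTED | QUEUE_PAUSED */

	printf("qid\tflags\t\tcount\tlimit\tlength\tindex\tdma done\tdone\n");
	snprintf(row, sizeof(row), "%d\t0x%.8x\t%d\t%d\t%d\t%d\t%d\t\t%d\n",
		 0, (unsigned int)flags, 120, 24, 3, 5, 4, 2);
	fputs(row, stdout);
	return 0;
}
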
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 5ba79b935f09..9de9dbe94399 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -66,20 +66,16 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
set_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags);
/*
- * Enable RX.
+ * Enable queues.
*/
- rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+ rt2x00queue_start_queues(rt2x00dev);
+ rt2x00link_start_tuner(rt2x00dev);
/*
* Start watchdog monitoring.
*/
rt2x00link_start_watchdog(rt2x00dev);
- /*
- * Start the TX queues.
- */
- ieee80211_wake_queues(rt2x00dev->hw);
-
return 0;
}
@@ -89,20 +85,16 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
return;
/*
- * Stop the TX queues in mac80211.
- */
- ieee80211_stop_queues(rt2x00dev->hw);
- rt2x00queue_stop_queues(rt2x00dev);
-
- /*
* Stop watchdog monitoring.
*/
rt2x00link_stop_watchdog(rt2x00dev);
/*
- * Disable RX.
+ * Stop all queues
*/
- rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
+ rt2x00link_stop_tuner(rt2x00dev);
+ rt2x00queue_stop_queues(rt2x00dev);
+ rt2x00queue_flush_queues(rt2x00dev, true);
/*
* Disable radio.
@@ -113,41 +105,11 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2x00leds_led_radio(rt2x00dev, false);
}
-void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state)
-{
- /*
- * When we are disabling the RX, we should also stop the link tuner.
- */
- if (state == STATE_RADIO_RX_OFF)
- rt2x00link_stop_tuner(rt2x00dev);
-
- rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
-
- /*
- * When we are enabling the RX, we should also start the link tuner.
- */
- if (state == STATE_RADIO_RX_ON)
- rt2x00link_start_tuner(rt2x00dev);
-}
-
static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = data;
struct rt2x00_intf *intf = vif_to_intf(vif);
- int delayed_flags;
-
- /*
- * Copy all data we need during this action under the protection
- * of a spinlock. Otherwise race conditions might occur which results
- * into an invalid configuration.
- */
- spin_lock(&intf->lock);
-
- delayed_flags = intf->delayed_flags;
- intf->delayed_flags = 0;
-
- spin_unlock(&intf->lock);
/*
* It is possible the radio was disabled while the work had been
@@ -158,8 +120,8 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return;
- if (delayed_flags & DELAYED_UPDATE_BEACON)
- rt2x00queue_update_beacon(rt2x00dev, vif, true);
+ if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags))
+ rt2x00queue_update_beacon(rt2x00dev, vif);
}
static void rt2x00lib_intf_scheduled(struct work_struct *work)
@@ -212,7 +174,13 @@ static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac,
vif->type != NL80211_IFTYPE_WDS)
return;
- rt2x00queue_update_beacon(rt2x00dev, vif, true);
+ /*
+ * Update the beacon without locking. This is safe on PCI devices
+ * as they only update the beacon periodically here. This should
+ * never be called for USB devices.
+ */
+ WARN_ON(rt2x00_is_usb(rt2x00dev));
+ rt2x00queue_update_beacon_locked(rt2x00dev, vif);
}
void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
@@ -221,9 +189,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
return;
/* send buffered bc/mc frames out for every bssid */
- ieee80211_iterate_active_interfaces(rt2x00dev->hw,
- rt2x00lib_bc_buffer_iter,
- rt2x00dev);
+ ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+ rt2x00lib_bc_buffer_iter,
+ rt2x00dev);
/*
* Devices with pre tbtt interrupt don't need to update the beacon
* here as they will fetch the next beacon directly prior to
@@ -233,9 +201,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
return;
/* fetch next beacon */
- ieee80211_iterate_active_interfaces(rt2x00dev->hw,
- rt2x00lib_beaconupdate_iter,
- rt2x00dev);
+ ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+ rt2x00lib_beaconupdate_iter,
+ rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
@@ -245,14 +213,22 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
return;
/* fetch next beacon */
- ieee80211_iterate_active_interfaces(rt2x00dev->hw,
- rt2x00lib_beaconupdate_iter,
- rt2x00dev);
+ ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+ rt2x00lib_beaconupdate_iter,
+ rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
+void rt2x00lib_dmastart(struct queue_entry *entry)
+{
+ set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
+ rt2x00queue_index_inc(entry->queue, Q_INDEX);
+}
+EXPORT_SYMBOL_GPL(rt2x00lib_dmastart);
+
void rt2x00lib_dmadone(struct queue_entry *entry)
{
+ set_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags);
clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
rt2x00queue_index_inc(entry->queue, Q_INDEX_DMA_DONE);
}
@@ -264,11 +240,9 @@ void rt2x00lib_txdone(struct queue_entry *entry,
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
- unsigned int header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
+ unsigned int header_length, i;
u8 rate_idx, rate_flags, retry_rates;
u8 skbdesc_flags = skbdesc->flags;
- unsigned int i;
bool success;
/*
@@ -287,6 +261,11 @@ void rt2x00lib_txdone(struct queue_entry *entry,
skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
/*
+ * Determine the length of 802.11 header.
+ */
+ header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
+
+ /*
 * Remove L2 padding which was added during TX.
*/
if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
@@ -390,9 +369,12 @@ void rt2x00lib_txdone(struct queue_entry *entry,
* through a mac80211 library call (RTS/CTS) then we should not
* send the status report back.
*/
- if (!(skbdesc_flags & SKBDESC_NOT_MAC80211))
- ieee80211_tx_status(rt2x00dev->hw, entry->skb);
- else
+ if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
+ if (test_bit(DRIVER_REQUIRE_TASKLET_CONTEXT, &rt2x00dev->flags))
+ ieee80211_tx_status(rt2x00dev->hw, entry->skb);
+ else
+ ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
+ } else
dev_kfree_skb_any(entry->skb);
/*
@@ -411,7 +393,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
* is reenabled when the txdone handler has finished.
*/
if (!rt2x00queue_threshold(entry->queue))
- ieee80211_wake_queue(rt2x00dev->hw, qid);
+ rt2x00queue_unpause_queue(entry->queue);
}
EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
@@ -483,6 +465,10 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
unsigned int header_length;
int rate_idx;
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
+ !test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ goto submit_entry;
+
if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
goto submit_entry;
@@ -567,9 +553,11 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
entry->skb = skb;
submit_entry:
- rt2x00dev->ops->lib->clear_entry(entry);
- rt2x00queue_index_inc(entry->queue, Q_INDEX);
+ entry->flags = 0;
rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
+ if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
+ test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ rt2x00dev->ops->lib->clear_entry(entry);
}
EXPORT_SYMBOL_GPL(rt2x00lib_rxdone);
@@ -667,7 +655,10 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry,
const int channel, const int tx_power,
const int value)
{
- entry->center_freq = ieee80211_channel_to_frequency(channel);
+ /* XXX: this assumption about the band is wrong for 802.11j */
+ entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ entry->center_freq = ieee80211_channel_to_frequency(channel,
+ entry->band);
entry->hw_value = value;
entry->max_power = tx_power;
entry->max_antenna_gain = 0xff;
@@ -678,7 +669,7 @@ static void rt2x00lib_rate(struct ieee80211_rate *entry,
{
entry->flags = 0;
entry->bitrate = rate->bitrate;
- entry->hw_value =index;
+ entry->hw_value = index;
entry->hw_value_short = index;
if (rate->flags & DEV_RATE_SHORT_PREAMBLE)
@@ -818,8 +809,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Allocate tx status FIFO for driver use.
*/
- if (test_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags) &&
- rt2x00dev->ops->lib->txstatus_tasklet) {
+ if (test_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags)) {
/*
* Allocate txstatus fifo and tasklet, we use a size of 512
* for the kfifo which is big enough to store 512/4=128 tx
@@ -831,14 +821,29 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
GFP_KERNEL);
if (status)
return status;
+ }
- /* tasklet for processing the tx status reports. */
- tasklet_init(&rt2x00dev->txstatus_tasklet,
- rt2x00dev->ops->lib->txstatus_tasklet,
- (unsigned long)rt2x00dev);
-
+ /*
+ * Initialize tasklets if used by the driver. Tasklets are
+ * disabled until the interrupts are turned on. The driver
+ * has to handle that.
+ */
+#define RT2X00_TASKLET_INIT(taskletname) \
+ if (rt2x00dev->ops->lib->taskletname) { \
+ tasklet_init(&rt2x00dev->taskletname, \
+ rt2x00dev->ops->lib->taskletname, \
+ (unsigned long)rt2x00dev); \
+ tasklet_disable(&rt2x00dev->taskletname); \
}
+ RT2X00_TASKLET_INIT(txstatus_tasklet);
+ RT2X00_TASKLET_INIT(pretbtt_tasklet);
+ RT2X00_TASKLET_INIT(tbtt_tasklet);
+ RT2X00_TASKLET_INIT(rxdone_tasklet);
+ RT2X00_TASKLET_INIT(autowake_tasklet);
+
+#undef RT2X00_TASKLET_INIT
+
/*
* Register HW.
*/
@@ -967,6 +972,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
{
int retval = -ENOMEM;
+ spin_lock_init(&rt2x00dev->irqmask_lock);
mutex_init(&rt2x00dev->csr_mutex);
set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
@@ -991,8 +997,15 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
BIT(NL80211_IFTYPE_WDS);
/*
- * Initialize configuration work.
+ * Initialize work.
*/
+ rt2x00dev->workqueue =
+ alloc_ordered_workqueue(wiphy_name(rt2x00dev->hw->wiphy), 0);
+ if (!rt2x00dev->workqueue) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
/*
@@ -1051,6 +1064,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_work_sync(&rt2x00dev->intf_work);
cancel_work_sync(&rt2x00dev->rxdone_work);
cancel_work_sync(&rt2x00dev->txdone_work);
+ destroy_workqueue(rt2x00dev->workqueue);
/*
* Free the tx status fifo.
@@ -1061,6 +1075,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
* Kill the tx status tasklet.
*/
tasklet_kill(&rt2x00dev->txstatus_tasklet);
+ tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->rxdone_tasklet);
+ tasklet_kill(&rt2x00dev->autowake_tasklet);
/*
* Uninitialize device.
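
The rt2x00lib_channel() hunk now derives the band from the channel number and passes it to ieee80211_channel_to_frequency(). A standalone sketch of the usual 2.4/5 GHz mapping; the formulas are the conventional ones and, as the in-tree comment notes, they ignore the 802.11j cases:

/* Minimal user-space sketch of the channel-to-frequency mapping the patch
 * now requests per band; assumes the usual 2.4/5 GHz rules only. */
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

static int channel_to_freq(int channel, enum band band)
{
	if (band == BAND_2GHZ)
		return channel == 14 ? 2484 : 2407 + channel * 5;
	return 5000 + channel * 5;	/* BAND_5GHZ */
}

int main(void)
{
	int ch = 36;
	enum band band = ch <= 14 ? BAND_2GHZ : BAND_5GHZ;

	printf("channel %d -> %d MHz\n", ch, channel_to_freq(ch, band));
	return 0;
}
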
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index f0e1eb72befc..be0ff78c1b16 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -58,6 +58,7 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
if (!fw || !fw->size || !fw->data) {
ERROR(rt2x00dev, "Failed to read Firmware.\n");
+ release_firmware(fw);
return -ENOENT;
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index c637bcaec5f8..ae1219dffaae 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -38,14 +38,12 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
if (tx_info->control.sta)
- txdesc->mpdu_density =
+ txdesc->u.ht.mpdu_density =
tx_info->control.sta->ht_cap.ampdu_density;
- else
- txdesc->mpdu_density = 0;
- txdesc->ba_size = 7; /* FIXME: What value is needed? */
+ txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
- txdesc->stbc =
+ txdesc->u.ht.stbc =
(tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT;
/*
@@ -53,25 +51,24 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
* mcs rate to be used
*/
if (txrate->flags & IEEE80211_TX_RC_MCS) {
- txdesc->mcs = txrate->idx;
+ txdesc->u.ht.mcs = txrate->idx;
/*
* MIMO PS should be set to 1 for STA's using dynamic SM PS
 * when using more than one tx stream (>MCS7).
*/
- if (tx_info->control.sta && txdesc->mcs > 7 &&
+ if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
((tx_info->control.sta->ht_cap.cap &
IEEE80211_HT_CAP_SM_PS) >>
IEEE80211_HT_CAP_SM_PS_SHIFT) ==
WLAN_HT_CAP_SM_PS_DYNAMIC)
__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
} else {
- txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
+ txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- txdesc->mcs |= 0x08;
+ txdesc->u.ht.mcs |= 0x08;
}
-
/*
* This frame is eligible for an AMPDU, however, don't aggregate
* frames that are intended to probe a specific tx rate.
@@ -81,14 +78,6 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
/*
- * Determine HT Mix/Greenfield rate mode
- */
- if (txrate->flags & IEEE80211_TX_RC_MCS)
- txdesc->rate_mode = RATE_MODE_HT_MIX;
- if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
- txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
-
- /*
* Set 40Mhz mode if necessary (for legacy rates this will
* duplicate the frame to both channels).
*/
@@ -108,11 +97,11 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
* for frames not transmitted with TXOP_HTTXOP
*/
if (ieee80211_is_mgmt(hdr->frame_control))
- txdesc->txop = TXOP_BACKOFF;
+ txdesc->u.ht.txop = TXOP_BACKOFF;
else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
- txdesc->txop = TXOP_SIFS;
+ txdesc->u.ht.txop = TXOP_SIFS;
else
- txdesc->txop = TXOP_HTTXOP;
+ txdesc->u.ht.txop = TXOP_HTTXOP;
}
u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 619da23b7b56..2d94cbaf5f4a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -57,7 +57,7 @@ static inline const struct rt2x00_rate *rt2x00_get_rate(const u16 hw_value)
}
#define RATE_MCS(__mode, __mcs) \
- ( (((__mode) & 0x00ff) << 8) | ((__mcs) & 0x00ff) )
+ ((((__mode) & 0x00ff) << 8) | ((__mcs) & 0x00ff))
static inline int rt2x00_get_rate_mcs(const u16 mcs_value)
{
@@ -69,7 +69,6 @@ static inline int rt2x00_get_rate_mcs(const u16 mcs_value)
*/
int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev);
void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev);
-void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state);
/*
* Initialization handlers.
@@ -158,14 +157,30 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
bool local);
/**
- * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware
+ * rt2x00queue_update_beacon - Send new beacon from mac80211
+ * to hardware. Handles locking by itself (mutex).
* @rt2x00dev: Pointer to &struct rt2x00_dev.
* @vif: Interface for which the beacon should be updated.
- * @enable_beacon: Enable beaconing
*/
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_vif *vif,
- const bool enable_beacon);
+ struct ieee80211_vif *vif);
+
+/**
+ * rt2x00queue_update_beacon_locked - Send new beacon from mac80211
+ * to hardware. Caller needs to ensure locking.
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @vif: Interface for which the beacon should be updated.
+ */
+int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif);
+
+/**
+ * rt2x00queue_clear_beacon - Clear beacon in hardware
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @vif: Interface for which the beacon should be updated.
+ */
+int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif);
/**
* rt2x00queue_index_inc - Index incrementation function
@@ -179,15 +194,6 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index);
/**
- * rt2x00queue_stop_queues - Halt all data queues
- * @rt2x00dev: Pointer to &struct rt2x00_dev.
- *
- * This function will loop through all available queues to stop
- * any pending outgoing frames.
- */
-void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev);
-
-/**
* rt2x00queue_init_queues - Initialize all data queues
* @rt2x00dev: Pointer to &struct rt2x00_dev.
*
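
The RATE_MCS() macro cleaned up above packs the rate mode into the high byte and the MCS index into the low byte. A self-contained sketch of that packing and of the unpacking that rt2x00_get_rate_mcs() is assumed to perform:

/* Standalone sketch of the RATE_MCS packing shown above: the rate mode
 * occupies the high byte and the MCS index the low byte. */
#include <stdio.h>
#include <stdint.h>

#define RATE_MCS(mode, mcs)	((((mode) & 0x00ff) << 8) | ((mcs) & 0x00ff))

static inline int rate_mcs_value(uint16_t mcs_value)
{
	return mcs_value & 0x00ff;	/* mirrors the assumed unpack helper */
}

int main(void)
{
	uint16_t packed = RATE_MCS(2, 7);	/* hypothetical mode 2, MCS 7 */

	printf("packed=0x%04x mcs=%d\n", (unsigned int)packed, rate_mcs_value(packed));
	return 0;
}
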
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index b971d8798ebf..c975b0a12e95 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -67,7 +67,7 @@
(__avg).avg_weight ? \
((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \
((__val) * (AVG_FACTOR))) / \
- (AVG_SAMPLES) ) : \
+ (AVG_SAMPLES)) : \
((__val) * (AVG_FACTOR)); \
__new.avg = __new.avg_weight / (AVG_FACTOR); \
__new; \
@@ -417,7 +417,8 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev)
!test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags))
return;
- schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL);
+ ieee80211_queue_delayed_work(rt2x00dev->hw,
+ &link->watchdog_work, WATCHDOG_INTERVAL);
}
void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev)
@@ -441,7 +442,9 @@ static void rt2x00link_watchdog(struct work_struct *work)
rt2x00dev->ops->lib->watchdog(rt2x00dev);
if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
- schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL);
+ ieee80211_queue_delayed_work(rt2x00dev->hw,
+ &link->watchdog_work,
+ WATCHDOG_INTERVAL);
}
void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
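
The averaging macro touched in rt2x00link.c keeps a fixed-point moving average so integer RSSI samples don't lose precision. A user-space sketch of the same arithmetic; the AVG_SAMPLES/AVG_FACTOR values below are assumptions for illustration, not the driver's definitions:

/* User-space sketch of the link-quality moving average kept in fixed point,
 * matching the shape of the macro above. */
#include <stdio.h>

#define AVG_SAMPLES	8
#define AVG_FACTOR	1000

struct avg {
	int avg;	/* integer average, e.g. RSSI in dBm */
	int avg_weight;	/* scaled by AVG_FACTOR for precision */
};

static struct avg moving_average(struct avg a, int val)
{
	a.avg_weight = a.avg_weight ?
		(a.avg_weight * (AVG_SAMPLES - 1) + val * AVG_FACTOR) / AVG_SAMPLES :
		val * AVG_FACTOR;
	a.avg = a.avg_weight / AVG_FACTOR;
	return a;
}

int main(void)
{
	struct avg rssi = { 0, 0 };
	int samples[] = { -50, -52, -48, -70, -49 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		rssi = moving_average(rssi, samples[i]);

	printf("smoothed rssi: %d dBm\n", rssi.avg);
	return 0;
}
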
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index c3c206a97d54..661c6baad2b9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -99,12 +99,12 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
return retval;
}
-int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
enum data_queue_qid qid = skb_get_queue_mapping(skb);
- struct data_queue *queue;
+ struct data_queue *queue = NULL;
/*
* Mac80211 might be calling this function while we are trying
@@ -116,13 +116,13 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
goto exit_fail;
/*
- * Determine which queue to put packet on.
+ * Use the ATIM queue if appropriate and present.
*/
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
- queue = rt2x00queue_get_queue(rt2x00dev, QID_ATIM);
- else
- queue = rt2x00queue_get_queue(rt2x00dev, qid);
+ qid = QID_ATIM;
+
+ queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
if (unlikely(!queue)) {
ERROR(rt2x00dev,
"Attempt to send packet over invalid queue %d.\n"
@@ -139,9 +139,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* either RTS or CTS-to-self frame and handles everything
* inside the hardware.
*/
- if ((tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
- IEEE80211_TX_RC_USE_CTS_PROTECT)) &&
- !rt2x00dev->ops->hw->set_rts_threshold) {
+ if (!rt2x00dev->ops->hw->set_rts_threshold &&
+ (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
+ IEEE80211_TX_RC_USE_CTS_PROTECT))) {
if (rt2x00queue_available(queue) <= 1)
goto exit_fail;
@@ -149,18 +149,17 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
goto exit_fail;
}
- if (rt2x00queue_write_tx_frame(queue, skb, false))
+ if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
goto exit_fail;
if (rt2x00queue_threshold(queue))
- ieee80211_stop_queue(rt2x00dev->hw, qid);
+ rt2x00queue_pause_queue(queue);
- return NETDEV_TX_OK;
+ return;
exit_fail:
ieee80211_stop_queue(rt2x00dev->hw, qid);
dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(rt2x00mac_tx);
@@ -191,7 +190,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct rt2x00_intf *intf = vif_to_intf(vif);
- struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
+ struct data_queue *queue = rt2x00dev->bcn;
struct queue_entry *entry = NULL;
unsigned int i;
@@ -268,13 +267,12 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
else
rt2x00dev->intf_sta_count++;
- spin_lock_init(&intf->lock);
spin_lock_init(&intf->seqlock);
mutex_init(&intf->beacon_skb_mutex);
intf->beacon = entry;
/*
- * The MAC adddress must be configured after the device
+ * The MAC address must be configured after the device
* has been initialized. Otherwise the device can reset
* the MAC registers.
* The BSSID address must only be configured in AP mode,
@@ -282,15 +280,8 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
* STA interfaces at this time, since this can cause
* invalid behavior in the device.
*/
- memcpy(&intf->mac, vif->addr, ETH_ALEN);
- if (vif->type == NL80211_IFTYPE_AP) {
- memcpy(&intf->bssid, vif->addr, ETH_ALEN);
- rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
- intf->mac, intf->bssid);
- } else {
- rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
- intf->mac, NULL);
- }
+ rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
+ vif->addr, NULL);
/*
* Some filters depend on the current working mode. We can force
@@ -358,7 +349,7 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
* if for any reason the link tuner must be reset, this will be
* handled by rt2x00lib_config().
*/
- rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK);
+ rt2x00queue_stop_queue(rt2x00dev->rx);
/*
* When we've just turned on the radio, we want to reprogram
@@ -376,7 +367,7 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant);
/* Turn RX back on */
- rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK);
+ rt2x00queue_start_queue(rt2x00dev->rx);
return 0;
}
@@ -451,9 +442,7 @@ static void rt2x00mac_set_tim_iter(void *data, u8 *mac,
vif->type != NL80211_IFTYPE_WDS)
return;
- spin_lock(&intf->lock);
- intf->delayed_flags |= DELAYED_UPDATE_BEACON;
- spin_unlock(&intf->lock);
+ set_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags);
}
int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
@@ -478,17 +467,17 @@ EXPORT_SYMBOL_GPL(rt2x00mac_set_tim);
static void memcpy_tkip(struct rt2x00lib_crypto *crypto, u8 *key, u8 key_len)
{
if (key_len > NL80211_TKIP_DATA_OFFSET_ENCR_KEY)
- memcpy(&crypto->key,
+ memcpy(crypto->key,
&key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY],
sizeof(crypto->key));
if (key_len > NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
- memcpy(&crypto->tx_mic,
+ memcpy(crypto->tx_mic,
&key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
sizeof(crypto->tx_mic));
if (key_len > NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY)
- memcpy(&crypto->rx_mic,
+ memcpy(crypto->rx_mic,
&key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
sizeof(crypto->rx_mic));
}
@@ -498,7 +487,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct rt2x00_intf *intf = vif_to_intf(vif);
int (*set_key) (struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key);
@@ -522,7 +510,7 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (rt2x00dev->intf_sta_count)
crypto.bssidx = 0;
else
- crypto.bssidx = intf->mac[5] & (rt2x00dev->ops->max_ap_intf - 1);
+ crypto.bssidx = vif->addr[5] & (rt2x00dev->ops->max_ap_intf - 1);
crypto.cipher = rt2x00crypto_key_to_cipher(key);
if (crypto.cipher == CIPHER_NONE)
@@ -530,17 +518,15 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
crypto.cmd = cmd;
- if (sta) {
- /* some drivers need the AID */
- crypto.aid = sta->aid;
+ if (sta)
crypto.address = sta->addr;
- } else
+ else
crypto.address = bcast_addr;
if (crypto.cipher == CIPHER_TKIP)
memcpy_tkip(&crypto, &key->key[0], key->keylen);
else
- memcpy(&crypto.key, &key->key[0], key->keylen);
+ memcpy(crypto.key, &key->key[0], key->keylen);
/*
* Each BSS has a maximum of 4 shared keys.
* Shared key index values:
@@ -620,33 +606,55 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return;
- spin_lock(&intf->lock);
-
/*
- * conf->bssid can be NULL if coming from the internal
- * beacon update routine.
+ * Update the BSSID.
*/
if (changes & BSS_CHANGED_BSSID)
- memcpy(&intf->bssid, bss_conf->bssid, ETH_ALEN);
-
- spin_unlock(&intf->lock);
+ rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
+ bss_conf->bssid);
/*
- * Call rt2x00_config_intf() outside of the spinlock context since
- * the call will sleep for USB drivers. By using the ieee80211_if_conf
- * values as arguments we make keep access to rt2x00_intf thread safe
- * even without the lock.
+ * Update the beacon. This is only required on USB devices. PCI
+ * devices fetch beacons periodically.
*/
- if (changes & BSS_CHANGED_BSSID)
- rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
- bss_conf->bssid);
+ if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
+ rt2x00queue_update_beacon(rt2x00dev, vif);
/*
- * Update the beacon.
+ * Start/stop beaconing.
*/
- if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED))
- rt2x00queue_update_beacon(rt2x00dev, vif,
- bss_conf->enable_beacon);
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (!bss_conf->enable_beacon && intf->enable_beacon) {
+ rt2x00queue_clear_beacon(rt2x00dev, vif);
+ rt2x00dev->intf_beaconing--;
+ intf->enable_beacon = false;
+
+ if (rt2x00dev->intf_beaconing == 0) {
+ /*
+ * Last beaconing interface disabled
+ * -> stop beacon queue.
+ */
+ mutex_lock(&intf->beacon_skb_mutex);
+ rt2x00queue_stop_queue(rt2x00dev->bcn);
+ mutex_unlock(&intf->beacon_skb_mutex);
+ }
+
+
+ } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
+ rt2x00dev->intf_beaconing++;
+ intf->enable_beacon = true;
+
+ if (rt2x00dev->intf_beaconing == 1) {
+ /*
+ * First beaconing interface enabled
+ * -> start beacon queue.
+ */
+ mutex_lock(&intf->beacon_skb_mutex);
+ rt2x00queue_start_queue(rt2x00dev->bcn);
+ mutex_unlock(&intf->beacon_skb_mutex);
+ }
+ }
+ }
/*
* When the association status has changed we must reset the link
@@ -682,7 +690,7 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
struct rt2x00_dev *rt2x00dev = hw->priv;
struct data_queue *queue;
- queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+ queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
if (unlikely(!queue))
return -EINVAL;
@@ -719,3 +727,13 @@ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
wiphy_rfkill_set_hw_state(hw->wiphy, !active);
}
EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
+
+void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ struct data_queue *queue;
+
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_flush_queue(queue, drop);
+}
+EXPORT_SYMBOL_GPL(rt2x00mac_flush);
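
memcpy_tkip() above splits the mac80211 key material into the temporal key and the two MIC keys at the NL80211_TKIP_DATA_OFFSET_* offsets. A standalone sketch of that split; the offsets and sizes restate the standard layout (16-byte key, 8-byte TX MIC, 8-byte RX MIC) and are assumptions here:

/* Standalone sketch of splitting TKIP key material the way memcpy_tkip()
 * does; the guard on key_len mirrors the driver's checks. */
#include <stdio.h>
#include <string.h>

#define TKIP_ENCR_KEY_OFF	0
#define TKIP_TX_MIC_OFF		16
#define TKIP_RX_MIC_OFF		24

struct tkip_crypto {
	unsigned char key[16];
	unsigned char tx_mic[8];
	unsigned char rx_mic[8];
};

static void split_tkip(struct tkip_crypto *crypto,
		       const unsigned char *key, size_t key_len)
{
	if (key_len > TKIP_ENCR_KEY_OFF)
		memcpy(crypto->key, key + TKIP_ENCR_KEY_OFF, sizeof(crypto->key));
	if (key_len > TKIP_TX_MIC_OFF)
		memcpy(crypto->tx_mic, key + TKIP_TX_MIC_OFF, sizeof(crypto->tx_mic));
	if (key_len > TKIP_RX_MIC_OFF)
		memcpy(crypto->rx_mic, key + TKIP_RX_MIC_OFF, sizeof(crypto->rx_mic));
}

int main(void)
{
	unsigned char material[32] = { 0 };	/* 256-bit TKIP key material */
	struct tkip_crypto crypto = { { 0 } };

	split_tkip(&crypto, material, sizeof(material));
	printf("key[0]=%02x tx_mic[0]=%02x rx_mic[0]=%02x\n",
	       crypto.key[0], crypto.tx_mic[0], crypto.rx_mic[0]);
	return 0;
}
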
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 2449d785cf8d..4dd82b0b0520 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -82,6 +82,13 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
skbdesc->desc_len = entry->queue->desc_size;
/*
+ * DMA is already done, notify rt2x00lib that
+ * it finished successfully.
+ */
+ rt2x00lib_dmastart(entry);
+ rt2x00lib_dmadone(entry);
+
+ /*
* Send the frame to rt2x00lib for further processing.
*/
rt2x00lib_rxdone(entry);
@@ -105,7 +112,7 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
*/
addr = dma_alloc_coherent(rt2x00dev->dev,
queue->limit * queue->desc_size,
- &dma, GFP_KERNEL | GFP_DMA);
+ &dma, GFP_KERNEL);
if (!addr)
return -ENOMEM;
@@ -153,10 +160,9 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
/*
* Register interrupt handler.
*/
- status = request_threaded_irq(rt2x00dev->irq,
- rt2x00dev->ops->lib->irq_handler,
- rt2x00dev->ops->lib->irq_handler_thread,
- IRQF_SHARED, rt2x00dev->name, rt2x00dev);
+ status = request_irq(rt2x00dev->irq,
+ rt2x00dev->ops->lib->irq_handler,
+ IRQF_SHARED, rt2x00dev->name, rt2x00dev);
if (status) {
ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
rt2x00dev->irq, status);
@@ -279,7 +285,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
rt2x00dev->irq = pci_dev->irq;
rt2x00dev->name = pci_name(pci_dev);
- if (pci_dev->is_pcie)
+ if (pci_is_pcie(pci_dev))
rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
else
rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
@@ -356,12 +362,12 @@ int rt2x00pci_resume(struct pci_dev *pci_dev)
struct rt2x00_dev *rt2x00dev = hw->priv;
if (pci_set_power_state(pci_dev, PCI_D0) ||
- pci_enable_device(pci_dev) ||
- pci_restore_state(pci_dev)) {
+ pci_enable_device(pci_dev)) {
ERROR(rt2x00dev, "Failed to resume device.\n");
return -EIO;
}
+ pci_restore_state(pci_dev);
return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index b854d62ff99b..746ce8fe8cf4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -64,7 +64,7 @@ static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
const void *value,
const u32 length)
{
- memcpy_toio(rt2x00dev->csr.base + offset, value, length);
+ __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
}
/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index e360d287defb..4b3c70eeef1f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -199,7 +199,12 @@ void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
- unsigned int l2pad = L2PAD_SIZE(header_length);
+ /*
+ * L2 padding is only present if the skb contains more than just the
+ * IEEE 802.11 header.
+ */
+ unsigned int l2pad = (skb->len > header_length) ?
+ L2PAD_SIZE(header_length) : 0;
if (!l2pad)
return;
@@ -216,14 +221,17 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
unsigned long irqflags;
- if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
- unlikely(!tx_info->control.vif))
+ if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
+ return;
+
+ __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
+
+ if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags))
return;
/*
- * Hardware should insert sequence counter.
- * FIXME: We insert a software sequence counter first for
- * hardware that doesn't support hardware sequence counting.
+ * The hardware is not able to insert a sequence number. Assign a
+ * software generated one here.
*
* This is wrong because beacons are not getting sequence
* numbers assigned properly.
@@ -241,7 +249,6 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
spin_unlock_irqrestore(&intf->seqlock, irqflags);
- __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
@@ -255,6 +262,16 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
unsigned int duration;
unsigned int residual;
+ /*
+ * Determine with what IFS priority this frame should be sent.
+ * Set ifs to IFS_SIFS when this is not the first fragment,
+ * or this fragment came after RTS/CTS.
+ */
+ if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
+ txdesc->u.plcp.ifs = IFS_BACKOFF;
+ else
+ txdesc->u.plcp.ifs = IFS_SIFS;
+
/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
data_length = entry->skb->len + 4;
data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);
@@ -263,12 +280,12 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
* PLCP setup
* Length calculation depends on OFDM/CCK rate.
*/
- txdesc->signal = hwrate->plcp;
- txdesc->service = 0x04;
+ txdesc->u.plcp.signal = hwrate->plcp;
+ txdesc->u.plcp.service = 0x04;
if (hwrate->flags & DEV_RATE_OFDM) {
- txdesc->length_high = (data_length >> 6) & 0x3f;
- txdesc->length_low = data_length & 0x3f;
+ txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
+ txdesc->u.plcp.length_low = data_length & 0x3f;
} else {
/*
* Convert length to microseconds.
@@ -283,18 +300,18 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
* Check if we need to set the Length Extension
*/
if (hwrate->bitrate == 110 && residual <= 30)
- txdesc->service |= 0x80;
+ txdesc->u.plcp.service |= 0x80;
}
- txdesc->length_high = (duration >> 8) & 0xff;
- txdesc->length_low = duration & 0xff;
+ txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
+ txdesc->u.plcp.length_low = duration & 0xff;
/*
* When preamble is enabled we should set the
* preamble bit for the signal.
*/
if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- txdesc->signal |= 0x08;
+ txdesc->u.plcp.signal |= 0x08;
}
}
@@ -304,21 +321,13 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
- struct ieee80211_rate *rate =
- ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
- const struct rt2x00_rate *hwrate;
+ struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+ struct ieee80211_rate *rate;
+ const struct rt2x00_rate *hwrate = NULL;
memset(txdesc, 0, sizeof(*txdesc));
/*
- * Initialize information from queue
- */
- txdesc->qid = entry->queue->qid;
- txdesc->cw_min = entry->queue->cw_min;
- txdesc->cw_max = entry->queue->cw_max;
- txdesc->aifs = entry->queue->aifs;
-
- /*
* Header and frame information.
*/
txdesc->length = entry->skb->len;
@@ -368,42 +377,42 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Beacons and probe responses require the tsf timestamp
- * to be inserted into the frame, except for a frame that has been injected
- * through a monitor interface. This latter is needed for testing a
- * monitor interface.
+ * to be inserted into the frame.
*/
- if ((ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control)) &&
- (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
+ if (ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))
__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
- /*
- * Determine with what IFS priority this frame should be send.
- * Set ifs to IFS_SIFS when the this is not the first fragment,
- * or this fragment came after RTS/CTS.
- */
if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
- !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
+ !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
- txdesc->ifs = IFS_BACKOFF;
- } else
- txdesc->ifs = IFS_SIFS;
/*
* Determine rate modulation.
*/
- hwrate = rt2x00_get_rate(rate->hw_value);
- txdesc->rate_mode = RATE_MODE_CCK;
- if (hwrate->flags & DEV_RATE_OFDM)
- txdesc->rate_mode = RATE_MODE_OFDM;
+ if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
+ else if (txrate->flags & IEEE80211_TX_RC_MCS)
+ txdesc->rate_mode = RATE_MODE_HT_MIX;
+ else {
+ rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
+ hwrate = rt2x00_get_rate(rate->hw_value);
+ if (hwrate->flags & DEV_RATE_OFDM)
+ txdesc->rate_mode = RATE_MODE_OFDM;
+ else
+ txdesc->rate_mode = RATE_MODE_CCK;
+ }
/*
* Apply TX descriptor handling by components
*/
rt2x00crypto_create_tx_descriptor(entry, txdesc);
- rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
- rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
+
+ if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags))
+ rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
+ else
+ rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}
static int rt2x00queue_write_tx_data(struct queue_entry *entry,
@@ -460,12 +469,9 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}
-static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
+static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
struct txentry_desc *txdesc)
{
- struct data_queue *queue = entry->queue;
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
-
/*
* Check if we need to kick the queue, there are however a few rules
* 1) Don't kick unless this is the last in frame in a burst.
@@ -477,7 +483,7 @@ static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
*/
if (rt2x00queue_threshold(queue) ||
!test_bit(ENTRY_TXD_BURST, &txdesc->flags))
- rt2x00dev->ops->lib->kick_tx_queue(queue);
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
}
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
@@ -567,18 +573,15 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
rt2x00queue_index_inc(queue, Q_INDEX);
rt2x00queue_write_tx_descriptor(entry, &txdesc);
- rt2x00queue_kick_tx_queue(entry, &txdesc);
+ rt2x00queue_kick_tx_queue(queue, &txdesc);
return 0;
}
-int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_vif *vif,
- const bool enable_beacon)
+int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif)
{
struct rt2x00_intf *intf = vif_to_intf(vif);
- struct skb_frame_desc *skbdesc;
- struct txentry_desc txdesc;
if (unlikely(!intf->beacon))
return -ENOBUFS;
@@ -590,17 +593,36 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
*/
rt2x00queue_free_skb(intf->beacon);
- if (!enable_beacon) {
- rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue);
- mutex_unlock(&intf->beacon_skb_mutex);
- return 0;
- }
+ /*
+ * Clear beacon (single bssid devices don't need to clear the beacon
+ * since the beacon queue will get stopped anyway).
+ */
+ if (rt2x00dev->ops->lib->clear_beacon)
+ rt2x00dev->ops->lib->clear_beacon(intf->beacon);
+
+ mutex_unlock(&intf->beacon_skb_mutex);
+
+ return 0;
+}
+
+int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif)
+{
+ struct rt2x00_intf *intf = vif_to_intf(vif);
+ struct skb_frame_desc *skbdesc;
+ struct txentry_desc txdesc;
+
+ if (unlikely(!intf->beacon))
+ return -ENOBUFS;
+
+ /*
+ * Clean up the beacon skb.
+ */
+ rt2x00queue_free_skb(intf->beacon);
intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
- if (!intf->beacon->skb) {
- mutex_unlock(&intf->beacon_skb_mutex);
+ if (!intf->beacon->skb)
return -ENOMEM;
- }
/*
* Copy all TX descriptor information into txdesc,
@@ -617,13 +639,25 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
skbdesc->entry = intf->beacon;
/*
- * Send beacon to hardware and enable beacon genaration..
+ * Send beacon to hardware.
*/
rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
+ return 0;
+
+}
+
+int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif)
+{
+ struct rt2x00_intf *intf = vif_to_intf(vif);
+ int ret;
+
+ mutex_lock(&intf->beacon_skb_mutex);
+ ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
mutex_unlock(&intf->beacon_skb_mutex);
- return 0;
+ return ret;
}
void rt2x00queue_for_each_entry(struct data_queue *queue,
@@ -649,10 +683,10 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
* it should not be kicked during this run, since it
* is part of another TX operation.
*/
- spin_lock_irqsave(&queue->lock, irqflags);
+ spin_lock_irqsave(&queue->index_lock, irqflags);
index_start = queue->index[start];
index_end = queue->index[end];
- spin_unlock_irqrestore(&queue->lock, irqflags);
+ spin_unlock_irqrestore(&queue->index_lock, irqflags);
/*
 * Start from the TX done pointer, this guarantees that we will
@@ -671,29 +705,6 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
-struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
-{
- int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
-
- if (queue == QID_RX)
- return rt2x00dev->rx;
-
- if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
- return &rt2x00dev->tx[queue];
-
- if (!rt2x00dev->bcn)
- return NULL;
-
- if (queue == QID_BEACON)
- return &rt2x00dev->bcn[0];
- else if (queue == QID_ATIM && atim)
- return &rt2x00dev->bcn[1];
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
-
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
enum queue_index index)
{
@@ -706,11 +717,11 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
return NULL;
}
- spin_lock_irqsave(&queue->lock, irqflags);
+ spin_lock_irqsave(&queue->index_lock, irqflags);
entry = &queue->entries[queue->index[index]];
- spin_unlock_irqrestore(&queue->lock, irqflags);
+ spin_unlock_irqrestore(&queue->index_lock, irqflags);
return entry;
}
@@ -726,7 +737,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
return;
}
- spin_lock_irqsave(&queue->lock, irqflags);
+ spin_lock_irqsave(&queue->index_lock, irqflags);
queue->index[index]++;
if (queue->index[index] >= queue->limit)
@@ -741,15 +752,219 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
queue->count++;
}
- spin_unlock_irqrestore(&queue->lock, irqflags);
+ spin_unlock_irqrestore(&queue->index_lock, irqflags);
+}
+
+void rt2x00queue_pause_queue(struct data_queue *queue)
+{
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ !test_bit(QUEUE_STARTED, &queue->flags) ||
+ test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+ return;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ /*
+ * For TX queues, we have to disable the queue
+ * inside mac80211.
+ */
+ ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
+
+void rt2x00queue_unpause_queue(struct data_queue *queue)
+{
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ !test_bit(QUEUE_STARTED, &queue->flags) ||
+ !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
+ return;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ /*
+ * For TX queues, we have to enable the queue
+ * inside mac80211.
+ */
+ ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
+ break;
+ case QID_RX:
+ /*
+ * For RX we need to kick the queue now in order to
+ * receive frames.
+ */
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
+
+void rt2x00queue_start_queue(struct data_queue *queue)
+{
+ mutex_lock(&queue->status_lock);
+
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
+ mutex_unlock(&queue->status_lock);
+ return;
+ }
+
+ set_bit(QUEUE_PAUSED, &queue->flags);
+
+ queue->rt2x00dev->ops->lib->start_queue(queue);
+
+ rt2x00queue_unpause_queue(queue);
+
+ mutex_unlock(&queue->status_lock);
}
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
+
+void rt2x00queue_stop_queue(struct data_queue *queue)
+{
+ mutex_lock(&queue->status_lock);
+
+ if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
+ mutex_unlock(&queue->status_lock);
+ return;
+ }
+
+ rt2x00queue_pause_queue(queue);
+
+ queue->rt2x00dev->ops->lib->stop_queue(queue);
+
+ mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
+
+void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
+{
+ unsigned int i;
+ bool started;
+ bool tx_queue =
+ (queue->qid == QID_AC_VO) ||
+ (queue->qid == QID_AC_VI) ||
+ (queue->qid == QID_AC_BE) ||
+ (queue->qid == QID_AC_BK);
+
+ mutex_lock(&queue->status_lock);
+
+ /*
+ * If the queue has been started, we must stop it temporarily
+ * to prevent any new frames from being queued on the device. If
+ * we are not dropping the pending frames, the queue must
+ * only be stopped in the software and not the hardware,
+ * otherwise the queue will never become empty on its own.
+ */
+ started = test_bit(QUEUE_STARTED, &queue->flags);
+ if (started) {
+ /*
+ * Pause the queue
+ */
+ rt2x00queue_pause_queue(queue);
+
+ /*
+ * If we are not supposed to drop any pending
+ * frames, this means we must force a start (=kick)
+ * to the queue to make sure the hardware will
+ * start transmitting.
+ */
+ if (!drop && tx_queue)
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
+ }
+
+ /*
+ * Check if the driver supports flushing, we can only guarantee
+ * full support for flushing if the driver is able
+ * to cancel all pending frames (drop = true).
+ */
+ if (drop && queue->rt2x00dev->ops->lib->flush_queue)
+ queue->rt2x00dev->ops->lib->flush_queue(queue);
+
+ /*
+ * When we don't want to drop any frames, or when
+ * the driver doesn't fully flush the queue correctly,
+ * we must wait for the queue to become empty.
+ */
+ for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
+ msleep(10);
+
+ /*
+ * The queue flush has failed...
+ */
+ if (unlikely(!rt2x00queue_empty(queue)))
+ WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);
+
+ /*
+ * Restore the queue to the previous status
+ */
+ if (started)
+ rt2x00queue_unpause_queue(queue);
+
+ mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
+
+void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ /*
+ * rt2x00queue_start_queue will call ieee80211_wake_queue
+ * for each queue after it has been properly initialized.
+ */
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_start_queue(queue);
+
+ rt2x00queue_start_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
+
+void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ /*
+ * rt2x00queue_stop_queue will call ieee80211_stop_queue
+ * as well, but we are completely shutting down everything
+ * now, so it is much safer to stop all TX queues at once,
+ * and use rt2x00queue_stop_queue for cleaning up.
+ */
+ ieee80211_stop_queues(rt2x00dev->hw);
+
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_stop_queue(queue);
+
+ rt2x00queue_stop_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
+
+void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
+{
+ struct data_queue *queue;
+
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_flush_queue(queue, drop);
+
+ rt2x00queue_flush_queue(rt2x00dev->rx, drop);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
static void rt2x00queue_reset(struct data_queue *queue)
{
unsigned long irqflags;
unsigned int i;
- spin_lock_irqsave(&queue->lock, irqflags);
+ spin_lock_irqsave(&queue->index_lock, irqflags);
queue->count = 0;
queue->length = 0;
@@ -759,15 +974,7 @@ static void rt2x00queue_reset(struct data_queue *queue)
queue->last_action[i] = jiffies;
}
- spin_unlock_irqrestore(&queue->lock, irqflags);
-}
-
-void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
-
- txall_queue_for_each(rt2x00dev, queue)
- rt2x00dev->ops->lib->kill_tx_queue(queue);
+ spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
@@ -778,11 +985,8 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
queue_for_each(rt2x00dev, queue) {
rt2x00queue_reset(queue);
- for (i = 0; i < queue->limit; i++) {
+ for (i = 0; i < queue->limit; i++)
rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
- if (queue->qid == QID_RX)
- rt2x00queue_index_inc(queue, Q_INDEX);
- }
}
}
@@ -809,8 +1013,8 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
return -ENOMEM;
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
- ( ((char *)(__base)) + ((__limit) * (__esize)) + \
- ((__index) * (__psize)) )
+ (((char *)(__base)) + ((__limit) * (__esize)) + \
+ ((__index) * (__psize)))
for (i = 0; i < queue->limit; i++) {
entries[i].flags = 0;
@@ -876,7 +1080,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
goto exit;
if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
- status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
+ status = rt2x00queue_alloc_entries(rt2x00dev->atim,
rt2x00dev->ops->atim);
if (status)
goto exit;
@@ -911,7 +1115,8 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue, enum data_queue_qid qid)
{
- spin_lock_init(&queue->lock);
+ mutex_init(&queue->status_lock);
+ spin_lock_init(&queue->index_lock);
queue->rt2x00dev = rt2x00dev;
queue->qid = qid;
@@ -949,11 +1154,12 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
rt2x00dev->rx = queue;
rt2x00dev->tx = &queue[1];
rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
+ rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
/*
* Initialize queue parameters.
* RX: qid = QID_RX
- * TX: qid = QID_AC_BE + index
+ * TX: qid = QID_AC_VO + index
* TX: cw_min: 2^5 = 32.
* TX: cw_max: 2^10 = 1024.
* BCN: qid = QID_BEACON
@@ -961,13 +1167,13 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
*/
rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
- qid = QID_AC_BE;
+ qid = QID_AC_VO;
tx_queue_for_each(rt2x00dev, queue)
rt2x00queue_init(rt2x00dev, queue, qid++);
- rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
+ rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
if (req_atim)
- rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);
+ rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);
return 0;
}
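
The new start/stop/pause/unpause helpers hinge on the QUEUE_STARTED and QUEUE_PAUSED bits being updated with test-and-set semantics, which keeps the calls idempotent. A user-space model of that handshake, with C11 atomics standing in for the kernel bitops:

/* User-space model of the QUEUE_STARTED/QUEUE_PAUSED handshake added above.
 * atomic_exchange() plays the role of test_and_set_bit()/test_and_clear_bit();
 * pause/unpause only act on started queues and are no-ops when repeated. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	atomic_bool started;
	atomic_bool paused;
};

static void queue_pause(struct queue *q)
{
	if (!atomic_load(&q->started) || atomic_exchange(&q->paused, true))
		return;			/* not started, or already paused */
	printf("stop queue in mac80211\n");
}

static void queue_unpause(struct queue *q)
{
	if (!atomic_load(&q->started) || !atomic_exchange(&q->paused, false))
		return;			/* not started, or was not paused */
	printf("wake queue in mac80211\n");
}

static void queue_start(struct queue *q)
{
	if (atomic_exchange(&q->started, true))
		return;			/* already started */
	atomic_store(&q->paused, true);	/* start paused, as the driver does */
	printf("start queue in hardware\n");
	queue_unpause(q);
}

int main(void)
{
	struct queue q = { false, false };

	queue_start(&q);
	queue_pause(&q);
	queue_pause(&q);	/* second pause is a no-op */
	queue_unpause(&q);
	return 0;
}
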
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index d81d85f34866..0c8b0c699679 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -43,28 +43,12 @@
#define AGGREGATION_SIZE 3840
/**
- * DOC: Number of entries per queue
- *
- * Under normal load without fragmentation, 12 entries are sufficient
- * without the queue being filled up to the maximum. When using fragmentation
- * and the queue threshold code, we need to add some additional margins to
- * make sure the queue will never (or only under extreme load) fill up
- * completely.
- * Since we don't use preallocated DMA, having a large number of queue entries
- * will have minimal impact on the memory requirements for the queue.
- */
-#define RX_ENTRIES 24
-#define TX_ENTRIES 24
-#define BEACON_ENTRIES 1
-#define ATIM_ENTRIES 8
-
-/**
* enum data_queue_qid: Queue identification
*
+ * @QID_AC_VO: AC VO queue
+ * @QID_AC_VI: AC VI queue
* @QID_AC_BE: AC BE queue
* @QID_AC_BK: AC BK queue
- * @QID_AC_VI: AC VI queue
- * @QID_AC_VO: AC VO queue
* @QID_HCCA: HCCA queue
* @QID_MGMT: MGMT queue (prio queue)
* @QID_RX: RX queue
@@ -73,10 +57,10 @@
 * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
*/
enum data_queue_qid {
- QID_AC_BE = 0,
- QID_AC_BK = 1,
- QID_AC_VI = 2,
- QID_AC_VO = 3,
+ QID_AC_VO = 0,
+ QID_AC_VI = 1,
+ QID_AC_BE = 2,
+ QID_AC_BK = 3,
QID_HCCA = 4,
QID_MGMT = 13,
QID_RX = 14,
@@ -296,7 +280,6 @@ enum txentry_desc_flags {
* Summary of information for the frame descriptor before sending a TX frame.
*
* @flags: Descriptor flags (See &enum queue_entry_flags).
- * @qid: Queue identification (See &enum data_queue_qid).
* @length: Length of the entire frame.
* @header_length: Length of 802.11 header.
* @length_high: PLCP length high word.
@@ -309,11 +292,8 @@ enum txentry_desc_flags {
* @rate_mode: Rate mode (See @enum rate_modulation).
 * @mpdu_density: MPDU density.
* @retry_limit: Max number of retries.
- * @aifs: AIFS value.
* @ifs: IFS value.
* @txop: IFS value for 11n capable chips.
- * @cw_min: cwmin value.
- * @cw_max: cwmax value.
* @cipher: Cipher type used for encryption.
* @key_idx: Key index used for encryption.
* @iv_offset: Position where IV should be inserted by hardware.
@@ -322,28 +302,30 @@ enum txentry_desc_flags {
struct txentry_desc {
unsigned long flags;
- enum data_queue_qid qid;
-
u16 length;
u16 header_length;
- u16 length_high;
- u16 length_low;
- u16 signal;
- u16 service;
-
- u16 mcs;
- u16 stbc;
- u16 ba_size;
- u16 rate_mode;
- u16 mpdu_density;
+ union {
+ struct {
+ u16 length_high;
+ u16 length_low;
+ u16 signal;
+ u16 service;
+ enum ifs ifs;
+ } plcp;
+
+ struct {
+ u16 mcs;
+ u8 stbc;
+ u8 ba_size;
+ u8 mpdu_density;
+ enum txop txop;
+ } ht;
+ } u;
+
+ enum rate_modulation rate_mode;
short retry_limit;
- short aifs;
- short ifs;
- short txop;
- short cw_min;
- short cw_max;
enum cipher cipher;
u16 key_idx;
@@ -365,12 +347,16 @@ struct txentry_desc {
 * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
 * while transferring the data to the hardware. No TX status report will
* be expected from the hardware.
+ * @ENTRY_DATA_STATUS_PENDING: The entry has been sent to the device and
+ * returned. It is now waiting for the status reporting before the
+ * entry can be reused again.
*/
enum queue_entry_flags {
ENTRY_BCN_ASSIGNED,
ENTRY_OWNER_DEVICE_DATA,
ENTRY_DATA_PENDING,
- ENTRY_DATA_IO_FAILED
+ ENTRY_DATA_IO_FAILED,
+ ENTRY_DATA_STATUS_PENDING,
};
/**
@@ -417,13 +403,33 @@ enum queue_index {
};
/**
+ * enum data_queue_flags: Status flags for data queues
+ *
+ * @QUEUE_STARTED: The queue has been started. For RX queues this means the
+ * device might be DMA'ing skbuffers. TX queues will accept skbuffers to
+ * be transmitted and beacon queues will start beaconing the configured
+ * beacons.
+ * @QUEUE_PAUSED: The queue has been started but is currently paused.
+ * When this bit is set, the queue has been stopped in mac80211,
+ *	preventing new frames from being enqueued. However, a few frames
+ * might still appear shortly after the pausing...
+ */
+enum data_queue_flags {
+ QUEUE_STARTED,
+ QUEUE_PAUSED,
+};
+
+/**
* struct data_queue: Data queue
*
* @rt2x00dev: Pointer to main &struct rt2x00dev where this queue belongs to.
* @entries: Base address of the &struct queue_entry which are
* part of this queue.
* @qid: The queue identification, see &enum data_queue_qid.
- * @lock: Spinlock to protect index handling. Whenever @index, @index_done or
+ * @flags: Queue flags, see &enum data_queue_flags.
+ * @status_lock: The mutex for protecting the start/stop/flush
+ * handling on this queue.
+ * @index_lock: Spinlock to protect index handling. Whenever @index, @index_done or
* @index_crypt needs to be changed this lock should be grabbed to prevent
* index corruption due to concurrency.
* @count: Number of frames handled in the queue.
@@ -446,8 +452,11 @@ struct data_queue {
struct queue_entry *entries;
enum data_queue_qid qid;
+ unsigned long flags;
+
+ struct mutex status_lock;
+ spinlock_t index_lock;
- spinlock_t lock;
unsigned int count;
unsigned short limit;
unsigned short threshold;
@@ -618,10 +627,10 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
}
/**
- * rt2x00queue_timeout - Check if a timeout occured for STATUS reorts
+ * rt2x00queue_status_timeout - Check if a timeout occurred for STATUS reports
* @queue: Queue to check.
*/
-static inline int rt2x00queue_timeout(struct data_queue *queue)
+static inline int rt2x00queue_status_timeout(struct data_queue *queue)
{
return time_after(queue->last_action[Q_INDEX_DMA_DONE],
queue->last_action[Q_INDEX_DONE] + (HZ / 10));
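
The l2pad hunks in rt2x00queue.c only strip padding when the skb carries a payload beyond the 802.11 header; the pad itself aligns the payload start to a 4-byte boundary. A small sketch of that arithmetic; the L2PAD_SIZE() body below is an assumption based on that alignment rule, not copied from the header:

/* Sketch of the L2 padding arithmetic referenced by the l2pad hunks: the
 * pad brings the payload start (after the 802.11 header) to a 4-byte
 * boundary, so headers already on a boundary need no pad at all. */
#include <stdio.h>

#define L2PAD_SIZE(hdrlen)	(-(hdrlen) & 3)

int main(void)
{
	for (unsigned int hdrlen = 24; hdrlen <= 30; hdrlen += 2)
		printf("header %u bytes -> %u pad byte(s)\n",
		       hdrlen, (unsigned int)L2PAD_SIZE(hdrlen));
	return 0;
}
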
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index cef94621cef7..6f867eec49cc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -83,14 +83,8 @@ enum dev_state {
*/
STATE_RADIO_ON,
STATE_RADIO_OFF,
- STATE_RADIO_RX_ON,
- STATE_RADIO_RX_OFF,
- STATE_RADIO_RX_ON_LINK,
- STATE_RADIO_RX_OFF_LINK,
STATE_RADIO_IRQ_ON,
STATE_RADIO_IRQ_OFF,
- STATE_RADIO_IRQ_ON_ISR,
- STATE_RADIO_IRQ_OFF_ISR,
};
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
index fc98063de71d..2aa5c38022f3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.c
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -40,6 +40,8 @@ static void rt2x00soc_free_reg(struct rt2x00_dev *rt2x00dev)
kfree(rt2x00dev->eeprom);
rt2x00dev->eeprom = NULL;
+
+ iounmap(rt2x00dev->csr.base);
}
static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
@@ -51,9 +53,9 @@ static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
if (!res)
return -ENODEV;
- rt2x00dev->csr.base = (void __iomem *)KSEG1ADDR(res->start);
+ rt2x00dev->csr.base = ioremap(res->start, resource_size(res));
if (!rt2x00dev->csr.base)
- goto exit;
+ return -ENOMEM;
rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
if (!rt2x00dev->eeprom)
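The hunk above replaces the MIPS-only KSEG1ADDR() translation with a portable ioremap(), and the matching iounmap() is added to the free path. A condensed sketch of the pairing, with hypothetical function names and the usual headers assumed:

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/io.h>

static int example_map_csr(struct platform_device *pdev, struct rt2x00_dev *rt2x00dev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENODEV;

	/* map the register window for the lifetime of the device */
	rt2x00dev->csr.base = ioremap(res->start, resource_size(res));
	if (!rt2x00dev->csr.base)
		return -ENOMEM;
	return 0;
}

static void example_unmap_csr(struct rt2x00_dev *rt2x00dev)
{
	iounmap(rt2x00dev->csr.base);	/* undo the ioremap() from probe */
}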
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index b3317df7a7d4..fbe735f5b352 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -195,7 +195,8 @@ static void rt2x00usb_work_txdone(struct work_struct *work)
while (!rt2x00queue_empty(queue)) {
entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+ if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
+ !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
break;
rt2x00usb_work_txdone_entry(entry);
@@ -226,9 +227,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
* Schedule the delayed work for reading the TX status
* from the device.
*/
- if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
- test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
+ queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}
static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
@@ -237,8 +236,10 @@ static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
struct queue_entry_priv_usb *entry_priv = entry->priv_data;
u32 length;
+ int status;
- if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
+ if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
+ test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
return;
/*
@@ -253,121 +254,15 @@ static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
entry->skb->data, length,
rt2x00usb_interrupt_txdone, entry);
- if (usb_submit_urb(entry_priv->urb, GFP_ATOMIC)) {
+ status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
+ if (status) {
+ if (status == -ENODEV)
+ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
rt2x00lib_dmadone(entry);
}
}
-void rt2x00usb_kick_tx_queue(struct data_queue *queue)
-{
- rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
- rt2x00usb_kick_tx_entry);
-}
-EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue);
-
-static void rt2x00usb_kill_tx_entry(struct queue_entry *entry)
-{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct queue_entry_priv_usb *entry_priv = entry->priv_data;
- struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
-
- if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
- return;
-
- usb_kill_urb(entry_priv->urb);
-
- /*
- * Kill guardian urb (if required by driver).
- */
- if ((entry->queue->qid == QID_BEACON) &&
- (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
- usb_kill_urb(bcn_priv->guardian_urb);
-}
-
-void rt2x00usb_kill_tx_queue(struct data_queue *queue)
-{
- rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
- rt2x00usb_kill_tx_entry);
-}
-EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue);
-
-static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- unsigned short threshold = queue->threshold;
-
- WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
- " invoke forced forced reset", queue->qid);
-
- /*
- * Temporarily disable the TX queue, this will force mac80211
- * to use the other queues until this queue has been restored.
- *
- * Set the queue threshold to the queue limit. This prevents the
- * queue from being enabled during the txdone handler.
- */
- queue->threshold = queue->limit;
- ieee80211_stop_queue(rt2x00dev->hw, queue->qid);
-
- /*
- * Kill all entries in the queue, afterwards we need to
- * wait a bit for all URBs to be cancelled.
- */
- rt2x00usb_kill_tx_queue(queue);
-
- /*
- * In case that a driver has overriden the txdone_work
- * function, we invoke the TX done through there.
- */
- rt2x00dev->txdone_work.func(&rt2x00dev->txdone_work);
-
- /*
- * Security measure: if the driver did override the
- * txdone_work function, and the hardware did arrive
- * in a state which causes it to malfunction, it is
- * possible that the driver couldn't handle the txdone
- * event correctly. So after giving the driver the
- * chance to cleanup, we now force a cleanup of any
- * leftovers.
- */
- if (!rt2x00queue_empty(queue)) {
- WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
- " status handling failed, invoke hard reset", queue->qid);
- rt2x00usb_work_txdone(&rt2x00dev->txdone_work);
- }
-
- /*
- * The queue has been reset, and mac80211 is allowed to use the
- * queue again.
- */
- queue->threshold = threshold;
- ieee80211_wake_queue(rt2x00dev->hw, queue->qid);
-}
-
-static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
-{
- WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
- " invoke forced tx handler", queue->qid);
-
- ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
-}
-
-void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
-
- tx_queue_for_each(rt2x00dev, queue) {
- if (!rt2x00queue_empty(queue)) {
- if (rt2x00queue_dma_timeout(queue))
- rt2x00usb_watchdog_tx_dma(queue);
- if (rt2x00queue_timeout(queue))
- rt2x00usb_watchdog_tx_status(queue);
- }
- }
-}
-EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
-
/*
* RX data handlers.
*/
@@ -382,7 +277,8 @@ static void rt2x00usb_work_rxdone(struct work_struct *work)
while (!rt2x00queue_empty(rt2x00dev->rx)) {
entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);
- if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+ if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
+ !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
break;
/*
@@ -424,11 +320,157 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
* Schedule the delayed work for reading the RX status
* from the device.
*/
- if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
- test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
+ queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}
+static void rt2x00usb_kick_rx_entry(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
+ struct queue_entry_priv_usb *entry_priv = entry->priv_data;
+ int status;
+
+ if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
+ test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
+ return;
+
+ rt2x00lib_dmastart(entry);
+
+ usb_fill_bulk_urb(entry_priv->urb, usb_dev,
+ usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint),
+ entry->skb->data, entry->skb->len,
+ rt2x00usb_interrupt_rxdone, entry);
+
+ status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
+ if (status) {
+ if (status == -ENODEV)
+ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+ set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+ rt2x00lib_dmadone(entry);
+ }
+}
+
+void rt2x00usb_kick_queue(struct data_queue *queue)
+{
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ if (!rt2x00queue_empty(queue))
+ rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
+ rt2x00usb_kick_tx_entry);
+ break;
+ case QID_RX:
+ if (!rt2x00queue_full(queue))
+ rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
+ rt2x00usb_kick_rx_entry);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);
+
+static void rt2x00usb_flush_entry(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct queue_entry_priv_usb *entry_priv = entry->priv_data;
+ struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
+
+ if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+ return;
+
+ usb_kill_urb(entry_priv->urb);
+
+ /*
+ * Kill guardian urb (if required by driver).
+ */
+ if ((entry->queue->qid == QID_BEACON) &&
+ (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
+ usb_kill_urb(bcn_priv->guardian_urb);
+}
+
+void rt2x00usb_flush_queue(struct data_queue *queue)
+{
+ struct work_struct *completion;
+ unsigned int i;
+
+ rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
+ rt2x00usb_flush_entry);
+
+ /*
+ * Obtain the queue completion handler
+ */
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ completion = &queue->rt2x00dev->txdone_work;
+ break;
+ case QID_RX:
+ completion = &queue->rt2x00dev->rxdone_work;
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < 20; i++) {
+ /*
+ * Check if the driver is already done, otherwise we
+ * have to sleep a little while to give the driver/hw
+ * the opportunity to complete the interrupt processing itself.
+ */
+ if (rt2x00queue_empty(queue))
+ break;
+
+ /*
+ * Schedule the completion handler manually; when this
+ * worker function runs, it should clean up the queue.
+ */
+ queue_work(queue->rt2x00dev->workqueue, completion);
+
+ /*
+ * Wait for a little while to give the driver
+ * the opportunity to recover itself.
+ */
+ msleep(10);
+ }
+}
+EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);
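The flush loop above is a bounded poll: schedule the queue's completion worker, sleep, and retry at most 20 times (roughly 200 ms). A stripped-down sketch of that idiom, with a hypothetical helper name and a return value added for illustration:

static bool example_wait_queue_empty(struct data_queue *queue,
				     struct work_struct *completion)
{
	unsigned int i;

	for (i = 0; i < 20; i++) {
		if (rt2x00queue_empty(queue))
			return true;	/* driver/hardware finished on its own */

		/* nudge the completion handler, then give it time to run */
		queue_work(queue->rt2x00dev->workqueue, completion);
		msleep(10);
	}

	return rt2x00queue_empty(queue);	/* may still have timed out */
}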
+
+static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
+{
+ WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
+ " invoke forced forced reset\n", queue->qid);
+
+ rt2x00queue_flush_queue(queue, true);
+}
+
+static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
+{
+ WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
+ " invoke forced tx handler\n", queue->qid);
+
+ queue_work(queue->rt2x00dev->workqueue, &queue->rt2x00dev->txdone_work);
+}
+
+void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ tx_queue_for_each(rt2x00dev, queue) {
+ if (!rt2x00queue_empty(queue)) {
+ if (rt2x00queue_dma_timeout(queue))
+ rt2x00usb_watchdog_tx_dma(queue);
+ if (rt2x00queue_status_timeout(queue))
+ rt2x00usb_watchdog_tx_status(queue);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
+
/*
* Radio handlers
*/
@@ -436,12 +478,6 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
REGISTER_TIMEOUT);
-
- /*
- * The USB version of kill_tx_queue also works
- * on the RX queue.
- */
- rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
@@ -450,25 +486,10 @@ EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
*/
void rt2x00usb_clear_entry(struct queue_entry *entry)
{
- struct usb_device *usb_dev =
- to_usb_device_intf(entry->queue->rt2x00dev->dev);
- struct queue_entry_priv_usb *entry_priv = entry->priv_data;
- int pipe;
-
entry->flags = 0;
- if (entry->queue->qid == QID_RX) {
- pipe = usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint);
- usb_fill_bulk_urb(entry_priv->urb, usb_dev, pipe,
- entry->skb->data, entry->skb->len,
- rt2x00usb_interrupt_rxdone, entry);
-
- set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
- if (usb_submit_urb(entry_priv->urb, GFP_ATOMIC)) {
- set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
- rt2x00lib_dmadone(entry);
- }
- }
+ if (entry->queue->qid == QID_RX)
+ rt2x00usb_kick_rx_entry(entry);
}
EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index c2d997f67b3e..6aaf51fc7ad8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -378,22 +378,22 @@ struct queue_entry_priv_usb_bcn {
};
/**
- * rt2x00usb_kick_tx_queue - Kick data queue
+ * rt2x00usb_kick_queue - Kick data queue
* @queue: Data queue to kick
*
* This will walk through all entries of the queue and push all pending
* frames to the hardware as a single burst.
*/
-void rt2x00usb_kick_tx_queue(struct data_queue *queue);
+void rt2x00usb_kick_queue(struct data_queue *queue);
/**
- * rt2x00usb_kill_tx_queue - Kill data queue
- * @queue: Data queue to kill
+ * rt2x00usb_flush_queue - Flush data queue
+ * @queue: Data queue to flush
*
* This will walk through all entries of the queue and kill all
- * previously kicked frames before they can be send.
+ * URBs which were sent to the device.
*/
-void rt2x00usb_kill_tx_queue(struct data_queue *queue);
+void rt2x00usb_flush_queue(struct data_queue *queue);
/**
* rt2x00usb_watchdog - Watchdog for USB communication
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index af548c87f108..77e8113b91e1 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -551,26 +551,14 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
struct rt2x00intf_conf *conf,
const unsigned int flags)
{
- unsigned int beacon_base;
u32 reg;
if (flags & CONFIG_UPDATE_TYPE) {
/*
- * Clear current synchronisation setup.
- * For the Beacon base registers, we only need to clear
- * the first byte since that byte contains the VALID and OWNER
- * bits which (when set to 0) will invalidate the entire beacon.
- */
- beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
- rt2x00pci_register_write(rt2x00dev, beacon_base, 0);
-
- /*
* Enable synchronisation.
*/
rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
}
@@ -1140,6 +1128,116 @@ dynamic_cca_tune:
}
/*
+ * Queue handlers.
+ */
+static void rt61pci_start_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
+ break;
+ case QID_BEACON:
+ /*
+ * Allow the tbtt tasklet to be scheduled.
+ */
+ tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
+ rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rt61pci_kick_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, 1);
+ rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
+ break;
+ case QID_AC_VI:
+ rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, 1);
+ rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
+ break;
+ case QID_AC_BE:
+ rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, 1);
+ rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
+ break;
+ case QID_AC_BK:
+ rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, 1);
+ rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rt61pci_stop_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, 1);
+ rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
+ break;
+ case QID_AC_VI:
+ rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, 1);
+ rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
+ break;
+ case QID_AC_BE:
+ rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, 1);
+ rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
+ break;
+ case QID_AC_BK:
+ rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
+ rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, 1);
+ rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
+ break;
+ case QID_RX:
+ rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
+ break;
+ case QID_BEACON:
+ rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
+ * Wait for possibly running tbtt tasklets.
+ */
+ tasklet_disable(&rt2x00dev->tbtt_tasklet);
+ break;
+ default:
+ break;
+ }
+}
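Every branch in the new start/kick/stop handlers follows the same read/modify/write idiom on a 32-bit CSR through rt2x00_set_field32(). Condensed to a single case for reference (register and field names as used above):

static void example_kick_ac_vo(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);	/* fetch */
	rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, 1);	/* modify */
	rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);	/* write back */
}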
+
+/*
* Firmware functions
*/
static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
@@ -1616,24 +1714,12 @@ static int rt61pci_init_bbp(struct rt2x00_dev *rt2x00dev)
/*
* Device state switch handlers.
*/
-static void rt61pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
- enum dev_state state)
-{
- u32 reg;
-
- rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX,
- (state == STATE_RADIO_RX_OFF) ||
- (state == STATE_RADIO_RX_OFF_LINK));
- rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
-}
-
static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_OFF) ||
- (state == STATE_RADIO_IRQ_OFF_ISR);
+ int mask = (state == STATE_RADIO_IRQ_OFF);
u32 reg;
+ unsigned long flags;
/*
* When interrupts are being enabled, the interrupt registers
@@ -1645,12 +1731,21 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg);
rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg);
+
+ /*
+ * Enable tasklets.
+ */
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ tasklet_enable(&rt2x00dev->rxdone_tasklet);
+ tasklet_enable(&rt2x00dev->autowake_tasklet);
}
/*
* Only toggle the interrupts bits we are going to use.
* Non-checked interrupt bits are disabled by default.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask);
rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask);
@@ -1670,6 +1765,17 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask);
rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask);
rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Ensure that all tasklets are finished.
+ */
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ tasklet_disable(&rt2x00dev->autowake_tasklet);
+ }
}
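INT_MASK_CSR and MCU_INT_MASK_CSR are now updated from both process context (above) and the hard interrupt handler, so the process-context side takes irqmask_lock with interrupts disabled while the handler can use the plain spin_lock. The two sides, reduced to their locking shape (register accesses elided):

	/* process context, e.g. rt61pci_toggle_irq() */
	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
	/* ...read/modify/write INT_MASK_CSR... */
	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);

	/* hard interrupt context, e.g. rt61pci_interrupt() */
	spin_lock(&rt2x00dev->irqmask_lock);
	/* ...read/modify/write INT_MASK_CSR... */
	spin_unlock(&rt2x00dev->irqmask_lock);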
static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1744,16 +1850,8 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
case STATE_RADIO_OFF:
rt61pci_disable_radio(rt2x00dev);
break;
- case STATE_RADIO_RX_ON:
- case STATE_RADIO_RX_ON_LINK:
- case STATE_RADIO_RX_OFF:
- case STATE_RADIO_RX_OFF_LINK:
- rt61pci_toggle_rx(rt2x00dev, state);
- break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
rt61pci_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
@@ -1789,10 +1887,10 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
* Start writing the descriptor words.
*/
rt2x00_desc_read(txd, 1, &word);
- rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid);
- rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
- rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
- rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
+ rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
+ rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs);
+ rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
+ rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
@@ -1800,10 +1898,12 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
- rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
- rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
- rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
- rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
+ txdesc->u.plcp.length_low);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
+ txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 2, word);
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
@@ -1820,7 +1920,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
rt2x00_desc_write(txd, 5, word);
- if (txdesc->qid != QID_BEACON) {
+ if (entry->queue->qid != QID_BEACON) {
rt2x00_desc_read(txd, 6, &word);
rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
skbdesc->skb_dma);
@@ -1848,7 +1948,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_OFDM,
(txdesc->rate_mode == RATE_MODE_OFDM));
- rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
@@ -1866,8 +1966,8 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
* Register descriptor details in skb frame descriptor.
*/
skbdesc->desc = txd;
- skbdesc->desc_len =
- (txdesc->qid == QID_BEACON) ? TXINFO_SIZE : TXD_DESC_SIZE;
+ skbdesc->desc_len = (entry->queue->qid == QID_BEACON) ? TXINFO_SIZE :
+ TXD_DESC_SIZE;
}
/*
@@ -1879,13 +1979,15 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
unsigned int beacon_base;
- u32 reg;
+ unsigned int padding_len;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ orig_reg = reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -1900,13 +2002,23 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
/*
- * Write entire beacon with descriptor to register.
+ * Write entire beacon with descriptor and padding to register.
*/
+ padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
+ if (padding_len && skb_pad(entry->skb, padding_len)) {
+ ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
+ /* skb freed by skb_pad() on failure */
+ entry->skb = NULL;
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
+ return;
+ }
+
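The padding above rounds the beacon up to a 4-byte boundary before the multi-register write; note that skb_pad() frees the skb itself on failure, which is why the error path only clears the pointer and restores TXRX_CSR9. The arithmetic, with a worked example in the comment:

	padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
	/* e.g. skb->len == 94: roundup(94, 4) == 96, so padding_len == 2 */
	if (padding_len && skb_pad(entry->skb, padding_len)) {
		/* skb_pad() already freed the skb; do not touch it again */
		entry->skb = NULL;
		return;
	}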
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
entry_priv->desc, TXINFO_SIZE);
rt2x00pci_register_multiwrite(rt2x00dev, beacon_base + TXINFO_SIZE,
- entry->skb->data, entry->skb->len);
+ entry->skb->data,
+ entry->skb->len + padding_len);
/*
* Enable beaconing again.
@@ -1916,8 +2028,6 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
*/
rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -1928,35 +2038,30 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
entry->skb = NULL;
}
-static void rt61pci_kick_tx_queue(struct data_queue *queue)
+static void rt61pci_clear_beacon(struct queue_entry *entry)
{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
u32 reg;
- rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
- rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue->qid == QID_AC_BE));
- rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue->qid == QID_AC_BK));
- rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, (queue->qid == QID_AC_VI));
- rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, (queue->qid == QID_AC_VO));
- rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
-}
-
-static void rt61pci_kill_tx_queue(struct data_queue *queue)
-{
- struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
- u32 reg;
+ /*
+ * Disable beaconing while we are reloading the beacon data,
+ * otherwise we might be sending out invalid data.
+ */
+ rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
- if (queue->qid == QID_BEACON) {
- rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0);
- return;
- }
+ /*
+ * Clear beacon.
+ */
+ rt2x00pci_register_write(rt2x00dev,
+ HW_BEACON_OFFSET(entry->entry_idx), 0);
- rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
- rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, (queue->qid == QID_AC_BE));
- rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, (queue->qid == QID_AC_BK));
- rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, (queue->qid == QID_AC_VI));
- rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, (queue->qid == QID_AC_VO));
- rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
}
/*
@@ -2023,9 +2128,8 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
/*
- * FIXME: Legacy driver indicates that the frame does
- * contain the Michael Mic. Unfortunately, in rt2x00
- * the MIC seems to be missing completely...
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
*/
rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
@@ -2078,7 +2182,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
* that the TX_STA_FIFO stack has a size of 16. We stick to our
* tx ring size for now.
*/
- for (i = 0; i < TX_ENTRIES; i++) {
+ for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
rt2x00pci_register_read(rt2x00dev, STA_CSR4, &reg);
if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
break;
@@ -2088,7 +2192,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
* queue identification number.
*/
type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE);
- queue = rt2x00queue_get_queue(rt2x00dev, type);
+ queue = rt2x00queue_get_tx_queue(rt2x00dev, type);
if (unlikely(!queue))
continue;
@@ -2156,61 +2260,77 @@ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}
-static irqreturn_t rt61pci_interrupt_thread(int irq, void *dev_instance)
+static void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg = rt2x00dev->irqvalue[0];
- u32 reg_mcu = rt2x00dev->irqvalue[1];
+ u32 reg;
/*
- * Handle interrupts, walk through all bits
- * and run the tasks, the bits are checked in order of
- * priority.
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
*/
+ spin_lock_irq(&rt2x00dev->irqmask_lock);
- /*
- * 1 - Rx ring done interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
- rt2x00pci_rxdone(rt2x00dev);
+ rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ rt2x00_set_field32(&reg, irq_field, 0);
+ rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
- /*
- * 2 - Tx ring done interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
- rt61pci_txdone(rt2x00dev);
+ spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
- /*
- * 3 - Handle MCU command done.
- */
- if (reg_mcu)
- rt2x00pci_register_write(rt2x00dev,
- M2H_CMD_DONE_CSR, 0xffffffff);
+static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
+{
+ u32 reg;
/*
- * 4 - MCU Autowakeup interrupt.
+ * Enable a single MCU interrupt. The interrupt mask register
+ * access needs locking.
*/
- if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
- rt61pci_wakeup(rt2x00dev);
+ spin_lock_irq(&rt2x00dev->irqmask_lock);
- /*
- * 5 - Beacon done interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
- rt2x00lib_beacondone(rt2x00dev);
+ rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
+ rt2x00_set_field32(&reg, irq_field, 0);
+ rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
- /* Enable interrupts again. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_ON_ISR);
- return IRQ_HANDLED;
+ spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
+
+static void rt61pci_txstatus_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt61pci_txdone(rt2x00dev);
+ rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TXDONE);
+}
+
+static void rt61pci_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_beacondone(rt2x00dev);
+ rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_BEACON_DONE);
}
+static void rt61pci_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00pci_rxdone(rt2x00dev);
+ rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE);
+}
+
+static void rt61pci_autowake_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt61pci_wakeup(rt2x00dev);
+ rt2x00pci_register_write(rt2x00dev,
+ M2H_CMD_DONE_CSR, 0xffffffff);
+ rt61pci_enable_mcu_interrupt(rt2x00dev, MCU_INT_MASK_CSR_TWAKEUP);
+}
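Each tasklet above runs the deferred work and then unmasks only the interrupt source it serves. The registration side is not part of the hunks shown here; it is assumed to look roughly like this, using tasklet_init() with the device pointer as the data argument and keeping the tasklets disabled until STATE_RADIO_IRQ_ON enables them:

	tasklet_init(&rt2x00dev->txstatus_tasklet,
		     rt61pci_txstatus_tasklet, (unsigned long)rt2x00dev);
	tasklet_init(&rt2x00dev->rxdone_tasklet,
		     rt61pci_rxdone_tasklet, (unsigned long)rt2x00dev);

	/* kept disabled until rt61pci_toggle_irq(STATE_RADIO_IRQ_ON) */
	tasklet_disable(&rt2x00dev->txstatus_tasklet);
	tasklet_disable(&rt2x00dev->rxdone_tasklet);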
static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg_mcu;
- u32 reg;
+ u32 reg_mcu, mask_mcu;
+ u32 reg, mask;
/*
* Get the interrupt sources & save them to local variables.
@@ -2228,14 +2348,46 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return IRQ_HANDLED;
- /* Store irqvalues for use in the interrupt thread. */
- rt2x00dev->irqvalue[0] = reg;
- rt2x00dev->irqvalue[1] = reg_mcu;
+ /*
+ * Schedule tasklets for interrupt handling.
+ */
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
+
+ if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
+ tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+ /*
+ * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+ * for interrupts and interrupt masks, we can just use the value of
+ * INT_SOURCE_CSR to create the interrupt mask.
+ */
+ mask = reg;
+ mask_mcu = reg_mcu;
+
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now;
+ * the tasklet will re-enable the appropriate interrupts.
+ */
+ spin_lock(&rt2x00dev->irqmask_lock);
+
+ rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg |= mask;
+ rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
- /* Disable interrupts, will be enabled again in the interrupt thread. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_OFF_ISR);
- return IRQ_WAKE_THREAD;
+ rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
+ reg |= mask_mcu;
+ rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
+
+ spin_unlock(&rt2x00dev->irqmask_lock);
+
+ return IRQ_HANDLED;
}
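The rewritten handler is a classic top half: read the sources, schedule a tasklet per pending event, mask exactly those sources so they cannot re-fire, and return IRQ_HANDLED; each tasklet unmasks its own source when it finishes. A skeleton of that flow, assuming the source register has already been read into 'source':

static irqreturn_t example_top_half(struct rt2x00_dev *rt2x00dev, u32 source)
{
	if (!source)
		return IRQ_NONE;	/* shared line, not our interrupt */

	if (rt2x00_get_field32(source, INT_SOURCE_CSR_RXDONE))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);	/* defer work */

	/* mask the scheduled sources; the tasklets unmask them again */
	spin_lock(&rt2x00dev->irqmask_lock);
	/* ...INT_MASK_CSR |= source... */
	spin_unlock(&rt2x00dev->irqmask_lock);

	return IRQ_HANDLED;
}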
/*
@@ -2764,7 +2916,7 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
if (queue_idx >= 4)
return 0;
- queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+ queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
/* Update WMM TXOP register */
offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));
@@ -2824,11 +2976,15 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
.conf_tx = rt61pci_conf_tx,
.get_tsf = rt61pci_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
+ .flush = rt2x00mac_flush,
};
static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
.irq_handler = rt61pci_interrupt,
- .irq_handler_thread = rt61pci_interrupt_thread,
+ .txstatus_tasklet = rt61pci_txstatus_tasklet,
+ .tbtt_tasklet = rt61pci_tbtt_tasklet,
+ .rxdone_tasklet = rt61pci_rxdone_tasklet,
+ .autowake_tasklet = rt61pci_autowake_tasklet,
.probe_hw = rt61pci_probe_hw,
.get_firmware_name = rt61pci_get_firmware_name,
.check_firmware = rt61pci_check_firmware,
@@ -2842,10 +2998,12 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
.link_stats = rt61pci_link_stats,
.reset_tuner = rt61pci_reset_tuner,
.link_tuner = rt61pci_link_tuner,
+ .start_queue = rt61pci_start_queue,
+ .kick_queue = rt61pci_kick_queue,
+ .stop_queue = rt61pci_stop_queue,
.write_tx_desc = rt61pci_write_tx_desc,
.write_beacon = rt61pci_write_beacon,
- .kick_tx_queue = rt61pci_kick_tx_queue,
- .kill_tx_queue = rt61pci_kill_tx_queue,
+ .clear_beacon = rt61pci_clear_beacon,
.fill_rxdone = rt61pci_fill_rxdone,
.config_shared_key = rt61pci_config_shared_key,
.config_pairwise_key = rt61pci_config_pairwise_key,
@@ -2857,21 +3015,21 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
};
static const struct data_queue_desc rt61pci_queue_rx = {
- .entry_num = RX_ENTRIES,
+ .entry_num = 32,
.data_size = DATA_FRAME_SIZE,
.desc_size = RXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
};
static const struct data_queue_desc rt61pci_queue_tx = {
- .entry_num = TX_ENTRIES,
+ .entry_num = 32,
.data_size = DATA_FRAME_SIZE,
.desc_size = TXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
};
static const struct data_queue_desc rt61pci_queue_bcn = {
- .entry_num = 4 * BEACON_ENTRIES,
+ .entry_num = 4,
.data_size = 0, /* No DMA required for beacons */
.desc_size = TXINFO_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci),
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index e2e728ab0b2e..e3cd6db76b0e 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -412,7 +412,7 @@ struct hw_pairwise_ta_entry {
* DROP_VERSION_ERROR: Drop version error frame.
* DROP_MULTICAST: Drop multicast frames.
* DROP_BROADCAST: Drop broadcast frames.
- * ROP_ACK_CTS: Drop received ACK and CTS.
+ * DROP_ACK_CTS: Drop received ACK and CTS.
*/
#define TXRX_CSR0 0x3040
#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff)
@@ -784,25 +784,25 @@ struct hw_pairwise_ta_entry {
*/
/*
- * AC0_BASE_CSR: AC_BK base address.
+ * AC0_BASE_CSR: AC_VO base address.
*/
#define AC0_BASE_CSR 0x3400
#define AC0_BASE_CSR_RING_REGISTER FIELD32(0xffffffff)
/*
- * AC1_BASE_CSR: AC_BE base address.
+ * AC1_BASE_CSR: AC_VI base address.
*/
#define AC1_BASE_CSR 0x3404
#define AC1_BASE_CSR_RING_REGISTER FIELD32(0xffffffff)
/*
- * AC2_BASE_CSR: AC_VI base address.
+ * AC2_BASE_CSR: AC_BE base address.
*/
#define AC2_BASE_CSR 0x3408
#define AC2_BASE_CSR_RING_REGISTER FIELD32(0xffffffff)
/*
- * AC3_BASE_CSR: AC_VO base address.
+ * AC3_BASE_CSR: AC_BK base address.
*/
#define AC3_BASE_CSR 0x340c
#define AC3_BASE_CSR_RING_REGISTER FIELD32(0xffffffff)
@@ -814,7 +814,7 @@ struct hw_pairwise_ta_entry {
#define MGMT_BASE_CSR_RING_REGISTER FIELD32(0xffffffff)
/*
- * TX_RING_CSR0: TX Ring size for AC_BK, AC_BE, AC_VI, AC_VO.
+ * TX_RING_CSR0: TX Ring size for AC_VO, AC_VI, AC_BE, AC_BK.
*/
#define TX_RING_CSR0 0x3418
#define TX_RING_CSR0_AC0_RING_SIZE FIELD32(0x000000ff)
@@ -833,10 +833,10 @@ struct hw_pairwise_ta_entry {
/*
* AIFSN_CSR: AIFSN for each EDCA AC.
- * AIFSN0: For AC_BK.
- * AIFSN1: For AC_BE.
- * AIFSN2: For AC_VI.
- * AIFSN3: For AC_VO.
+ * AIFSN0: For AC_VO.
+ * AIFSN1: For AC_VI.
+ * AIFSN2: For AC_BE.
+ * AIFSN3: For AC_BK.
*/
#define AIFSN_CSR 0x3420
#define AIFSN_CSR_AIFSN0 FIELD32(0x0000000f)
@@ -846,10 +846,10 @@ struct hw_pairwise_ta_entry {
/*
* CWMIN_CSR: CWmin for each EDCA AC.
- * CWMIN0: For AC_BK.
- * CWMIN1: For AC_BE.
- * CWMIN2: For AC_VI.
- * CWMIN3: For AC_VO.
+ * CWMIN0: For AC_VO.
+ * CWMIN1: For AC_VI.
+ * CWMIN2: For AC_BE.
+ * CWMIN3: For AC_BK.
*/
#define CWMIN_CSR 0x3424
#define CWMIN_CSR_CWMIN0 FIELD32(0x0000000f)
@@ -859,10 +859,10 @@ struct hw_pairwise_ta_entry {
/*
* CWMAX_CSR: CWmax for each EDCA AC.
- * CWMAX0: For AC_BK.
- * CWMAX1: For AC_BE.
- * CWMAX2: For AC_VI.
- * CWMAX3: For AC_VO.
+ * CWMAX0: For AC_VO.
+ * CWMAX1: For AC_VI.
+ * CWMAX2: For AC_BE.
+ * CWMAX3: For AC_BK.
*/
#define CWMAX_CSR 0x3428
#define CWMAX_CSR_CWMAX0 FIELD32(0x0000000f)
@@ -883,14 +883,14 @@ struct hw_pairwise_ta_entry {
/*
* TX_CNTL_CSR: KICK/Abort TX.
- * KICK_TX_AC0: For AC_BK.
- * KICK_TX_AC1: For AC_BE.
- * KICK_TX_AC2: For AC_VI.
- * KICK_TX_AC3: For AC_VO.
- * ABORT_TX_AC0: For AC_BK.
- * ABORT_TX_AC1: For AC_BE.
- * ABORT_TX_AC2: For AC_VI.
- * ABORT_TX_AC3: For AC_VO.
+ * KICK_TX_AC0: For AC_VO.
+ * KICK_TX_AC1: For AC_VI.
+ * KICK_TX_AC2: For AC_BE.
+ * KICK_TX_AC3: For AC_BK.
+ * ABORT_TX_AC0: For AC_VO.
+ * ABORT_TX_AC1: For AC_VI.
+ * ABORT_TX_AC2: For AC_BE.
+ * ABORT_TX_AC3: For AC_BK.
*/
#define TX_CNTL_CSR 0x3430
#define TX_CNTL_CSR_KICK_TX_AC0 FIELD32(0x00000001)
@@ -1010,18 +1010,18 @@ struct hw_pairwise_ta_entry {
#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040)
/*
- * AC_TXOP_CSR0: AC_BK/AC_BE TXOP register.
- * AC0_TX_OP: For AC_BK, in unit of 32us.
- * AC1_TX_OP: For AC_BE, in unit of 32us.
+ * AC_TXOP_CSR0: AC_VO/AC_VI TXOP register.
+ * AC0_TX_OP: For AC_VO, in unit of 32us.
+ * AC1_TX_OP: For AC_VI, in unit of 32us.
*/
#define AC_TXOP_CSR0 0x3474
#define AC_TXOP_CSR0_AC0_TX_OP FIELD32(0x0000ffff)
#define AC_TXOP_CSR0_AC1_TX_OP FIELD32(0xffff0000)
/*
- * AC_TXOP_CSR1: AC_VO/AC_VI TXOP register.
- * AC2_TX_OP: For AC_VI, in unit of 32us.
- * AC3_TX_OP: For AC_VO, in unit of 32us.
+ * AC_TXOP_CSR1: AC_BE/AC_BK TXOP register.
+ * AC2_TX_OP: For AC_BE, in unit of 32us.
+ * AC3_TX_OP: For AC_BK, in unit of 32us.
*/
#define AC_TXOP_CSR1 0x3478
#define AC_TXOP_CSR1_AC2_TX_OP FIELD32(0x0000ffff)
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 9be8089317e4..02f1148c577e 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -40,7 +40,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 0;
+static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -502,26 +502,14 @@ static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
struct rt2x00intf_conf *conf,
const unsigned int flags)
{
- unsigned int beacon_base;
u32 reg;
if (flags & CONFIG_UPDATE_TYPE) {
/*
- * Clear current synchronisation setup.
- * For the Beacon base registers we only need to clear
- * the first byte since that byte contains the VALID and OWNER
- * bits which (when set to 0) will invalidate the entire beacon.
- */
- beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
- rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
-
- /*
* Enable synchronisation.
*/
rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
}
@@ -1031,6 +1019,55 @@ dynamic_cca_tune:
}
/*
+ * Queue handlers.
+ */
+static void rt73usb_start_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
+ break;
+ case QID_BEACON:
+ rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rt73usb_stop_queue(struct data_queue *queue)
+{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ u32 reg;
+
+ switch (queue->qid) {
+ case QID_RX:
+ rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
+ break;
+ case QID_BEACON:
+ rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
+ rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
* Firmware functions
*/
static char *rt73usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
@@ -1324,18 +1361,6 @@ static int rt73usb_init_bbp(struct rt2x00_dev *rt2x00dev)
/*
* Device state switch handlers.
*/
-static void rt73usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
- enum dev_state state)
-{
- u32 reg;
-
- rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX,
- (state == STATE_RADIO_RX_OFF) ||
- (state == STATE_RADIO_RX_OFF_LINK));
- rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
-}
-
static int rt73usb_enable_radio(struct rt2x00_dev *rt2x00dev)
{
/*
@@ -1402,16 +1427,8 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
case STATE_RADIO_OFF:
rt73usb_disable_radio(rt2x00dev);
break;
- case STATE_RADIO_RX_ON:
- case STATE_RADIO_RX_ON_LINK:
- case STATE_RADIO_RX_OFF:
- case STATE_RADIO_RX_OFF_LINK:
- rt73usb_toggle_rx(rt2x00dev, state);
- break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
/* No support, but no error either */
break;
case STATE_DEEP_SLEEP:
@@ -1457,7 +1474,7 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry,
test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_OFDM,
(txdesc->rate_mode == RATE_MODE_OFDM));
- rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
+ rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
@@ -1472,20 +1489,22 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_write(txd, 0, word);
rt2x00_desc_read(txd, 1, &word);
- rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid);
- rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
- rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
- rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
+ rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
+ rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs);
+ rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
+ rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
- rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
- rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
- rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
- rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
+ txdesc->u.plcp.length_low);
+ rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
+ txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 2, word);
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
@@ -1515,13 +1534,15 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
unsigned int beacon_base;
- u32 reg;
+ unsigned int padding_len;
+ u32 orig_reg, reg;
/*
* Disable beaconing while we are reloading the beacon data,
* otherwise we might be sending out invalid data.
*/
rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ orig_reg = reg;
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -1542,11 +1563,20 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
/*
- * Write entire beacon with descriptor to register.
+ * Write entire beacon with descriptor and padding to register.
*/
+ padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
+ if (padding_len && skb_pad(entry->skb, padding_len)) {
+ ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
+ /* skb freed by skb_pad() on failure */
+ entry->skb = NULL;
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
+ return;
+ }
+
beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
- rt2x00usb_register_multiwrite(rt2x00dev, beacon_base,
- entry->skb->data, entry->skb->len);
+ rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
+ entry->skb->len + padding_len);
/*
* Enable beaconing again.
@@ -1556,8 +1586,6 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
*/
rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -1568,6 +1596,33 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
entry->skb = NULL;
}
+static void rt73usb_clear_beacon(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ unsigned int beacon_base;
+ u32 reg;
+
+ /*
+ * Disable beaconing while we are reloading the beacon data,
+ * otherwise we might be sending out invalid data.
+ */
+ rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
+ * Clear beacon.
+ */
+ beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
+ rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+}
+
static int rt73usb_get_tx_data_len(struct queue_entry *entry)
{
int length;
@@ -1582,14 +1637,6 @@ static int rt73usb_get_tx_data_len(struct queue_entry *entry)
return length;
}
-static void rt73usb_kill_tx_queue(struct data_queue *queue)
-{
- if (queue->qid == QID_BEACON)
- rt2x00usb_register_write(queue->rt2x00dev, TXRX_CSR9, 0);
-
- rt2x00usb_kill_tx_queue(queue);
-}
-
/*
* RX control handlers
*/
@@ -1672,9 +1719,8 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
/*
- * FIXME: Legacy driver indicates that the frame does
- * contain the Michael Mic. Unfortunately, in rt2x00
- * the MIC seems to be missing completely...
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
*/
rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
@@ -2203,7 +2249,7 @@ static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
if (queue_idx >= 4)
return 0;
- queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
+ queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
/* Update WMM TXOP register */
offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));
@@ -2264,6 +2310,7 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
.conf_tx = rt73usb_conf_tx,
.get_tsf = rt73usb_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
+ .flush = rt2x00mac_flush,
};
static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
@@ -2280,11 +2327,14 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
.reset_tuner = rt73usb_reset_tuner,
.link_tuner = rt73usb_link_tuner,
.watchdog = rt2x00usb_watchdog,
+ .start_queue = rt73usb_start_queue,
+ .kick_queue = rt2x00usb_kick_queue,
+ .stop_queue = rt73usb_stop_queue,
+ .flush_queue = rt2x00usb_flush_queue,
.write_tx_desc = rt73usb_write_tx_desc,
.write_beacon = rt73usb_write_beacon,
+ .clear_beacon = rt73usb_clear_beacon,
.get_tx_data_len = rt73usb_get_tx_data_len,
- .kick_tx_queue = rt2x00usb_kick_tx_queue,
- .kill_tx_queue = rt73usb_kill_tx_queue,
.fill_rxdone = rt73usb_fill_rxdone,
.config_shared_key = rt73usb_config_shared_key,
.config_pairwise_key = rt73usb_config_pairwise_key,
@@ -2296,21 +2346,21 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
};
static const struct data_queue_desc rt73usb_queue_rx = {
- .entry_num = RX_ENTRIES,
+ .entry_num = 32,
.data_size = DATA_FRAME_SIZE,
.desc_size = RXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb),
};
static const struct data_queue_desc rt73usb_queue_tx = {
- .entry_num = TX_ENTRIES,
+ .entry_num = 32,
.data_size = DATA_FRAME_SIZE,
.desc_size = TXD_DESC_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb),
};
static const struct data_queue_desc rt73usb_queue_bcn = {
- .entry_num = 4 * BEACON_ENTRIES,
+ .entry_num = 4,
.data_size = MGMT_FRAME_SIZE,
.desc_size = TXINFO_SIZE,
.priv_size = sizeof(struct queue_entry_priv_usb),
@@ -2417,6 +2467,7 @@ static struct usb_device_id rt73usb_device_table[] = {
{ USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) },
+ { USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 44d5b2bebd39..9f6b470414d3 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -322,7 +322,7 @@ struct hw_pairwise_ta_entry {
* DROP_VERSION_ERROR: Drop version error frame.
* DROP_MULTICAST: Drop multicast frames.
* DROP_BROADCAST: Drop broadcast frames.
- * ROP_ACK_CTS: Drop received ACK and CTS.
+ * DROP_ACK_CTS: Drop received ACK and CTS.
*/
#define TXRX_CSR0 0x3040
#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff)
@@ -689,10 +689,10 @@ struct hw_pairwise_ta_entry {
/*
* AIFSN_CSR: AIFSN for each EDCA AC.
- * AIFSN0: For AC_BK.
- * AIFSN1: For AC_BE.
- * AIFSN2: For AC_VI.
- * AIFSN3: For AC_VO.
+ * AIFSN0: For AC_VO.
+ * AIFSN1: For AC_VI.
+ * AIFSN2: For AC_BE.
+ * AIFSN3: For AC_BK.
*/
#define AIFSN_CSR 0x0400
#define AIFSN_CSR_AIFSN0 FIELD32(0x0000000f)
@@ -702,10 +702,10 @@ struct hw_pairwise_ta_entry {
/*
* CWMIN_CSR: CWmin for each EDCA AC.
- * CWMIN0: For AC_BK.
- * CWMIN1: For AC_BE.
- * CWMIN2: For AC_VI.
- * CWMIN3: For AC_VO.
+ * CWMIN0: For AC_VO.
+ * CWMIN1: For AC_VI.
+ * CWMIN2: For AC_BE.
+ * CWMIN3: For AC_BK.
*/
#define CWMIN_CSR 0x0404
#define CWMIN_CSR_CWMIN0 FIELD32(0x0000000f)
@@ -715,10 +715,10 @@ struct hw_pairwise_ta_entry {
/*
* CWMAX_CSR: CWmax for each EDCA AC.
- * CWMAX0: For AC_BK.
- * CWMAX1: For AC_BE.
- * CWMAX2: For AC_VI.
- * CWMAX3: For AC_VO.
+ * CWMAX0: For AC_VO.
+ * CWMAX1: For AC_VI.
+ * CWMAX2: For AC_BE.
+ * CWMAX3: For AC_BK.
*/
#define CWMAX_CSR 0x0408
#define CWMAX_CSR_CWMAX0 FIELD32(0x0000000f)
@@ -727,18 +727,18 @@ struct hw_pairwise_ta_entry {
#define CWMAX_CSR_CWMAX3 FIELD32(0x0000f000)
/*
- * AC_TXOP_CSR0: AC_BK/AC_BE TXOP register.
- * AC0_TX_OP: For AC_BK, in unit of 32us.
- * AC1_TX_OP: For AC_BE, in unit of 32us.
+ * AC_TXOP_CSR0: AC_VO/AC_VI TXOP register.
+ * AC0_TX_OP: For AC_VO, in unit of 32us.
+ * AC1_TX_OP: For AC_VI, in unit of 32us.
*/
#define AC_TXOP_CSR0 0x040c
#define AC_TXOP_CSR0_AC0_TX_OP FIELD32(0x0000ffff)
#define AC_TXOP_CSR0_AC1_TX_OP FIELD32(0xffff0000)
/*
- * AC_TXOP_CSR1: AC_VO/AC_VI TXOP register.
- * AC2_TX_OP: For AC_VI, in unit of 32us.
- * AC3_TX_OP: For AC_VO, in unit of 32us.
+ * AC_TXOP_CSR1: AC_BE/AC_BK TXOP register.
+ * AC2_TX_OP: For AC_BE, in unit of 32us.
+ * AC3_TX_OP: For AC_BK, in unit of 32us.
*/
#define AC_TXOP_CSR1 0x0410
#define AC_TXOP_CSR1_AC2_TX_OP FIELD32(0x0000ffff)
diff --git a/drivers/net/wireless/rtl818x/Makefile b/drivers/net/wireless/rtl818x/Makefile
index 93cbfbedb46d..997569076923 100644
--- a/drivers/net/wireless/rtl818x/Makefile
+++ b/drivers/net/wireless/rtl818x/Makefile
@@ -1,7 +1,2 @@
-rtl8180-objs := rtl8180_dev.o rtl8180_rtl8225.o rtl8180_sa2400.o rtl8180_max2820.o rtl8180_grf5101.o
-rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o rtl8187_leds.o rtl8187_rfkill.o
-
-obj-$(CONFIG_RTL8180) += rtl8180.o
-obj-$(CONFIG_RTL8187) += rtl8187.o
-
-
+obj-$(CONFIG_RTL8180) += rtl8180/
+obj-$(CONFIG_RTL8187) += rtl8187/
diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/rtl818x/rtl8180/Makefile
new file mode 100644
index 000000000000..cb4fb8596f0b
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/rtl8180/Makefile
@@ -0,0 +1,5 @@
+rtl8180-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o
+
+obj-$(CONFIG_RTL8180) += rtl8180.o
+
+ccflags-y += -Idrivers/net/wireless/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 707c688da618..80db5cabc9b9 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -24,10 +24,10 @@
#include <net/mac80211.h>
#include "rtl8180.h"
-#include "rtl8180_rtl8225.h"
-#include "rtl8180_sa2400.h"
-#include "rtl8180_max2820.h"
-#include "rtl8180_grf5101.h"
+#include "rtl8225.h"
+#include "sa2400.h"
+#include "max2820.h"
+#include "grf5101.h"
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
@@ -146,7 +146,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
rx_status.freq = dev->conf.channel->center_freq;
rx_status.band = dev->conf.channel->band;
rx_status.mactime = le64_to_cpu(entry->tsft);
- rx_status.flag |= RX_FLAG_TSFT;
+ rx_status.flag |= RX_FLAG_MACTIME_MPDU;
if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -240,7 +240,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -321,8 +321,6 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
spin_unlock_irqrestore(&priv->lock, flags);
rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
-
- return 0;
}
void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
@@ -687,7 +685,6 @@ static void rtl8180_beacon_work(struct work_struct *work)
struct ieee80211_hw *dev = vif_priv->dev;
struct ieee80211_mgmt *mgmt;
struct sk_buff *skb;
- int err = 0;
/* don't overflow the tx ring */
if (ieee80211_queue_stopped(dev, 0))
@@ -708,8 +705,7 @@ static void rtl8180_beacon_work(struct work_struct *work)
/* TODO: use actual beacon queue */
skb_set_queue_mapping(skb, 0);
- err = rtl8180_tx(dev, skb);
- WARN_ON(err);
+ rtl8180_tx(dev, skb);
resched:
/*
diff --git a/drivers/net/wireless/rtl818x/rtl8180_grf5101.c b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
index 5cab9dfa8c07..5ee7589dd546 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_grf5101.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
@@ -25,7 +25,7 @@
#include <net/mac80211.h>
#include "rtl8180.h"
-#include "rtl8180_grf5101.h"
+#include "grf5101.h"
static const int grf5101_encode[] = {
0x0, 0x8, 0x4, 0xC,
diff --git a/drivers/net/wireless/rtl818x/rtl8180_grf5101.h b/drivers/net/wireless/rtl818x/rtl8180/grf5101.h
index 76647111bcff..76647111bcff 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_grf5101.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180_max2820.c b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
index 16c4655181c0..667b3363d437 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_max2820.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
@@ -24,7 +24,7 @@
#include <net/mac80211.h>
#include "rtl8180.h"
-#include "rtl8180_max2820.h"
+#include "max2820.h"
static const u32 max2820_chan[] = {
12, /* CH 1 */
diff --git a/drivers/net/wireless/rtl818x/rtl8180_max2820.h b/drivers/net/wireless/rtl818x/rtl8180/max2820.h
index 61cf6d1e7d57..61cf6d1e7d57 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_max2820.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
index 30523314da43..30523314da43 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
index 69e4d4745dae..7c4574ba9d75 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
@@ -21,7 +21,7 @@
#include <net/mac80211.h>
#include "rtl8180.h"
-#include "rtl8180_rtl8225.h"
+#include "rtl8225.h"
static void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data)
{
diff --git a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.h b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.h
index 310013a2d726..310013a2d726 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180_sa2400.c b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
index d064fcc5ec08..44771a6286af 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_sa2400.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
@@ -25,7 +25,7 @@
#include <net/mac80211.h>
#include "rtl8180.h"
-#include "rtl8180_sa2400.h"
+#include "sa2400.h"
static const u32 sa2400_chan[] = {
0x00096c, /* ch1 */
diff --git a/drivers/net/wireless/rtl818x/rtl8180_sa2400.h b/drivers/net/wireless/rtl818x/rtl8180/sa2400.h
index a4aaa0d413f1..a4aaa0d413f1 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_sa2400.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/Makefile b/drivers/net/wireless/rtl818x/rtl8187/Makefile
new file mode 100644
index 000000000000..7b6299268ecf
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/rtl8187/Makefile
@@ -0,0 +1,5 @@
+rtl8187-objs := dev.o rtl8225.o leds.o rfkill.o
+
+obj-$(CONFIG_RTL8187) += rtl8187.o
+
+ccflags-y += -Idrivers/net/wireless/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 38fa8244cc96..1e0be14d10d4 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -29,11 +29,11 @@
#include <net/mac80211.h>
#include "rtl8187.h"
-#include "rtl8187_rtl8225.h"
+#include "rtl8225.h"
#ifdef CONFIG_RTL8187_LEDS
-#include "rtl8187_leds.h"
+#include "leds.h"
#endif
-#include "rtl8187_rfkill.h"
+#include "rfkill.h"
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
@@ -227,7 +227,7 @@ static void rtl8187_tx_cb(struct urb *urb)
}
}
-static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
struct rtl8187_priv *priv = dev->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -241,7 +241,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
kfree_skb(skb);
- return NETDEV_TX_OK;
+ return;
}
flags = skb->len;
@@ -309,8 +309,6 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
kfree_skb(skb);
}
usb_free_urb(urb);
-
- return NETDEV_TX_OK;
}
static void rtl8187_rx_cb(struct urb *urb)
@@ -373,7 +371,7 @@ static void rtl8187_rx_cb(struct urb *urb)
rx_status.rate_idx = rate;
rx_status.freq = dev->conf.channel->center_freq;
rx_status.band = dev->conf.channel->band;
- rx_status.flag |= RX_FLAG_TSFT;
+ rx_status.flag |= RX_FLAG_MACTIME_MPDU;
if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
@@ -553,6 +551,46 @@ static int rtl8187b_init_status_urb(struct ieee80211_hw *dev)
return ret;
}
+static void rtl8187_set_anaparam(struct rtl8187_priv *priv, bool rfon)
+{
+ u32 anaparam, anaparam2;
+ u8 anaparam3, reg;
+
+ if (!priv->is_rtl8187b) {
+ if (rfon) {
+ anaparam = RTL8187_RTL8225_ANAPARAM_ON;
+ anaparam2 = RTL8187_RTL8225_ANAPARAM2_ON;
+ } else {
+ anaparam = RTL8187_RTL8225_ANAPARAM_OFF;
+ anaparam2 = RTL8187_RTL8225_ANAPARAM2_OFF;
+ }
+ } else {
+ if (rfon) {
+ anaparam = RTL8187B_RTL8225_ANAPARAM_ON;
+ anaparam2 = RTL8187B_RTL8225_ANAPARAM2_ON;
+ anaparam3 = RTL8187B_RTL8225_ANAPARAM3_ON;
+ } else {
+ anaparam = RTL8187B_RTL8225_ANAPARAM_OFF;
+ anaparam2 = RTL8187B_RTL8225_ANAPARAM2_OFF;
+ anaparam3 = RTL8187B_RTL8225_ANAPARAM3_OFF;
+ }
+ }
+
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_CONFIG);
+ reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+ reg |= RTL818X_CONFIG3_ANAPARAM_WRITE;
+ rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
+ rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam);
+ rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
+ if (priv->is_rtl8187b)
+ rtl818x_iowrite8(priv, &priv->map->ANAPARAM3, anaparam3);
+ reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE;
+ rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
+ rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+ RTL818X_EEPROM_CMD_NORMAL);
+}
+
static int rtl8187_cmd_reset(struct ieee80211_hw *dev)
{
struct rtl8187_priv *priv = dev->priv;
@@ -603,19 +641,7 @@ static int rtl8187_init_hw(struct ieee80211_hw *dev)
int res;
/* reset */
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
- RTL818X_EEPROM_CMD_CONFIG);
- reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
- rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg |
- RTL818X_CONFIG3_ANAPARAM_WRITE);
- rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
- RTL8187_RTL8225_ANAPARAM_ON);
- rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
- RTL8187_RTL8225_ANAPARAM2_ON);
- rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg &
- ~RTL818X_CONFIG3_ANAPARAM_WRITE);
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
- RTL818X_EEPROM_CMD_NORMAL);
+ rtl8187_set_anaparam(priv, true);
rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
@@ -629,17 +655,7 @@ static int rtl8187_init_hw(struct ieee80211_hw *dev)
if (res)
return res;
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
- reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
- rtl818x_iowrite8(priv, &priv->map->CONFIG3,
- reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
- rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
- RTL8187_RTL8225_ANAPARAM_ON);
- rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
- RTL8187_RTL8225_ANAPARAM2_ON);
- rtl818x_iowrite8(priv, &priv->map->CONFIG3,
- reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+ rtl8187_set_anaparam(priv, true);
/* setup card */
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0);
@@ -712,10 +728,9 @@ static const u8 rtl8187b_reg_table[][3] = {
{0x58, 0x4B, 1}, {0x59, 0x00, 1}, {0x5A, 0x4B, 1}, {0x5B, 0x00, 1},
{0x60, 0x4B, 1}, {0x61, 0x09, 1}, {0x62, 0x4B, 1}, {0x63, 0x09, 1},
- {0xCE, 0x0F, 1}, {0xCF, 0x00, 1}, {0xE0, 0xFF, 1}, {0xE1, 0x0F, 1},
- {0xE2, 0x00, 1}, {0xF0, 0x4E, 1}, {0xF1, 0x01, 1}, {0xF2, 0x02, 1},
- {0xF3, 0x03, 1}, {0xF4, 0x04, 1}, {0xF5, 0x05, 1}, {0xF6, 0x06, 1},
- {0xF7, 0x07, 1}, {0xF8, 0x08, 1},
+ {0xCE, 0x0F, 1}, {0xCF, 0x00, 1}, {0xF0, 0x4E, 1}, {0xF1, 0x01, 1},
+ {0xF2, 0x02, 1}, {0xF3, 0x03, 1}, {0xF4, 0x04, 1}, {0xF5, 0x05, 1},
+ {0xF6, 0x06, 1}, {0xF7, 0x07, 1}, {0xF8, 0x08, 1},
{0x4E, 0x00, 2}, {0x0C, 0x04, 2}, {0x21, 0x61, 2}, {0x22, 0x68, 2},
{0x23, 0x6F, 2}, {0x24, 0x76, 2}, {0x25, 0x7D, 2}, {0x26, 0x84, 2},
@@ -723,14 +738,13 @@ static const u8 rtl8187b_reg_table[][3] = {
{0x52, 0x04, 2}, {0x53, 0xA0, 2}, {0x54, 0x1F, 2}, {0x55, 0x23, 2},
{0x56, 0x45, 2}, {0x57, 0x67, 2}, {0x58, 0x08, 2}, {0x59, 0x08, 2},
{0x5A, 0x08, 2}, {0x5B, 0x08, 2}, {0x60, 0x08, 2}, {0x61, 0x08, 2},
- {0x62, 0x08, 2}, {0x63, 0x08, 2}, {0x64, 0xCF, 2}, {0x72, 0x56, 2},
- {0x73, 0x9A, 2},
+ {0x62, 0x08, 2}, {0x63, 0x08, 2}, {0x64, 0xCF, 2},
- {0x34, 0xF0, 0}, {0x35, 0x0F, 0}, {0x5B, 0x40, 0}, {0x84, 0x88, 0},
- {0x85, 0x24, 0}, {0x88, 0x54, 0}, {0x8B, 0xB8, 0}, {0x8C, 0x07, 0},
- {0x8D, 0x00, 0}, {0x94, 0x1B, 0}, {0x95, 0x12, 0}, {0x96, 0x00, 0},
- {0x97, 0x06, 0}, {0x9D, 0x1A, 0}, {0x9F, 0x10, 0}, {0xB4, 0x22, 0},
- {0xBE, 0x80, 0}, {0xDB, 0x00, 0}, {0xEE, 0x00, 0}, {0x4C, 0x00, 2},
+ {0x5B, 0x40, 0}, {0x84, 0x88, 0}, {0x85, 0x24, 0}, {0x88, 0x54, 0},
+ {0x8B, 0xB8, 0}, {0x8C, 0x07, 0}, {0x8D, 0x00, 0}, {0x94, 0x1B, 0},
+ {0x95, 0x12, 0}, {0x96, 0x00, 0}, {0x97, 0x06, 0}, {0x9D, 0x1A, 0},
+ {0x9F, 0x10, 0}, {0xB4, 0x22, 0}, {0xBE, 0x80, 0}, {0xDB, 0x00, 0},
+ {0xEE, 0x00, 0}, {0x4C, 0x00, 2},
{0x9F, 0x00, 3}, {0x8C, 0x01, 0}, {0x8D, 0x10, 0}, {0x8E, 0x08, 0},
{0x8F, 0x00, 0}
@@ -742,48 +756,34 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
int res, i;
u8 reg;
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
- RTL818X_EEPROM_CMD_CONFIG);
-
- reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
- reg |= RTL818X_CONFIG3_ANAPARAM_WRITE | RTL818X_CONFIG3_GNT_SELECT;
- rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
- rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
- RTL8187B_RTL8225_ANAPARAM2_ON);
- rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
- RTL8187B_RTL8225_ANAPARAM_ON);
- rtl818x_iowrite8(priv, &priv->map->ANAPARAM3,
- RTL8187B_RTL8225_ANAPARAM3_ON);
+ rtl8187_set_anaparam(priv, true);
+ /* Reset PLL sequence on 8187B. Realtek note: reduces power
+ * consumption by about 30 mA */
rtl818x_iowrite8(priv, (u8 *)0xFF61, 0x10);
reg = rtl818x_ioread8(priv, (u8 *)0xFF62);
rtl818x_iowrite8(priv, (u8 *)0xFF62, reg & ~(1 << 5));
rtl818x_iowrite8(priv, (u8 *)0xFF62, reg | (1 << 5));
- reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
- reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE;
- rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
-
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
- RTL818X_EEPROM_CMD_NORMAL);
-
res = rtl8187_cmd_reset(dev);
if (res)
return res;
- rtl818x_iowrite16(priv, (__le16 *)0xFF2D, 0x0FFF);
+ rtl8187_set_anaparam(priv, true);
+
+ /* BRSR (Basic Rate Set Register) on 8187B looks to be the same as
+ * RESP_RATE on 8187L in the Realtek sources: each bit corresponds to
+ * one of the 12 rates, and all of them are enabled */
+ rtl818x_iowrite16(priv, (__le16 *)0xFF34, 0x0FFF);
+
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
- reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
- reg |= RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT |
- RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
- rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
+ /* Auto Rate Fallback Register (ARFR): 1M-54M setting */
rtl818x_iowrite16_idx(priv, (__le16 *)0xFFE0, 0x0FFF, 1);
+ rtl818x_iowrite8_idx(priv, (u8 *)0xFFE2, 0x00, 1);
- rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
- rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2);
rtl818x_iowrite16_idx(priv, (__le16 *)0xFFD4, 0xFFFF, 1);
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
@@ -811,16 +811,9 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x00004001);
+ /* RFSW_CTRL register */
rtl818x_iowrite16_idx(priv, (__le16 *)0xFF72, 0x569A, 2);
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
- RTL818X_EEPROM_CMD_CONFIG);
- reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
- reg |= RTL818X_CONFIG3_ANAPARAM_WRITE;
- rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
- RTL818X_EEPROM_CMD_NORMAL);
-
rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488);
rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
@@ -876,23 +869,35 @@ static void rtl8187_work(struct work_struct *work)
/* The RTL8187 returns the retry count through register 0xFFFA. In
* addition, it appears to be a cumulative retry count, not the
* value for the current TX packet. When multiple TX entries are
- * queued, the retry count will be valid for the last one in the queue.
- * The "error" should not matter for purposes of rate setting. */
+ * waiting in the queue, the retry count will be the total for all.
+ * The "error" may matter for purposes of rate setting, but there is
+ * no other choice with this hardware.
+ */
struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
work.work);
struct ieee80211_tx_info *info;
struct ieee80211_hw *dev = priv->dev;
static u16 retry;
u16 tmp;
+ u16 avg_retry;
+ int length;
mutex_lock(&priv->conf_mutex);
tmp = rtl818x_ioread16(priv, (__le16 *)0xFFFA);
+ length = skb_queue_len(&priv->b_tx_status.queue);
+ if (unlikely(!length))
+ length = 1;
+ if (unlikely(tmp < retry))
+ tmp = retry;
+ avg_retry = (tmp - retry) / length;
while (skb_queue_len(&priv->b_tx_status.queue) > 0) {
struct sk_buff *old_skb;
old_skb = skb_dequeue(&priv->b_tx_status.queue);
info = IEEE80211_SKB_CB(old_skb);
- info->status.rates[0].count = tmp - retry + 1;
+ info->status.rates[0].count = avg_retry + 1;
+ if (info->status.rates[0].count > RETRY_COUNT)
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(dev, old_skb);
}
retry = tmp;
@@ -929,11 +934,17 @@ static int rtl8187_start(struct ieee80211_hw *dev)
priv->rx_conf = reg;
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
+ reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+ reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
+ rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
+
rtl818x_iowrite32(priv, &priv->map->TX_CONF,
RTL818X_TX_CONF_HW_SEQNUM |
RTL818X_TX_CONF_DISREQQSIZE |
- (7 << 8 /* short retry limit */) |
- (7 << 0 /* long retry limit */) |
+ (RETRY_COUNT << 8 /* short retry limit */) |
+ (RETRY_COUNT << 0 /* long retry limit */) |
(7 << 21 /* MAX TX DMA */));
rtl8187_init_urbs(dev);
rtl8187b_init_status_urb(dev);
@@ -1002,6 +1013,7 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
rtl818x_iowrite8(priv, &priv->map->CMD, reg);
priv->rf->stop(dev);
+ rtl8187_set_anaparam(priv, false);
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
reg = rtl818x_ioread8(priv, &priv->map->CONFIG4);
@@ -1376,6 +1388,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_RX_INCLUDES_FCS;
+ /* Initialize rate-control variables */
+ dev->max_rates = 1;
+ dev->max_rate_tries = RETRY_COUNT;
eeprom.data = dev;
eeprom.register_read = rtl8187_eeprom_register_read;
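The rework of rtl8187_work() above treats the value read from register 0xFFFA as a cumulative retry counter: the delta since the previous read is spread evenly over the frames waiting in b_tx_status.queue, and frames whose resulting count exceeds RETRY_COUNT are reported without IEEE80211_TX_STAT_ACK. A minimal standalone sketch of that averaging (hypothetical helper, plain C outside the driver):

#include <stdint.h>

#define RETRY_COUNT 7

/* per-frame average of a cumulative hardware retry counter */
static uint16_t avg_retries(uint16_t hw_total, uint16_t prev_total,
                            unsigned int queued)
{
	if (queued == 0)
		queued = 1;            /* nothing queued: avoid division by zero */
	if (hw_total < prev_total)
		hw_total = prev_total; /* counter reset or wrapped */
	return (hw_total - prev_total) / queued;
}

Each dequeued frame then reports the average plus one as rates[0].count, matching the logic added above.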
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c
index 4637337d5ce6..2e0de2f5f0f9 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c
@@ -20,7 +20,7 @@
#include <linux/eeprom_93cx6.h>
#include "rtl8187.h"
-#include "rtl8187_leds.h"
+#include "leds.h"
static void led_turn_on(struct work_struct *work)
{
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.h b/drivers/net/wireless/rtl818x/rtl8187/leds.h
index d743c96d4a20..d743c96d4a20 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/leds.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c b/drivers/net/wireless/rtl818x/rtl8187/rfkill.c
index 03555e1e0cab..34116719974a 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/rfkill.c
@@ -18,7 +18,7 @@
#include <net/mac80211.h>
#include "rtl8187.h"
-#include "rtl8187_rfkill.h"
+#include "rfkill.h"
static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv)
{
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rfkill.h b/drivers/net/wireless/rtl818x/rtl8187/rfkill.h
index e12575e96d11..e12575e96d11 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rfkill.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rfkill.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index 98878160a65a..f1cc90751dbf 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -16,7 +16,7 @@
#define RTL8187_H
#include "rtl818x.h"
-#include "rtl8187_leds.h"
+#include "leds.h"
#define RTL8187_EEPROM_TXPWR_BASE 0x05
#define RTL8187_EEPROM_MAC_ADDR 0x07
@@ -35,6 +35,8 @@
#define RFKILL_MASK_8187_89_97 0x2
#define RFKILL_MASK_8198 0x4
+#define RETRY_COUNT 7
+
struct rtl8187_rx_info {
struct urb *urb;
struct ieee80211_hw *dev;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
index 97eebdcf7eb9..908903f721f5 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
@@ -21,7 +21,7 @@
#include <net/mac80211.h>
#include "rtl8187.h"
-#include "rtl8187_rtl8225.h"
+#include "rtl8225.h"
static void rtl8225_write_bitbang(struct ieee80211_hw *dev, u8 addr, u16 data)
{
@@ -898,29 +898,7 @@ static void rtl8225z2_b_rf_init(struct ieee80211_hw *dev)
static void rtl8225_rf_stop(struct ieee80211_hw *dev)
{
- u8 reg;
- struct rtl8187_priv *priv = dev->priv;
-
rtl8225_write(dev, 0x4, 0x1f);
-
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
- reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
- rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
- if (!priv->is_rtl8187b) {
- rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
- RTL8187_RTL8225_ANAPARAM2_OFF);
- rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
- RTL8187_RTL8225_ANAPARAM_OFF);
- } else {
- rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
- RTL8187B_RTL8225_ANAPARAM2_OFF);
- rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
- RTL8187B_RTL8225_ANAPARAM_OFF);
- rtl818x_iowrite8(priv, &priv->map->ANAPARAM3,
- RTL8187B_RTL8225_ANAPARAM3_OFF);
- }
- rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
- rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
}
static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
index 20c5b6ead0f6..20c5b6ead0f6 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
new file mode 100644
index 000000000000..ce49e0ce7cad
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -0,0 +1,33 @@
+config RTL8192CE
+ tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
+ depends on MAC80211 && PCI && EXPERIMENTAL
+ select FW_LOADER
+ select RTLWIFI
+ select RTL8192C_COMMON
+ ---help---
+ This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
+ wireless network adapters.
+
+ If you choose to build it as a module, it will be called rtl8192ce
+
+config RTL8192CU
+ tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
+ depends on MAC80211 && USB && EXPERIMENTAL
+ select FW_LOADER
+ select RTLWIFI
+ select RTL8192C_COMMON
+ ---help---
+ This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
+ wireless network adapters.
+
+ If you choose to build it as a module, it will be called rtl8192cu
+
+config RTLWIFI
+ tristate
+ depends on RTL8192CE || RTL8192CU
+ default m
+
+config RTL8192C_COMMON
+ tristate
+ depends on RTL8192CE || RTL8192CU
+ default m
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
new file mode 100644
index 000000000000..ec9393f24799
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -0,0 +1,26 @@
+obj-$(CONFIG_RTLWIFI) += rtlwifi.o
+rtlwifi-objs := \
+ base.o \
+ cam.o \
+ core.o \
+ debug.o \
+ efuse.o \
+ ps.o \
+ rc.o \
+ regd.o
+
+rtl8192c_common-objs += \
+
+ifneq ($(CONFIG_PCI),)
+rtlwifi-objs += pci.o
+endif
+
+ifneq ($(CONFIG_USB),)
+rtlwifi-objs += usb.o
+endif
+
+obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
+obj-$(CONFIG_RTL8192CE) += rtl8192ce/
+obj-$(CONFIG_RTL8192CU) += rtl8192cu/
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
new file mode 100644
index 000000000000..bb0c781f4a1b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -0,0 +1,953 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include <linux/ip.h>
+#include "wifi.h"
+#include "rc.h"
+#include "base.h"
+#include "efuse.h"
+#include "cam.h"
+#include "ps.h"
+#include "regd.h"
+
+/*
+ *NOTICE!!!: This file will be very big, so we should
+ *keep it clean by following these rules:
+ *
+ *This file includes the following parts, so if you add new
+ *functions to this file, please check which part they
+ *should be included in, or whether you should add a new part
+ *to this file:
+ *
+ *1) mac80211 init functions
+ *2) tx information functions
+ *3) functions called by core.c
+ *4) wq & timer callback functions
+ *5) frame process functions
+ *6) sysfs functions
+ *7) ...
+ */
+
+/*********************************************************
+ *
+ * mac80211 init functions
+ *
+ *********************************************************/
+static struct ieee80211_channel rtl_channeltable[] = {
+ {.center_freq = 2412, .hw_value = 1,},
+ {.center_freq = 2417, .hw_value = 2,},
+ {.center_freq = 2422, .hw_value = 3,},
+ {.center_freq = 2427, .hw_value = 4,},
+ {.center_freq = 2432, .hw_value = 5,},
+ {.center_freq = 2437, .hw_value = 6,},
+ {.center_freq = 2442, .hw_value = 7,},
+ {.center_freq = 2447, .hw_value = 8,},
+ {.center_freq = 2452, .hw_value = 9,},
+ {.center_freq = 2457, .hw_value = 10,},
+ {.center_freq = 2462, .hw_value = 11,},
+ {.center_freq = 2467, .hw_value = 12,},
+ {.center_freq = 2472, .hw_value = 13,},
+ {.center_freq = 2484, .hw_value = 14,},
+};
+
+static struct ieee80211_rate rtl_ratetable[] = {
+ {.bitrate = 10, .hw_value = 0x00,},
+ {.bitrate = 20, .hw_value = 0x01,},
+ {.bitrate = 55, .hw_value = 0x02,},
+ {.bitrate = 110, .hw_value = 0x03,},
+ {.bitrate = 60, .hw_value = 0x04,},
+ {.bitrate = 90, .hw_value = 0x05,},
+ {.bitrate = 120, .hw_value = 0x06,},
+ {.bitrate = 180, .hw_value = 0x07,},
+ {.bitrate = 240, .hw_value = 0x08,},
+ {.bitrate = 360, .hw_value = 0x09,},
+ {.bitrate = 480, .hw_value = 0x0a,},
+ {.bitrate = 540, .hw_value = 0x0b,},
+};
+
+static const struct ieee80211_supported_band rtl_band_2ghz = {
+ .band = IEEE80211_BAND_2GHZ,
+
+ .channels = rtl_channeltable,
+ .n_channels = ARRAY_SIZE(rtl_channeltable),
+
+ .bitrates = rtl_ratetable,
+ .n_bitrates = ARRAY_SIZE(rtl_ratetable),
+
+ .ht_cap = {0},
+};
+
+static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ ht_cap->ht_supported = true;
+ ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU;
+
+ /*
+ *Maximum length of AMPDU that the STA can receive.
+ *Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+
+ /* Minimum MPDU start spacing */
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+ /*
+ *hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ *based on ant_num
+ *rx_mask: RX mask
+ *if rx_ant =1 rx_mask[0]=0xff;==>MCS0-MCS7
+ *if rx_ant =2 rx_mask[1]=0xff;==>MCS8-MCS15
+ *if rx_ant >=3 rx_mask[2]=0xff;
+ *if BW_40 rx_mask[4]=0x01;
+ *highest supported RX rate
+ */
+ if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_2T2R) {
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T2R or 2T2R\n"));
+
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0xFF;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+
+ ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
+ } else if (get_rf_type(rtlphy) == RF_1T1R) {
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T1R\n"));
+
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0x00;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+
+ ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7);
+ }
+}
+
+static void _rtl_init_mac80211(struct ieee80211_hw *hw)
+{
+ struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct ieee80211_supported_band *sband;
+
+ /* <1> use mac->bands as mem for hw->wiphy->bands */
+ sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+
+ /*
+ * <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ * to default value(1T1R)
+ */
+ memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz,
+ sizeof(struct ieee80211_supported_band));
+
+ /* <3> init ht cap base on ant_num */
+ _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
+
+ /* <4> set mac->sband to wiphy->sband */
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+
+ /* <5> set hw caps */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_BEACON_FILTER | IEEE80211_HW_AMPDU_AGGREGATION | /*PS*/
+ /*IEEE80211_HW_SUPPORTS_PS | */
+ /*IEEE80211_HW_PS_NULLFUNC_STACK | */
+ /*IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS | 0;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+
+ hw->wiphy->rts_threshold = 2347;
+
+ hw->queues = AC_MAX;
+ hw->extra_tx_headroom = RTL_TX_HEADER_SIZE;
+
+ /* TODO: Correct this value for our hw */
+ /* TODO: define these hard-coded values */
+ hw->channel_change_time = 100;
+ hw->max_listen_interval = 5;
+ hw->max_rate_tries = 4;
+ /* hw->max_rates = 1; */
+
+ /* <6> mac address */
+ if (is_valid_ether_addr(rtlefuse->dev_addr)) {
+ SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
+ } else {
+ u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
+ get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1);
+ SET_IEEE80211_PERM_ADDR(hw, rtlmac);
+ }
+
+}
+
+static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* <1> timer */
+ init_timer(&rtlpriv->works.watchdog_timer);
+ setup_timer(&rtlpriv->works.watchdog_timer,
+ rtl_watch_dog_timer_callback, (unsigned long)hw);
+
+ /* <2> work queue */
+ rtlpriv->works.hw = hw;
+ rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0);
+ INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
+ (void *)rtl_watchdog_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
+ (void *)rtl_ips_nic_off_wq_callback);
+
+}
+
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ del_timer_sync(&rtlpriv->works.watchdog_timer);
+
+ cancel_delayed_work(&rtlpriv->works.watchdog_wq);
+ cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+}
+
+void rtl_init_rfkill(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ bool radio_state;
+ bool blocked;
+ u8 valid = 0;
+
+ radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
+
+ /*set init state to that of switch */
+ rtlpriv->rfkill.rfkill_state = radio_state;
+ printk(KERN_INFO "rtlwifi: wireless switch is %s\n",
+ rtlpriv->rfkill.rfkill_state ? "on" : "off");
+
+ if (valid) {
+ rtlpriv->rfkill.rfkill_state = radio_state;
+
+ blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+ }
+
+ wiphy_rfkill_start_polling(hw->wiphy);
+}
+
+void rtl_deinit_rfkill(struct ieee80211_hw *hw)
+{
+ wiphy_rfkill_stop_polling(hw->wiphy);
+}
+
+int rtl_init_core(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+
+ /* <1> init mac80211 */
+ _rtl_init_mac80211(hw);
+ rtlmac->hw = hw;
+
+ /* <2> rate control register */
+ hw->rate_control_algorithm = "rtl_rc";
+
+ /*
+ * <3> init CRDA must come after init
+ * mac80211 hw in _rtl_init_mac80211.
+ */
+ if (rtl_regd_init(hw, rtl_reg_notifier)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("REGD init failed\n"));
+ return 1;
+ } else {
+ /* the CRDA regd hint must come after CRDA init */
+ if (regulatory_hint(hw->wiphy, rtlpriv->regd.alpha2)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("regulatory_hint fail\n"));
+ }
+ }
+
+ /* <4> locks */
+ mutex_init(&rtlpriv->locks.conf_mutex);
+ spin_lock_init(&rtlpriv->locks.ips_lock);
+ spin_lock_init(&rtlpriv->locks.irq_th_lock);
+ spin_lock_init(&rtlpriv->locks.h2c_lock);
+ spin_lock_init(&rtlpriv->locks.rf_ps_lock);
+ spin_lock_init(&rtlpriv->locks.rf_lock);
+ spin_lock_init(&rtlpriv->locks.lps_lock);
+
+ rtlmac->link_state = MAC80211_NOLINK;
+
+ /* <5> init deferred work */
+ _rtl_init_deferred_work(hw);
+
+ return 0;
+}
+
+void rtl_deinit_core(struct ieee80211_hw *hw)
+{
+}
+
+void rtl_init_rx_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_MGT_FILTER,
+ (u8 *) (&mac->rx_mgt_filter));
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_CTRL_FILTER,
+ (u8 *) (&mac->rx_ctrl_filter));
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_DATA_FILTER,
+ (u8 *) (&mac->rx_data_filter));
+}
+
+/*********************************************************
+ *
+ * tx information functions
+ *
+ *********************************************************/
+static void _rtl_qurey_shortpreamble_mode(struct ieee80211_hw *hw,
+ struct rtl_tcb_desc *tcb_desc,
+ struct ieee80211_tx_info *info)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 rate_flag = info->control.rates[0].flags;
+
+ tcb_desc->use_shortpreamble = false;
+
+ /* 1M can only use Long Preamble. 11B spec */
+ if (tcb_desc->hw_rate == rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M])
+ return;
+ else if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ tcb_desc->use_shortpreamble = true;
+
+ return;
+}
+
+static void _rtl_query_shortgi(struct ieee80211_hw *hw,
+ struct rtl_tcb_desc *tcb_desc,
+ struct ieee80211_tx_info *info)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 rate_flag = info->control.rates[0].flags;
+
+ tcb_desc->use_shortgi = false;
+
+ if (!mac->ht_enable)
+ return;
+
+ if (!mac->sgi_40 && !mac->sgi_20)
+ return;
+
+ if ((mac->bw_40 == true) && mac->sgi_40)
+ tcb_desc->use_shortgi = true;
+ else if ((mac->bw_40 == false) && mac->sgi_20)
+ tcb_desc->use_shortgi = true;
+
+ if (!(rate_flag & IEEE80211_TX_RC_SHORT_GI))
+ tcb_desc->use_shortgi = false;
+
+}
+
+static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
+ struct rtl_tcb_desc *tcb_desc,
+ struct ieee80211_tx_info *info)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 rate_flag = info->control.rates[0].flags;
+
+ /* Common Settings */
+ tcb_desc->rts_stbc = false;
+ tcb_desc->cts_enable = false;
+ tcb_desc->rts_sc = 0;
+ tcb_desc->rts_bw = false;
+ tcb_desc->rts_use_shortpreamble = false;
+ tcb_desc->rts_use_shortgi = false;
+
+ if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ /* Use CTS-to-SELF in protection mode. */
+ tcb_desc->rts_enable = true;
+ tcb_desc->cts_enable = true;
+ tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
+ } else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+ /* Use RTS-CTS in protection mode. */
+ tcb_desc->rts_enable = true;
+ tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
+ }
+
+}
+
+static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
+ struct rtl_tcb_desc *tcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ if (!tcb_desc->disable_ratefallback || !tcb_desc->use_driver_rate) {
+ if (mac->opmode == NL80211_IFTYPE_STATION)
+ tcb_desc->ratr_index = 0;
+ else if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ if (tcb_desc->multicast || tcb_desc->broadcast) {
+ tcb_desc->hw_rate =
+ rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
+ tcb_desc->use_driver_rate = 1;
+ } else {
+ /* TODO */
+ }
+ }
+ }
+
+ if (rtlpriv->dm.useramask) {
+ /* TODO: we will differentiate adhoc and station in the future */
+ tcb_desc->mac_id = 0;
+
+ if ((mac->mode == WIRELESS_MODE_N_24G) ||
+ (mac->mode == WIRELESS_MODE_N_5G)) {
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB;
+ } else if (mac->mode & WIRELESS_MODE_G) {
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_GB;
+ } else if (mac->mode & WIRELESS_MODE_B) {
+ tcb_desc->ratr_index = RATR_INX_WIRELESS_B;
+ }
+ }
+
+}
+
+static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
+ struct rtl_tcb_desc *tcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ tcb_desc->packet_bw = false;
+
+ if (!mac->bw_40 || !mac->ht_enable)
+ return;
+
+ if (tcb_desc->multicast || tcb_desc->broadcast)
+ return;
+
+ /* legacy rates shall use 20MHz bandwidth */
+ if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M])
+ return;
+
+ tcb_desc->packet_bw = true;
+}
+
+static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 hw_rate;
+
+ if (get_rf_type(rtlphy) == RF_2T2R)
+ hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
+ else
+ hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
+
+ return hw_rate;
+}
+
+void rtl_get_tcb_desc(struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info,
+ struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+ struct ieee80211_rate *txrate;
+ __le16 fc = hdr->frame_control;
+
+ memset(tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+
+ if (ieee80211_is_data(fc)) {
+ txrate = ieee80211_get_tx_rate(hw, info);
+ tcb_desc->hw_rate = txrate->hw_value;
+
+ /*
+ *we set the data rate to RTL_RC_CCK_RATE1M
+ *in rtl_rc.c if the skb is special data or a
+ *management frame that needs a low data rate.
+ */
+
+ /*
+ *So tcb_desc->hw_rate is just used for
+ *special data and mgt frames
+ */
+ if (tcb_desc->hw_rate < rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M]) {
+ tcb_desc->use_driver_rate = true;
+ tcb_desc->ratr_index = 7;
+
+ tcb_desc->hw_rate =
+ rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M];
+ tcb_desc->disable_ratefallback = 1;
+ } else {
+ /*
+ *because hw will never use hw_rate
+ *when tcb_desc->use_driver_rate = false,
+ *we never set the highest N rate here;
+ *the N rate is controlled entirely by the FW
+ *when tcb_desc->use_driver_rate = false
+ */
+ if (rtlmac->ht_enable) {
+ tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw);
+ } else {
+ if (rtlmac->mode == WIRELESS_MODE_B) {
+ tcb_desc->hw_rate =
+ rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M];
+ } else {
+ tcb_desc->hw_rate =
+ rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M];
+ }
+ }
+ }
+
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
+ tcb_desc->multicast = 1;
+ else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ tcb_desc->broadcast = 1;
+
+ _rtl_txrate_selectmode(hw, tcb_desc);
+ _rtl_query_bandwidth_mode(hw, tcb_desc);
+ _rtl_qurey_shortpreamble_mode(hw, tcb_desc, info);
+ _rtl_query_shortgi(hw, tcb_desc, info);
+ _rtl_query_protection_mode(hw, tcb_desc, info);
+ } else {
+ tcb_desc->use_driver_rate = true;
+ tcb_desc->ratr_index = 7;
+ tcb_desc->disable_ratefallback = 1;
+ tcb_desc->mac_id = 0;
+
+ tcb_desc->hw_rate = rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M];
+ }
+}
+EXPORT_SYMBOL(rtl_get_tcb_desc);
+
+bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+ __le16 fc = hdr->frame_control;
+
+ if (ieee80211_is_auth(fc)) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
+ rtl_ips_nic_on(hw);
+
+ mac->link_state = MAC80211_LINKING;
+ }
+
+ return true;
+}
+
+bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ __le16 fc = hdr->frame_control;
+ u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
+ u8 category;
+
+ if (!ieee80211_is_action(fc))
+ return true;
+
+ category = *act;
+ act++;
+ switch (category) {
+ case ACT_CAT_BA:
+ switch (*act) {
+ case ACT_ADDBAREQ:
+ if (mac->act_scanning)
+ return false;
+
+ RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ ("%s ACT_ADDBAREQ From :" MAC_FMT "\n",
+ is_tx ? "Tx" : "Rx", MAC_ARG(hdr->addr2)));
+ break;
+ case ACT_ADDBARSP:
+ RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ ("%s ACT_ADDBARSP From :" MAC_FMT "\n",
+ is_tx ? "Tx" : "Rx", MAC_ARG(hdr->addr2)));
+ break;
+ case ACT_DELBA:
+ RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ ("ACT_ADDBADEL From :" MAC_FMT "\n",
+ MAC_ARG(hdr->addr2)));
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+/*should call before software enc*/
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ __le16 fc = hdr->frame_control;
+ u16 ether_type;
+ u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ const struct iphdr *ip;
+
+ if (!ieee80211_is_data(fc))
+ goto end;
+
+ if (ieee80211_is_nullfunc(fc))
+ return true;
+
+ ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
+ SNAP_SIZE + PROTOC_TYPE_SIZE);
+ ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
+
+ if (ETH_P_IP == ether_type) {
+ if (IPPROTO_UDP == ip->protocol) {
+ struct udphdr *udp = (struct udphdr *)((u8 *) ip +
+ (ip->ihl << 2));
+ if (((((u8 *) udp)[1] == 68) &&
+ (((u8 *) udp)[3] == 67)) ||
+ ((((u8 *) udp)[1] == 67) &&
+ (((u8 *) udp)[3] == 68))) {
+ /*
+ * 68 : UDP BOOTP client
+ * 67 : UDP BOOTP server
+ */
+ RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
+ DBG_DMESG, ("dhcp %s !!\n",
+ (is_tx) ? "Tx" : "Rx"));
+
+ if (is_tx) {
+ rtl_lps_leave(hw);
+ ppsc->last_delaylps_stamp_jiffies =
+ jiffies;
+ }
+
+ return true;
+ }
+ }
+ } else if (ETH_P_ARP == ether_type) {
+ if (is_tx) {
+ rtl_lps_leave(hw);
+ ppsc->last_delaylps_stamp_jiffies = jiffies;
+ }
+
+ return true;
+ } else if (ETH_P_PAE == ether_type) {
+ RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ ("802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx"));
+
+ if (is_tx) {
+ rtl_lps_leave(hw);
+ ppsc->last_delaylps_stamp_jiffies = jiffies;
+ }
+
+ return true;
+ } else if (ETH_P_IPV6 == ether_type) {
+ /* IPv6 */
+ return true;
+ }
+
+end:
+ return false;
+}
+
+/*********************************************************
+ *
+ * functions called by core.c
+ *
+ *********************************************************/
+int rtl_tx_agg_start(struct ieee80211_hw *hw, const u8 *ra, u16 tid, u16 *ssn)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tid_data *tid_data;
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
+ ("on ra = %pM tid = %d\n", ra, tid));
+
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (mac->tids[tid].agg.agg_state != RTL_AGG_OFF) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Start AGG when state is not RTL_AGG_OFF !\n"));
+ return -ENXIO;
+ }
+
+ tid_data = &mac->tids[tid];
+ *ssn = SEQ_TO_SN(tid_data->seq_number);
+
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
+ ("HW queue is empty tid:%d\n", tid));
+ tid_data->agg.agg_state = RTL_AGG_ON;
+
+ ieee80211_start_tx_ba_cb_irqsafe(mac->vif, ra, tid);
+
+ return 0;
+}
+
+int rtl_tx_agg_stop(struct ieee80211_hw *hw, const u8 * ra, u16 tid)
+{
+ int ssn = -1;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_tid_data *tid_data;
+
+ if (!ra) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
+ return -EINVAL;
+ }
+
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (mac->tids[tid].agg.agg_state != RTL_AGG_ON)
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Stopping AGG while state not ON or starting\n"));
+
+ tid_data = &mac->tids[tid];
+ ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+
+ mac->tids[tid].agg.agg_state = RTL_AGG_OFF;
+
+ ieee80211_stop_tx_ba_cb_irqsafe(mac->vif, ra, tid);
+
+ return 0;
+}
+
+/*********************************************************
+ *
+ * wq & timer callback functions
+ *
+ *********************************************************/
+void rtl_watchdog_wq_callback(void *data)
+{
+ struct rtl_works *rtlworks = container_of_dwork_rtl(data,
+ struct rtl_works,
+ watchdog_wq);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ bool busytraffic = false;
+ bool higher_busytraffic = false;
+ bool higher_busyrxtraffic = false;
+ bool higher_busytxtraffic = false;
+
+ u8 idx = 0;
+ u32 rx_cnt_inp4eriod = 0;
+ u32 tx_cnt_inp4eriod = 0;
+ u32 aver_rx_cnt_inperiod = 0;
+ u32 aver_tx_cnt_inperiod = 0;
+
+ bool enter_ps = false;
+
+ if (is_hal_stop(rtlhal))
+ return;
+
+ /* <1> Determine if action frame is allowed */
+ if (mac->link_state > MAC80211_NOLINK) {
+ if (mac->cnt_after_linked < 20)
+ mac->cnt_after_linked++;
+ } else {
+ mac->cnt_after_linked = 0;
+ }
+
+ /* <2> DM */
+ rtlpriv->cfg->ops->dm_watchdog(hw);
+
+ /*
+ *<3> check whether traffic is busy; if
+ * busytraffic is set we don't change channel
+ */
+ if (mac->link_state >= MAC80211_LINKED) {
+
+ /* (1) get aver_rx_cnt_inperiod & aver_tx_cnt_inperiod */
+ for (idx = 0; idx <= 2; idx++) {
+ rtlpriv->link_info.num_rx_in4period[idx] =
+ rtlpriv->link_info.num_rx_in4period[idx + 1];
+ rtlpriv->link_info.num_tx_in4period[idx] =
+ rtlpriv->link_info.num_tx_in4period[idx + 1];
+ }
+ rtlpriv->link_info.num_rx_in4period[3] =
+ rtlpriv->link_info.num_rx_inperiod;
+ rtlpriv->link_info.num_tx_in4period[3] =
+ rtlpriv->link_info.num_tx_inperiod;
+ for (idx = 0; idx <= 3; idx++) {
+ rx_cnt_inp4eriod +=
+ rtlpriv->link_info.num_rx_in4period[idx];
+ tx_cnt_inp4eriod +=
+ rtlpriv->link_info.num_tx_in4period[idx];
+ }
+ aver_rx_cnt_inperiod = rx_cnt_inp4eriod / 4;
+ aver_tx_cnt_inperiod = tx_cnt_inp4eriod / 4;
+
+ /* (2) check traffic busy */
+ if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100)
+ busytraffic = true;
+
+ /* Higher Tx/Rx data. */
+ if (aver_rx_cnt_inperiod > 4000 ||
+ aver_tx_cnt_inperiod > 4000) {
+ higher_busytraffic = true;
+
+ /* Extremely high Rx data. */
+ if (aver_rx_cnt_inperiod > 5000)
+ higher_busyrxtraffic = true;
+ else
+ higher_busytxtraffic = false;
+ }
+
+ if (((rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ (rtlpriv->link_info.num_rx_inperiod > 2))
+ enter_ps = false;
+ else
+ enter_ps = true;
+
+ /* LeisurePS only works in infra mode. */
+ if (enter_ps)
+ rtl_lps_enter(hw);
+ else
+ rtl_lps_leave(hw);
+ }
+
+ rtlpriv->link_info.num_rx_inperiod = 0;
+ rtlpriv->link_info.num_tx_inperiod = 0;
+
+ rtlpriv->link_info.busytraffic = busytraffic;
+ rtlpriv->link_info.higher_busytraffic = higher_busytraffic;
+ rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic;
+
+}
+
+void rtl_watch_dog_timer_callback(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ queue_delayed_work(rtlpriv->works.rtl_wq,
+ &rtlpriv->works.watchdog_wq, 0);
+
+ mod_timer(&rtlpriv->works.watchdog_timer,
+ jiffies + MSECS(RTL_WATCH_DOG_TIME));
+}
+
+/*********************************************************
+ *
+ * sysfs functions
+ *
+ *********************************************************/
+static ssize_t rtl_show_debug_level(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(d);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ return sprintf(buf, "0x%08X\n", rtlpriv->dbg.global_debuglevel);
+}
+
+static ssize_t rtl_store_debug_level(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(d);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret) {
+ printk(KERN_DEBUG "%s is not in hex or decimal form.\n", buf);
+ } else {
+ rtlpriv->dbg.global_debuglevel = val;
+ printk(KERN_DEBUG "debuglevel:%x\n",
+ rtlpriv->dbg.global_debuglevel);
+ }
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
+ rtl_show_debug_level, rtl_store_debug_level);
+
+static struct attribute *rtl_sysfs_entries[] = {
+
+ &dev_attr_debug_level.attr,
+
+ NULL
+};
+
+/*
+ * "name" is the folder name which will be
+ * put in the device directory, e.g.:
+ * sys/devices/pci0000:00/0000:00:1c.4/
+ * 0000:06:00.0/rtl_sysfs
+ */
+struct attribute_group rtl_attribute_group = {
+ .name = "rtlsysfs",
+ .attrs = rtl_sysfs_entries,
+};
+
+MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
+
+static int __init rtl_core_module_init(void)
+{
+ if (rtl_rate_control_register())
+ printk(KERN_ERR "rtlwifi: Unable to register rtl_rc, "
+ "use default RC !!\n");
+ return 0;
+}
+
+static void __exit rtl_core_module_exit(void)
+{
+ /*RC*/
+ rtl_rate_control_unregister();
+}
+
+module_init(rtl_core_module_init);
+module_exit(rtl_core_module_exit);
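rtl_watchdog_wq_callback() above keeps a four-period sliding window of per-period RX/TX packet counts and uses the window average to classify the link as busy (average above 100 packets per period) or higher-busy (above 4000). A standalone sketch of that windowed average, with hypothetical type and function names rather than the driver's own:

#include <stdint.h>
#include <string.h>

struct traffic_window {
	uint32_t cnt_in4period[4];
};

/* drop the oldest period, append the newest, return the window average */
static uint32_t traffic_avg_push(struct traffic_window *w, uint32_t cnt_now)
{
	uint32_t sum = 0;
	int i;

	memmove(&w->cnt_in4period[0], &w->cnt_in4period[1],
		3 * sizeof(w->cnt_in4period[0]));
	w->cnt_in4period[3] = cnt_now;

	for (i = 0; i < 4; i++)
		sum += w->cnt_in4period[i];

	return sum / 4;	/* > 100: busy traffic, > 4000: higher busy (per base.c) */
}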
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
new file mode 100644
index 000000000000..043045342bc7
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -0,0 +1,97 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *****************************************************************************/
+
+#ifndef __RTL_BASE_H__
+#define __RTL_BASE_H__
+
+#define RTL_DUMMY_OFFSET 0
+#define RTL_RX_DESC_SIZE 24
+#define RTL_DUMMY_UNIT 8
+#define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT)
+#define RTL_TX_DESC_SIZE 32
+#define RTL_TX_HEADER_SIZE (RTL_TX_DESC_SIZE + RTL_TX_DUMMY_SIZE)
+
+#define HT_AMSDU_SIZE_4K 3839
+#define HT_AMSDU_SIZE_8K 7935
+
+#define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */
+#define MAX_BIT_RATE_40MHZ_MCS7 150 /* Mbps */
+
+#define RTL_RATE_COUNT_LEGACY 12
+#define RTL_CHANNEL_COUNT 14
+
+#define FRAME_OFFSET_FRAME_CONTROL 0
+#define FRAME_OFFSET_DURATION 2
+#define FRAME_OFFSET_ADDRESS1 4
+#define FRAME_OFFSET_ADDRESS2 10
+#define FRAME_OFFSET_ADDRESS3 16
+#define FRAME_OFFSET_SEQUENCE 22
+#define FRAME_OFFSET_ADDRESS4 24
+
+
+#define SET_80211_PS_POLL_AID(_hdr, _val) \
+ (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val))
+#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
+ memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
+#define SET_80211_PS_POLL_TA(_hdr, _val) \
+ memcpy(((u8 *)(_hdr)) + 10, (u8 *)(_val), ETH_ALEN)
+
+#define SET_80211_HDR_DURATION(_hdr, _val) \
+ (*(u16 *)((u8 *)(_hdr) + FRAME_OFFSET_DURATION) = le16_to_cpu(_val))
+#define SET_80211_HDR_ADDRESS1(_hdr, _val) \
+ memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val), ETH_ALEN)
+#define SET_80211_HDR_ADDRESS2(_hdr, _val) \
+ memcpy((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val), ETH_ALEN)
+#define SET_80211_HDR_ADDRESS3(_hdr, _val) \
+ memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val), ETH_ALEN)
+
+int rtl_init_core(struct ieee80211_hw *hw);
+void rtl_deinit_core(struct ieee80211_hw *hw);
+void rtl_init_rx_config(struct ieee80211_hw *hw);
+void rtl_init_rfkill(struct ieee80211_hw *hw);
+void rtl_deinit_rfkill(struct ieee80211_hw *hw);
+
+void rtl_watch_dog_timer_callback(unsigned long data);
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
+
+bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
+bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
+
+void rtl_watch_dog_timer_callback(unsigned long data);
+int rtl_tx_agg_start(struct ieee80211_hw *hw, const u8 *ra,
+ u16 tid, u16 *ssn);
+int rtl_tx_agg_stop(struct ieee80211_hw *hw, const u8 *ra, u16 tid);
+void rtl_watchdog_wq_callback(void *data);
+
+void rtl_get_tcb_desc(struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info,
+ struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc);
+
+extern struct attribute_group rtl_attribute_group;
+#endif
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
new file mode 100644
index 000000000000..52c9c1367cac
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -0,0 +1,291 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "cam.h"
+
+void rtl_cam_reset_sec_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->sec.use_defaultkey = false;
+ rtlpriv->sec.pairwise_enc_algorithm = NO_ENCRYPTION;
+ rtlpriv->sec.group_enc_algorithm = NO_ENCRYPTION;
+ memset(rtlpriv->sec.key_buf, 0, KEY_BUF_SIZE * MAX_KEY_LEN);
+ memset(rtlpriv->sec.key_len, 0, KEY_BUF_SIZE);
+ rtlpriv->sec.pairwise_key = NULL;
+}
+
+static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
+ u8 *mac_addr, u8 *key_cont_128, u16 us_config)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ u32 target_command;
+ u32 target_content = 0;
+ u8 entry_i;
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("key_cont_128:\n %x:%x:%x:%x:%x:%x\n",
+ key_cont_128[0], key_cont_128[1],
+ key_cont_128[2], key_cont_128[3],
+ key_cont_128[4], key_cont_128[5]));
+
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
+ target_command = target_command | BIT(31) | BIT(16);
+
+ if (entry_i == 0) {
+ target_content = (u32) (*(mac_addr + 0)) << 16 |
+ (u32) (*(mac_addr + 1)) << 24 | (u32) us_config;
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
+ target_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_command);
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("rtl_cam_program_entry(): "
+ "WRITE %x: %x\n",
+ rtlpriv->cfg->maps[WCAMI], target_content));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("The Key ID is %d\n", entry_no));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("rtl_cam_program_entry(): "
+ "WRITE %x: %x\n",
+ rtlpriv->cfg->maps[RWCAM], target_command));
+
+ } else if (entry_i == 1) {
+
+ target_content = (u32) (*(mac_addr + 5)) << 24 |
+ (u32) (*(mac_addr + 4)) << 16 |
+ (u32) (*(mac_addr + 3)) << 8 |
+ (u32) (*(mac_addr + 2));
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
+ target_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_command);
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("rtl_cam_program_entry(): WRITE A4: %x\n",
+ target_content));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("rtl_cam_program_entry(): WRITE A0: %x\n",
+ target_command));
+
+ } else {
+
+ target_content =
+ (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 3)) <<
+ 24 | (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 2))
+ << 16 |
+ (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 1)) << 8
+ | (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 0));
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
+ target_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+ target_command);
+ udelay(100);
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("rtl_cam_program_entry(): WRITE A4: %x\n",
+ target_content));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("rtl_cam_program_entry(): WRITE A0: %x\n",
+ target_command));
+ }
+ }
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("after set key, usconfig:%x\n", us_config));
+}
+
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+ u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+ u32 ul_default_key, u8 *key_content)
+{
+ u32 us_config;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, "
+ "ulUseDK=%x MacAddr" MAC_FMT "\n",
+ ul_entry_idx, ul_key_id, ul_enc_alg,
+ ul_default_key, MAC_ARG(mac_addr)));
+
+ if (ul_key_id == TOTAL_CAM_ENTRY) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("<=== ulKeyId exceed!\n"));
+ return 0;
+ }
+
+ if (ul_default_key == 1) {
+ us_config = CFG_VALID | ((u16) (ul_enc_alg) << 2);
+ } else {
+ us_config = CFG_VALID | ((ul_enc_alg) << 2) | ul_key_id;
+ }
+
+ rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
+ (u8 *) key_content, us_config);
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("<===\n"));
+
+ return 1;
+
+}
+EXPORT_SYMBOL(rtl_cam_add_one_entry);
+
+int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
+ u8 *mac_addr, u32 ul_key_id)
+{
+ u32 ul_command;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("key_idx:%d\n", ul_key_id));
+
+ ul_command = ul_key_id * CAM_CONTENT_COUNT;
+ ul_command = ul_command | BIT(31) | BIT(16);
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], 0);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("rtl_cam_delete_one_entry(): WRITE A4: %x\n", 0));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("rtl_cam_delete_one_entry(): WRITE A0: %x\n", ul_command));
+
+ return 0;
+
+}
+EXPORT_SYMBOL(rtl_cam_delete_one_entry);
+
+void rtl_cam_reset_all_entry(struct ieee80211_hw *hw)
+{
+ u32 ul_command;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ ul_command = BIT(31) | BIT(30);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+}
+EXPORT_SYMBOL(rtl_cam_reset_all_entry);
+
+void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ u32 ul_command;
+ u32 ul_content;
+ u32 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+
+ switch (rtlpriv->sec.pairwise_enc_algorithm) {
+ case WEP40_ENCRYPTION:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
+ break;
+ case WEP104_ENCRYPTION:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
+ break;
+ case TKIP_ENCRYPTION:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
+ break;
+ case AESCCMP_ENCRYPTION:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ break;
+ default:
+ ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ }
+
+ ul_content = (uc_index & 3) | ((u16) (ul_enc_algo) << 2);
+
+ ul_content |= BIT(15);
+ ul_command = CAM_CONTENT_COUNT * uc_index;
+ ul_command = ul_command | BIT(31) | BIT(16);
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("rtl_cam_mark_invalid(): WRITE A4: %x\n", ul_content));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("rtl_cam_mark_invalid(): WRITE A0: %x\n", ul_command));
+}
+EXPORT_SYMBOL(rtl_cam_mark_invalid);
+
+void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ u32 ul_command;
+ u32 ul_content;
+ u32 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ u8 entry_i;
+
+ switch (rtlpriv->sec.pairwise_enc_algorithm) {
+ case WEP40_ENCRYPTION:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
+ break;
+ case WEP104_ENCRYPTION:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
+ break;
+ case TKIP_ENCRYPTION:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
+ break;
+ case AESCCMP_ENCRYPTION:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ break;
+ default:
+ ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ }
+
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+
+ if (entry_i == 0) {
+ ul_content =
+ (uc_index & 0x03) | ((u16) (ul_encalgo) << 2);
+ ul_content |= BIT(15);
+
+ } else {
+ ul_content = 0;
+ }
+
+ ul_command = CAM_CONTENT_COUNT * uc_index + entry_i;
+ ul_command = ul_command | BIT(31) | BIT(16);
+
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
+ rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("rtl_cam_empty_entry(): WRITE A4: %x\n",
+ ul_content));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("rtl_cam_empty_entry(): WRITE A0: %x\n",
+ ul_command));
+ }
+
+}
+EXPORT_SYMBOL(rtl_cam_empty_entry);
diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/rtlwifi/cam.h
new file mode 100644
index 000000000000..dd82f057d53d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/cam.h
@@ -0,0 +1,53 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_CAM_H_
+#define __RTL_CAM_H_
+
+#define TOTAL_CAM_ENTRY 32
+#define CAM_CONTENT_COUNT 8
+
+#define CFG_DEFAULT_KEY BIT(5)
+#define CFG_VALID BIT(15)
+
+#define PAIRWISE_KEYIDX 0
+#define CAM_PAIRWISE_KEY_POSITION 4
+
+#define CAM_CONFIG_USEDK 1
+#define CAM_CONFIG_NO_USEDK 0
+
+extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
+extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+ u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+ u32 ul_default_key, u8 *key_content);
+int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+ u32 ul_key_id);
+void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
+void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
+void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
new file mode 100644
index 000000000000..e4f4aee8f298
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -0,0 +1,1033 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "core.h"
+#include "cam.h"
+#include "base.h"
+#include "ps.h"
+
+/* A mutex protecting start & stop is required here. */
+static int rtl_op_start(struct ieee80211_hw *hw)
+{
+ int err = 0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (!is_hal_stop(rtlhal))
+ return 0;
+ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+ return 0;
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+ err = rtlpriv->intf_ops->adapter_start(hw);
+ if (err)
+ goto out;
+ rtl_watch_dog_timer_callback((unsigned long)hw);
+out:
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+ return err;
+}
+
+static void rtl_op_stop(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if (is_hal_stop(rtlhal))
+ return;
+
+ if (unlikely(ppsc->rfpwr_state == ERFOFF)) {
+ rtl_ips_nic_on(hw);
+ mdelay(1);
+ }
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+
+ mac->link_state = MAC80211_NOLINK;
+ memset(mac->bssid, 0, 6);
+
+ /*reset sec info */
+ rtl_cam_reset_sec_info(hw);
+
+ rtl_deinit_deferred_work(hw);
+ rtlpriv->intf_ops->adapter_stop(hw);
+
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+
+static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
+ goto err_free;
+
+ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+ goto err_free;
+
+ rtlpriv->intf_ops->adapter_tx(hw, skb);
+
+ return;
+
+err_free:
+ dev_kfree_skb_any(skb);
+}
+
+static int rtl_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ int err = 0;
+
+ if (mac->vif) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("vif has been set!! mac->vif = 0x%p\n", mac->vif));
+ return -EOPNOTSUPP;
+ }
+
+ rtl_ips_nic_on(hw);
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (mac->beacon_enabled == 1) {
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("NL80211_IFTYPE_STATION\n"));
+ mac->beacon_enabled = 0;
+ rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
+ rtlpriv->cfg->maps
+ [RTL_IBSS_INT_MASKS]);
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("NL80211_IFTYPE_ADHOC\n"));
+
+ mac->link_state = MAC80211_LINKED;
+ rtlpriv->cfg->ops->set_bcn_reg(hw);
+ break;
+ case NL80211_IFTYPE_AP:
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("NL80211_IFTYPE_AP\n"));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("operation mode %d is not support!\n", vif->type));
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ mac->vif = vif;
+ mac->opmode = vif->type;
+ rtlpriv->cfg->ops->set_network_type(hw, vif->type);
+ memcpy(mac->mac_addr, vif->addr, ETH_ALEN);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+
+out:
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+ return err;
+}
+
+static void rtl_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+
+ /* Free beacon resources */
+ if ((mac->opmode == NL80211_IFTYPE_AP) ||
+ (mac->opmode == NL80211_IFTYPE_ADHOC) ||
+ (mac->opmode == NL80211_IFTYPE_MESH_POINT)) {
+ if (mac->beacon_enabled == 1) {
+ mac->beacon_enabled = 0;
+ rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
+ rtlpriv->cfg->maps
+ [RTL_IBSS_INT_MASKS]);
+ }
+ }
+
+ /*
+ *Note: We assume NL80211_IFTYPE_UNSPECIFIED as
+ *NO LINK for our hardware.
+ */
+ mac->vif = NULL;
+ mac->link_state = MAC80211_NOLINK;
+ memset(mac->bssid, 0, 6);
+ mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
+ rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
+
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+
+
+static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct ieee80211_conf *conf = &hw->conf;
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+ if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /*BIT(2)*/
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n"));
+ }
+
+ /*For IPS */
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (hw->conf.flags & IEEE80211_CONF_IDLE)
+ rtl_ips_nic_off(hw);
+ else
+ rtl_ips_nic_on(hw);
+ } else {
+ /*
+ *although the rf-off state may not be caused by ips, we
+ *check the reason in the set_rf_power_state function
+ */
+ if (unlikely(ppsc->rfpwr_state == ERFOFF))
+ rtl_ips_nic_on(hw);
+ }
+
+ /*For LPS */
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (conf->flags & IEEE80211_CONF_PS)
+ rtl_lps_enter(hw);
+ else
+ rtl_lps_leave(hw);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
+ hw->conf.long_frame_max_tx_count));
+ mac->retry_long = hw->conf.long_frame_max_tx_count;
+ mac->retry_short = hw->conf.long_frame_max_tx_count;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
+ (u8 *) (&hw->conf.
+ long_frame_max_tx_count));
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ struct ieee80211_channel *channel = hw->conf.channel;
+ u8 wide_chan = (u8) channel->hw_value;
+
+ /*
+ *because we need to switch back to
+ *current_network.chan when scanning,
+ *we set the channel even if
+ *set_chan == current_network.chan.
+ *Also, mac80211 reports wrong bw40
+ *info for cisco1253 bw20, so we correct
+ *it here based on UPPER & LOWER
+ */
+ switch (hw->conf.channel_type) {
+ case NL80211_CHAN_HT20:
+ case NL80211_CHAN_NO_HT:
+ /* SC */
+ mac->cur_40_prime_sc =
+ PRIME_CHNL_OFFSET_DONT_CARE;
+ rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20;
+ mac->bw_40 = false;
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ /* SC */
+ mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_UPPER;
+ rtlphy->current_chan_bw =
+ HT_CHANNEL_WIDTH_20_40;
+ mac->bw_40 = true;
+
+ /*wide channel */
+ wide_chan -= 2;
+
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ /* SC */
+ mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_LOWER;
+ rtlphy->current_chan_bw =
+ HT_CHANNEL_WIDTH_20_40;
+ mac->bw_40 = true;
+
+ /*wide channel */
+ wide_chan += 2;
+
+ break;
+ default:
+ mac->bw_40 = false;
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not processed\n"));
+ break;
+ }
+
+ if (wide_chan <= 0)
+ wide_chan = 1;
+ rtlphy->current_channel = wide_chan;
+
+ rtlpriv->cfg->ops->set_channel_access(hw);
+ rtlpriv->cfg->ops->switch_channel(hw);
+ rtlpriv->cfg->ops->set_bw_mode(hw,
+ hw->conf.channel_type);
+ }
+
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+
+ return 0;
+}
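As a worked example of the channel adjustment above: if mac80211 requests primary channel 6 with NL80211_CHAN_HT40PLUS, the driver records PRIME_CHNL_OFFSET_LOWER and programs wide_chan = 6 + 2 = 8, the center of the 40 MHz pair; for NL80211_CHAN_HT40MINUS it records PRIME_CHNL_OFFSET_UPPER and programs wide_chan = 6 - 2 = 4. HT20 and non-HT channels are passed through unchanged.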
+
+static void rtl_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *new_flags, u64 multicast)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ *new_flags &= RTL_SUPPORTED_FILTERS;
+ if (!changed_flags)
+ return;
+
+ /*TODO: we disable broadcast now, so enable it here */
+ if (changed_flags & FIF_ALLMULTI) {
+ if (*new_flags & FIF_ALLMULTI) {
+ mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
+ rtlpriv->cfg->maps[MAC_RCR_AB];
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("Enable receive multicast frame.\n"));
+ } else {
+ mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
+ rtlpriv->cfg->maps[MAC_RCR_AB]);
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("Disable receive multicast frame.\n"));
+ }
+ }
+
+ if (changed_flags & FIF_FCSFAIL) {
+ if (*new_flags & FIF_FCSFAIL) {
+ mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("Enable receive FCS error frame.\n"));
+ } else {
+ mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("Disable receive FCS error frame.\n"));
+ }
+ }
+
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
+ /*
+ *TODO: BIT(5) is probe response BIT(8) is beacon
+ *TODO: Use define for BIT(5) and BIT(8)
+ */
+ if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
+ mac->rx_mgt_filter |= (BIT(5) | BIT(8));
+ else
+ mac->rx_mgt_filter &= ~(BIT(5) | BIT(8));
+ }
+
+ if (changed_flags & FIF_CONTROL) {
+ if (*new_flags & FIF_CONTROL) {
+ mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
+ mac->rx_ctrl_filter |= RTL_SUPPORTED_CTRL_FILTER;
+
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("Enable receive control frame.\n"));
+ } else {
+ mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
+ mac->rx_ctrl_filter &= ~RTL_SUPPORTED_CTRL_FILTER;
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("Disable receive control frame.\n"));
+ }
+ }
+
+ if (changed_flags & FIF_OTHER_BSS) {
+ if (*new_flags & FIF_OTHER_BSS) {
+ mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("Enable receive other BSS's frame.\n"));
+ } else {
+ mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("Disable receive other BSS's frame.\n"));
+ }
+ }
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MGT_FILTER,
+ (u8 *) (&mac->rx_mgt_filter));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CTRL_FILTER,
+ (u8 *) (&mac->rx_ctrl_filter));
+}
+
+static int _rtl_get_hal_qnum(u16 queue)
+{
+ int qnum;
+
+ switch (queue) {
+ case 0:
+ qnum = AC3_VO;
+ break;
+ case 1:
+ qnum = AC2_VI;
+ break;
+ case 2:
+ qnum = AC0_BE;
+ break;
+ case 3:
+ qnum = AC1_BK;
+ break;
+ default:
+ qnum = AC0_BE;
+ break;
+ }
+ return qnum;
+}
+
+/*
+ *for mac80211 VO=0, VI=1, BE=2, BK=3
+ *for rtl819x BE=0, BK=1, VI=2, VO=3
+ */
+static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *param)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ int aci;
+
+ if (queue >= AC_MAX) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("queue number %d is incorrect!\n", queue));
+ return -EINVAL;
+ }
+
+ aci = _rtl_get_hal_qnum(queue);
+ mac->ac[aci].aifs = param->aifs;
+ mac->ac[aci].cw_min = cpu_to_le16(param->cw_min);
+ mac->ac[aci].cw_max = cpu_to_le16(param->cw_max);
+ mac->ac[aci].tx_op = cpu_to_le16(param->txop);
+ memcpy(&mac->edca_param[aci], param, sizeof(*param));
+ rtlpriv->cfg->ops->set_qos(hw, aci);
+ return 0;
+}
+
+static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf, u32 changed)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+
+ if ((vif->type == NL80211_IFTYPE_ADHOC) ||
+ (vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+
+ if ((changed & BSS_CHANGED_BEACON) ||
+ (changed & BSS_CHANGED_BEACON_ENABLED &&
+ bss_conf->enable_beacon)) {
+
+ if (mac->beacon_enabled == 0) {
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ ("BSS_CHANGED_BEACON_ENABLED\n"));
+
+ /*start hw beacon interrupt. */
+ /*rtlpriv->cfg->ops->set_bcn_reg(hw); */
+ mac->beacon_enabled = 1;
+ rtlpriv->cfg->ops->update_interrupt_mask(hw,
+ rtlpriv->cfg->maps
+ [RTL_IBSS_INT_MASKS],
+ 0);
+ }
+ } else {
+ if (mac->beacon_enabled == 1) {
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ ("ADHOC DISABLE BEACON\n"));
+
+ mac->beacon_enabled = 0;
+ rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
+ rtlpriv->cfg->maps
+ [RTL_IBSS_INT_MASKS]);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ RT_TRACE(rtlpriv, COMP_BEACON, DBG_TRACE,
+ ("BSS_CHANGED_BEACON_INT\n"));
+ mac->beacon_interval = bss_conf->beacon_int;
+ rtlpriv->cfg->ops->set_bcn_intv(hw);
+ }
+ }
+
+ /*TODO: reference to enum ieee80211_bss_change */
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (bss_conf->assoc) {
+ mac->link_state = MAC80211_LINKED;
+ mac->cnt_after_linked = 0;
+ mac->assoc_id = bss_conf->aid;
+ memcpy(mac->bssid, bss_conf->bssid, 6);
+
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ ("BSS_CHANGED_ASSOC\n"));
+ } else {
+ if (mac->link_state == MAC80211_LINKED)
+ rtl_lps_leave(hw);
+
+ mac->link_state = MAC80211_NOLINK;
+ memset(mac->bssid, 0, 6);
+
+ /* reset sec info */
+ rtl_cam_reset_sec_info(hw);
+
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ ("BSS_CHANGED_UN_ASSOC\n"));
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ ("BSS_CHANGED_ERP_CTS_PROT\n"));
+ mac->use_cts_protect = bss_conf->use_cts_prot;
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n",
+ bss_conf->use_short_preamble));
+
+ mac->short_preamble = bss_conf->use_short_preamble;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE,
+ (u8 *) (&mac->short_preamble));
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ ("BSS_CHANGED_ERP_SLOT\n"));
+
+ if (bss_conf->use_short_slot)
+ mac->slot_time = RTL_SLOT_TIME_9;
+ else
+ mac->slot_time = RTL_SLOT_TIME_20;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+ (u8 *) (&mac->slot_time));
+ }
+
+ if (changed & BSS_CHANGED_HT) {
+ struct ieee80211_sta *sta = NULL;
+
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ ("BSS_CHANGED_HT\n"));
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(mac->vif, mac->bssid);
+
+ if (sta) {
+ if (sta->ht_cap.ampdu_density >
+ mac->current_ampdu_density)
+ mac->current_ampdu_density =
+ sta->ht_cap.ampdu_density;
+ if (sta->ht_cap.ampdu_factor <
+ mac->current_ampdu_factor)
+ mac->current_ampdu_factor =
+ sta->ht_cap.ampdu_factor;
+ }
+ rcu_read_unlock();
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
+ (u8 *) (&mac->max_mss_density));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR,
+ &mac->current_ampdu_factor);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE,
+ &mac->current_ampdu_density);
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ struct ieee80211_sta *sta = NULL;
+ u32 basic_rates;
+ u8 i;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID,
+ (u8 *) bss_conf->bssid);
+
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ (MAC_FMT "\n", MAC_ARG(bss_conf->bssid)));
+
+ memcpy(mac->bssid, bss_conf->bssid, 6);
+ if (is_valid_ether_addr(bss_conf->bssid)) {
+ switch (vif->type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ break;
+ case NL80211_IFTYPE_STATION:
+ break;
+ case NL80211_IFTYPE_AP:
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ rtlpriv->cfg->ops->set_network_type(hw, vif->type);
+ } else
+ rtlpriv->cfg->ops->set_network_type(hw,
+ NL80211_IFTYPE_UNSPECIFIED);
+
+ memset(mac->mcs, 0, 16);
+ mac->ht_enable = false;
+ mac->sgi_40 = false;
+ mac->sgi_20 = false;
+
+ if (!bss_conf->use_short_slot)
+ mac->mode = WIRELESS_MODE_B;
+ else
+ mac->mode = WIRELESS_MODE_G;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(mac->vif, mac->bssid);
+
+ if (sta) {
+ if (sta->ht_cap.ht_supported) {
+ mac->mode = WIRELESS_MODE_N_24G;
+ mac->ht_enable = true;
+ }
+
+ if (mac->ht_enable) {
+ u16 ht_cap = sta->ht_cap.cap;
+ memcpy(mac->mcs, (u8 *) (&sta->ht_cap.mcs), 16);
+
+ for (i = 0; i < 16; i++)
+ RT_TRACE(rtlpriv, COMP_MAC80211,
+ DBG_LOUD, ("%x ",
+ mac->mcs[i]));
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ ("\n"));
+
+ if (ht_cap & IEEE80211_HT_CAP_SGI_40)
+ mac->sgi_40 = true;
+
+ if (ht_cap & IEEE80211_HT_CAP_SGI_20)
+ mac->sgi_20 = true;
+
+ /*
+ * for cisco 1252 bw20 it's wrong
+ * if (ht_cap &
+ * IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+ * mac->bw_40 = true;
+ * }
+ */
+ }
+ }
+ rcu_read_unlock();
+
+ /*mac80211 only gives us CCK rates by default,
+ *so we add the G rates to the basic rates when
+ *not in B mode */
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ if (mac->mode == WIRELESS_MODE_B)
+ basic_rates = bss_conf->basic_rates | 0x00f;
+ else
+ basic_rates = bss_conf->basic_rates | 0xff0;
+
+ if (!vif)
+ goto out;
+
+ mac->basic_rates = basic_rates;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *) (&basic_rates));
+
+ if (rtlpriv->dm.useramask)
+ rtlpriv->cfg->ops->update_rate_mask(hw, 0);
+ else
+ rtlpriv->cfg->ops->update_rate_table(hw);
+
+ }
+ }
+
+ /*
+ * For FW LPS:
+ * To tell firmware we have connected
+ * to an AP. For 92SE/CE power save v2.
+ */
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (bss_conf->assoc) {
+ if (ppsc->fwctrl_lps) {
+ u8 mstatus = RT_MEDIA_CONNECT;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ (u8 *) (&mstatus));
+ ppsc->report_linked = true;
+ }
+ } else {
+ if (ppsc->fwctrl_lps) {
+ u8 mstatus = RT_MEDIA_DISCONNECT;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ (u8 *)(&mstatus));
+ ppsc->report_linked = false;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+
+static u64 rtl_op_get_tsf(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u64 tsf;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&tsf));
+ return tsf;
+}
+
+static void rtl_op_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;;
+
+ mac->tsf = tsf;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss));
+}
+
+static void rtl_op_reset_tsf(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp = 0;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *) (&tmp));
+}
+
+static void rtl_op_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ break;
+ case STA_NOTIFY_AWAKE:
+ break;
+ default:
+ break;
+ }
+}
+
+static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (action) {
+ case IEEE80211_AMPDU_TX_START:
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_TX_START: TID:%d\n", tid));
+ return rtl_tx_agg_start(hw, sta->addr, tid, ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid));
+ return rtl_tx_agg_stop(hw, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid));
+ break;
+ case IEEE80211_AMPDU_RX_START:
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_RX_START:TID:%d\n", tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ ("IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("IEEE80211_AMPDU_ERR!!!!:\n"));
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void rtl_op_sw_scan_start(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ mac->act_scanning = true;
+
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("\n"));
+
+ if (mac->link_state == MAC80211_LINKED) {
+ rtl_lps_leave(hw);
+ mac->link_state = MAC80211_LINKED_SCANNING;
+ } else
+ rtl_ips_nic_on(hw);
+
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_SITE_SURVEY);
+ rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP);
+}
+
+static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("\n"));
+
+ rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE);
+ mac->act_scanning = false;
+ if (mac->link_state == MAC80211_LINKED_SCANNING) {
+ mac->link_state = MAC80211_LINKED;
+
+ /* fix fwlps issue */
+ rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
+
+ if (rtlpriv->dm.useramask)
+ rtlpriv->cfg->ops->update_rate_mask(hw, 0);
+ else
+ rtlpriv->cfg->ops->update_rate_table(hw);
+
+ }
+
+}
+
+static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 key_type = NO_ENCRYPTION;
+ u8 key_idx;
+ bool group_key = false;
+ bool wep_only = false;
+ int err = 0;
+ u8 mac_addr[ETH_ALEN];
+ u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ u8 zero_addr[ETH_ALEN] = { 0 };
+
+ if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("not open hw encryption\n"));
+ return -ENOSPC; /*User disabled HW-crypto */
+ }
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("%s hardware based encryption for keyidx: %d, mac: %pM\n",
+ cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
+ sta ? sta->addr : bcast_addr));
+ rtlpriv->sec.being_setkey = true;
+ rtl_ips_nic_on(hw);
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+ /* <1> get encryption alg */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key_type = WEP40_ENCRYPTION;
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:WEP40\n"));
+ rtlpriv->sec.use_defaultkey = true;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("alg:WEP104\n"));
+ key_type = WEP104_ENCRYPTION;
+ rtlpriv->sec.use_defaultkey = true;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_type = TKIP_ENCRYPTION;
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:TKIP\n"));
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ rtlpriv->sec.use_defaultkey = true;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_type = AESCCMP_ENCRYPTION;
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:CCMP\n"));
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ rtlpriv->sec.use_defaultkey = true;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("alg_err:%x!!!!:\n", key->cipher));
+ goto out_unlock;
+ }
+ /* <2> get key_idx */
+ key_idx = (u8) (key->keyidx);
+ if (key_idx > 3)
+ goto out_unlock;
+ /* <3> if pairwise key enable_hw_sec */
+ group_key = !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ if ((!group_key) || (mac->opmode == NL80211_IFTYPE_ADHOC) ||
+ rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
+ if (rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION &&
+ (key_type == WEP40_ENCRYPTION ||
+ key_type == WEP104_ENCRYPTION))
+ wep_only = true;
+ rtlpriv->sec.pairwise_enc_algorithm = key_type;
+ rtlpriv->cfg->ops->enable_hw_sec(hw);
+ }
+ /* <4> set key based on cmd */
+ switch (cmd) {
+ case SET_KEY:
+ if (wep_only) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("set WEP(group/pairwise) key\n"));
+ /* Pairwise key with an assigned MAC address. */
+ rtlpriv->sec.pairwise_enc_algorithm = key_type;
+ rtlpriv->sec.group_enc_algorithm = key_type;
+ /*set local buf about wep key. */
+ memcpy(rtlpriv->sec.key_buf[key_idx],
+ key->key, key->keylen);
+ rtlpriv->sec.key_len[key_idx] = key->keylen;
+ memcpy(mac_addr, zero_addr, ETH_ALEN);
+ } else if (group_key) { /* group key */
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("set group key\n"));
+ /* group key */
+ rtlpriv->sec.group_enc_algorithm = key_type;
+ /*set local buf about group key. */
+ memcpy(rtlpriv->sec.key_buf[key_idx],
+ key->key, key->keylen);
+ rtlpriv->sec.key_len[key_idx] = key->keylen;
+ memcpy(mac_addr, bcast_addr, ETH_ALEN);
+ } else { /* pairwise key */
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("set pairwise key\n"));
+ if (!sta) {
+ RT_ASSERT(false, ("pairwise key withnot"
+ "mac_addr\n"));
+ err = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+ /* Pairwise key with an assigned MAC address. */
+ rtlpriv->sec.pairwise_enc_algorithm = key_type;
+ /*set local buf about pairwise key. */
+ memcpy(rtlpriv->sec.key_buf[PAIRWISE_KEYIDX],
+ key->key, key->keylen);
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX] = key->keylen;
+ rtlpriv->sec.pairwise_key =
+ rtlpriv->sec.key_buf[PAIRWISE_KEYIDX];
+ memcpy(mac_addr, sta->addr, ETH_ALEN);
+ }
+ rtlpriv->cfg->ops->set_key(hw, key_idx, mac_addr,
+ group_key, key_type, wep_only,
+ false);
+ /* <5> tell mac80211 do something: */
+ /*must use sw-generated IVs, or it will not work. */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ key->hw_key_idx = key_idx;
+ if (key_type == TKIP_ENCRYPTION)
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ break;
+ case DISABLE_KEY:
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("disable key delete one entry\n"));
+ /*set local buf about wep key. */
+ memset(rtlpriv->sec.key_buf[key_idx], 0, key->keylen);
+ rtlpriv->sec.key_len[key_idx] = 0;
+ memcpy(mac_addr, zero_addr, ETH_ALEN);
+ /*
+ *mac80211 will delete entries one by one,
+ *so don't use rtl_cam_reset_all_entry
+ *or clear all entries here.
+ */
+ rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("cmd_err:%x!!!!:\n", cmd));
+ }
+out_unlock:
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+ rtlpriv->sec.being_setkey = false;
+ return err;
+}
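To make the key-setting flow above concrete: for a SET_KEY command installing a WEP40 key at keyidx 0 on a station interface, key_type becomes WEP40_ENCRYPTION and use_defaultkey is set; assuming no pairwise algorithm was configured yet, wep_only is true, so the key bytes are copied into sec.key_buf[0], mac_addr stays all-zero, and cfg->ops->set_key() programs the CAM. mac80211 is then told via the key flags to generate IVs (and, for TKIP, the MMIC) in software.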
+
+static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ bool radio_state;
+ bool blocked;
+ u8 valid = 0;
+
+ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
+ return;
+
+ mutex_lock(&rtlpriv->locks.conf_mutex);
+
+ /*if Radio On return true here */
+ radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
+
+ if (valid) {
+ if (unlikely(radio_state != rtlpriv->rfkill.rfkill_state)) {
+ rtlpriv->rfkill.rfkill_state = radio_state;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ (KERN_INFO "wireless radio switch turned %s\n",
+ radio_state ? "on" : "off"));
+
+ blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+ }
+ }
+
+ mutex_unlock(&rtlpriv->locks.conf_mutex);
+}
+
+const struct ieee80211_ops rtl_ops = {
+ .start = rtl_op_start,
+ .stop = rtl_op_stop,
+ .tx = rtl_op_tx,
+ .add_interface = rtl_op_add_interface,
+ .remove_interface = rtl_op_remove_interface,
+ .config = rtl_op_config,
+ .configure_filter = rtl_op_configure_filter,
+ .set_key = rtl_op_set_key,
+ .conf_tx = rtl_op_conf_tx,
+ .bss_info_changed = rtl_op_bss_info_changed,
+ .get_tsf = rtl_op_get_tsf,
+ .set_tsf = rtl_op_set_tsf,
+ .reset_tsf = rtl_op_reset_tsf,
+ .sta_notify = rtl_op_sta_notify,
+ .ampdu_action = rtl_op_ampdu_action,
+ .sw_scan_start = rtl_op_sw_scan_start,
+ .sw_scan_complete = rtl_op_sw_scan_complete,
+ .rfkill_poll = rtl_op_rfkill_poll,
+};
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
new file mode 100644
index 000000000000..0ef31c3c6196
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -0,0 +1,42 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *****************************************************************************/
+
+#ifndef __RTL_CORE_H__
+#define __RTL_CORE_H__
+
+#define RTL_SUPPORTED_FILTERS \
+ (FIF_PROMISC_IN_BSS | \
+ FIF_ALLMULTI | FIF_CONTROL | \
+ FIF_OTHER_BSS | \
+ FIF_FCSFAIL | \
+ FIF_BCN_PRBRESP_PROMISC)
+
+#define RTL_SUPPORTED_CTRL_FILTER 0xFF
+
+extern const struct ieee80211_ops rtl_ops;
+#endif
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/rtlwifi/debug.c
new file mode 100644
index 000000000000..5fa73852cb66
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/debug.c
@@ -0,0 +1,50 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *****************************************************************************/
+
+#include "wifi.h"
+
+void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 i;
+
+ rtlpriv->dbg.global_debuglevel = DBG_EMERG;
+
+ rtlpriv->dbg.global_debugcomponents =
+ COMP_ERR | COMP_FW | COMP_INIT | COMP_RECV | COMP_SEND |
+ COMP_MLME | COMP_SCAN | COMP_INTR | COMP_LED | COMP_SEC |
+ COMP_BEACON | COMP_RATE | COMP_RXDESC | COMP_DIG | COMP_TXAGC |
+ COMP_POWER | COMP_POWER_TRACKING | COMP_BB_POWERSAVING | COMP_SWAS |
+ COMP_RF | COMP_TURBO | COMP_RATR | COMP_CMD |
+ COMP_EFUSE | COMP_QOS | COMP_MAC80211 | COMP_REGD | COMP_CHAN;
+
+ for (i = 0; i < DBGP_TYPE_MAX; i++)
+ rtlpriv->dbg.dbgp_type[i] = 0;
+
+ /*Init Debug flag enable condition */
+}
diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/rtlwifi/debug.h
new file mode 100644
index 000000000000..e4aa8687408c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/debug.h
@@ -0,0 +1,213 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *****************************************************************************/
+
+#ifndef __RTL_DEBUG_H__
+#define __RTL_DEBUG_H__
+
+/*--------------------------------------------------------------
+ Debug level
+--------------------------------------------------------------*/
+/*
+ *Fatal bug.
+ *For example, Tx/Rx/IO locked up,
+ *memory access violation,
+ *resource allocation failed,
+ *unexpected HW behavior, HW BUG
+ *and so on.
+ */
+#define DBG_EMERG 0
+
+/*
+ *Abnormal, rare, or unexpected cases.
+ *For example, Packet/IO Ctl canceled,
+ *device surprise removal and so on.
+ */
+#define DBG_WARNING 2
+
+/*
+ *Normal cases a driver developer should
+ *enable; shows link status such as
+ *assoc/AddBA/DHCP/adapter start and
+ *other basic and useful information.
+ */
+#define DBG_DMESG 3
+
+/*
+ *Normal case with useful information
+ *about current SW or HW state.
+ *For example, Tx/Rx descriptor to fill,
+ *Tx/Rx descriptor completed status,
+ *SW protocol state change, dynamic
+ *mechanism state change and so on.
+ */
+#define DBG_LOUD 4
+
+/*
+ *Normal case with detail execution
+ *flow or information.
+ */
+#define DBG_TRACE 5
+
+/*--------------------------------------------------------------
+ Define the rt_trace components
+--------------------------------------------------------------*/
+#define COMP_ERR BIT(0)
+#define COMP_FW BIT(1)
+#define COMP_INIT BIT(2) /*For init/deinit */
+#define COMP_RECV BIT(3) /*For Rx. */
+#define COMP_SEND BIT(4) /*For Tx. */
+#define COMP_MLME BIT(5) /*For MLME. */
+#define COMP_SCAN BIT(6) /*For Scan. */
+#define COMP_INTR BIT(7) /*For interrupt Related. */
+#define COMP_LED BIT(8) /*For LED. */
+#define COMP_SEC BIT(9) /*For sec. */
+#define COMP_BEACON BIT(10) /*For beacon. */
+#define COMP_RATE BIT(11) /*For rate. */
+#define COMP_RXDESC BIT(12) /*For rx desc. */
+#define COMP_DIG BIT(13) /*For DIG */
+#define COMP_TXAGC BIT(14) /*For Tx power */
+#define COMP_HIPWR BIT(15) /*For High Power Mechanism */
+#define COMP_POWER BIT(16) /*For lps/ips/aspm. */
+#define COMP_POWER_TRACKING BIT(17) /*For TX POWER TRACKING */
+#define COMP_BB_POWERSAVING BIT(18)
+#define COMP_SWAS BIT(19) /*For SW Antenna Switch */
+#define COMP_RF BIT(20) /*For RF. */
+#define COMP_TURBO BIT(21) /*For EDCA TURBO. */
+#define COMP_RATR BIT(22)
+#define COMP_CMD BIT(23)
+#define COMP_EFUSE BIT(24)
+#define COMP_QOS BIT(25)
+#define COMP_MAC80211 BIT(26)
+#define COMP_REGD BIT(27)
+#define COMP_CHAN BIT(28)
+#define COMP_USB BIT(29)
+
+/*--------------------------------------------------------------
+ Define the rt_print components
+--------------------------------------------------------------*/
+/* Define EEPROM and EFUSE check module bit*/
+#define EEPROM_W BIT(0)
+#define EFUSE_PG BIT(1)
+#define EFUSE_READ_ALL BIT(2)
+
+/* Define init check for module bit*/
+#define INIT_EEPROM BIT(0)
+#define INIT_TxPower BIT(1)
+#define INIT_IQK BIT(2)
+#define INIT_RF BIT(3)
+
+/* Define PHY-BB/RF/MAC check module bit */
+#define PHY_BBR BIT(0)
+#define PHY_BBW BIT(1)
+#define PHY_RFR BIT(2)
+#define PHY_RFW BIT(3)
+#define PHY_MACR BIT(4)
+#define PHY_MACW BIT(5)
+#define PHY_ALLR BIT(6)
+#define PHY_ALLW BIT(7)
+#define PHY_TXPWR BIT(8)
+#define PHY_PWRDIFF BIT(9)
+
+enum dbgp_flag_e {
+ FQOS = 0,
+ FTX = 1,
+ FRX = 2,
+ FSEC = 3,
+ FMGNT = 4,
+ FMLME = 5,
+ FRESOURCE = 6,
+ FBEACON = 7,
+ FISR = 8,
+ FPHY = 9,
+ FMP = 10,
+ FEEPROM = 11,
+ FPWR = 12,
+ FDM = 13,
+ FDBGCtrl = 14,
+ FC2H = 15,
+ FBT = 16,
+ FINIT = 17,
+ FIOCTL = 18,
+ DBGP_TYPE_MAX
+};
+
+#define RT_ASSERT(_exp, fmt) \
+ do { \
+ if (!(_exp)) { \
+ printk(KERN_DEBUG "%s:%s(): ", KBUILD_MODNAME, \
+ __func__); \
+ printk fmt; \
+ } \
+ } while (0);
+
+#define RT_TRACE(rtlpriv, comp, level, fmt)\
+ do { \
+ if (unlikely(((comp) & rtlpriv->dbg.global_debugcomponents) && \
+ ((level) <= rtlpriv->dbg.global_debuglevel))) {\
+ printk(KERN_DEBUG "%s:%s():<%lx-%x> ", KBUILD_MODNAME, \
+ __func__, in_interrupt(), in_atomic()); \
+ printk fmt; \
+ } \
+ } while (0);
+
+#define RTPRINT(rtlpriv, dbgtype, dbgflag, printstr) \
+ do { \
+ if (unlikely(rtlpriv->dbg.dbgp_type[dbgtype] & dbgflag)) { \
+ printk(KERN_DEBUG "%s: ", KBUILD_MODNAME); \
+ printk printstr; \
+ } \
+ } while (0);
+
+#define RT_PRINT_DATA(rtlpriv, _comp, _level, _titlestring, _hexdata, \
+ _hexdatalen) \
+ do {\
+ if (unlikely(((_comp) & rtlpriv->dbg.global_debugcomponents) &&\
+ (_level <= rtlpriv->dbg.global_debuglevel))) { \
+ int __i; \
+ u8* ptr = (u8 *)_hexdata; \
+ printk(KERN_DEBUG "%s: ", KBUILD_MODNAME); \
+ printk("In process \"%s\" (pid %i):", current->comm,\
+ current->pid); \
+ printk(_titlestring); \
+ for (__i = 0; __i < (int)_hexdatalen; __i++) { \
+ printk("%02X%s", ptr[__i], (((__i + 1) % 4)\
+ == 0) ? " " : " ");\
+ if (((__i + 1) % 16) == 0) \
+ printk("\n"); \
+ } \
+ printk(KERN_DEBUG "\n"); \
+ } \
+ } while (0);
+
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_ARG(x) \
+ ((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2],\
+ ((u8 *)(x))[3], ((u8 *)(x))[4], ((u8 *)(x))[5]
+
+void rtl_dbgp_flag_init(struct ieee80211_hw *hw);
+#endif
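The trace macros above take the whole printk argument list as one parenthesized parameter. A minimal usage sketch, assuming the usual rtlpriv/rtlphy locals are in scope (the messages themselves are only illustrative; the RTPRINT line mirrors a call that appears in efuse.c below):

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 ("adapter started on channel %d\n", rtlphy->current_channel));
	RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL, ("Addr=%d\n", efuse_addr));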
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
new file mode 100644
index 000000000000..4f92cba6810a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -0,0 +1,1171 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "efuse.h"
+
+static const u8 MAX_PGPKT_SIZE = 9;
+static const u8 PGPKT_DATA_SIZE = 8;
+static const int EFUSE_MAX_SIZE = 512;
+
+static const u8 EFUSE_OOB_PROTECT_BYTES = 15;
+
+static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = {
+ {0, 0, 0, 2},
+ {0, 1, 0, 2},
+ {0, 2, 0, 2},
+ {1, 0, 0, 1},
+ {1, 0, 1, 1},
+ {1, 1, 0, 1},
+ {1, 1, 1, 3},
+ {1, 3, 0, 17},
+ {3, 3, 1, 48},
+ {10, 0, 0, 6},
+ {10, 3, 0, 1},
+ {10, 3, 1, 1},
+ {11, 0, 0, 28}
+};
+
+static void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset,
+ u8 *pbuf);
+static void efuse_shadow_read_1byte(struct ieee80211_hw *hw, u16 offset,
+ u8 *value);
+static void efuse_shadow_read_2byte(struct ieee80211_hw *hw, u16 offset,
+ u16 *value);
+static void efuse_shadow_read_4byte(struct ieee80211_hw *hw, u16 offset,
+ u32 *value);
+static void efuse_shadow_write_1byte(struct ieee80211_hw *hw, u16 offset,
+ u8 value);
+static void efuse_shadow_write_2byte(struct ieee80211_hw *hw, u16 offset,
+ u16 value);
+static void efuse_shadow_write_4byte(struct ieee80211_hw *hw, u16 offset,
+ u32 value);
+static int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr,
+ u8 *data);
+static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr,
+ u8 data);
+static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse);
+static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset,
+ u8 *data);
+static int efuse_pg_packet_write(struct ieee80211_hw *hw, u8 offset,
+ u8 word_en, u8 *data);
+static void efuse_word_enable_data_read(u8 word_en, u8 *sourdata,
+ u8 *targetdata);
+static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
+ u16 efuse_addr, u8 word_en, u8 *data);
+static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite,
+ u8 pwrstate);
+static u16 efuse_get_current_size(struct ieee80211_hw *hw);
+static u8 efuse_calculate_word_cnts(u8 word_en);
+
+void efuse_initialize(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bytetemp;
+ u8 temp;
+
+ bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1);
+ temp = bytetemp | 0x20;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1, temp);
+
+ bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1);
+ temp = bytetemp & 0xFE;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1, temp);
+
+ bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
+ temp = bytetemp | 0x80;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, temp);
+
+ rtl_write_byte(rtlpriv, 0x2F8, 0x3);
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);
+
+}
+
+u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 data;
+ u8 bytetemp;
+ u8 temp;
+ u32 k = 0;
+
+ if (address < EFUSE_REAL_CONTENT_LEN) {
+ temp = address & 0xFF;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+ temp);
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
+ temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+ temp);
+
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+ temp = bytetemp & 0x7F;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
+ temp);
+
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+ while (!(bytetemp & 0x80)) {
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->
+ maps[EFUSE_CTRL] + 3);
+ k++;
+ if (k == 1000) {
+ k = 0;
+ break;
+ }
+ }
+ data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+ return data;
+ } else
+ return 0xFF;
+
+}
+EXPORT_SYMBOL(efuse_read_1byte);
+
+void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bytetemp;
+ u8 temp;
+ u32 k = 0;
+
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ ("Addr=%x Data =%x\n", address, value));
+
+ if (address < EFUSE_REAL_CONTENT_LEN) {
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value);
+
+ temp = address & 0xFF;
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+ temp);
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
+
+ temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 2, temp);
+
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+ temp = bytetemp | 0x80;
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3, temp);
+
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+
+ while (bytetemp & 0x80) {
+ bytetemp = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->
+ maps[EFUSE_CTRL] + 3);
+ k++;
+ if (k == 100) {
+ k = 0;
+ break;
+ }
+ }
+ }
+
+}
+
+static void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 value32;
+ u8 readbyte;
+ u16 retry;
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+ (_offset & 0xff));
+ readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+ ((_offset >> 8) & 0x03) | (readbyte & 0xfc));
+
+ readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
+ (readbyte & 0x7f));
+
+ retry = 0;
+ value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+ while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) {
+ value32 = rtl_read_dword(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL]);
+ retry++;
+ }
+
+ udelay(50);
+ value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+
+ *pbuf = (u8) (value32 & 0xff);
+}
+
+void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 efuse_tbl[EFUSE_MAP_LEN];
+ u8 rtemp8[1];
+ u16 efuse_addr = 0;
+ u8 offset, wren;
+ u16 i;
+ u16 j;
+ u16 efuse_word[EFUSE_MAX_SECTION][EFUSE_MAX_WORD_UNIT];
+ u16 efuse_utilized = 0;
+ u8 efuse_usage;
+
+ if ((_offset + _size_byte) > EFUSE_MAP_LEN) {
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ ("read_efuse(): Invalid offset(%#x) with read "
+ "bytes(%#x)!!\n", _offset, _size_byte));
+ return;
+ }
+
+ for (i = 0; i < EFUSE_MAX_SECTION; i++)
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
+ efuse_word[i][j] = 0xFFFF;
+
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+ if (*rtemp8 != 0xFF) {
+ efuse_utilized++;
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
+ ("Addr=%d\n", efuse_addr));
+ efuse_addr++;
+ }
+
+ while ((*rtemp8 != 0xFF) && (efuse_addr < EFUSE_REAL_CONTENT_LEN)) {
+ offset = ((*rtemp8 >> 4) & 0x0f);
+
+ if (offset < EFUSE_MAX_SECTION) {
+ wren = (*rtemp8 & 0x0f);
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
+ ("offset-%d Worden=%x\n", offset, wren));
+
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ if (!(wren & 0x01)) {
+ RTPRINT(rtlpriv, FEEPROM,
+ EFUSE_READ_ALL, ("Addr=%d\n",
+ efuse_addr));
+
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+ efuse_addr++;
+ efuse_utilized++;
+ efuse_word[offset][i] = (*rtemp8 & 0xff);
+
+ if (efuse_addr >= EFUSE_REAL_CONTENT_LEN)
+ break;
+
+ RTPRINT(rtlpriv, FEEPROM,
+ EFUSE_READ_ALL, ("Addr=%d\n",
+ efuse_addr));
+
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+ efuse_addr++;
+ efuse_utilized++;
+ efuse_word[offset][i] |=
+ (((u16)*rtemp8 << 8) & 0xff00);
+
+ if (efuse_addr >= EFUSE_REAL_CONTENT_LEN)
+ break;
+ }
+
+ wren >>= 1;
+ }
+ }
+
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
+ ("Addr=%d\n", efuse_addr));
+ read_efuse_byte(hw, efuse_addr, rtemp8);
+ if (*rtemp8 != 0xFF && (efuse_addr < 512)) {
+ efuse_utilized++;
+ efuse_addr++;
+ }
+ }
+
+ for (i = 0; i < EFUSE_MAX_SECTION; i++) {
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
+ efuse_tbl[(i * 8) + (j * 2)] =
+ (efuse_word[i][j] & 0xff);
+ efuse_tbl[(i * 8) + ((j * 2) + 1)] =
+ ((efuse_word[i][j] >> 8) & 0xff);
+ }
+ }
+
+ for (i = 0; i < _size_byte; i++)
+ pbuf[i] = efuse_tbl[_offset + i];
+
+ rtlefuse->efuse_usedbytes = efuse_utilized;
+ efuse_usage = (u8)((efuse_utilized * 100) / EFUSE_REAL_CONTENT_LEN);
+ rtlefuse->efuse_usedpercentage = efuse_usage;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES,
+ (u8 *)&efuse_utilized);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE,
+ (u8 *)&efuse_usage);
+}
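For reference, each block of efuse data is preceded by a one-byte header that the loop above decodes: the high nibble is the section offset and the low nibble is the word-enable mask, where a cleared bit means that 16-bit word is present. For example, a header byte of 0x23 selects section 2 with word_en = 0x3: words 0 and 1 are skipped, four data bytes follow, and they fill efuse_word[2][2] and efuse_word[2][3].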
+
+bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 section_idx, i, Base;
+ u16 words_need = 0, hdr_num = 0, totalbytes, efuse_used;
+ bool bwordchanged, bresult = true;
+
+ for (section_idx = 0; section_idx < 16; section_idx++) {
+ Base = section_idx * 8;
+ bwordchanged = false;
+
+ for (i = 0; i < 8; i = i + 2) {
+ if ((rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i] !=
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i]) ||
+ (rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i + 1] !=
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i +
+ 1])) {
+ words_need++;
+ bwordchanged = true;
+ }
+ }
+
+ if (bwordchanged == true)
+ hdr_num++;
+ }
+
+ totalbytes = hdr_num + words_need * 2;
+ efuse_used = rtlefuse->efuse_usedbytes;
+
+ if ((totalbytes + efuse_used) >=
+ (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))
+ bresult = false;
+
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ ("efuse_shadow_update_chk(): totalbytes(%#x), "
+ "hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
+ totalbytes, hdr_num, words_need, efuse_used));
+
+ return bresult;
+}
+
+void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
+ u16 offset, u32 *value)
+{
+ if (type == 1)
+ efuse_shadow_read_1byte(hw, offset, (u8 *) value);
+ else if (type == 2)
+ efuse_shadow_read_2byte(hw, offset, (u16 *) value);
+ else if (type == 4)
+ efuse_shadow_read_4byte(hw, offset, (u32 *) value);
+
+}
+
+void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
+ u32 value)
+{
+ if (type == 1)
+ efuse_shadow_write_1byte(hw, offset, (u8) value);
+ else if (type == 2)
+ efuse_shadow_write_2byte(hw, offset, (u16) value);
+ else if (type == 4)
+ efuse_shadow_write_4byte(hw, offset, (u32) value);
+
+}
+
+bool efuse_shadow_update(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u16 i, offset, base;
+ u8 word_en = 0x0F;
+ u8 first_pg = false;
+
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("--->\n"));
+
+ if (!efuse_shadow_update_chk(hw)) {
+ efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
+ memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+ (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ ("<---efuse out of capacity!!\n"));
+ return false;
+ }
+ efuse_power_switch(hw, true, true);
+
+ for (offset = 0; offset < 16; offset++) {
+
+ word_en = 0x0F;
+ base = offset * 8;
+
+ for (i = 0; i < 8; i++) {
+ if (first_pg == true) {
+
+ word_en &= ~(BIT(i / 2));
+
+ rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
+ } else {
+
+ if (rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] !=
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i]) {
+ word_en &= ~(BIT(i / 2));
+
+ rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
+ }
+ }
+ }
+
+ if (word_en != 0x0F) {
+ u8 tmpdata[8];
+ memcpy((void *)tmpdata,
+ (void *)(&rtlefuse->
+ efuse_map[EFUSE_MODIFY_MAP][base]), 8);
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("U-efuse\n"), tmpdata, 8);
+
+ if (!efuse_pg_packet_write(hw, (u8) offset, word_en,
+ tmpdata)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("PG section(%#x) fail!!\n", offset));
+ break;
+ }
+ }
+
+ }
+
+ efuse_power_switch(hw, true, false);
+ efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
+
+ memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+ (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n"));
+ return true;
+}
+
+void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ if (rtlefuse->autoload_failflag == true) {
+ memset(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 0xFF, 128);
+ } else
+ efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
+
+ memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+ (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
+
+}
+EXPORT_SYMBOL(rtl_efuse_shadow_map_update);
+
+void efuse_force_write_vendor_Id(struct ieee80211_hw *hw)
+{
+ u8 tmpdata[8] = { 0xFF, 0xFF, 0xEC, 0x10, 0xFF, 0xFF, 0xFF, 0xFF };
+
+ efuse_power_switch(hw, true, true);
+
+ efuse_pg_packet_write(hw, 1, 0xD, tmpdata);
+
+ efuse_power_switch(hw, true, false);
+
+}
+
+void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx)
+{
+}
+
+static void efuse_shadow_read_1byte(struct ieee80211_hw *hw,
+ u16 offset, u8 *value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
+}
+
+static void efuse_shadow_read_2byte(struct ieee80211_hw *hw,
+ u16 offset, u16 *value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
+ *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8;
+
+}
+
+static void efuse_shadow_read_4byte(struct ieee80211_hw *hw,
+ u16 offset, u32 *value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
+ *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8;
+ *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] << 16;
+ *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] << 24;
+}
+
+static void efuse_shadow_write_1byte(struct ieee80211_hw *hw,
+ u16 offset, u8 value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value;
+}
+
+static void efuse_shadow_write_2byte(struct ieee80211_hw *hw,
+ u16 offset, u16 value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value & 0x00FF;
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] = value >> 8;
+
+}
+
+static void efuse_shadow_write_4byte(struct ieee80211_hw *hw,
+ u16 offset, u32 value)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] =
+ (u8) (value & 0x000000FF);
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] =
+ (u8) ((value >> 8) & 0x0000FF);
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] =
+ (u8) ((value >> 16) & 0x00FF);
+ rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] =
+ (u8) ((value >> 24) & 0xFF);
+
+}
+
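+ /*
+ * Direct efuse access used below: the 10-bit address is written to
+ * EFUSE_CTRL+1/+2, a read (0x72) or write (0xF2) strobe is written to
+ * EFUSE_CTRL+3, and bit 7 of EFUSE_CTRL+3 is polled (up to 100 times)
+ * before the data byte is read from, or considered committed to,
+ * EFUSE_CTRL.
+ */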
+static int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmpidx = 0;
+ int bresult;
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
+ (u8) (addr & 0xff));
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+ ((u8) ((addr >> 8) & 0x03)) |
+ (rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 2) &
+ 0xFC));
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);
+
+ while (!(0x80 & rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3))
+ && (tmpidx < 100)) {
+ tmpidx++;
+ }
+
+ if (tmpidx < 100) {
+ *data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
+ bresult = true;
+ } else {
+ *data = 0xff;
+ bresult = false;
+ }
+ return bresult;
+}
+
+static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmpidx = 0;
+ bool bresult;
+
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ ("Addr = %x Data=%x\n", addr, data));
+
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8) (addr & 0xff));
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
+ (rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] +
+ 2) & 0xFC) | (u8) ((addr >> 8) & 0x03));
+
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], data);
+ rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0xF2);
+
+ while ((0x80 & rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_CTRL] + 3))
+ && (tmpidx < 100)) {
+ tmpidx++;
+ }
+
+ if (tmpidx < 100)
+ bresult = true;
+ else
+ bresult = false;
+
+ return bresult;
+}
+
+static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse)
+{
+ efuse_power_switch(hw, false, true);
+ read_efuse(hw, 0, 128, efuse);
+ efuse_power_switch(hw, false, false);
+}
+
+static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
+ u8 efuse_data, u8 offset, u8 *tmpdata,
+ u8 *readstate)
+{
+ bool bdataempty = true;
+ u8 hoffset;
+ u8 tmpidx;
+ u8 hworden;
+ u8 word_cnts;
+
+ hoffset = (efuse_data >> 4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ word_cnts = efuse_calculate_word_cnts(hworden);
+
+ if (hoffset == offset) {
+ for (tmpidx = 0; tmpidx < word_cnts * 2; tmpidx++) {
+ if (efuse_one_byte_read(hw, *efuse_addr + 1 + tmpidx,
+ &efuse_data)) {
+ tmpdata[tmpidx] = efuse_data;
+ if (efuse_data != 0xff)
+ bdataempty = false;
+ }
+ }
+
+ if (!bdataempty)
+ *readstate = PG_STATE_DATA;
+ else {
+ *efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
+ *readstate = PG_STATE_HEADER;
+ }
+
+ } else {
+ *efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
+ *readstate = PG_STATE_HEADER;
+ }
+}
+
+static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
+{
+ u8 readstate = PG_STATE_HEADER;
+
+ bool bcontinual = true;
+
+ u8 efuse_data, word_cnts = 0;
+ u16 efuse_addr = 0;
+ u8 hworden;
+ u8 tmpdata[8];
+
+ if (data == NULL)
+ return false;
+ if (offset > 15)
+ return false;
+
+ memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
+ memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
+
+ while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) {
+ if (readstate & PG_STATE_HEADER) {
+ if (efuse_one_byte_read(hw, efuse_addr, &efuse_data)
+ && (efuse_data != 0xFF))
+ efuse_read_data_case1(hw, &efuse_addr,
+ efuse_data,
+ offset, tmpdata,
+ &readstate);
+ else
+ bcontinual = false;
+ } else if (readstate & PG_STATE_DATA) {
+ efuse_word_enable_data_read(hworden, tmpdata, data);
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
+ readstate = PG_STATE_HEADER;
+ }
+
+ }
+
+ if ((data[0] == 0xff) && (data[1] == 0xff) &&
+ (data[2] == 0xff) && (data[3] == 0xff) &&
+ (data[4] == 0xff) && (data[5] == 0xff) &&
+ (data[6] == 0xff) && (data[7] == 0xff))
+ return false;
+ else
+ return true;
+
+}
+
+static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
+ u8 efuse_data, u8 offset, int *bcontinual,
+ u8 *write_state, struct pgpkt_struct *target_pkt,
+ int *repeat_times, int *bresult, u8 word_en)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct pgpkt_struct tmp_pkt;
+ int bdataempty = true;
+ u8 originaldata[8 * sizeof(u8)];
+ u8 badworden = 0x0F;
+ u8 match_word_en, tmp_word_en;
+ u8 tmpindex;
+ u8 tmp_header = efuse_data;
+ u8 tmp_word_cnts;
+
+ tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
+ tmp_pkt.word_en = tmp_header & 0x0F;
+ tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
+
+ if (tmp_pkt.offset != target_pkt->offset) {
+ *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
+ *write_state = PG_STATE_HEADER;
+ } else {
+ for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
+ u16 address = *efuse_addr + 1 + tmpindex;
+ if (efuse_one_byte_read(hw, address,
+ &efuse_data) && (efuse_data != 0xFF))
+ bdataempty = false;
+ }
+
+ if (bdataempty == false) {
+ *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
+ *write_state = PG_STATE_HEADER;
+ } else {
+ match_word_en = 0x0F;
+ if (!((target_pkt->word_en & BIT(0)) |
+ (tmp_pkt.word_en & BIT(0))))
+ match_word_en &= (~BIT(0));
+
+ if (!((target_pkt->word_en & BIT(1)) |
+ (tmp_pkt.word_en & BIT(1))))
+ match_word_en &= (~BIT(1));
+
+ if (!((target_pkt->word_en & BIT(2)) |
+ (tmp_pkt.word_en & BIT(2))))
+ match_word_en &= (~BIT(2));
+
+ if (!((target_pkt->word_en & BIT(3)) |
+ (tmp_pkt.word_en & BIT(3))))
+ match_word_en &= (~BIT(3));
+
+ if ((match_word_en & 0x0F) != 0x0F) {
+ badworden = efuse_word_enable_data_write(
+ hw, *efuse_addr + 1,
+ tmp_pkt.word_en,
+ target_pkt->data);
+
+ if (0x0F != (badworden & 0x0F)) {
+ u8 reorg_offset = offset;
+ u8 reorg_worden = badworden;
+ efuse_pg_packet_write(hw, reorg_offset,
+ reorg_worden,
+ originaldata);
+ }
+
+ tmp_word_en = 0x0F;
+ if ((target_pkt->word_en & BIT(0)) ^
+ (match_word_en & BIT(0)))
+ tmp_word_en &= (~BIT(0));
+
+ if ((target_pkt->word_en & BIT(1)) ^
+ (match_word_en & BIT(1)))
+ tmp_word_en &= (~BIT(1));
+
+ if ((target_pkt->word_en & BIT(2)) ^
+ (match_word_en & BIT(2)))
+ tmp_word_en &= (~BIT(2));
+
+ if ((target_pkt->word_en & BIT(3)) ^
+ (match_word_en & BIT(3)))
+ tmp_word_en &= (~BIT(3));
+
+ if ((tmp_word_en & 0x0F) != 0x0F) {
+ *efuse_addr = efuse_get_current_size(hw);
+ target_pkt->offset = offset;
+ target_pkt->word_en = tmp_word_en;
+ } else
+ *bcontinual = false;
+ *write_state = PG_STATE_HEADER;
+ *repeat_times += 1;
+ if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+ *bcontinual = false;
+ *bresult = false;
+ }
+ } else {
+ *efuse_addr += (2 * tmp_word_cnts) + 1;
+ target_pkt->offset = offset;
+ target_pkt->word_en = word_en;
+ *write_state = PG_STATE_HEADER;
+ }
+ }
+ }
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse PG_STATE_HEADER-1\n"));
+}
+
+static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
+ int *bcontinual, u8 *write_state,
+ struct pgpkt_struct target_pkt,
+ int *repeat_times, int *bresult)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct pgpkt_struct tmp_pkt;
+ u8 pg_header;
+ u8 tmp_header;
+ u8 originaldata[8 * sizeof(u8)];
+ u8 tmp_word_cnts;
+ u8 badworden = 0x0F;
+
+ pg_header = ((target_pkt.offset << 4) & 0xf0) | target_pkt.word_en;
+ efuse_one_byte_write(hw, *efuse_addr, pg_header);
+ efuse_one_byte_read(hw, *efuse_addr, &tmp_header);
+
+ if (tmp_header == pg_header)
+ *write_state = PG_STATE_DATA;
+ else if (tmp_header == 0xFF) {
+ *write_state = PG_STATE_HEADER;
+ *repeat_times += 1;
+ if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+ *bcontinual = false;
+ *bresult = false;
+ }
+ } else {
+ tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
+ tmp_pkt.word_en = tmp_header & 0x0F;
+
+ tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
+
+ memset(originaldata, 0xff, 8 * sizeof(u8));
+
+ if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) {
+ badworden = efuse_word_enable_data_write(hw,
+ *efuse_addr + 1, tmp_pkt.word_en,
+ originaldata);
+
+ if (0x0F != (badworden & 0x0F)) {
+ u8 reorg_offset = tmp_pkt.offset;
+ u8 reorg_worden = badworden;
+ efuse_pg_packet_write(hw, reorg_offset,
+ reorg_worden,
+ originaldata);
+ *efuse_addr = efuse_get_current_size(hw);
+ } else
+ *efuse_addr = *efuse_addr + (tmp_word_cnts * 2)
+ + 1;
+ } else
+ *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
+
+ *write_state = PG_STATE_HEADER;
+ *repeat_times += 1;
+ if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+ *bcontinual = false;
+ *bresult = false;
+ }
+
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse PG_STATE_HEADER-2\n"));
+ }
+}
+
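+ /*
+ * A PG packet is a one-byte header (section offset in the high nibble,
+ * word_en mask in the low nibble) followed by two bytes for each word
+ * whose word_en bit is cleared. This routine walks the efuse from
+ * address 0, skipping or merging with packets that are already there,
+ * until the new data is programmed or EFUSE_REPEAT_THRESHOLD_ retries
+ * are exceeded.
+ */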
+static int efuse_pg_packet_write(struct ieee80211_hw *hw,
+ u8 offset, u8 word_en, u8 *data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct pgpkt_struct target_pkt;
+ u8 write_state = PG_STATE_HEADER;
+ int bcontinual = true, bdataempty = true, bresult = true;
+ u16 efuse_addr = 0;
+ u8 efuse_data;
+ u8 target_word_cnts = 0;
+ u8 badworden = 0x0F;
+ static int repeat_times;
+
+ if (efuse_get_current_size(hw) >=
+ (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES)) {
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse_pg_packet_write error\n"));
+ return false;
+ }
+
+ target_pkt.offset = offset;
+ target_pkt.word_en = word_en;
+
+ memset(target_pkt.data, 0xFF, 8 * sizeof(u8));
+
+ efuse_word_enable_data_read(word_en, data, target_pkt.data);
+ target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en);
+
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse Power ON\n"));
+
+ while (bcontinual && (efuse_addr <
+ (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))) {
+
+ if (write_state == PG_STATE_HEADER) {
+ bdataempty = true;
+ badworden = 0x0F;
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse PG_STATE_HEADER\n"));
+
+ if (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
+ (efuse_data != 0xFF))
+ efuse_write_data_case1(hw, &efuse_addr,
+ efuse_data, offset,
+ &bcontinual,
+ &write_state, &target_pkt,
+ &repeat_times, &bresult,
+ word_en);
+ else
+ efuse_write_data_case2(hw, &efuse_addr,
+ &bcontinual,
+ &write_state,
+ target_pkt,
+ &repeat_times,
+ &bresult);
+
+ } else if (write_state == PG_STATE_DATA) {
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse PG_STATE_DATA\n"));
+ badworden = 0x0f;
+ badworden =
+ efuse_word_enable_data_write(hw, efuse_addr + 1,
+ target_pkt.word_en,
+ target_pkt.data);
+
+ if ((badworden & 0x0F) == 0x0F) {
+ bcontinual = false;
+ } else {
+ efuse_addr =
+ efuse_addr + (2 * target_word_cnts) + 1;
+
+ target_pkt.offset = offset;
+ target_pkt.word_en = badworden;
+ target_word_cnts =
+ efuse_calculate_word_cnts(target_pkt.
+ word_en);
+ write_state = PG_STATE_HEADER;
+ repeat_times++;
+ if (repeat_times > EFUSE_REPEAT_THRESHOLD_) {
+ bcontinual = false;
+ bresult = false;
+ }
+ RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
+ ("efuse PG_STATE_HEADER-3\n"));
+ }
+ }
+ }
+
+ if (efuse_addr >= (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES)) {
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ ("efuse_addr(%#x) Out of size!!\n", efuse_addr));
+ }
+
+ return true;
+}
+
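+ /*
+ * word_en is active low per 2-byte word: a cleared bit means the word
+ * is present/enabled. For example, word_en = 0x06 (0b0110) enables
+ * words 0 and 3, i.e. bytes 0-1 and 6-7 of the section, and
+ * efuse_calculate_word_cnts() returns 2 for it.
+ */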
+static void efuse_word_enable_data_read(u8 word_en,
+ u8 *sourdata, u8 *targetdata)
+{
+ if (!(word_en & BIT(0))) {
+ targetdata[0] = sourdata[0];
+ targetdata[1] = sourdata[1];
+ }
+
+ if (!(word_en & BIT(1))) {
+ targetdata[2] = sourdata[2];
+ targetdata[3] = sourdata[3];
+ }
+
+ if (!(word_en & BIT(2))) {
+ targetdata[4] = sourdata[4];
+ targetdata[5] = sourdata[5];
+ }
+
+ if (!(word_en & BIT(3))) {
+ targetdata[6] = sourdata[6];
+ targetdata[7] = sourdata[7];
+ }
+}
+
+static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
+ u16 efuse_addr, u8 word_en, u8 *data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 tmpaddr;
+ u16 start_addr = efuse_addr;
+ u8 badworden = 0x0F;
+ u8 tmpdata[8];
+
+ memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
+ RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr));
+
+ if (!(word_en & BIT(0))) {
+ tmpaddr = start_addr;
+ efuse_one_byte_write(hw, start_addr++, data[0]);
+ efuse_one_byte_write(hw, start_addr++, data[1]);
+
+ efuse_one_byte_read(hw, tmpaddr, &tmpdata[0]);
+ efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[1]);
+ if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1]))
+ badworden &= (~BIT(0));
+ }
+
+ if (!(word_en & BIT(1))) {
+ tmpaddr = start_addr;
+ efuse_one_byte_write(hw, start_addr++, data[2]);
+ efuse_one_byte_write(hw, start_addr++, data[3]);
+
+ efuse_one_byte_read(hw, tmpaddr, &tmpdata[2]);
+ efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[3]);
+ if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3]))
+ badworden &= (~BIT(1));
+ }
+
+ if (!(word_en & BIT(2))) {
+ tmpaddr = start_addr;
+ efuse_one_byte_write(hw, start_addr++, data[4]);
+ efuse_one_byte_write(hw, start_addr++, data[5]);
+
+ efuse_one_byte_read(hw, tmpaddr, &tmpdata[4]);
+ efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[5]);
+ if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5]))
+ badworden &= (~BIT(2));
+ }
+
+ if (!(word_en & BIT(3))) {
+ tmpaddr = start_addr;
+ efuse_one_byte_write(hw, start_addr++, data[6]);
+ efuse_one_byte_write(hw, start_addr++, data[7]);
+
+ efuse_one_byte_read(hw, tmpaddr, &tmpdata[6]);
+ efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[7]);
+ if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7]))
+ badworden &= (~BIT(3));
+ }
+
+ return badworden;
+}
+
+static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, u8 pwrstate)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tempval;
+ u16 tmpV16;
+
+ if (pwrstate == true) {
+ tmpV16 = rtl_read_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_ISO_CTRL]);
+ if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) {
+ tmpV16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V];
+ rtl_write_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_ISO_CTRL],
+ tmpV16);
+ }
+
+ tmpV16 = rtl_read_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_FUNC_EN]);
+ if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_FEN_ELDR])) {
+ tmpV16 |= rtlpriv->cfg->maps[EFUSE_FEN_ELDR];
+ rtl_write_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16);
+ }
+
+ tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK]);
+ if ((!(tmpV16 & rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN])) ||
+ (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_ANA8M]))) {
+ tmpV16 |= (rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN] |
+ rtlpriv->cfg->maps[EFUSE_ANA8M]);
+ rtl_write_word(rtlpriv,
+ rtlpriv->cfg->maps[SYS_CLK], tmpV16);
+ }
+ }
+
+ if (pwrstate == true) {
+ if (bwrite == true) {
+ tempval = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_TEST] +
+ 3);
+ tempval &= 0x0F;
+ tempval |= (VOLTAGE_V25 << 4);
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_TEST] + 3,
+ (tempval | 0x80));
+ }
+
+ } else {
+ if (bwrite == true) {
+ tempval = rtl_read_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_TEST] +
+ 3);
+ rtl_write_byte(rtlpriv,
+ rtlpriv->cfg->maps[EFUSE_TEST] + 3,
+ (tempval & 0x7F));
+ }
+
+ }
+
+}
+
+static u16 efuse_get_current_size(struct ieee80211_hw *hw)
+{
+ int bcontinual = true;
+ u16 efuse_addr = 0;
+ u8 hoffset, hworden;
+ u8 efuse_data, word_cnts;
+
+ while (bcontinual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
+ && (efuse_addr < EFUSE_MAX_SIZE)) {
+ if (efuse_data != 0xFF) {
+ hoffset = (efuse_data >> 4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ word_cnts = efuse_calculate_word_cnts(hworden);
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
+ } else {
+ bcontinual = false;
+ }
+ }
+
+ return efuse_addr;
+}
+
+static u8 efuse_calculate_word_cnts(u8 word_en)
+{
+ u8 word_cnts = 0;
+ if (!(word_en & BIT(0)))
+ word_cnts++;
+ if (!(word_en & BIT(1)))
+ word_cnts++;
+ if (!(word_en & BIT(2)))
+ word_cnts++;
+ if (!(word_en & BIT(3)))
+ word_cnts++;
+ return word_cnts;
+}
+
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
new file mode 100644
index 000000000000..47774dd4c2a6
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -0,0 +1,121 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_EFUSE_H_
+#define __RTL_EFUSE_H_
+
+#define EFUSE_REAL_CONTENT_LEN 512
+#define EFUSE_MAP_LEN 128
+#define EFUSE_MAX_SECTION 16
+#define EFUSE_MAX_WORD_UNIT 4
+
+#define EFUSE_INIT_MAP 0
+#define EFUSE_MODIFY_MAP 1
+
+#define PG_STATE_HEADER 0x01
+#define PG_STATE_WORD_0 0x02
+#define PG_STATE_WORD_1 0x04
+#define PG_STATE_WORD_2 0x08
+#define PG_STATE_WORD_3 0x10
+#define PG_STATE_DATA 0x20
+
+#define PG_SWBYTE_H 0x01
+#define PG_SWBYTE_L 0x02
+
+#define _POWERON_DELAY_
+#define _PRE_EXECUTE_READ_CMD_
+
+#define EFUSE_REPEAT_THRESHOLD_ 3
+
+struct efuse_map {
+ u8 offset;
+ u8 word_start;
+ u8 byte_start;
+ u8 byte_cnts;
+};
+
+struct pgpkt_struct {
+ u8 offset;
+ u8 word_en;
+ u8 data[8];
+};
+
+enum efuse_data_item {
+ EFUSE_CHIP_ID = 0,
+ EFUSE_LDO_SETTING,
+ EFUSE_CLK_SETTING,
+ EFUSE_SDIO_SETTING,
+ EFUSE_CCCR,
+ EFUSE_SDIO_MODE,
+ EFUSE_OCR,
+ EFUSE_F0CIS,
+ EFUSE_F1CIS,
+ EFUSE_MAC_ADDR,
+ EFUSE_EEPROM_VER,
+ EFUSE_CHAN_PLAN,
+ EFUSE_TXPW_TAB
+};
+
+enum {
+ VOLTAGE_V25 = 0x03,
+ LDOE25_SHIFT = 28,
+};
+
+struct efuse_priv {
+ u8 id[2];
+ u8 ldo_setting[2];
+ u8 clk_setting[2];
+ u8 cccr;
+ u8 sdio_mode;
+ u8 ocr[3];
+ u8 cis0[17];
+ u8 cis1[48];
+ u8 mac_addr[6];
+ u8 eeprom_verno;
+ u8 channel_plan;
+ u8 tx_power_b[14];
+ u8 tx_power_g[14];
+};
+
+extern void efuse_initialize(struct ieee80211_hw *hw);
+extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
+extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
+extern void read_efuse(struct ieee80211_hw *hw, u16 _offset,
+ u16 _size_byte, u8 *pbuf);
+extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
+ u16 offset, u32 *value);
+extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
+ u16 offset, u32 value);
+extern bool efuse_shadow_update(struct ieee80211_hw *hw);
+extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
+extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
+extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
+extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+
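+/*
+ * Typical flow for the shadow-map API (an illustrative sketch only;
+ * the offset and value below are placeholders, not taken from a real
+ * caller):
+ *
+ *   rtl_efuse_shadow_map_update(hw);
+ *   efuse_shadow_write(hw, 1, offset, value);
+ *   efuse_shadow_update(hw);
+ *
+ * The type argument of efuse_shadow_read()/efuse_shadow_write()
+ * selects a 1-, 2- or 4-byte access to the EFUSE_MODIFY_MAP shadow.
+ */
+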
+#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
new file mode 100644
index 000000000000..9cd7703c2a30
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -0,0 +1,1880 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "core.h"
+#include "wifi.h"
+#include "pci.h"
+#include "base.h"
+#include "ps.h"
+
+static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
+ INTEL_VENDOR_ID,
+ ATI_VENDOR_ID,
+ AMD_VENDOR_ID,
+ SIS_VENDOR_ID
+};
+
+/* Update PCI dependent default settings */
+static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+
+ ppsc->reg_rfps_level = 0;
+ ppsc->support_aspm = 0;
+
+ /*Update PCI ASPM setting */
+ ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
+ switch (rtlpci->const_pci_aspm) {
+ case 0:
+ /*No ASPM */
+ break;
+
+ case 1:
+ /* ASPM dynamically enabled/disabled. */
+ ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
+ break;
+
+ case 2:
+ /* ASPM with Clock Req dynamically enabled/disabled. */
+ ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
+ RT_RF_OFF_LEVL_CLK_REQ);
+ break;
+
+ case 3:
+ /*
+ * Always enable ASPM and Clock Req
+ * from initialization to halt.
+ */
+ ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
+ ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
+ RT_RF_OFF_LEVL_CLK_REQ);
+ break;
+
+ case 4:
+ /*
+ * Always enable ASPM without Clock Req
+ * from initialization to halt.
+ */
+ ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
+ RT_RF_OFF_LEVL_CLK_REQ);
+ ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
+ break;
+ }
+
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
+
+ /*Update Radio OFF setting */
+ switch (rtlpci->const_hwsw_rfoff_d3) {
+ case 1:
+ if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
+ break;
+
+ case 2:
+ if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
+ break;
+
+ case 3:
+ ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
+ break;
+ }
+
+ /*Set HW definition to determine if it supports ASPM. */
+ switch (rtlpci->const_support_pciaspm) {
+ case 0:{
+ /*Not support ASPM. */
+ bool support_aspm = false;
+ ppsc->support_aspm = support_aspm;
+ break;
+ }
+ case 1:{
+ /*Support ASPM. */
+ bool support_aspm = true;
+ bool support_backdoor = true;
+ ppsc->support_aspm = support_aspm;
+
+ /*if(priv->oem_id == RT_CID_TOSHIBA &&
+ !priv->ndis_adapter.amd_l1_patch)
+ support_backdoor = false; */
+
+ ppsc->support_backdoor = support_backdoor;
+
+ break;
+ }
+ case 2:
+ /*ASPM value set by chipset. */
+ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
+ bool support_aspm = true;
+ ppsc->support_aspm = support_aspm;
+ }
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+}
+
+static bool _rtl_pci_platform_switch_device_pci_aspm(
+ struct ieee80211_hw *hw,
+ u8 value)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ bool bresult = false;
+
+ value |= 0x40;
+
+ pci_write_config_byte(rtlpci->pdev, 0x80, value);
+
+ return bresult;
+}
+
+/* Write 0x01 to enable the clock request; write 0x0 to disable it. */
+static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ bool bresult = false;
+
+ pci_write_config_byte(rtlpci->pdev, 0x81, value);
+ bresult = true;
+
+ return bresult;
+}
+
+/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
+static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+ u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+ u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+ /*Retrieve original configuration settings. */
+ u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
+ u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
+ pcibridge_linkctrlreg;
+ u16 aspmlevel = 0;
+
+ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+ ("PCI(Bridge) UNKNOWN.\n"));
+
+ return;
+ }
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
+ _rtl_pci_switch_clk_req(hw, 0x0);
+ }
+
+ if (1) {
+ /* ensure the device will be in L0 state after an I/O */
+ u8 tmp_u1b;
+ pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
+ }
+
+ /*Set corresponding value. */
+ aspmlevel |= BIT(0) | BIT(1);
+ linkctrl_reg &= ~aspmlevel;
+ pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
+
+ _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
+ udelay(50);
+
+ /*4 Disable Pci Bridge ASPM */
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + (num4bytes << 2));
+ rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg);
+
+ udelay(50);
+
+}
+
+/*
+ * Enable RTL8192SE ASPM & enable PCI bridge ASPM for
+ * power saving. We must follow the sequence: enable the
+ * RTL8192SE first, then enable the PCI bridge ASPM, or
+ * the system will show a bluescreen.
+ */
+static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 pcibridge_busnum = pcipriv->ndis_adapter.pcibridge_busnum;
+ u8 pcibridge_devnum = pcipriv->ndis_adapter.pcibridge_devnum;
+ u8 pcibridge_funcnum = pcipriv->ndis_adapter.pcibridge_funcnum;
+ u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
+ u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+ u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
+ u16 aspmlevel;
+ u8 u_pcibridge_aspmsetting;
+ u8 u_device_aspmsetting;
+
+ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+ ("PCI(Bridge) UNKNOWN.\n"));
+ return;
+ }
+
+ /*4 Enable Pci Bridge ASPM */
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + (num4bytes << 2));
+
+ u_pcibridge_aspmsetting =
+ pcipriv->ndis_adapter.pcibridge_linkctrlreg |
+ rtlpci->const_hostpci_aspm_setting;
+
+ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
+ u_pcibridge_aspmsetting &= ~BIT(0);
+
+ rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, u_pcibridge_aspmsetting);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("PlatformEnableASPM():PciBridge busnumber[%x], "
+ "DevNumbe[%x], funcnumber[%x], Write reg[%x] = %x\n",
+ pcibridge_busnum, pcibridge_devnum, pcibridge_funcnum,
+ (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
+ u_pcibridge_aspmsetting));
+
+ udelay(50);
+
+ /*Get ASPM level (with/without Clock Req) */
+ aspmlevel = rtlpci->const_devicepci_aspm_setting;
+ u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
+
+ /*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
+ /*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */
+
+ u_device_aspmsetting |= aspmlevel;
+
+ _rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
+ _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
+ RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
+ }
+ udelay(200);
+}
+
+static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+
+ bool status = false;
+ u8 offset_e0;
+ unsigned offset_e4;
+
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + 0xE0);
+ rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, 0xA0);
+
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + 0xE0);
+ rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &offset_e0);
+
+ if (offset_e0 == 0xA0) {
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + 0xE4);
+ rtl_pci_raw_read_port_ulong(PCI_CONF_DATA, &offset_e4);
+ if (offset_e4 & BIT(23))
+ status = true;
+ }
+
+ return status;
+}
+
+static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
+ u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
+ u8 linkctrl_reg;
+ u8 num4bBytes;
+
+ num4bBytes = (capabilityoffset + 0x10) / 4;
+
+ /*Read Link Control Register */
+ rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
+ pcicfg_addrport + (num4bBytes << 2));
+ rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg);
+
+ pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
+}
+
+static void rtl_pci_parse_configuration(struct pci_dev *pdev,
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+
+ u8 tmp;
+ int pos;
+ u8 linkctrl_reg;
+
+ /*Link Control Register */
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
+ pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Link Control Register =%x\n",
+ pcipriv->ndis_adapter.linkctrl_reg));
+
+ pci_read_config_byte(pdev, 0x98, &tmp);
+ tmp |= BIT(4);
+ pci_write_config_byte(pdev, 0x98, tmp);
+
+ tmp = 0x17;
+ pci_write_config_byte(pdev, 0x70f, tmp);
+}
+
+static void _rtl_pci_initialize_adapter_common(struct ieee80211_hw *hw)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ _rtl_pci_update_default_setting(hw);
+
+ if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
+ /*Always enable ASPM & Clock Req. */
+ rtl_pci_enable_aspm(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
+ }
+
+}
+
+static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ /* close ASPM for AMD by default */
+ rtlpci->const_amdpci_aspm = 0;
+
+ /*
+ * ASPM PS mode.
+ * 0 - Disable ASPM,
+ * 1 - Enable ASPM without Clock Req,
+ * 2 - Enable ASPM with Clock Req,
+ * 3 - Always Enable ASPM with Clock Req,
+ * 4 - Always Enable ASPM without Clock Req.
+ * set default to RTL8192CE:3 RTL8192E:2
+ */
+ rtlpci->const_pci_aspm = 3;
+
+ /*Setting for PCI-E device */
+ rtlpci->const_devicepci_aspm_setting = 0x03;
+
+ /*Setting for PCI-E bridge */
+ rtlpci->const_hostpci_aspm_setting = 0x02;
+
+ /*
+ * In Hw/Sw Radio Off situation.
+ * 0 - Default,
+ * 1 - From ASPM setting without low Mac Pwr,
+ * 2 - From ASPM setting with low Mac Pwr,
+ * 3 - Bus D3
+ * set default to RTL8192CE:0 RTL8192SE:2
+ */
+ rtlpci->const_hwsw_rfoff_d3 = 0;
+
+ /*
+ * This setting works for those device with
+ * backdoor ASPM setting such as EPHY setting.
+ * 0 - Not support ASPM,
+ * 1 - Support ASPM,
+ * 2 - According to chipset.
+ */
+ rtlpci->const_support_pciaspm = 1;
+
+ _rtl_pci_initialize_adapter_common(hw);
+}
+
+static void _rtl_pci_io_handler_init(struct device *dev,
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->io.dev = dev;
+
+ rtlpriv->io.write8_async = pci_write8_async;
+ rtlpriv->io.write16_async = pci_write16_async;
+ rtlpriv->io.write32_async = pci_write32_async;
+
+ rtlpriv->io.read8_sync = pci_read8_sync;
+ rtlpriv->io.read16_sync = pci_read16_sync;
+ rtlpriv->io.read32_sync = pci_read32_sync;
+
+}
+
+static void _rtl_pci_io_handler_release(struct ieee80211_hw *hw)
+{
+}
+
+static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
+
+ while (skb_queue_len(&ring->queue)) {
+ struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
+
+ u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true,
+ HW_DESC_OWN);
+
+ /*
+ * The beacon packet only uses the first
+ * descriptor by default, and the OWN bit may not
+ * be cleared by the hardware.
+ */
+ if (own)
+ return;
+ ring->idx = (ring->idx + 1) % ring->entries;
+
+ skb = __skb_dequeue(&ring->queue);
+ pci_unmap_single(rtlpci->pdev,
+ rtlpriv->cfg->ops->
+ get_desc((u8 *) entry, true,
+ HW_DESC_TXBUFF_ADDR),
+ skb->len, PCI_DMA_TODEVICE);
+
+ RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
+ ("new ring->idx:%d, "
+ "free: skb_queue_len:%d, free: seq:%x\n",
+ ring->idx,
+ skb_queue_len(&ring->queue),
+ *(u16 *) (skb->data + 22)));
+
+ info = IEEE80211_SKB_CB(skb);
+ ieee80211_tx_info_clear_status(info);
+
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ /*info->status.rates[0].count = 1; */
+
+ ieee80211_tx_status_irqsafe(hw, skb);
+
+ if ((ring->entries - skb_queue_len(&ring->queue))
+ == 2) {
+
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
+ ("more desc left, wake"
+ "skb_queue@%d,ring->idx = %d,"
+ "skb_queue_len = 0x%d\n",
+ prio, ring->idx,
+ skb_queue_len(&ring->queue)));
+
+ ieee80211_wake_queue(hw,
+ skb_get_queue_mapping
+ (skb));
+ }
+
+ skb = NULL;
+ }
+
+ if (((rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ (rtlpriv->link_info.num_rx_inperiod > 2)) {
+ rtl_lps_leave(hw);
+ }
+}
+
+static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ int rx_queue_idx = RTL_PCI_RX_MPDU_QUEUE;
+
+ struct ieee80211_rx_status rx_status = { 0 };
+ unsigned int count = rtlpci->rxringcount;
+ u8 own;
+ u8 tmp_one;
+ u32 bufferaddress;
+ bool unicast = false;
+
+ struct rtl_stats stats = {
+ .signal = 0,
+ .noise = -98,
+ .rate = 0,
+ };
+
+ /*RX NORMAL PKT */
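+ /*
+ * Each loop iteration consumes one filled descriptor: the skb is
+ * unmapped and handed to mac80211, a fresh skb is allocated and
+ * mapped, and the descriptor is handed back to the hardware by
+ * setting its OWN bit (plus RXERO on the last descriptor of the ring).
+ */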
+ while (count--) {
+ /*rx descriptor */
+ struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[
+ rtlpci->rx_ring[rx_queue_idx].idx];
+ /*rx pkt */
+ struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[
+ rtlpci->rx_ring[rx_queue_idx].idx];
+
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
+ false, HW_DESC_OWN);
+
+ if (own) {
+ /*wait data to be filled by hardware */
+ return;
+ } else {
+ struct ieee80211_hdr *hdr;
+ __le16 fc;
+ struct sk_buff *new_skb = NULL;
+
+ rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
+ &rx_status,
+ (u8 *) pdesc, skb);
+
+ pci_unmap_single(rtlpci->pdev,
+ *((dma_addr_t *) skb->cb),
+ rtlpci->rxbuffersize,
+ PCI_DMA_FROMDEVICE);
+
+ skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
+ false,
+ HW_DESC_RXPKT_LEN));
+ skb_reserve(skb,
+ stats.rx_drvinfo_size + stats.rx_bufshift);
+
+ /*
+ * NOTICE: this cannot be done here for mac80211;
+ * it is done in the mac80211 code. If you do it here,
+ * sec DHCP will fail:
+ * skb_trim(skb, skb->len - 4);
+ */
+
+ hdr = (struct ieee80211_hdr *)(skb->data);
+ fc = hdr->frame_control;
+
+ if (!stats.crc) {
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
+ sizeof(rx_status));
+
+ if (is_broadcast_ether_addr(hdr->addr1))
+ ;/*TODO*/
+ else {
+ if (is_multicast_ether_addr(hdr->addr1))
+ ;/*TODO*/
+ else {
+ unicast = true;
+ rtlpriv->stats.rxbytesunicast +=
+ skb->len;
+ }
+ }
+
+ rtl_is_special_data(hw, skb, false);
+
+ if (ieee80211_is_data(fc)) {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_RX);
+
+ if (unicast)
+ rtlpriv->link_info.
+ num_rx_inperiod++;
+ }
+
+ if (unlikely(!rtl_action_proc(hw, skb,
+ false))) {
+ dev_kfree_skb_any(skb);
+ } else {
+ struct sk_buff *uskb = NULL;
+ u8 *pdata;
+ uskb = dev_alloc_skb(skb->len + 128);
+ if (!uskb) {
+ RT_TRACE(rtlpriv,
+ (COMP_INTR | COMP_RECV),
+ DBG_EMERG,
+ ("can't alloc rx skb\n"));
+ goto done;
+ }
+ memcpy(IEEE80211_SKB_RXCB(uskb),
+ &rx_status,
+ sizeof(rx_status));
+ pdata = (u8 *)skb_put(uskb, skb->len);
+ memcpy(pdata, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+
+ ieee80211_rx_irqsafe(hw, uskb);
+ }
+ } else {
+ dev_kfree_skb_any(skb);
+ }
+
+ if (((rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ (rtlpriv->link_info.num_rx_inperiod > 2)) {
+ rtl_lps_leave(hw);
+ }
+
+ new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ if (unlikely(!new_skb)) {
+ RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
+ DBG_EMERG,
+ ("can't alloc skb for rx\n"));
+ goto done;
+ }
+ skb = new_skb;
+ /*skb->dev = dev; */
+
+ rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci->
+ rx_ring
+ [rx_queue_idx].
+ idx] = skb;
+ *((dma_addr_t *) skb->cb) =
+ pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
+ rtlpci->rxbuffersize,
+ PCI_DMA_FROMDEVICE);
+
+ }
+done:
+ bufferaddress = (u32)(*((dma_addr_t *) skb->cb));
+ tmp_one = 1;
+ rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
+ HW_DESC_RXBUFF_ADDR,
+ (u8 *)&bufferaddress);
+ rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
+ (u8 *)&tmp_one);
+ rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
+ HW_DESC_RXPKT_LEN,
+ (u8 *)&rtlpci->rxbuffersize);
+
+ if (rtlpci->rx_ring[rx_queue_idx].idx ==
+ rtlpci->rxringcount - 1)
+ rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
+ HW_DESC_RXERO,
+ (u8 *)&tmp_one);
+
+ rtlpci->rx_ring[rx_queue_idx].idx =
+ (rtlpci->rx_ring[rx_queue_idx].idx + 1) %
+ rtlpci->rxringcount;
+ }
+
+}
+
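+ /*
+ * Top-half interrupt handler: read the interrupt status (ISR),
+ * reclaim finished tx descriptors per hardware queue, and defer rx
+ * processing and beacon preparation to tasklets.
+ */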
+static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
+{
+ struct ieee80211_hw *hw = dev_id;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ unsigned long flags;
+ u32 inta = 0;
+ u32 intb = 0;
+
+ if (rtlpci->irq_enabled == 0)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+ /*read ISR: 4/8bytes */
+ rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
+
+ /*Shared IRQ or HW disappeared */
+ if (!inta || inta == 0xffff)
+ goto done;
+
+ /*<1> beacon related */
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
+ ("beacon ok interrupt!\n"));
+ }
+
+ if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
+ ("beacon err interrupt!\n"));
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
+ ("beacon interrupt!\n"));
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
+ ("prepare beacon for interrupt!\n"));
+ tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
+ }
+
+ /*<3> Tx related */
+ if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("IMR_TXFOVW!\n"));
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
+ ("Manage ok interrupt!\n"));
+ _rtl_pci_tx_isr(hw, MGNT_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
+ ("HIGH_QUEUE ok interrupt!\n"));
+ _rtl_pci_tx_isr(hw, HIGH_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
+ ("BK Tx OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, BK_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
+ ("BE TX OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, BE_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
+ ("VI TX OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, VI_QUEUE);
+ }
+
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
+ ("Vo TX OK interrupt!\n"));
+ _rtl_pci_tx_isr(hw, VO_QUEUE);
+ }
+
+ /*<2> Rx related */
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
+ tasklet_schedule(&rtlpriv->works.irq_tasklet);
+ }
+
+ if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("rx descriptor unavailable!\n"));
+ tasklet_schedule(&rtlpriv->works.irq_tasklet);
+ }
+
+ if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("rx overflow !\n"));
+ tasklet_schedule(&rtlpriv->works.irq_tasklet);
+ }
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+ return IRQ_HANDLED;
+
+done:
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
+{
+ _rtl_pci_rx_interrupt(hw);
+}
+
+static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+ struct ieee80211_hdr *hdr = NULL;
+ struct ieee80211_tx_info *info = NULL;
+ struct sk_buff *pskb = NULL;
+ struct rtl_tx_desc *pdesc = NULL;
+ unsigned int queue_index;
+ u8 temp_one = 1;
+
+ ring = &rtlpci->tx_ring[BEACON_QUEUE];
+ pskb = __skb_dequeue(&ring->queue);
+ if (pskb)
+ kfree_skb(pskb);
+
+ /*NB: the beacon data buffer must be 32-bit aligned. */
+ pskb = ieee80211_beacon_get(hw, mac->vif);
+ if (pskb == NULL)
+ return;
+ hdr = (struct ieee80211_hdr *)(pskb->data);
+ info = IEEE80211_SKB_CB(pskb);
+
+ queue_index = BEACON_QUEUE;
+
+ pdesc = &ring->desc[0];
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+ info, pskb, queue_index);
+
+ __skb_queue_tail(&ring->queue, pskb);
+
+ rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
+ (u8 *)&temp_one);
+
+ return;
+}
+
+static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 i;
+
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
+ rtlpci->txringcount[i] = RT_TXDESC_NUM;
+
+ /*
+ * Allocate only 2 descriptors for the beacon queue,
+ * because only the first descriptor is used for the hw beacon.
+ */
+ rtlpci->txringcount[BEACON_QUEUE] = 2;
+
+ /*
+ * The BE queue needs more descriptors for performance;
+ * otherwise "no more tx desc" can happen and
+ * may cause a mac80211 memory leak.
+ */
+ rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
+
+ rtlpci->rxbuffersize = 9100; /*2048/1024; */
+ rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */
+}
+
+static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
+ struct pci_dev *pdev)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ rtlpci->up_first_time = true;
+ rtlpci->being_init_adapter = false;
+
+ rtlhal->hw = hw;
+ rtlpci->pdev = pdev;
+
+ ppsc->inactiveps = false;
+ ppsc->leisure_ps = true;
+ ppsc->fwctrl_lps = true;
+ ppsc->reg_fwctrl_lps = 3;
+ ppsc->reg_max_lps_awakeintvl = 5;
+
+ if (ppsc->reg_fwctrl_lps == 1)
+ ppsc->fwctrl_psmode = FW_PS_MIN_MODE;
+ else if (ppsc->reg_fwctrl_lps == 2)
+ ppsc->fwctrl_psmode = FW_PS_MAX_MODE;
+ else if (ppsc->reg_fwctrl_lps == 3)
+ ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;
+
+ /*Tx/Rx related var */
+ _rtl_pci_init_trx_var(hw);
+
+ /*IBSS*/ mac->beacon_interval = 100;
+
+ /*AMPDU*/ mac->min_space_cfg = 0;
+ mac->max_mss_density = 0;
+ /*set sane AMPDU defaults */
+ mac->current_ampdu_density = 7;
+ mac->current_ampdu_factor = 3;
+
+ /*QOS*/ rtlpci->acm_method = eAcmWay2_SW;
+
+ /*task */
+ tasklet_init(&rtlpriv->works.irq_tasklet,
+ (void (*)(unsigned long))_rtl_pci_irq_tasklet,
+ (unsigned long)hw);
+ tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
+ (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
+ (unsigned long)hw);
+}
+
+static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
+ unsigned int prio, unsigned int entries)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_tx_desc *ring;
+ dma_addr_t dma;
+ u32 nextdescaddress;
+ int i;
+
+ ring = pci_alloc_consistent(rtlpci->pdev,
+ sizeof(*ring) * entries, &dma);
+
+ if (!ring || (unsigned long)ring & 0xFF) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Cannot allocate TX ring (prio = %d)\n", prio));
+ return -ENOMEM;
+ }
+
+ memset(ring, 0, sizeof(*ring) * entries);
+ rtlpci->tx_ring[prio].desc = ring;
+ rtlpci->tx_ring[prio].dma = dma;
+ rtlpci->tx_ring[prio].idx = 0;
+ rtlpci->tx_ring[prio].entries = entries;
+ skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("queue:%d, ring_addr:%p\n", prio, ring));
+
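+ /*
+ * Chain the descriptors into a circular ring: entry i points to entry
+ * (i + 1) % entries, so the last descriptor wraps back to the first.
+ */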
+ for (i = 0; i < entries; i++) {
+ nextdescaddress = (u32) dma + ((i + 1) % entries) *
+ sizeof(*ring);
+
+ rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
+ true, HW_DESC_TX_NEXTDESC_ADDR,
+ (u8 *)&nextdescaddress);
+ }
+
+ return 0;
+}
+
+static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_rx_desc *entry = NULL;
+ int i, rx_queue_idx;
+ u8 tmp_one = 1;
+
+ /*
+ *rx_queue_idx 0:RX_MPDU_QUEUE
+ *rx_queue_idx 1:RX_CMD_QUEUE
+ */
+ for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
+ rx_queue_idx++) {
+ rtlpci->rx_ring[rx_queue_idx].desc =
+ pci_alloc_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rx_queue_idx].
+ desc) * rtlpci->rxringcount,
+ &rtlpci->rx_ring[rx_queue_idx].dma);
+
+ if (!rtlpci->rx_ring[rx_queue_idx].desc ||
+ (unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Cannot allocate RX ring\n"));
+ return -ENOMEM;
+ }
+
+ memset(rtlpci->rx_ring[rx_queue_idx].desc, 0,
+ sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) *
+ rtlpci->rxringcount);
+
+ rtlpci->rx_ring[rx_queue_idx].idx = 0;
+
+ for (i = 0; i < rtlpci->rxringcount; i++) {
+ struct sk_buff *skb =
+ dev_alloc_skb(rtlpci->rxbuffersize);
+ u32 bufferaddress;
+ if (!skb)
+ return 0;
+ entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
+
+ /*skb->dev = dev; */
+
+ rtlpci->rx_ring[rx_queue_idx].rx_buf[i] = skb;
+
+ /*
+ *just set skb->cb to mapping addr
+ *for pci_unmap_single use
+ */
+ *((dma_addr_t *) skb->cb) =
+ pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
+ rtlpci->rxbuffersize,
+ PCI_DMA_FROMDEVICE);
+
+ bufferaddress = (u32)(*((dma_addr_t *)skb->cb));
+ rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
+ HW_DESC_RXBUFF_ADDR,
+ (u8 *)&bufferaddress);
+ rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
+ HW_DESC_RXPKT_LEN,
+ (u8 *)&rtlpci->
+ rxbuffersize);
+ rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
+ HW_DESC_RXOWN,
+ (u8 *)&tmp_one);
+ }
+
+ rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
+ HW_DESC_RXERO, (u8 *)&tmp_one);
+ }
+ return 0;
+}
+
+static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
+ unsigned int prio)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
+
+ while (skb_queue_len(&ring->queue)) {
+ struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+ struct sk_buff *skb = __skb_dequeue(&ring->queue);
+
+ pci_unmap_single(rtlpci->pdev,
+ rtlpriv->cfg->
+ ops->get_desc((u8 *) entry, true,
+ HW_DESC_TXBUFF_ADDR),
+ skb->len, PCI_DMA_TODEVICE);
+ kfree_skb(skb);
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+
+ pci_free_consistent(rtlpci->pdev,
+ sizeof(*ring->desc) * ring->entries,
+ ring->desc, ring->dma);
+ ring->desc = NULL;
+}
+
+static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
+{
+ int i, rx_queue_idx;
+
+ /*rx_queue_idx 0:RX_MPDU_QUEUE */
+ /*rx_queue_idx 1:RX_CMD_QUEUE */
+ for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
+ rx_queue_idx++) {
+ for (i = 0; i < rtlpci->rxringcount; i++) {
+ struct sk_buff *skb =
+ rtlpci->rx_ring[rx_queue_idx].rx_buf[i];
+ if (!skb)
+ continue;
+
+ pci_unmap_single(rtlpci->pdev,
+ *((dma_addr_t *) skb->cb),
+ rtlpci->rxbuffersize,
+ PCI_DMA_FROMDEVICE);
+ kfree_skb(skb);
+ }
+
+ pci_free_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rx_queue_idx].
+ desc) * rtlpci->rxringcount,
+ rtlpci->rx_ring[rx_queue_idx].desc,
+ rtlpci->rx_ring[rx_queue_idx].dma);
+ rtlpci->rx_ring[rx_queue_idx].desc = NULL;
+ }
+}
+
+static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ int ret;
+ int i;
+
+ ret = _rtl_pci_init_rx_ring(hw);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
+ ret = _rtl_pci_init_tx_ring(hw, i,
+ rtlpci->txringcount[i]);
+ if (ret)
+ goto err_free_rings;
+ }
+
+ return 0;
+
+err_free_rings:
+ _rtl_pci_free_rx_ring(rtlpci);
+
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
+ if (rtlpci->tx_ring[i].desc)
+ _rtl_pci_free_tx_ring(hw, i);
+
+ return ret;
+}
+
+static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u32 i;
+
+ /*free rx rings */
+ _rtl_pci_free_rx_ring(rtlpci);
+
+ /*free tx rings */
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
+ _rtl_pci_free_tx_ring(hw, i);
+
+ return 0;
+}
+
+int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ int i, rx_queue_idx;
+ unsigned long flags;
+ u8 tmp_one = 1;
+
+ /*rx_queue_idx 0:RX_MPDU_QUEUE */
+ /*rx_queue_idx 1:RX_CMD_QUEUE */
+ for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
+ rx_queue_idx++) {
+ /*
+ *force the rx_ring[RX_MPDU_QUEUE/
+ *RX_CMD_QUEUE].idx to the first one
+ */
+ if (rtlpci->rx_ring[rx_queue_idx].desc) {
+ struct rtl_rx_desc *entry = NULL;
+
+ for (i = 0; i < rtlpci->rxringcount; i++) {
+ entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
+ rtlpriv->cfg->ops->set_desc((u8 *) entry,
+ false,
+ HW_DESC_RXOWN,
+ (u8 *)&tmp_one);
+ }
+ rtlpci->rx_ring[rx_queue_idx].idx = 0;
+ }
+ }
+
+ /*
+ *after reset, release previous pending packet,
+ *and force the tx idx to the first one
+ */
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
+ if (rtlpci->tx_ring[i].desc) {
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
+
+ while (skb_queue_len(&ring->queue)) {
+ struct rtl_tx_desc *entry =
+ &ring->desc[ring->idx];
+ struct sk_buff *skb =
+ __skb_dequeue(&ring->queue);
+
+ pci_unmap_single(rtlpci->pdev,
+ rtlpriv->cfg->ops->
+ get_desc((u8 *)
+ entry,
+ true,
+ HW_DESC_TXBUFF_ADDR),
+ skb->len, PCI_DMA_TODEVICE);
+ kfree_skb(skb);
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+ ring->idx = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ return 0;
+}
+
+static unsigned int _rtl_mac_to_hwqueue(__le16 fc,
+ unsigned int mac80211_queue_index)
+{
+ unsigned int hw_queue_index;
+
+ if (unlikely(ieee80211_is_beacon(fc))) {
+ hw_queue_index = BEACON_QUEUE;
+ goto out;
+ }
+
+ if (ieee80211_is_mgmt(fc)) {
+ hw_queue_index = MGNT_QUEUE;
+ goto out;
+ }
+
+ switch (mac80211_queue_index) {
+ case 0:
+ hw_queue_index = VO_QUEUE;
+ break;
+ case 1:
+ hw_queue_index = VI_QUEUE;
+ break;
+ case 2:
+ hw_queue_index = BE_QUEUE;
+ break;
+ case 3:
+ hw_queue_index = BK_QUEUE;
+ break;
+ default:
+ hw_queue_index = BE_QUEUE;
+ RT_ASSERT(false, ("QSLT_BE queue, skb_queue:%d\n",
+ mac80211_queue_index));
+ break;
+ }
+
+out:
+ return hw_queue_index;
+}
+
+static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtl8192_tx_ring *ring;
+ struct rtl_tx_desc *pdesc;
+ u8 idx;
+ unsigned int queue_index, hw_queue;
+ unsigned long flags;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+ __le16 fc = hdr->frame_control;
+ u8 *pda_addr = hdr->addr1;
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ /*ssn */
+ u8 *qc = NULL;
+ u8 tid = 0;
+ u16 seq_number = 0;
+ u8 own;
+ u8 temp_one = 1;
+
+ if (ieee80211_is_mgmt(fc))
+ rtl_tx_mgmt_proc(hw, skb);
+ rtl_action_proc(hw, skb, true);
+
+ queue_index = skb_get_queue_mapping(skb);
+ hw_queue = _rtl_mac_to_hwqueue(fc, queue_index);
+
+ if (is_multicast_ether_addr(pda_addr))
+ rtlpriv->stats.txbytesmulticast += skb->len;
+ else if (is_broadcast_ether_addr(pda_addr))
+ rtlpriv->stats.txbytesbroadcast += skb->len;
+ else
+ rtlpriv->stats.txbytesunicast += skb->len;
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+ ring = &rtlpci->tx_ring[hw_queue];
+ if (hw_queue != BEACON_QUEUE)
+ idx = (ring->idx + skb_queue_len(&ring->queue)) %
+ ring->entries;
+ else
+ idx = 0;
+
+ pdesc = &ring->desc[idx];
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
+ true, HW_DESC_OWN);
+
+ if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+			 ("No more TX desc@%d, ring->idx = %d, "
+			  "idx = %d, skb_queue_len = %d\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue)));
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+ return skb->len;
+ }
+
+ /*
+ *if(ieee80211_is_nullfunc(fc)) {
+ * spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+ * return 1;
+ *}
+ */
+
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+
+ seq_number = mac->tids[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ /*
+ *hdr->seq_ctrl = hdr->seq_ctrl &
+ *cpu_to_le16(IEEE80211_SCTL_FRAG);
+ *hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ */
+
+ seq_number += 1;
+ }
+
+ if (ieee80211_is_data(fc))
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
+
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
+ info, skb, hw_queue);
+
+ __skb_queue_tail(&ring->queue, skb);
+
+ rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true,
+ HW_DESC_OWN, (u8 *)&temp_one);
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ if (qc)
+ mac->tids[tid].seq_number = seq_number;
+ }
+
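+	/*
+	 *Stop the mac80211 queue while fewer than two descriptors
+	 *remain free, so we do not overrun the hardware ring.
+	 */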
+ if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
+ hw_queue != BEACON_QUEUE) {
+
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
+			 ("less desc left, stop skb_queue@%d, "
+			  "ring->idx = %d, "
+			  "idx = %d, skb_queue_len = %d\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue)));
+
+ ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
+ }
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
+
+ return 0;
+}
+
+static void rtl_pci_deinit(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ _rtl_pci_deinit_trx_ring(hw);
+
+ synchronize_irq(rtlpci->pdev->irq);
+ tasklet_kill(&rtlpriv->works.irq_tasklet);
+
+ flush_workqueue(rtlpriv->works.rtl_wq);
+ destroy_workqueue(rtlpriv->works.rtl_wq);
+
+}
+
+static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int err;
+
+ _rtl_pci_init_struct(hw, pdev);
+
+ err = _rtl_pci_init_trx_ring(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("tx ring initialization failed"));
+ return err;
+ }
+
+ return 1;
+}
+
+static int rtl_pci_start(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ int err;
+
+ rtl_pci_reset_trx_ring(hw);
+
+ rtlpci->driver_is_goingto_unload = false;
+ err = rtlpriv->cfg->ops->hw_init(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Failed to config hardware!\n"));
+ return err;
+ }
+
+ rtlpriv->cfg->ops->enable_interrupt(hw);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("enable_interrupt OK\n"));
+
+ rtl_init_rx_config(hw);
+
+	/*should be done after adapter start and interrupt enable. */
+ set_hal_start(rtlhal);
+
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+
+ rtlpci->up_first_time = false;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("OK\n"));
+ return 0;
+}
+
+static void rtl_pci_stop(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ unsigned long flags;
+ u8 RFInProgressTimeOut = 0;
+
+	/*
+	 *should be done before disabling interrupt & adapter,
+	 *and it takes effect immediately.
+	 */
+ set_hal_stop(rtlhal);
+
+ rtlpriv->cfg->ops->disable_interrupt(hw);
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
+ while (ppsc->rfchange_inprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
+ if (RFInProgressTimeOut > 100) {
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
+ break;
+ }
+ mdelay(1);
+ RFInProgressTimeOut++;
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
+ }
+ ppsc->rfchange_inprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
+
+ rtlpci->driver_is_goingto_unload = true;
+ rtlpriv->cfg->ops->hw_disable(hw);
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
+
+ rtl_pci_enable_aspm(hw);
+}
+
+static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct pci_dev *bridge_pdev = pdev->bus->self;
+ u16 venderid;
+ u16 deviceid;
+ u16 irqline;
+ u8 tmp;
+
+ venderid = pdev->vendor;
+ deviceid = pdev->device;
+ pci_read_config_word(pdev, 0x3C, &irqline);
+
+ if (deviceid == RTL_PCI_8192_DID ||
+ deviceid == RTL_PCI_0044_DID ||
+ deviceid == RTL_PCI_0047_DID ||
+ deviceid == RTL_PCI_8192SE_DID ||
+ deviceid == RTL_PCI_8174_DID ||
+ deviceid == RTL_PCI_8173_DID ||
+ deviceid == RTL_PCI_8172_DID ||
+ deviceid == RTL_PCI_8171_DID) {
+ switch (pdev->revision) {
+ case RTL_PCI_REVISION_ID_8192PCIE:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("8192 PCI-E is found - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
+ break;
+ case RTL_PCI_REVISION_ID_8192SE:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("8192SE is found - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Err: Unknown device - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
+ break;
+
+ }
+ } else if (deviceid == RTL_PCI_8192CET_DID ||
+ deviceid == RTL_PCI_8192CE_DID ||
+ deviceid == RTL_PCI_8191CE_DID ||
+ deviceid == RTL_PCI_8188CE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("8192C PCI-E is found - "
+ "vid/did=%x/%x\n", venderid, deviceid));
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Err: Unknown device -"
+ " vid/did=%x/%x\n", venderid, deviceid));
+
+ rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
+ }
+
+ /*find bus info */
+ pcipriv->ndis_adapter.busnumber = pdev->bus->number;
+ pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
+ pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
+
+ /*find bridge info */
+ pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
+ for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
+ if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
+ pcipriv->ndis_adapter.pcibridge_vendor = tmp;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Pci Bridge Vendor is found index: %d\n",
+ tmp));
+ break;
+ }
+ }
+
+ if (pcipriv->ndis_adapter.pcibridge_vendor !=
+ PCI_BRIDGE_VENDOR_UNKNOWN) {
+ pcipriv->ndis_adapter.pcibridge_busnum =
+ bridge_pdev->bus->number;
+ pcipriv->ndis_adapter.pcibridge_devnum =
+ PCI_SLOT(bridge_pdev->devfn);
+ pcipriv->ndis_adapter.pcibridge_funcnum =
+ PCI_FUNC(bridge_pdev->devfn);
+ pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
+ pci_pcie_cap(bridge_pdev);
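+		/*
+		 *Legacy 0xCF8/0xCFC config address: bus in bits 23:16,
+		 *device in bits 15:11, function in bits 10:8, with the
+		 *enable bit (bit 31) set.
+		 */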
+ pcipriv->ndis_adapter.pcicfg_addrport =
+ (pcipriv->ndis_adapter.pcibridge_busnum << 16) |
+ (pcipriv->ndis_adapter.pcibridge_devnum << 11) |
+ (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31);
+ pcipriv->ndis_adapter.num4bytes =
+ (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
+
+ rtl_pci_get_linkcontrol_field(hw);
+
+ if (pcipriv->ndis_adapter.pcibridge_vendor ==
+ PCI_BRIDGE_VENDOR_AMD) {
+ pcipriv->ndis_adapter.amd_l1_patch =
+ rtl_pci_get_amd_l1_patch(hw);
+ }
+ }
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("pcidev busnumber:devnumber:funcnumber:"
+ "vendor:link_ctl %d:%d:%d:%x:%x\n",
+ pcipriv->ndis_adapter.busnumber,
+ pcipriv->ndis_adapter.devnumber,
+ pcipriv->ndis_adapter.funcnumber,
+ pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg));
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("pci_bridge busnumber:devnumber:funcnumber:vendor:"
+ "pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
+ pcipriv->ndis_adapter.pcibridge_busnum,
+ pcipriv->ndis_adapter.pcibridge_devnum,
+ pcipriv->ndis_adapter.pcibridge_funcnum,
+ pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
+ pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
+ pcipriv->ndis_adapter.pcibridge_linkctrlreg,
+ pcipriv->ndis_adapter.amd_l1_patch));
+
+ rtl_pci_parse_configuration(pdev, hw);
+
+ return true;
+}
+
+int __devinit rtl_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct ieee80211_hw *hw = NULL;
+
+ struct rtl_priv *rtlpriv = NULL;
+ struct rtl_pci_priv *pcipriv = NULL;
+ struct rtl_pci *rtlpci;
+ unsigned long pmem_start, pmem_len, pmem_flags;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ RT_ASSERT(false,
+ ("%s : Cannot enable new PCI device\n",
+ pci_name(pdev)));
+ return err;
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ RT_ASSERT(false, ("Unable to obtain 32bit DMA "
+ "for consistent allocations\n"));
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
+ sizeof(struct rtl_priv), &rtl_ops);
+ if (!hw) {
+ RT_ASSERT(false,
+ ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
+ err = -ENOMEM;
+ goto fail1;
+ }
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+ pci_set_drvdata(pdev, hw);
+
+ rtlpriv = hw->priv;
+ pcipriv = (void *)rtlpriv->priv;
+ pcipriv->dev.pdev = pdev;
+
+	/*
+	 *init dbgp flags before all
+	 *other functions, because we will
+	 *use them in other functions like
+	 *RT_TRACE/RT_PRINT/RTL_PRINT_DATA;
+	 *these macros cannot be used
+	 *before this point
+	 */
+ rtl_dbgp_flag_init(hw);
+
+ /* MEM map */
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ RT_ASSERT(false, ("Can't obtain PCI resources\n"));
+ return err;
+ }
+
+ pmem_start = pci_resource_start(pdev, 2);
+ pmem_len = pci_resource_len(pdev, 2);
+ pmem_flags = pci_resource_flags(pdev, 2);
+
+ /*shared mem start */
+ rtlpriv->io.pci_mem_start =
+ (unsigned long)pci_iomap(pdev, 2, pmem_len);
+ if (rtlpriv->io.pci_mem_start == 0) {
+ RT_ASSERT(false, ("Can't map PCI mem\n"));
+ goto fail2;
+ }
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("mem mapped space: start: 0x%08lx len:%08lx "
+ "flags:%08lx, after map:0x%08lx\n",
+ pmem_start, pmem_len, pmem_flags,
+ rtlpriv->io.pci_mem_start));
+
+ /* Disable Clk Request */
+ pci_write_config_byte(pdev, 0x81, 0);
+ /* leave D3 mode */
+ pci_write_config_byte(pdev, 0x44, 0);
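+	/*
+	 *Offset 0x04 is the PCI command register: 0x06 enables memory
+	 *space and bus mastering, 0x07 additionally enables I/O space.
+	 */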
+ pci_write_config_byte(pdev, 0x04, 0x06);
+ pci_write_config_byte(pdev, 0x04, 0x07);
+
+ /* init cfg & intf_ops */
+ rtlpriv->rtlhal.interface = INTF_PCI;
+ rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
+ rtlpriv->intf_ops = &rtl_pci_ops;
+
+ /* find adapter */
+ _rtl_pci_find_adapter(pdev, hw);
+
+ /* Init IO handler */
+ _rtl_pci_io_handler_init(&pdev->dev, hw);
+
+ /*like read eeprom and so on */
+ rtlpriv->cfg->ops->read_eeprom_info(hw);
+
+ if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't init_sw_vars.\n"));
+ goto fail3;
+ }
+
+ rtlpriv->cfg->ops->init_sw_leds(hw);
+
+ /*aspm */
+ rtl_pci_init_aspm(hw);
+
+ /* Init mac80211 sw */
+ err = rtl_init_core(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't allocate sw for mac80211.\n"));
+ goto fail3;
+ }
+
+ /* Init PCI sw */
+ err = !rtl_pci_init(hw, pdev);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Failed to init PCI.\n"));
+ goto fail3;
+ }
+
+ err = ieee80211_register_hw(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't register mac80211 hw.\n"));
+ goto fail3;
+ } else {
+ rtlpriv->mac80211.mac80211_registered = 1;
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("failed to create sysfs device attributes\n"));
+ goto fail3;
+ }
+
+ /*init rfkill */
+ rtl_init_rfkill(hw);
+
+ rtlpci = rtl_pcidev(pcipriv);
+ err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("%s: failed to register IRQ handler\n",
+ wiphy_name(hw->wiphy)));
+ goto fail3;
+ } else {
+ rtlpci->irq_alloc = 1;
+ }
+
+ set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+ return 0;
+
+fail3:
+ pci_set_drvdata(pdev, NULL);
+ rtl_deinit_core(hw);
+ _rtl_pci_io_handler_release(hw);
+ ieee80211_free_hw(hw);
+
+ if (rtlpriv->io.pci_mem_start != 0)
+ pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
+
+fail2:
+ pci_release_regions(pdev);
+
+fail1:
+
+ pci_disable_device(pdev);
+
+ return -ENODEV;
+
+}
+EXPORT_SYMBOL(rtl_pci_probe);
+
+void rtl_pci_disconnect(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+ struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
+
+ clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+
+ sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);
+
+ /*ieee80211_unregister_hw will call ops_stop */
+ if (rtlmac->mac80211_registered == 1) {
+ ieee80211_unregister_hw(hw);
+ rtlmac->mac80211_registered = 0;
+ } else {
+ rtl_deinit_deferred_work(hw);
+ rtlpriv->intf_ops->adapter_stop(hw);
+ }
+
+ /*deinit rfkill */
+ rtl_deinit_rfkill(hw);
+
+ rtl_pci_deinit(hw);
+ rtl_deinit_core(hw);
+ rtlpriv->cfg->ops->deinit_sw_leds(hw);
+ _rtl_pci_io_handler_release(hw);
+ rtlpriv->cfg->ops->deinit_sw_vars(hw);
+
+ if (rtlpci->irq_alloc) {
+ free_irq(rtlpci->pdev->irq, hw);
+ rtlpci->irq_alloc = 0;
+ }
+
+ if (rtlpriv->io.pci_mem_start != 0) {
+ pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
+ pci_release_regions(pdev);
+ }
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ ieee80211_free_hw(hw);
+}
+EXPORT_SYMBOL(rtl_pci_disconnect);
+
+/***************************************
+kernel pci power state define:
+PCI_D0 ((pci_power_t __force) 0)
+PCI_D1 ((pci_power_t __force) 1)
+PCI_D2 ((pci_power_t __force) 2)
+PCI_D3hot ((pci_power_t __force) 3)
+PCI_D3cold ((pci_power_t __force) 4)
+PCI_UNKNOWN ((pci_power_t __force) 5)
+
+This function is called when the system
+goes into suspend state. mac80211 will
+call rtl_mac_stop() from its suspend
+function first, so there is no need to
+call hw_disable here.
+****************************************/
+int rtl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtl_pci_suspend);
+
+int rtl_pci_resume(struct pci_dev *pdev)
+{
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ RT_ASSERT(false, ("ERR: <======\n"));
+ return ret;
+ }
+
+ pci_restore_state(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtl_pci_resume);
+
+struct rtl_intf_ops rtl_pci_ops = {
+ .adapter_start = rtl_pci_start,
+ .adapter_stop = rtl_pci_stop,
+ .adapter_tx = rtl_pci_tx,
+ .reset_trx_ring = rtl_pci_reset_trx_ring,
+
+ .disable_aspm = rtl_pci_disable_aspm,
+ .enable_aspm = rtl_pci_enable_aspm,
+};
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
new file mode 100644
index 000000000000..0caa81429726
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -0,0 +1,302 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_PCI_H__
+#define __RTL_PCI_H__
+
+#include <linux/pci.h>
+/*
+0: MPDU packet queue,
+1: Rx command queue
+*/
+#define RTL_PCI_RX_MPDU_QUEUE 0
+#define RTL_PCI_RX_CMD_QUEUE 1
+#define RTL_PCI_MAX_RX_QUEUE 2
+
+#define RTL_PCI_MAX_RX_COUNT 64
+#define RTL_PCI_MAX_TX_QUEUE_COUNT 9
+
+#define RT_TXDESC_NUM 128
+#define RT_TXDESC_NUM_BE_QUEUE 256
+
+#define BK_QUEUE 0
+#define BE_QUEUE 1
+#define VI_QUEUE 2
+#define VO_QUEUE 3
+#define BEACON_QUEUE 4
+#define TXCMD_QUEUE 5
+#define MGNT_QUEUE 6
+#define HIGH_QUEUE 7
+#define HCCA_QUEUE 8
+
+#define RTL_PCI_DEVICE(vend, dev, cfg) \
+ .vendor = (vend), \
+ .device = (dev), \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID,\
+ .driver_data = (kernel_ulong_t)&(cfg)
+
+#define INTEL_VENDOR_ID 0x8086
+#define SIS_VENDOR_ID 0x1039
+#define ATI_VENDOR_ID 0x1002
+#define ATI_DEVICE_ID 0x7914
+#define AMD_VENDOR_ID 0x1022
+
+#define PCI_MAX_BRIDGE_NUMBER 255
+#define PCI_MAX_DEVICES 32
+#define PCI_MAX_FUNCTION 8
+
+#define PCI_CONF_ADDRESS 0x0CF8 /*PCI Configuration Space Address */
+#define PCI_CONF_DATA 0x0CFC /*PCI Configuration Space Data */
+
+#define PCI_CLASS_BRIDGE_DEV 0x06
+#define PCI_SUBCLASS_BR_PCI_TO_PCI 0x04
+#define PCI_CAPABILITY_ID_PCI_EXPRESS 0x10
+#define PCI_CAP_ID_EXP 0x10
+
+#define U1DONTCARE 0xFF
+#define U2DONTCARE 0xFFFF
+#define U4DONTCARE 0xFFFFFFFF
+
+#define RTL_PCI_8192_DID 0x8192 /*8192 PCI-E */
+#define RTL_PCI_8192SE_DID 0x8192 /*8192 SE */
+#define RTL_PCI_8174_DID 0x8174 /*8192 SE */
+#define RTL_PCI_8173_DID 0x8173 /*8191 SE Crab */
+#define RTL_PCI_8172_DID 0x8172 /*8191 SE RE */
+#define RTL_PCI_8171_DID 0x8171 /*8191 SE Unicron */
+#define RTL_PCI_0045_DID 0x0045 /*8190 PCI for Ceraga */
+#define RTL_PCI_0046_DID 0x0046 /*8190 Cardbus for Ceraga */
+#define RTL_PCI_0044_DID 0x0044 /*8192e PCIE for Ceraga */
+#define RTL_PCI_0047_DID 0x0047 /*8192e Express Card for Ceraga */
+#define RTL_PCI_700F_DID 0x700F
+#define RTL_PCI_701F_DID 0x701F
+#define RTL_PCI_DLINK_DID 0x3304
+#define RTL_PCI_8192CET_DID 0x8191 /*8192ce */
+#define RTL_PCI_8192CE_DID 0x8178 /*8192ce */
+#define RTL_PCI_8191CE_DID 0x8177 /*8192ce */
+#define RTL_PCI_8188CE_DID 0x8176 /*8192ce */
+#define RTL_PCI_8192CU_DID 0x8191 /*8192ce */
+#define RTL_PCI_8192DE_DID 0x092D /*8192ce */
+#define RTL_PCI_8192DU_DID 0x092D /*8192ce */
+
+/*8192 support 16 pages of IO registers*/
+#define RTL_MEM_MAPPED_IO_RANGE_8190PCI 0x1000
+#define RTL_MEM_MAPPED_IO_RANGE_8192PCIE 0x4000
+#define RTL_MEM_MAPPED_IO_RANGE_8192SE 0x4000
+#define RTL_MEM_MAPPED_IO_RANGE_8192CE 0x4000
+#define RTL_MEM_MAPPED_IO_RANGE_8192DE 0x4000
+
+#define RTL_PCI_REVISION_ID_8190PCI 0x00
+#define RTL_PCI_REVISION_ID_8192PCIE 0x01
+#define RTL_PCI_REVISION_ID_8192SE 0x10
+#define RTL_PCI_REVISION_ID_8192CE 0x1
+#define RTL_PCI_REVISION_ID_8192DE 0x0
+
+#define RTL_DEFAULT_HARDWARE_TYPE HARDWARE_TYPE_RTL8192CE
+
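+/*
+ *These values are used as indices into the pcibridge_vendors[]
+ *vendor-ID table in pci.c (see _rtl_pci_find_adapter).
+ */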
+enum pci_bridge_vendor {
+ PCI_BRIDGE_VENDOR_INTEL = 0x0, /*0b'0000,0001 */
+ PCI_BRIDGE_VENDOR_ATI, /*0b'0000,0010*/
+ PCI_BRIDGE_VENDOR_AMD, /*0b'0000,0100*/
+ PCI_BRIDGE_VENDOR_SIS, /*0b'0000,1000*/
+ PCI_BRIDGE_VENDOR_UNKNOWN, /*0b'0100,0000*/
+ PCI_BRIDGE_VENDOR_MAX,
+};
+
+struct rtl_rx_desc {
+ u32 dword[8];
+} __packed;
+
+struct rtl_tx_desc {
+ u32 dword[16];
+} __packed;
+
+struct rtl_tx_cmd_desc {
+ u32 dword[16];
+} __packed;
+
+struct rtl8192_tx_ring {
+ struct rtl_tx_desc *desc;
+ dma_addr_t dma;
+ unsigned int idx;
+ unsigned int entries;
+ struct sk_buff_head queue;
+};
+
+struct rtl8192_rx_ring {
+ struct rtl_rx_desc *desc;
+ dma_addr_t dma;
+ unsigned int idx;
+ struct sk_buff *rx_buf[RTL_PCI_MAX_RX_COUNT];
+};
+
+struct rtl_pci {
+ struct pci_dev *pdev;
+
+ bool driver_is_goingto_unload;
+ bool up_first_time;
+ bool being_init_adapter;
+ bool irq_enabled;
+
+ /*Tx */
+ struct rtl8192_tx_ring tx_ring[RTL_PCI_MAX_TX_QUEUE_COUNT];
+ int txringcount[RTL_PCI_MAX_TX_QUEUE_COUNT];
+ u32 transmit_config;
+
+ /*Rx */
+ struct rtl8192_rx_ring rx_ring[RTL_PCI_MAX_RX_QUEUE];
+ int rxringcount;
+ u16 rxbuffersize;
+ u32 receive_config;
+
+ /*irq */
+ u8 irq_alloc;
+ u32 irq_mask[2];
+
+ /*Bcn control register setting */
+ u32 reg_bcn_ctrl_val;
+
+	/*ASPM*/
+	u8 const_pci_aspm;
+ u8 const_amdpci_aspm;
+ u8 const_hwsw_rfoff_d3;
+ u8 const_support_pciaspm;
+ /*pci-e bridge */
+ u8 const_hostpci_aspm_setting;
+ /*pci-e device */
+ u8 const_devicepci_aspm_setting;
+	/*If it supports ASPM, Offset[560h] = 0x40,
+	 *otherwise Offset[560h] = 0x00.
+	 */
+ bool b_support_aspm;
+ bool b_support_backdoor;
+
+ /*QOS & EDCA */
+ enum acm_method acm_method;
+};
+
+struct mp_adapter {
+ u8 linkctrl_reg;
+
+ u8 busnumber;
+ u8 devnumber;
+ u8 funcnumber;
+
+ u8 pcibridge_busnum;
+ u8 pcibridge_devnum;
+ u8 pcibridge_funcnum;
+
+ u8 pcibridge_vendor;
+ u16 pcibridge_vendorid;
+ u16 pcibridge_deviceid;
+
+ u32 pcicfg_addrport;
+ u8 num4bytes;
+
+ u8 pcibridge_pciehdr_offset;
+ u8 pcibridge_linkctrlreg;
+
+ bool amd_l1_patch;
+};
+
+struct rtl_pci_priv {
+ struct rtl_pci dev;
+ struct mp_adapter ndis_adapter;
+ struct rtl_led_ctl ledctl;
+};
+
+#define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv))
+#define rtl_pcidev(pcipriv) (&((pcipriv)->dev))
+
+int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw);
+
+extern struct rtl_intf_ops rtl_pci_ops;
+
+int __devinit rtl_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id);
+void rtl_pci_disconnect(struct pci_dev *pdev);
+int rtl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
+int rtl_pci_resume(struct pci_dev *pdev);
+
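+/*
+ *The helpers below access the chip's registers through the memory
+ *mapped BAR saved in rtlpriv->io.pci_mem_start; the "sync" reads and
+ *"async" writes are both plain MMIO accesses here.
+ */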
+static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return readw((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return readl((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
+{
+ writeb(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void pci_write16_async(struct rtl_priv *rtlpriv,
+ u32 addr, u16 val)
+{
+ writew(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void pci_write32_async(struct rtl_priv *rtlpriv,
+ u32 addr, u32 val)
+{
+ writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+}
+
+static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val)
+{
+ outl(val, port);
+}
+
+static inline void rtl_pci_raw_write_port_uchar(u32 port, u8 val)
+{
+ outb(val, port);
+}
+
+static inline void rtl_pci_raw_read_port_uchar(u32 port, u8 *pval)
+{
+ *pval = inb(port);
+}
+
+static inline void rtl_pci_raw_read_port_ushort(u32 port, u16 *pval)
+{
+ *pval = inw(port);
+}
+
+static inline void rtl_pci_raw_read_port_ulong(u32 port, u32 *pval)
+{
+ *pval = inl(port);
+}
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
new file mode 100644
index 000000000000..6b7e217b6b89
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -0,0 +1,493 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "base.h"
+#include "ps.h"
+
+bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool init_status = true;
+
+ /*<1> reset trx ring */
+ if (rtlhal->interface == INTF_PCI)
+ rtlpriv->intf_ops->reset_trx_ring(hw);
+
+ if (is_hal_stop(rtlhal))
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Driver is already down!\n"));
+
+ /*<2> Enable Adapter */
+ rtlpriv->cfg->ops->hw_init(hw);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ /*init_status = false; */
+
+ /*<3> Enable Interrupt */
+ rtlpriv->cfg->ops->enable_interrupt(hw);
+
+ /*<enable timer> */
+ rtl_watch_dog_timer_callback((unsigned long)hw);
+
+ return init_status;
+}
+EXPORT_SYMBOL(rtl_ps_enable_nic);
+
+bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
+{
+ bool status = true;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /*<1> Stop all timer */
+ rtl_deinit_deferred_work(hw);
+
+ /*<2> Disable Interrupt */
+ rtlpriv->cfg->ops->disable_interrupt(hw);
+
+ /*<3> Disable Adapter */
+ rtlpriv->cfg->ops->hw_disable(hw);
+
+ return status;
+}
+EXPORT_SYMBOL(rtl_ps_disable_nic);
+
+bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate state_toset,
+ u32 changesource, bool protect_or_not)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ enum rf_pwrstate rtstate;
+ bool actionallowed = false;
+ u16 rfwait_cnt = 0;
+ unsigned long flag;
+
+ /*protect_or_not = true; */
+
+ if (protect_or_not)
+ goto no_protect;
+
+	/*
+	 *Only one thread can change
+	 *the RF state at a time; the others
+	 *must wait until it has finished.
+	 */
+ while (true) {
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ if (ppsc->rfchange_inprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock,
+ flag);
+
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+				 ("RF change in progress! "
+				  "Wait to set state_toset(%d).\n",
+ state_toset));
+
+ /* Set RF after the previous action is done. */
+ while (ppsc->rfchange_inprogress) {
+ rfwait_cnt++;
+ mdelay(1);
+
+				/*
+				 *If we wait too long, return false
+				 *to avoid getting stuck here.
+				 */
+ if (rfwait_cnt > 100)
+ return false;
+ }
+ } else {
+ ppsc->rfchange_inprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock,
+ flag);
+ break;
+ }
+ }
+
+no_protect:
+ rtstate = ppsc->rfpwr_state;
+
+ switch (state_toset) {
+ case ERFON:
+ ppsc->rfoff_reason &= (~changesource);
+
+		if ((changesource == RF_CHANGE_BY_HW) &&
+		    ppsc->hwradiooff) {
+ ppsc->hwradiooff = false;
+ }
+
+ if (!ppsc->rfoff_reason) {
+ ppsc->rfoff_reason = 0;
+ actionallowed = true;
+ }
+
+ break;
+
+ case ERFOFF:
+
+		if ((changesource == RF_CHANGE_BY_HW) &&
+		    !ppsc->hwradiooff) {
+ ppsc->hwradiooff = true;
+ }
+
+ ppsc->rfoff_reason |= changesource;
+ actionallowed = true;
+ break;
+
+ case ERFSLEEP:
+ ppsc->rfoff_reason |= changesource;
+ actionallowed = true;
+ break;
+
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+
+ if (actionallowed)
+ rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
+
+ if (!protect_or_not) {
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ }
+
+ return actionallowed;
+}
+EXPORT_SYMBOL(rtl_ps_set_rf_state);
+
+static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ ppsc->swrf_processing = true;
+
+ if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) {
+ if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM) &&
+ rtlhal->interface == INTF_PCI) {
+ rtlpriv->intf_ops->disable_aspm(hw);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+ }
+ }
+
+ rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate,
+ RF_CHANGE_BY_IPS, false);
+
+ if (ppsc->inactive_pwrstate == ERFOFF &&
+ rtlhal->interface == INTF_PCI) {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) {
+ rtlpriv->intf_ops->enable_aspm(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+ }
+ }
+
+ ppsc->swrf_processing = false;
+}
+
+void rtl_ips_nic_off_wq_callback(void *data)
+{
+ struct rtl_works *rtlworks =
+ container_of_dwork_rtl(data, struct rtl_works, ips_nic_off_wq);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ enum rf_pwrstate rtstate;
+
+ if (mac->opmode != NL80211_IFTYPE_STATION) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("not station return\n"));
+ return;
+ }
+
+ if (is_hal_stop(rtlhal))
+ return;
+
+ if (rtlpriv->sec.being_setkey)
+ return;
+
+ if (ppsc->inactiveps) {
+ rtstate = ppsc->rfpwr_state;
+
+ /*
+ *Do not enter IPS in the following conditions:
+ *(1) RF is already OFF or Sleep
+		 *(2) swrf_processing (indicates the IPS is still in progress)
+		 *(3) Connected (only disconnected can trigger IPS)
+ *(4) IBSS (send Beacon)
+ *(5) AP mode (send Beacon)
+ *(6) monitor mode (rcv packet)
+ */
+
+ if (rtstate == ERFON &&
+ !ppsc->swrf_processing &&
+ (mac->link_state == MAC80211_NOLINK) &&
+ !mac->act_scanning) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ ("IPSEnter(): Turn off RF.\n"));
+
+ ppsc->inactive_pwrstate = ERFOFF;
+ ppsc->in_powersavemode = true;
+
+ /*rtl_pci_reset_trx_ring(hw); */
+ _rtl_ps_inactive_ps(hw);
+ }
+ }
+}
+
+void rtl_ips_nic_off(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /*
+	 *because when linking with an AP, mac80211 will ask us
+	 *to disable the nic quickly after scan, before linking,
+	 *which would cause the link to fail, so we delay 100ms here
+ */
+ queue_delayed_work(rtlpriv->works.rtl_wq,
+ &rtlpriv->works.ips_nic_off_wq, MSECS(100));
+}
+
+void rtl_ips_nic_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ enum rf_pwrstate rtstate;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
+
+ if (ppsc->inactiveps) {
+ rtstate = ppsc->rfpwr_state;
+
+ if (rtstate != ERFON &&
+ !ppsc->swrf_processing &&
+ ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) {
+
+ ppsc->inactive_pwrstate = ERFON;
+ ppsc->in_powersavemode = false;
+
+ _rtl_ps_inactive_ps(hw);
+ }
+ }
+
+ spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags);
+}
+
+/*for FW LPS*/
+
+/*
+ *Determine whether we can set the Fw into PS mode
+ *under the current conditions. Return true if it
+ *can enter PS mode.
+ */
+static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u32 ps_timediff;
+
+ ps_timediff = jiffies_to_msecs(jiffies -
+ ppsc->last_delaylps_stamp_jiffies);
+
+ if (ps_timediff < 2000) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+			 ("Delay entering Fw LPS while DHCP, ARP,"
+			  " or EAPOL exchange is in progress.\n"));
+ return false;
+ }
+
+ if (mac->link_state != MAC80211_LINKED)
+ return false;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ return false;
+
+ return true;
+}
+
+/* Change the current power save mode. */
+static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u8 rpwm_val, fw_pwrmode;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ return;
+
+ if (mac->link_state != MAC80211_LINKED)
+ return;
+
+ if (ppsc->dot11_psmode == rt_psmode)
+ return;
+
+ /* Update power save mode configured. */
+ ppsc->dot11_psmode = rt_psmode;
+
+ /*
+ *<FW control LPS>
+ *1. Enter PS mode
+ * Set RPWM to Fw to turn RF off and send H2C fw_pwrmode
+ * cmd to set Fw into PS mode.
+ *2. Leave PS mode
+ * Send H2C fw_pwrmode cmd to Fw to set Fw into Active
+ * mode and set RPWM to turn RF on.
+ */
+
+ if ((ppsc->fwctrl_lps) && (ppsc->leisure_ps) &&
+ ppsc->report_linked) {
+ bool fw_current_inps;
+ if (ppsc->dot11_psmode == EACTIVE) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("FW LPS leave ps_mode:%x\n",
+ FW_PS_ACTIVE_MODE));
+
+ rpwm_val = 0x0C; /* RF on */
+ fw_pwrmode = FW_PS_ACTIVE_MODE;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+ (u8 *) (&rpwm_val));
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_PWRMODE,
+ (u8 *) (&fw_pwrmode));
+ fw_current_inps = false;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&fw_current_inps));
+
+ } else {
+ if (rtl_get_fwlps_doze(hw)) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("FW LPS enter ps_mode:%x\n",
+ ppsc->fwctrl_psmode));
+
+ rpwm_val = 0x02; /* RF off */
+ fw_current_inps = true;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&fw_current_inps));
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_H2C_FW_PWRMODE,
+ (u8 *) (&ppsc->fwctrl_psmode));
+
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_SET_RPWM,
+ (u8 *) (&rpwm_val));
+ } else {
+ /* Reset the power save related parameters. */
+ ppsc->dot11_psmode = EACTIVE;
+ }
+ }
+ }
+}
+
+/*Enter the leisure power save mode.*/
+void rtl_lps_enter(struct ieee80211_hw *hw)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned long flag;
+
+ if (!(ppsc->fwctrl_lps && ppsc->leisure_ps))
+ return;
+
+ if (rtlpriv->sec.being_setkey)
+ return;
+
+ if (rtlpriv->link_info.busytraffic)
+ return;
+
+	/*sleep a while after linking, so DHCP and the 4-way handshake can finish */
+ if (mac->cnt_after_linked < 5)
+ return;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ return;
+
+ if (mac->link_state != MAC80211_LINKED)
+ return;
+
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+
+ if (ppsc->leisure_ps) {
+		/* Enter PS only if we connected to the AP a while ago. */
+ if (mac->cnt_after_linked >= 2) {
+ if (ppsc->dot11_psmode == EACTIVE) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("Enter 802.11 power save mode...\n"));
+
+ rtl_lps_set_psmode(hw, EAUTOPS);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+}
+
+/*Leave the leisure power save mode.*/
+void rtl_lps_leave(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ unsigned long flag;
+
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+
+ if (ppsc->fwctrl_lps && ppsc->leisure_ps) {
+ if (ppsc->dot11_psmode != EACTIVE) {
+
+ /*FIX ME */
+ rtlpriv->cfg->ops->enable_interrupt(hw);
+
+ if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_LPS_LEVEL_ASPM) &&
+ rtlhal->interface == INTF_PCI) {
+ rtlpriv->intf_ops->disable_aspm(hw);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_LPS_LEVEL_ASPM);
+ }
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+				 ("Busy traffic, leave 802.11 power save.\n"));
+
+ rtl_lps_set_psmode(hw, EACTIVE);
+ }
+ }
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+}
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
new file mode 100644
index 000000000000..ae56da801a23
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __REALTEK_RTL_PCI_PS_H__
+#define __REALTEK_RTL_PCI_PS_H__
+
+bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate state_toset, u32 changesource,
+ bool protect_or_not);
+bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
+bool rtl_ps_disable_nic(struct ieee80211_hw *hw);
+void rtl_ips_nic_off(struct ieee80211_hw *hw);
+void rtl_ips_nic_on(struct ieee80211_hw *hw);
+void rtl_ips_nic_off_wq_callback(void *data);
+void rtl_lps_enter(struct ieee80211_hw *hw);
+void rtl_lps_leave(struct ieee80211_hw *hw);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
new file mode 100644
index 000000000000..91634107434a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -0,0 +1,329 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "base.h"
+#include "rc.h"
+
+/*
+ *Finds the highest rate index we can use.
+ *If the skb is special data like DHCP/EAPOL, we set
+ *it to the lowest rate CCK_1M, otherwise we set the rate to
+ *CCK_11M or OFDM_54M based on the wireless mode.
+ */
+static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
+ struct sk_buff *skb, bool not_data)
+{
+ struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
+
+	/*
+	 *mgmt frames use 1M; although rate_control_send_low
+	 *has already checked this before this function is called,
+	 *we still check it here
+	 */
+ if (not_data)
+ return rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M];
+
+	/*
+	 *this rate is not the true transmit rate; the firmware
+	 *controls the rate entirely. It is only used to
+	 *1. show the rate in iwconfig in B/G mode
+	 *2. in rtl_get_tcb_desc, when we see the rate is
+	 *   1M we will not use the FW rate but the user rate.
+	 */
+ if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true)) {
+ return rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M];
+ } else {
+ if (rtlmac->mode == WIRELESS_MODE_B)
+ return rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M];
+ else
+ return rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M];
+ }
+}
+
+static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
+ struct ieee80211_tx_rate *rate,
+ struct ieee80211_tx_rate_control *txrc,
+ u8 tries, u8 rix, int rtsctsenable,
+ bool not_data)
+{
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ rate->count = tries;
+ rate->idx = (rix > 0x2) ? rix : 0x2;
+
+ if (!not_data) {
+ if (txrc->short_preamble)
+ rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
+ if (mac->bw_40)
+ rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (mac->sgi_20 || mac->sgi_40)
+ rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (mac->ht_enable)
+ rate->flags |= IEEE80211_TX_RC_MCS;
+ }
+}
+
+static void rtl_get_rate(void *ppriv, struct ieee80211_sta *sta,
+ void *priv_sta, struct ieee80211_tx_rate_control *txrc)
+{
+ struct rtl_priv *rtlpriv = ppriv;
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rates = tx_info->control.rates;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 fc = hdr->frame_control;
+ u8 try_per_rate, i, rix;
+ bool not_data = !ieee80211_is_data(fc);
+
+ if (rate_control_send_low(sta, priv_sta, txrc))
+ return;
+
+ rix = _rtl_rc_get_highest_rix(rtlpriv, skb, not_data);
+
+ try_per_rate = 1;
+ _rtl_rc_rate_set_series(rtlpriv, &rates[0], txrc,
+ try_per_rate, rix, 1, not_data);
+
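+	/* for data frames, fill the remaining retry stages with progressively lower rates */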
+ if (!not_data) {
+ for (i = 1; i < 4; i++)
+ _rtl_rc_rate_set_series(rtlpriv, &rates[i],
+ txrc, i, (rix - i), 1,
+ not_data);
+ }
+}
+
+static bool _rtl_tx_aggr_check(struct rtl_priv *rtlpriv, u16 tid)
+{
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ if (mac->act_scanning)
+ return false;
+
+ if (mac->cnt_after_linked < 3)
+ return false;
+
+ if (mac->tids[tid].agg.agg_state == RTL_AGG_OFF)
+ return true;
+
+ return false;
+}
+
+/*mac80211 Rate Control callbacks*/
+static void rtl_tx_status(void *ppriv,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = ppriv;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct ieee80211_hdr *hdr;
+ __le16 fc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+
+ if (!priv_sta || !ieee80211_is_data(fc))
+ return;
+
+ if (rtl_is_special_data(mac->hw, skb, true))
+ return;
+
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr))
+ || is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ return;
+
+ /* Check if aggregation has to be enabled for this tid */
+ if (conf_is_ht(&mac->hw->conf) &&
+ !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc, tid;
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+
+ if (_rtl_tx_aggr_check(rtlpriv, tid))
+ ieee80211_start_tx_ba_session(sta, tid, 5000);
+ }
+ }
+}
+
+static void rtl_rate_init(void *ppriv,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+ struct rtl_priv *rtlpriv = ppriv;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ u8 is_ht = conf_is_ht(&mac->hw->conf);
+
+ if ((mac->opmode == NL80211_IFTYPE_STATION) ||
+ (mac->opmode == NL80211_IFTYPE_MESH_POINT) ||
+ (mac->opmode == NL80211_IFTYPE_ADHOC)) {
+
+ switch (sband->band) {
+ case IEEE80211_BAND_2GHZ:
+ rtlpriv->rate_priv->cur_ratetab_idx =
+ RATR_INX_WIRELESS_G;
+ if (is_ht)
+ rtlpriv->rate_priv->cur_ratetab_idx =
+ RATR_INX_WIRELESS_NGB;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rtlpriv->rate_priv->cur_ratetab_idx =
+ RATR_INX_WIRELESS_A;
+ if (is_ht)
+ rtlpriv->rate_priv->cur_ratetab_idx =
+ RATR_INX_WIRELESS_NGB;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Invalid band\n"));
+ rtlpriv->rate_priv->cur_ratetab_idx =
+ RATR_INX_WIRELESS_NGB;
+ break;
+ }
+
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_DMESG,
+ ("Choosing rate table index: %d\n",
+ rtlpriv->rate_priv->cur_ratetab_idx));
+
+ }
+
+}
+
+static void rtl_rate_update(void *ppriv,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *priv_sta,
+ u32 changed,
+ enum nl80211_channel_type oper_chan_type)
+{
+ struct rtl_priv *rtlpriv = ppriv;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ bool oper_cw40 = false, oper_sgi40;
+ bool local_cw40 = mac->bw_40;
+ bool local_sgi40 = mac->sgi_40;
+ u8 is_ht = conf_is_ht(&mac->hw->conf);
+
+ if (changed & IEEE80211_RC_HT_CHANGED) {
+ if (mac->opmode != NL80211_IFTYPE_STATION)
+ return;
+
+ if (rtlhal->hw->conf.channel_type == NL80211_CHAN_HT40MINUS ||
+ rtlhal->hw->conf.channel_type == NL80211_CHAN_HT40PLUS)
+ oper_cw40 = true;
+
+ oper_sgi40 = mac->sgi_40;
+
+ if ((local_cw40 != oper_cw40) || (local_sgi40 != oper_sgi40)) {
+ switch (sband->band) {
+ case IEEE80211_BAND_2GHZ:
+ rtlpriv->rate_priv->cur_ratetab_idx =
+ RATR_INX_WIRELESS_G;
+ if (is_ht)
+ rtlpriv->rate_priv->cur_ratetab_idx =
+ RATR_INX_WIRELESS_NGB;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rtlpriv->rate_priv->cur_ratetab_idx =
+ RATR_INX_WIRELESS_A;
+ if (is_ht)
+ rtlpriv->rate_priv->cur_ratetab_idx =
+ RATR_INX_WIRELESS_NGB;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Invalid band\n"));
+ rtlpriv->rate_priv->cur_ratetab_idx =
+ RATR_INX_WIRELESS_NGB;
+ break;
+ }
+ }
+ }
+}
+
+static void *rtl_rate_alloc(struct ieee80211_hw *hw,
+ struct dentry *debugfsdir)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ return rtlpriv;
+}
+
+static void rtl_rate_free(void *rtlpriv)
+{
+ return;
+}
+
+static void *rtl_rate_alloc_sta(void *ppriv,
+ struct ieee80211_sta *sta, gfp_t gfp)
+{
+ struct rtl_priv *rtlpriv = ppriv;
+ struct rtl_rate_priv *rate_priv;
+
+ rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp);
+ if (!rate_priv) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Unable to allocate private rc structure\n"));
+ return NULL;
+ }
+
+ rtlpriv->rate_priv = rate_priv;
+
+ return rate_priv;
+}
+
+static void rtl_rate_free_sta(void *rtlpriv,
+ struct ieee80211_sta *sta, void *priv_sta)
+{
+ struct rtl_rate_priv *rate_priv = priv_sta;
+ kfree(rate_priv);
+}
+
+static struct rate_control_ops rtl_rate_ops = {
+ .module = NULL,
+ .name = "rtl_rc",
+ .alloc = rtl_rate_alloc,
+ .free = rtl_rate_free,
+ .alloc_sta = rtl_rate_alloc_sta,
+ .free_sta = rtl_rate_free_sta,
+ .rate_init = rtl_rate_init,
+ .rate_update = rtl_rate_update,
+ .tx_status = rtl_tx_status,
+ .get_rate = rtl_get_rate,
+};
+
+int rtl_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rtl_rate_ops);
+}
+
+void rtl_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rtl_rate_ops);
+}
diff --git a/drivers/net/wireless/rtlwifi/rc.h b/drivers/net/wireless/rtlwifi/rc.h
new file mode 100644
index 000000000000..b4667c035f0b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rc.h
@@ -0,0 +1,40 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_RC_H__
+#define __RTL_RC_H__
+
+struct rtl_rate_priv {
+ u8 cur_ratetab_idx;
+ u8 ht_cap;
+};
+
+int rtl_rate_control_register(void);
+void rtl_rate_control_unregister(void);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
new file mode 100644
index 000000000000..3336ca999dfd
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -0,0 +1,400 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "wifi.h"
+#include "regd.h"
+
+static struct country_code_to_enum_rd allCountries[] = {
+ {COUNTRY_CODE_FCC, "US"},
+ {COUNTRY_CODE_IC, "US"},
+ {COUNTRY_CODE_ETSI, "EC"},
+ {COUNTRY_CODE_SPAIN, "EC"},
+ {COUNTRY_CODE_FRANCE, "EC"},
+ {COUNTRY_CODE_MKK, "JP"},
+ {COUNTRY_CODE_MKK1, "JP"},
+ {COUNTRY_CODE_ISRAEL, "EC"},
+ {COUNTRY_CODE_TELEC, "JP"},
+ {COUNTRY_CODE_MIC, "JP"},
+ {COUNTRY_CODE_GLOBAL_DOMAIN, "JP"},
+ {COUNTRY_CODE_WORLD_WIDE_13, "EC"},
+ {COUNTRY_CODE_TELEC_NETGEAR, "EC"},
+};
+
+/*
+ *Only these channels allow active
+ *scan on all world regulatory domains
+ */
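+/*
+ *Each REG_RULE below is (start_freq, end_freq, max_bw, max_antenna_gain,
+ *max_eirp, flags): frequencies in MHz, 40 MHz bandwidth, 20 dBm EIRP.
+ */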
+#define RTL819x_2GHZ_CH01_11 \
+ REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
+
+/*
+ *We enable active scan on these on a
+ *case-by-case basis by regulatory domain
+ */
+#define RTL819x_2GHZ_CH12_13 \
+ REG_RULE(2467-10, 2472+10, 40, 0, 20,\
+ NL80211_RRF_PASSIVE_SCAN)
+
+#define RTL819x_2GHZ_CH14 \
+ REG_RULE(2484-10, 2484+10, 40, 0, 20, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_OFDM)
+
+static const struct ieee80211_regdomain rtl_regdom_11 = {
+ .n_reg_rules = 1,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_global = {
+ .n_reg_rules = 3,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ RTL819x_2GHZ_CH14,
+ }
+};
+
+static const struct ieee80211_regdomain rtl_regdom_world = {
+ .n_reg_rules = 2,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ }
+};
+
+static bool _rtl_is_radar_freq(u16 center_freq)
+{
+ return (center_freq >= 5260 && center_freq <= 5700);
+}
+
+static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
+{
+ enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_reg_rule *reg_rule;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+ u32 bandwidth = 0;
+ int r;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+
+ if (!wiphy->bands[band])
+ continue;
+
+ sband = wiphy->bands[band];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ if (_rtl_is_radar_freq(ch->center_freq) ||
+ (ch->flags & IEEE80211_CHAN_RADAR))
+ continue;
+ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
+ r = freq_reg_info(wiphy, ch->center_freq,
+ bandwidth, &reg_rule);
+ if (r)
+ continue;
+
+ /*
+ *If 11d had a rule for this channel ensure
+ *we enable adhoc/beaconing if it allows us to
+ *use it. Note that we would have disabled it
+ *by applying our static world regdomain by
+ *default during init, prior to calling our
+ *regulatory_hint().
+ */
+
+ if (!(reg_rule->flags & NL80211_RRF_NO_IBSS))
+ ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
+ if (!(reg_rule->
+ flags & NL80211_RRF_PASSIVE_SCAN))
+ ch->flags &=
+ ~IEEE80211_CHAN_PASSIVE_SCAN;
+ } else {
+ if (ch->beacon_found)
+ ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN);
+ }
+ }
+ }
+}
+
+/* Allows active scan on Ch 12 and 13 */
+static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator
+ initiator)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ const struct ieee80211_reg_rule *reg_rule;
+ u32 bandwidth = 0;
+ int r;
+
+ sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+
+ /*
+ *If no country IE has been received always enable active scan
+ *on these channels. This is only done for specific regulatory SKUs
+ */
+ if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
+ ch = &sband->channels[11]; /* CH 12 */
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ ch = &sband->channels[12]; /* CH 13 */
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ return;
+ }
+
+ /*
+	 *If a country IE has been received, check its rule for this
+ *channel first before enabling active scan. The passive scan
+ *would have been enforced by the initial processing of our
+ *custom regulatory domain.
+ */
+
+ ch = &sband->channels[11]; /* CH 12 */
+ r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
+ if (!r) {
+ if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ }
+
+ ch = &sband->channels[12]; /* CH 13 */
+ r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
+ if (!r) {
+ if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
+ }
+}
+
+/*
+ *Always apply Radar/DFS rules on
+ *freq range 5260 MHz - 5700 MHz
+ */
+static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+
+ if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+ return;
+
+ sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ if (!_rtl_is_radar_freq(ch->center_freq))
+ continue;
+
+ /*
+ *We always enable radar detection/DFS on this
+ *frequency range. Additionally we also apply on
+ *this frequency range:
+		 *- If STA mode does not yet support DFS, disable
+		 *  active scanning
+		 *- If adhoc mode does not yet support DFS, disable
+		 *  adhoc on this frequency.
+		 *- If AP mode does not yet support radar detection/DFS,
+		 *  do not allow AP mode
+ */
+ if (!(ch->flags & IEEE80211_CHAN_DISABLED))
+ ch->flags |= IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN;
+ }
+}
+
+static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator,
+ struct rtl_regulatory *reg)
+{
+ _rtl_reg_apply_beaconing_flags(wiphy, initiator);
+ _rtl_reg_apply_active_scan_flags(wiphy, initiator);
+ return;
+}
+
+static void _rtl_dump_channel_map(struct wiphy *wiphy)
+{
+ enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!wiphy->bands[band])
+ continue;
+ sband = wiphy->bands[band];
+ for (i = 0; i < sband->n_channels; i++)
+ ch = &sband->channels[i];
+ }
+}
+
+static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct rtl_regulatory *reg)
+{
+ /* We always apply this */
+ _rtl_reg_apply_radar_flags(wiphy);
+
+ switch (request->initiator) {
+ case NL80211_REGDOM_SET_BY_DRIVER:
+ case NL80211_REGDOM_SET_BY_CORE:
+ case NL80211_REGDOM_SET_BY_USER:
+ break;
+ case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ _rtl_reg_apply_world_flags(wiphy, request->initiator, reg);
+ break;
+ }
+
+ _rtl_dump_channel_map(wiphy);
+
+ return 0;
+}
+
+static const struct ieee80211_regdomain *_rtl_regdomain_select(
+ struct rtl_regulatory *reg)
+{
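+ /* Map the EEPROM country code onto one of the static regdomains; unknown codes fall back to the world-wide domain */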
+ switch (reg->country_code) {
+ case COUNTRY_CODE_FCC:
+ case COUNTRY_CODE_IC:
+ return &rtl_regdom_11;
+ case COUNTRY_CODE_ETSI:
+ case COUNTRY_CODE_SPAIN:
+ case COUNTRY_CODE_FRANCE:
+ case COUNTRY_CODE_ISRAEL:
+ case COUNTRY_CODE_TELEC_NETGEAR:
+ return &rtl_regdom_world;
+ case COUNTRY_CODE_MKK:
+ case COUNTRY_CODE_MKK1:
+ case COUNTRY_CODE_TELEC:
+ case COUNTRY_CODE_MIC:
+ return &rtl_regdom_global;
+ case COUNTRY_CODE_GLOBAL_DOMAIN:
+ return &rtl_regdom_global;
+ case COUNTRY_CODE_WORLD_WIDE_13:
+ return &rtl_regdom_world;
+ default:
+ return &rtl_regdom_world;
+ }
+}
+
+static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
+ struct wiphy *wiphy,
+ int (*reg_notifier) (struct wiphy *wiphy,
+ struct regulatory_request *
+ request))
+{
+ const struct ieee80211_regdomain *regd;
+
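+ /* Register the notifier and advertise a custom regdomain; strict enforcement is dropped and beacon hints stay enabled */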
+ wiphy->reg_notifier = reg_notifier;
+ wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+ wiphy->flags &= ~WIPHY_FLAG_STRICT_REGULATORY;
+ wiphy->flags &= ~WIPHY_FLAG_DISABLE_BEACON_HINTS;
+ regd = _rtl_regdomain_select(reg);
+ wiphy_apply_custom_regulatory(wiphy, regd);
+ _rtl_reg_apply_radar_flags(wiphy);
+ _rtl_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
+ return 0;
+}
+
+static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (allCountries[i].countrycode == countrycode)
+ return &allCountries[i];
+ }
+ return NULL;
+}
+
+int rtl_regd_init(struct ieee80211_hw *hw,
+ int (*reg_notifier) (struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct wiphy *wiphy = hw->wiphy;
+ struct country_code_to_enum_rd *country = NULL;
+
+ if (wiphy == NULL)
+ return -EINVAL;
+
+ /* force the channel plan to world wide 13 */
+ rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
+
+ RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
+ (KERN_DEBUG "rtl: EEPROM regdomain: 0x%0x\n",
+ rtlpriv->regd.country_code));
+
+ if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
+ RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
+ (KERN_DEBUG "rtl: EEPROM indicates invalid contry code"
+ "world wide 13 should be used\n"));
+
+ rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
+ }
+
+ country = _rtl_regd_find_country(rtlpriv->regd.country_code);
+
+ if (country) {
+ rtlpriv->regd.alpha2[0] = country->isoName[0];
+ rtlpriv->regd.alpha2[1] = country->isoName[1];
+ } else {
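+ /* Country code not found in the table: fall back to the generic "00" alpha2 */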
+ rtlpriv->regd.alpha2[0] = '0';
+ rtlpriv->regd.alpha2[1] = '0';
+ }
+
+ RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
+ (KERN_DEBUG "rtl: Country alpha2 being used: %c%c\n",
+ rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]));
+
+ _rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier);
+
+ return 0;
+}
+
+int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_REGD, DBG_LOUD, ("\n"));
+
+ return _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
+}
diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/rtlwifi/regd.h
new file mode 100644
index 000000000000..4cdbc4ae76d4
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/regd.h
@@ -0,0 +1,61 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_REGD_H__
+#define __RTL_REGD_H__
+
+struct country_code_to_enum_rd {
+ u16 countrycode;
+ const char *isoName;
+};
+
+enum country_code_type_t {
+ COUNTRY_CODE_FCC = 0,
+ COUNTRY_CODE_IC = 1,
+ COUNTRY_CODE_ETSI = 2,
+ COUNTRY_CODE_SPAIN = 3,
+ COUNTRY_CODE_FRANCE = 4,
+ COUNTRY_CODE_MKK = 5,
+ COUNTRY_CODE_MKK1 = 6,
+ COUNTRY_CODE_ISRAEL = 7,
+ COUNTRY_CODE_TELEC = 8,
+ COUNTRY_CODE_MIC = 9,
+ COUNTRY_CODE_GLOBAL_DOMAIN = 10,
+ COUNTRY_CODE_WORLD_WIDE_13 = 11,
+ COUNTRY_CODE_TELEC_NETGEAR = 12,
+
+ /*add new channel plan above this line */
+ COUNTRY_CODE_MAX
+};
+
+int rtl_regd_init(struct ieee80211_hw *hw,
+ int (*reg_notifier) (struct wiphy *wiphy,
+ struct regulatory_request *request));
+int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/rtlwifi/rtl8192c/Makefile
new file mode 100644
index 000000000000..aee42d7ae8a2
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/Makefile
@@ -0,0 +1,9 @@
+rtl8192c-common-objs := \
+ main.o \
+ dm_common.o \
+ fw_common.o \
+ phy_common.o
+
+obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c-common.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
new file mode 100644
index 000000000000..bb023274414c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -0,0 +1,1398 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "dm_common.h"
+
+struct dig_t dm_digtable;
+static struct ps_t dm_pstable;
+
+static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
+ 0x7f8001fe,
+ 0x788001e2,
+ 0x71c001c7,
+ 0x6b8001ae,
+ 0x65400195,
+ 0x5fc0017f,
+ 0x5a400169,
+ 0x55400155,
+ 0x50800142,
+ 0x4c000130,
+ 0x47c0011f,
+ 0x43c0010f,
+ 0x40000100,
+ 0x3c8000f2,
+ 0x390000e4,
+ 0x35c000d7,
+ 0x32c000cb,
+ 0x300000c0,
+ 0x2d4000b5,
+ 0x2ac000ab,
+ 0x288000a2,
+ 0x26000098,
+ 0x24000090,
+ 0x22000088,
+ 0x20000080,
+ 0x1e400079,
+ 0x1c800072,
+ 0x1b00006c,
+ 0x19800066,
+ 0x18000060,
+ 0x16c0005b,
+ 0x15800056,
+ 0x14400051,
+ 0x1300004c,
+ 0x12000048,
+ 0x11000044,
+ 0x10000040,
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
+};
+
+static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
+{
+ dm_digtable.dig_enable_flag = true;
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+ dm_digtable.cur_igvalue = 0x20;
+ dm_digtable.pre_igvalue = 0x0;
+ dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+ dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
+ dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
+ dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
+ dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
+ dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+ dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+ dm_digtable.rx_gain_range_max = DM_DIG_MAX;
+ dm_digtable.rx_gain_range_min = DM_DIG_MIN;
+ dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
+ dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
+ dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+ dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
+ dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
+}
+
+static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ long rssi_val_min = 0;
+
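+ /* With both a default-port link and at least one associated entry, use the smaller of the two smoothed PWDB values */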
+ if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
+ (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
+ if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
+ rssi_val_min =
+ (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
+ rtlpriv->dm.undecorated_smoothed_pwdb) ?
+ rtlpriv->dm.undecorated_smoothed_pwdb :
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ else
+ rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
+ } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
+ dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
+ rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
+ } else if (dm_digtable.curmultista_connectstate ==
+ DIG_MULTISTA_CONNECT) {
+ rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ }
+
+ return (u8) rssi_val_min;
+}
+
+static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+ u32 ret_value;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
+
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
+ falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
+
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
+ falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
+ falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
+
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
+ falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
+ falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
+
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
+ falsealm_cnt->cnt_cck_fail = ret_value;
+
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
+ falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
+ falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail +
+ falsealm_cnt->cnt_mcs_fail +
+ falsealm_cnt->cnt_cck_fail);
+
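+ /* Reset the OFDM and CCK false-alarm counters via their control bits so the next period starts clean */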
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
+ "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
+}
+
+static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value_igi = dm_digtable.cur_igvalue;
+
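+ /* Step the initial gain up or down depending on which false-alarm band the last count fell into, then clamp it */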
+ if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
+ value_igi--;
+ else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
+ value_igi += 0;
+ else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
+ value_igi++;
+ else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
+ value_igi += 2;
+ if (value_igi > DM_DIG_FA_UPPER)
+ value_igi = DM_DIG_FA_UPPER;
+ else if (value_igi < DM_DIG_FA_LOWER)
+ value_igi = DM_DIG_FA_LOWER;
+ if (rtlpriv->falsealm_cnt.cnt_all > 10000)
+ value_igi = 0x32;
+
+ dm_digtable.cur_igvalue = value_igi;
+ rtl92c_dm_write_dig(hw);
+}
+
+static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
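+ /* Adjust the backoff by false-alarm pressure, then set IGI to rssi_val_min + 10 - backoff_val clamped to the RX gain range */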
+ if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
+ if ((dm_digtable.backoff_val - 2) <
+ dm_digtable.backoff_val_range_min)
+ dm_digtable.backoff_val =
+ dm_digtable.backoff_val_range_min;
+ else
+ dm_digtable.backoff_val -= 2;
+ } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
+ if ((dm_digtable.backoff_val + 2) >
+ dm_digtable.backoff_val_range_max)
+ dm_digtable.backoff_val =
+ dm_digtable.backoff_val_range_max;
+ else
+ dm_digtable.backoff_val += 2;
+ }
+
+ if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
+ dm_digtable.rx_gain_range_max)
+ dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
+ else if ((dm_digtable.rssi_val_min + 10 -
+ dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
+ dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
+ else
+ dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
+ dm_digtable.backoff_val;
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("rssi_val_min = %x backoff_val %x\n",
+ dm_digtable.rssi_val_min, dm_digtable.backoff_val));
+
+ rtl92c_dm_write_dig(hw);
+}
+
+static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
+{
+ static u8 binitialized; /* initialized to false */
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ bool multi_sta = false;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ multi_sta = true;
+
+ if ((multi_sta == false) || (dm_digtable.cursta_connectctate !=
+ DIG_STA_DISCONNECT)) {
+ binitialized = false;
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+ return;
+ } else if (binitialized == false) {
+ binitialized = true;
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
+ dm_digtable.cur_igvalue = 0x20;
+ rtl92c_dm_write_dig(hw);
+ }
+
+ if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
+ if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
+ (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
+
+ if (dm_digtable.dig_ext_port_stage ==
+ DIG_EXT_PORT_STAGE_2) {
+ dm_digtable.cur_igvalue = 0x20;
+ rtl92c_dm_write_dig(hw);
+ }
+
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
+ } else if (rssi_strength > dm_digtable.rssi_highthresh) {
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
+ rtl92c_dm_ctrl_initgain_by_fa(hw);
+ }
+ } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
+ dm_digtable.cur_igvalue = 0x20;
+ rtl92c_dm_write_dig(hw);
+ }
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("curmultista_connectstate = "
+ "%x dig_ext_port_stage %x\n",
+ dm_digtable.curmultista_connectstate,
+ dm_digtable.dig_ext_port_stage));
+}
+
+static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("presta_connectstate = %x,"
+ " cursta_connectctate = %x\n",
+ dm_digtable.presta_connectstate,
+ dm_digtable.cursta_connectctate));
+
+ if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
+ || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
+ || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
+
+ if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
+ dm_digtable.rssi_val_min =
+ rtl92c_dm_initial_gain_min_pwdb(hw);
+ rtl92c_dm_ctrl_initgain_by_rssi(hw);
+ }
+ } else {
+ dm_digtable.rssi_val_min = 0;
+ dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+ dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
+ dm_digtable.cur_igvalue = 0x20;
+ dm_digtable.pre_igvalue = 0;
+ rtl92c_dm_write_dig(hw);
+ }
+}
+
+static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
+ dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
+
+ if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+ if (dm_digtable.rssi_val_min <= 25)
+ dm_digtable.cur_cck_pd_state =
+ CCK_PD_STAGE_LowRssi;
+ else
+ dm_digtable.cur_cck_pd_state =
+ CCK_PD_STAGE_HighRssi;
+ } else {
+ if (dm_digtable.rssi_val_min <= 20)
+ dm_digtable.cur_cck_pd_state =
+ CCK_PD_STAGE_LowRssi;
+ else
+ dm_digtable.cur_cck_pd_state =
+ CCK_PD_STAGE_HighRssi;
+ }
+ } else {
+ dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
+ }
+
+ if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
+ if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+ if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
+ dm_digtable.cur_cck_fa_state =
+ CCK_FA_STAGE_High;
+ else
+ dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
+
+ if (dm_digtable.pre_cck_fa_state !=
+ dm_digtable.cur_cck_fa_state) {
+ if (dm_digtable.cur_cck_fa_state ==
+ CCK_FA_STAGE_Low)
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
+ 0x83);
+ else
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
+ 0xcd);
+
+ dm_digtable.pre_cck_fa_state =
+ dm_digtable.cur_cck_fa_state;
+ }
+
+ rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
+
+ if (IS_92C_SERIAL(rtlhal->version))
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
+ MASKBYTE2, 0xd7);
+ } else {
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
+ rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
+
+ if (IS_92C_SERIAL(rtlhal->version))
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
+ MASKBYTE2, 0xd3);
+ }
+ dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
+ }
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+ ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
+}
+
+static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ if (mac->act_scanning == true)
+ return;
+
+ if ((mac->link_state > MAC80211_NOLINK) &&
+ (mac->link_state < MAC80211_LINKED))
+ dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT;
+ else if (mac->link_state >= MAC80211_LINKED)
+ dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
+ else
+ dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+
+ rtl92c_dm_initial_gain_sta(hw);
+ rtl92c_dm_initial_gain_multi_sta(hw);
+ rtl92c_dm_cck_packet_detection_thresh(hw);
+
+ dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
+
+}
+
+static void rtl92c_dm_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->dm.dm_initialgain_enable == false)
+ return;
+ if (dm_digtable.dig_enable_flag == false)
+ return;
+
+ rtl92c_dm_ctrl_initgain_by_twoport(hw);
+
+}
+
+static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dynamic_txpower_enable = false;
+
+ rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+}
+
+void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ ("cur_igvalue = 0x%x, "
+ "pre_igvalue = 0x%x, backoff_val = %d\n",
+ dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
+ dm_digtable.backoff_val));
+
+ if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
+ rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
+ dm_digtable.cur_igvalue);
+ rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
+ dm_digtable.cur_igvalue);
+
+ dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
+ }
+}
+EXPORT_SYMBOL(rtl92c_dm_write_dig);
+
+static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
+
+ u8 h2c_parameter[3] = { 0 };
+
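+ /* NOTE: the early return below skips the per-entry PWDB bookkeeping and the H2C RSSI report for now */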
+ return;
+
+ if (tmpentry_max_pwdb != 0) {
+ rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
+ tmpentry_max_pwdb;
+ } else {
+ rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
+ }
+
+ if (tmpentry_min_pwdb != 0xff) {
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
+ tmpentry_min_pwdb;
+ } else {
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
+ }
+
+ h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
+ h2c_parameter[0] = 0;
+
+ rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
+}
+
+void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ rtlpriv->dm.current_turbo_edca = false;
+ rtlpriv->dm.is_any_nonbepkts = false;
+ rtlpriv->dm.is_cur_rdlstate = false;
+}
+EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo);
+
+static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ static u64 last_txok_cnt;
+ static u64 last_rxok_cnt;
+ u64 cur_txok_cnt;
+ u64 cur_rxok_cnt;
+ u32 edca_be_ul = 0x5ea42b;
+ u32 edca_be_dl = 0x5ea42b;
+
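+ /* Compare unicast RX and TX byte deltas since the last run: RX-heavy traffic gets the downlink EDCA parameters, otherwise the uplink set is used */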
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ goto dm_checkedcaturbo_exit;
+
+ if (mac->link_state != MAC80211_LINKED) {
+ rtlpriv->dm.current_turbo_edca = false;
+ return;
+ }
+
+ if (!mac->ht_enable) { /*FIX MERGE */
+ if (!(edca_be_ul & 0xffff0000))
+ edca_be_ul |= 0x005e0000;
+
+ if (!(edca_be_dl & 0xffff0000))
+ edca_be_dl |= 0x005e0000;
+ }
+
+ if ((!rtlpriv->dm.is_any_nonbepkts) &&
+ (!rtlpriv->dm.disable_framebursting)) {
+ cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
+ cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
+ if (cur_rxok_cnt > 4 * cur_txok_cnt) {
+ if (!rtlpriv->dm.is_cur_rdlstate ||
+ !rtlpriv->dm.current_turbo_edca) {
+ rtl_write_dword(rtlpriv,
+ REG_EDCA_BE_PARAM,
+ edca_be_dl);
+ rtlpriv->dm.is_cur_rdlstate = true;
+ }
+ } else {
+ if (rtlpriv->dm.is_cur_rdlstate ||
+ !rtlpriv->dm.current_turbo_edca) {
+ rtl_write_dword(rtlpriv,
+ REG_EDCA_BE_PARAM,
+ edca_be_ul);
+ rtlpriv->dm.is_cur_rdlstate = false;
+ }
+ }
+ rtlpriv->dm.current_turbo_edca = true;
+ } else {
+ if (rtlpriv->dm.current_turbo_edca) {
+ u8 tmp = AC0_BE;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AC_PARAM,
+ (u8 *) (&tmp));
+ rtlpriv->dm.current_turbo_edca = false;
+ }
+ }
+
+dm_checkedcaturbo_exit:
+ rtlpriv->dm.is_any_nonbepkts = false;
+ last_txok_cnt = rtlpriv->stats.txbytesunicast;
+ last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+
+static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
+ *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 thermalvalue, delta, delta_lck, delta_iqk;
+ long ele_a, ele_d, temp_cck, val_x, value32;
+ long val_y, ele_c;
+ u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old;
+ int i;
+ bool is2t = IS_92C_SERIAL(rtlhal->version);
+ u8 txpwr_level[2] = {0, 0};
+ u8 ofdm_min_index = 6, rf;
+
+ rtlpriv->dm.txpower_trackingInit = true;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
+
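+ /* Read back the RF thermal meter and track its drift against the EEPROM calibration value */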
+ thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+ "eeprom_thermalmeter 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter));
+
+ rtl92c_phy_ap_calibrate(hw, (thermalvalue -
+ rtlefuse->eeprom_thermalmeter));
+ if (is2t)
+ rf = 2;
+ else
+ rf = 1;
+
+ if (thermalvalue) {
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD) & MASKOFDM_D;
+
+ for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
+ if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
+ ofdm_index_old[0] = (u8) i;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Initial pathA ele_d reg0x%x = 0x%lx, "
+ "ofdm_index=0x%x\n",
+ ROFDM0_XATXIQIMBALANCE,
+ ele_d, ofdm_index_old[0]));
+ break;
+ }
+ }
+
+ if (is2t) {
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD) & MASKOFDM_D;
+
+ for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
+ if (ele_d == (ofdmswing_table[i] &
+ MASKOFDM_D)) {
+ ofdm_index_old[1] = (u8) i;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ ("Initial pathB ele_d reg0x%x = "
+ "0x%lx, ofdm_index=0x%x\n",
+ ROFDM0_XBTXIQIMBALANCE, ele_d,
+ ofdm_index_old[1]));
+ break;
+ }
+ }
+ }
+
+ temp_cck =
+ rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
+
+ for (i = 0; i < CCK_TABLE_LENGTH; i++) {
+ if (rtlpriv->dm.cck_inch14) {
+ if (memcmp((void *)&temp_cck,
+ (void *)&cckswing_table_ch14[i][2],
+ 4) == 0) {
+ cck_index_old = (u8) i;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ ("Initial reg0x%x = 0x%lx, "
+ "cck_index=0x%x, ch 14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14));
+ break;
+ }
+ } else {
+ if (memcmp((void *)&temp_cck,
+ (void *)
+ &cckswing_table_ch1ch13[i][2],
+ 4) == 0) {
+ cck_index_old = (u8) i;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ ("Initial reg0x%x = 0x%lx, "
+ "cck_index=0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14));
+ break;
+ }
+ }
+ }
+
+ if (!rtlpriv->dm.thermalvalue) {
+ rtlpriv->dm.thermalvalue =
+ rtlefuse->eeprom_thermalmeter;
+ rtlpriv->dm.thermalvalue_lck = thermalvalue;
+ rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+ for (i = 0; i < rf; i++)
+ rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
+ rtlpriv->dm.cck_index = cck_index_old;
+ }
+
+ delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue) :
+ (rtlpriv->dm.thermalvalue - thermalvalue);
+
+ delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
+ (rtlpriv->dm.thermalvalue_lck - thermalvalue);
+
+ delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
+ (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+ "eeprom_thermalmeter 0x%x delta 0x%x "
+ "delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck,
+ delta_iqk));
+
+ if (delta_lck > 1) {
+ rtlpriv->dm.thermalvalue_lck = thermalvalue;
+ rtl92c_phy_lc_calibrate(hw);
+ }
+
+ if (delta > 0 && rtlpriv->dm.txpower_track_control) {
+ if (thermalvalue > rtlpriv->dm.thermalvalue) {
+ for (i = 0; i < rf; i++)
+ rtlpriv->dm.ofdm_index[i] -= delta;
+ rtlpriv->dm.cck_index -= delta;
+ } else {
+ for (i = 0; i < rf; i++)
+ rtlpriv->dm.ofdm_index[i] += delta;
+ rtlpriv->dm.cck_index += delta;
+ }
+
+ if (is2t) {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("temp OFDM_A_index=0x%x, "
+ "OFDM_B_index=0x%x,"
+ "cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.ofdm_index[1],
+ rtlpriv->dm.cck_index));
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("temp OFDM_A_index=0x%x,"
+ "cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.cck_index));
+ }
+
+ if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] =
+ rtlpriv->dm.ofdm_index[i]
+ + 1;
+ cck_index = rtlpriv->dm.cck_index + 1;
+ } else {
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] =
+ rtlpriv->dm.ofdm_index[i];
+ cck_index = rtlpriv->dm.cck_index;
+ }
+
+ for (i = 0; i < rf; i++) {
+ if (txpwr_level[i] >= 0 &&
+ txpwr_level[i] <= 26) {
+ if (thermalvalue >
+ rtlefuse->eeprom_thermalmeter) {
+ if (delta < 5)
+ ofdm_index[i] -= 1;
+
+ else
+ ofdm_index[i] -= 2;
+ } else if (delta > 5 && thermalvalue <
+ rtlefuse->
+ eeprom_thermalmeter) {
+ ofdm_index[i] += 1;
+ }
+ } else if (txpwr_level[i] >= 27 &&
+ txpwr_level[i] <= 32
+ && thermalvalue >
+ rtlefuse->eeprom_thermalmeter) {
+ if (delta < 5)
+ ofdm_index[i] -= 1;
+
+ else
+ ofdm_index[i] -= 2;
+ } else if (txpwr_level[i] >= 32 &&
+ txpwr_level[i] <= 38 &&
+ thermalvalue >
+ rtlefuse->eeprom_thermalmeter
+ && delta > 5) {
+ ofdm_index[i] -= 1;
+ }
+ }
+
+ if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
+ if (thermalvalue >
+ rtlefuse->eeprom_thermalmeter) {
+ if (delta < 5)
+ cck_index -= 1;
+
+ else
+ cck_index -= 2;
+ } else if (delta > 5 && thermalvalue <
+ rtlefuse->eeprom_thermalmeter) {
+ cck_index += 1;
+ }
+ } else if (txpwr_level[i] >= 27 &&
+ txpwr_level[i] <= 32 &&
+ thermalvalue >
+ rtlefuse->eeprom_thermalmeter) {
+ if (delta < 5)
+ cck_index -= 1;
+
+ else
+ cck_index -= 2;
+ } else if (txpwr_level[i] >= 32 &&
+ txpwr_level[i] <= 38 &&
+ thermalvalue > rtlefuse->eeprom_thermalmeter
+ && delta > 5) {
+ cck_index -= 1;
+ }
+
+ for (i = 0; i < rf; i++) {
+ if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
+ ofdm_index[i] = OFDM_TABLE_SIZE - 1;
+
+ else if (ofdm_index[i] < ofdm_min_index)
+ ofdm_index[i] = ofdm_min_index;
+ }
+
+ if (cck_index > CCK_TABLE_SIZE - 1)
+ cck_index = CCK_TABLE_SIZE - 1;
+ else if (cck_index < 0)
+ cck_index = 0;
+
+ if (is2t) {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("new OFDM_A_index=0x%x, "
+ "OFDM_B_index=0x%x,"
+ "cck_index=0x%x\n",
+ ofdm_index[0], ofdm_index[1],
+ cck_index));
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("new OFDM_A_index=0x%x,"
+ "cck_index=0x%x\n",
+ ofdm_index[0], cck_index));
+ }
+ }
+
+ if (rtlpriv->dm.txpower_track_control && delta != 0) {
+ ele_d =
+ (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
+ val_x = rtlphy->reg_e94;
+ val_y = rtlphy->reg_e9c;
+
+ if (val_x != 0) {
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+ ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
+
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+ ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
+
+ value32 = (ele_d << 22) |
+ ((ele_c & 0x3F) << 16) | ele_a;
+
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD, value32);
+
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
+ value32);
+
+ value32 = ((val_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(31), value32);
+
+ value32 = ((val_y * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(29), value32);
+ } else {
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD,
+ ofdmswing_table[ofdm_index[0]]);
+
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
+ 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(31) | BIT(29), 0x00);
+ }
+
+ if (!rtlpriv->dm.cck_inch14) {
+ rtl_write_byte(rtlpriv, 0xa22,
+ cckswing_table_ch1ch13[cck_index]
+ [0]);
+ rtl_write_byte(rtlpriv, 0xa23,
+ cckswing_table_ch1ch13[cck_index]
+ [1]);
+ rtl_write_byte(rtlpriv, 0xa24,
+ cckswing_table_ch1ch13[cck_index]
+ [2]);
+ rtl_write_byte(rtlpriv, 0xa25,
+ cckswing_table_ch1ch13[cck_index]
+ [3]);
+ rtl_write_byte(rtlpriv, 0xa26,
+ cckswing_table_ch1ch13[cck_index]
+ [4]);
+ rtl_write_byte(rtlpriv, 0xa27,
+ cckswing_table_ch1ch13[cck_index]
+ [5]);
+ rtl_write_byte(rtlpriv, 0xa28,
+ cckswing_table_ch1ch13[cck_index]
+ [6]);
+ rtl_write_byte(rtlpriv, 0xa29,
+ cckswing_table_ch1ch13[cck_index]
+ [7]);
+ } else {
+ rtl_write_byte(rtlpriv, 0xa22,
+ cckswing_table_ch14[cck_index]
+ [0]);
+ rtl_write_byte(rtlpriv, 0xa23,
+ cckswing_table_ch14[cck_index]
+ [1]);
+ rtl_write_byte(rtlpriv, 0xa24,
+ cckswing_table_ch14[cck_index]
+ [2]);
+ rtl_write_byte(rtlpriv, 0xa25,
+ cckswing_table_ch14[cck_index]
+ [3]);
+ rtl_write_byte(rtlpriv, 0xa26,
+ cckswing_table_ch14[cck_index]
+ [4]);
+ rtl_write_byte(rtlpriv, 0xa27,
+ cckswing_table_ch14[cck_index]
+ [5]);
+ rtl_write_byte(rtlpriv, 0xa28,
+ cckswing_table_ch14[cck_index]
+ [6]);
+ rtl_write_byte(rtlpriv, 0xa29,
+ cckswing_table_ch14[cck_index]
+ [7]);
+ }
+
+ if (is2t) {
+ ele_d = (ofdmswing_table[ofdm_index[1]] &
+ 0xFFC00000) >> 22;
+
+ val_x = rtlphy->reg_eb4;
+ val_y = rtlphy->reg_ebc;
+
+ if (val_x != 0) {
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+ ele_a = ((val_x * ele_d) >> 8) &
+ 0x000003FF;
+
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+ ele_c = ((val_y * ele_d) >> 8) &
+ 0x00003FF;
+
+ value32 = (ele_d << 22) |
+ ((ele_c & 0x3F) << 16) | ele_a;
+ rtl_set_bbreg(hw,
+ ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD, value32);
+
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
+ MASKH4BITS, value32);
+
+ value32 = ((val_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(27), value32);
+
+ value32 = ((val_y * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(25), value32);
+ } else {
+ rtl_set_bbreg(hw,
+ ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD,
+ ofdmswing_table[ofdm_index
+ [1]]);
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
+ MASKH4BITS, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(27) | BIT(25), 0x00);
+ }
+
+ }
+ }
+
+ if (delta_iqk > 3) {
+ rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+ rtl92c_phy_iq_calibrate(hw, false);
+ }
+
+ if (rtlpriv->dm.txpower_track_control)
+ rtlpriv->dm.thermalvalue = thermalvalue;
+ }
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
+
+}
+
+static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.txpower_tracking = true;
+ rtlpriv->dm.txpower_trackingInit = false;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("pMgntInfo->txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking));
+}
+
+static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
+{
+ rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
+}
+
+static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
+{
+ rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
+}
+
+static void rtl92c_dm_check_txpower_tracking_thermal_meter(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ static u8 tm_trigger;
+
+ if (!rtlpriv->dm.txpower_tracking)
+ return;
+
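+ /* Alternate between arming the thermal meter and running the tracking callback on successive calls */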
+ if (!tm_trigger) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
+ 0x60);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Trigger 92S Thermal Meter!!\n"));
+ tm_trigger = 1;
+ return;
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Schedule TxPowerTracking direct call!!\n"));
+ rtl92c_dm_txpower_tracking_directcall(hw);
+ tm_trigger = 0;
+ }
+}
+
+void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
+{
+ rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
+}
+EXPORT_SYMBOL(rtl92c_dm_check_txpower_tracking);
+
+void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rate_adaptive *p_ra = &(rtlpriv->ra);
+
+ p_ra->ratr_state = DM_RATR_STA_INIT;
+ p_ra->pre_ratr_state = DM_RATR_STA_INIT;
+
+ if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+ rtlpriv->dm.useramask = true;
+ else
+ rtlpriv->dm.useramask = false;
+
+}
+EXPORT_SYMBOL(rtl92c_dm_init_rate_adaptive_mask);
+
+static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rate_adaptive *p_ra = &(rtlpriv->ra);
+ u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
+
+ if (is_hal_stop(rtlhal)) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ ("<---- driver is going to unload\n"));
+ return;
+ }
+
+ if (!rtlpriv->dm.useramask) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ ("<---- driver does not control rate adaptive mask\n"));
+ return;
+ }
+
+ if (mac->link_state == MAC80211_LINKED) {
+
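+ /* RSSI thresholds depend on the previous RA state, which gives the level switch some hysteresis */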
+ switch (p_ra->pre_ratr_state) {
+ case DM_RATR_STA_HIGH:
+ high_rssithresh_for_ra = 50;
+ low_rssithresh_for_ra = 20;
+ break;
+ case DM_RATR_STA_MIDDLE:
+ high_rssithresh_for_ra = 55;
+ low_rssithresh_for_ra = 20;
+ break;
+ case DM_RATR_STA_LOW:
+ high_rssithresh_for_ra = 50;
+ low_rssithresh_for_ra = 25;
+ break;
+ default:
+ high_rssithresh_for_ra = 50;
+ low_rssithresh_for_ra = 20;
+ break;
+ }
+
+ if (rtlpriv->dm.undecorated_smoothed_pwdb >
+ (long)high_rssithresh_for_ra)
+ p_ra->ratr_state = DM_RATR_STA_HIGH;
+ else if (rtlpriv->dm.undecorated_smoothed_pwdb >
+ (long)low_rssithresh_for_ra)
+ p_ra->ratr_state = DM_RATR_STA_MIDDLE;
+ else
+ p_ra->ratr_state = DM_RATR_STA_LOW;
+
+ if (p_ra->pre_ratr_state != p_ra->ratr_state) {
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ ("RSSI = %ld\n",
+ rtlpriv->dm.undecorated_smoothed_pwdb));
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
+ RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+ ("PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state));
+
+ rtlpriv->cfg->ops->update_rate_mask(hw,
+ p_ra->ratr_state);
+
+ p_ra->pre_ratr_state = p_ra->ratr_state;
+ }
+ }
+}
+
+static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+ dm_pstable.pre_ccastate = CCA_MAX;
+ dm_pstable.cur_ccasate = CCA_MAX;
+ dm_pstable.pre_rfstate = RF_MAX;
+ dm_pstable.cur_rfstate = RF_MAX;
+ dm_pstable.rssi_val_min = 0;
+}
+
+static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
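+ /* Switch between one- and two-path CCA with hysteresis: 1R above an RSSI of 35, back to 2R below 30 */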
+ if (dm_pstable.rssi_val_min != 0) {
+ if (dm_pstable.pre_ccastate == CCA_2R) {
+ if (dm_pstable.rssi_val_min >= 35)
+ dm_pstable.cur_ccasate = CCA_1R;
+ else
+ dm_pstable.cur_ccasate = CCA_2R;
+ } else {
+ if (dm_pstable.rssi_val_min <= 30)
+ dm_pstable.cur_ccasate = CCA_2R;
+ else
+ dm_pstable.cur_ccasate = CCA_1R;
+ }
+ } else {
+ dm_pstable.cur_ccasate = CCA_MAX;
+ }
+
+ if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) {
+ if (dm_pstable.cur_ccasate == CCA_1R) {
+ if (get_rf_type(rtlphy) == RF_2T2R) {
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
+ MASKBYTE0, 0x13);
+ rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20);
+ } else {
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
+ MASKBYTE0, 0x23);
+ rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c);
+ }
+ } else {
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0,
+ 0x33);
+ rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63);
+ }
+ dm_pstable.pre_ccastate = dm_pstable.cur_ccasate;
+ }
+
+ RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n",
+ (dm_pstable.cur_ccasate ==
+ 0) ? "1RCCA" : "2RCCA"));
+}
+
+void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
+{
+ static u8 initialize;
+ static u32 reg_874, reg_c70, reg_85c, reg_a74;
+
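+ /* Cache the affected register fields on first use so RF_NORMAL can restore them later */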
+ if (initialize == 0) {
+ reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+ MASKDWORD) & 0x1CC000) >> 14;
+
+ reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
+ MASKDWORD) & BIT(3)) >> 3;
+
+ reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
+ MASKDWORD) & 0xFF000000) >> 24;
+
+ reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
+
+ initialize = 1;
+ }
+
+ if (!bforce_in_normal) {
+ if (dm_pstable.rssi_val_min != 0) {
+ if (dm_pstable.pre_rfstate == RF_NORMAL) {
+ if (dm_pstable.rssi_val_min >= 30)
+ dm_pstable.cur_rfstate = RF_SAVE;
+ else
+ dm_pstable.cur_rfstate = RF_NORMAL;
+ } else {
+ if (dm_pstable.rssi_val_min <= 25)
+ dm_pstable.cur_rfstate = RF_NORMAL;
+ else
+ dm_pstable.cur_rfstate = RF_SAVE;
+ }
+ } else {
+ dm_pstable.cur_rfstate = RF_MAX;
+ }
+ } else {
+ dm_pstable.cur_rfstate = RF_NORMAL;
+ }
+
+ if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
+ if (dm_pstable.cur_rfstate == RF_SAVE) {
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+ 0x1C0000, 0x2);
+ rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
+ rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
+ 0xFF000000, 0x63);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+ 0xC000, 0x2);
+ rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
+ rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
+ rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
+ 0x1CC000, reg_874);
+ rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
+ reg_c70);
+ rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
+ reg_85c);
+ rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
+ rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
+ }
+
+ dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
+ }
+}
+EXPORT_SYMBOL(rtl92c_dm_rf_saving);
+
+static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (((mac->link_state == MAC80211_NOLINK)) &&
+ (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+ dm_pstable.rssi_val_min = 0;
+ RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
+ ("Not connected to any\n"));
+ }
+
+ if (mac->link_state == MAC80211_LINKED) {
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ dm_pstable.rssi_val_min =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
+ ("AP Client PWDB = 0x%lx\n",
+ dm_pstable.rssi_val_min));
+ } else {
+ dm_pstable.rssi_val_min =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
+ ("STA Default Port PWDB = 0x%lx\n",
+ dm_pstable.rssi_val_min));
+ }
+ } else {
+ dm_pstable.rssi_val_min =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+
+ RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
+ ("AP Ext Port PWDB = 0x%lx\n",
+ dm_pstable.rssi_val_min));
+ }
+
+ if (IS_92C_SERIAL(rtlhal->version))
+ rtl92c_dm_1r_cca(hw);
+}
+
+void rtl92c_dm_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+ rtl92c_dm_diginit(hw);
+ rtl92c_dm_init_dynamic_txpower(hw);
+ rtl92c_dm_init_edca_turbo(hw);
+ rtl92c_dm_init_rate_adaptive_mask(hw);
+ rtl92c_dm_initialize_txpower_tracking(hw);
+ rtl92c_dm_init_dynamic_bb_powersaving(hw);
+}
+EXPORT_SYMBOL(rtl92c_dm_init);
+
+void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool fw_current_inpsmode = false;
+ bool fw_ps_awake = true;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+ (u8 *) (&fw_current_inpsmode));
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+ (u8 *) (&fw_ps_awake));
+
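+ /* Run the dynamic mechanisms only while the RF is on, the firmware is awake and out of power save, and no RF change is in progress */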
+ if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) &&
+ fw_ps_awake)
+ && (!ppsc->rfchange_inprogress)) {
+ rtl92c_dm_pwdb_monitor(hw);
+ rtl92c_dm_dig(hw);
+ rtl92c_dm_false_alarm_counter_statistics(hw);
+ rtl92c_dm_dynamic_bb_powersaving(hw);
+ rtlpriv->cfg->ops->dm_dynamic_txpower(hw);
+ rtl92c_dm_check_txpower_tracking(hw);
+ rtl92c_dm_refresh_rate_adaptive_mask(hw);
+ rtl92c_dm_check_edca_turbo(hw);
+
+ }
+}
+EXPORT_SYMBOL(rtl92c_dm_watchdog);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
new file mode 100644
index 000000000000..b9cbb0a3c03f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -0,0 +1,204 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92COMMON_DM_H__
+#define __RTL92COMMON_DM_H__
+
+#include "../wifi.h"
+#include "../rtl8192ce/def.h"
+#include "../rtl8192ce/reg.h"
+#include "fw_common.h"
+
+#define HAL_DM_DIG_DISABLE BIT(0)
+#define HAL_DM_HIPWR_DISABLE BIT(1)
+
+#define OFDM_TABLE_LENGTH 37
+#define CCK_TABLE_LENGTH 33
+
+#define OFDM_TABLE_SIZE 37
+#define CCK_TABLE_SIZE 33
+
+#define BW_AUTO_SWITCH_HIGH_LOW 25
+#define BW_AUTO_SWITCH_LOW_HIGH 30
+
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+
+#define DM_FALSEALARM_THRESH_LOW 400
+#define DM_FALSEALARM_THRESH_HIGH 1000
+
+#define DM_DIG_MAX 0x3e
+#define DM_DIG_MIN 0x1e
+
+#define DM_DIG_FA_UPPER 0x32
+#define DM_DIG_FA_LOWER 0x20
+#define DM_DIG_FA_TH0 0x20
+#define DM_DIG_FA_TH1 0x100
+#define DM_DIG_FA_TH2 0x200
+
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+#define DM_DIG_BACKOFF_DEFAULT 10
+
+#define RXPATHSELECTION_SS_TH_lOW 30
+#define RXPATHSELECTION_DIFF_TH 18
+
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+#define CTS2SELF_THVAL 30
+#define REGC38_TH 20
+
+#define WAIOTTHVal 25
+
+#define TXHIGHPWRLEVEL_NORMAL 0
+#define TXHIGHPWRLEVEL_LEVEL1 1
+#define TXHIGHPWRLEVEL_LEVEL2 2
+#define TXHIGHPWRLEVEL_BT1 3
+#define TXHIGHPWRLEVEL_BT2 4
+
+#define DM_TYPE_BYFW 0
+#define DM_TYPE_BYDRIVER 1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+
+struct ps_t {
+ u8 pre_ccastate;
+ u8 cur_ccasate;
+ u8 pre_rfstate;
+ u8 cur_rfstate;
+ long rssi_val_min;
+};
+
+struct dig_t {
+ u8 dig_enable_flag;
+ u8 dig_ext_port_stage;
+ u32 rssi_lowthresh;
+ u32 rssi_highthresh;
+ u32 fa_lowthresh;
+ u32 fa_highthresh;
+ u8 cursta_connectctate;
+ u8 presta_connectstate;
+ u8 curmultista_connectstate;
+ u8 pre_igvalue;
+ u8 cur_igvalue;
+ char backoff_val;
+ char backoff_val_range_max;
+ char backoff_val_range_min;
+ u8 rx_gain_range_max;
+ u8 rx_gain_range_min;
+ u8 rssi_val_min;
+ u8 pre_cck_pd_state;
+ u8 cur_cck_pd_state;
+ u8 pre_cck_fa_state;
+ u8 cur_cck_fa_state;
+ u8 pre_ccastate;
+ u8 cur_ccasate;
+};
+
+struct swat_t {
+ u8 failure_cnt;
+ u8 try_flag;
+ u8 stop_trying;
+ long pre_rssi;
+ long trying_threshold;
+ u8 cur_antenna;
+ u8 pre_antenna;
+};
+
+enum tag_dynamic_init_gain_operation_type_definition {
+ DIG_TYPE_THRESH_HIGH = 0,
+ DIG_TYPE_THRESH_LOW = 1,
+ DIG_TYPE_BACKOFF = 2,
+ DIG_TYPE_RX_GAIN_MIN = 3,
+ DIG_TYPE_RX_GAIN_MAX = 4,
+ DIG_TYPE_ENABLE = 5,
+ DIG_TYPE_DISABLE = 6,
+ DIG_OP_TYPE_MAX
+};
+
+enum tag_cck_packet_detection_threshold_type_definition {
+ CCK_PD_STAGE_LowRssi = 0,
+ CCK_PD_STAGE_HighRssi = 1,
+ CCK_FA_STAGE_Low = 2,
+ CCK_FA_STAGE_High = 3,
+ CCK_PD_STAGE_MAX = 4,
+};
+
+enum dm_1r_cca_e {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+};
+
+enum dm_rf_e {
+ RF_SAVE = 0,
+ RF_NORMAL = 1,
+ RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch_e {
+ ANS_ANTENNA_B = 1,
+ ANS_ANTENNA_A = 2,
+ ANS_ANTENNA_MAX = 3,
+};
+
+enum dm_dig_ext_port_alg_e {
+ DIG_EXT_PORT_STAGE_0 = 0,
+ DIG_EXT_PORT_STAGE_1 = 1,
+ DIG_EXT_PORT_STAGE_2 = 2,
+ DIG_EXT_PORT_STAGE_3 = 3,
+ DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_connect_e {
+ DIG_STA_DISCONNECT = 0,
+ DIG_STA_CONNECT = 1,
+ DIG_STA_BEFORE_CONNECT = 2,
+ DIG_MULTISTA_DISCONNECT = 3,
+ DIG_MULTISTA_CONNECT = 4,
+ DIG_CONNECT_MAX
+};
+
+extern struct dig_t dm_digtable;
+void rtl92c_dm_init(struct ieee80211_hw *hw);
+void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
+void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
+void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
+void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
new file mode 100644
index 000000000000..5ef91374b230
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -0,0 +1,774 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include <linux/firmware.h>
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "../rtl8192ce/reg.h"
+#include "../rtl8192ce/def.h"
+#include "fw_common.h"
+
+static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) {
+ u32 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ if (enable)
+ value32 |= MCUFWDL_EN;
+ else
+ value32 &= ~MCUFWDL_EN;
+ rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+ } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) {
+ u8 tmp;
+ if (enable) {
+
+ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1,
+ tmp | 0x04);
+
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
+
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
+ } else {
+
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
+
+ rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
+ }
+ }
+}
+
+static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
+ const u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 blockSize = sizeof(u32);
+ u8 *bufferPtr = (u8 *) buffer;
+ u32 *pu4BytePtr = (u32 *) buffer;
+ u32 i, offset, blockCount, remainSize;
+
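+ /* Download the image as 32-bit words, then flush any remaining tail bytes one at a time */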
+ blockCount = size / blockSize;
+ remainSize = size % blockSize;
+
+ for (i = 0; i < blockCount; i++) {
+ offset = i * blockSize;
+ rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
+ *(pu4BytePtr + i));
+ }
+
+ if (remainSize) {
+ offset = blockCount * blockSize;
+ bufferPtr += offset;
+ for (i = 0; i < remainSize; i++) {
+ rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
+ offset + i), *(bufferPtr + i));
+ }
+ }
+}
+
+static void _rtl92c_fw_page_write(struct ieee80211_hw *hw,
+ u32 page, const u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value8;
+ u8 u8page = (u8) (page & 0x07);
+
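+ /* Select the download page in the low three bits of REG_MCUFWDL + 2 before streaming this chunk */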
+ value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
+
+ rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
+ _rtl92c_fw_block_write(hw, buffer, size);
+}
+
+static void _rtl92c_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
+{
+ u32 fwlen = *pfwlen;
+ u8 remain = (u8) (fwlen % 4);
+
+ remain = (remain == 0) ? 0 : (4 - remain);
+
+ while (remain > 0) {
+ pfwbuf[fwlen] = 0;
+ fwlen++;
+ remain--;
+ }
+
+ *pfwlen = fwlen;
+}
+
+static void _rtl92c_write_fw(struct ieee80211_hw *hw,
+ enum version_8192c version, u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 *bufferPtr = (u8 *) buffer;
+
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size));
+
+ if (IS_CHIP_VER_B(version)) {
+ u32 pageNums, remainSize;
+ u32 page, offset;
+
+ if (IS_HARDWARE_TYPE_8192CE(rtlhal))
+ _rtl92c_fill_dummy(bufferPtr, &size);
+
+ pageNums = size / FW_8192C_PAGE_SIZE;
+ remainSize = size % FW_8192C_PAGE_SIZE;
+
+ if (pageNums > 4) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Page numbers should not greater then 4\n"));
+ }
+
+ for (page = 0; page < pageNums; page++) {
+ offset = page * FW_8192C_PAGE_SIZE;
+ _rtl92c_fw_page_write(hw, page, (bufferPtr + offset),
+ FW_8192C_PAGE_SIZE);
+ }
+
+ if (remainSize) {
+ offset = pageNums * FW_8192C_PAGE_SIZE;
+ page = pageNums;
+ _rtl92c_fw_page_write(hw, page, (bufferPtr + offset),
+ remainSize);
+ }
+ } else {
+ _rtl92c_fw_block_write(hw, buffer, size);
+ }
+}
+
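+/*
+ * Wait for the firmware checksum report, release the MCU by setting
+ * MCUFWDL_RDY, and then poll WINTINI_RDY until the firmware signals that
+ * it is up and running.  Returns 0 on success, -EIO on timeout.
+ */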
+static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int err = -EIO;
+ u32 counter = 0;
+ u32 value32;
+
+ do {
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
+ (!(value32 & FWDL_ChkSum_rpt)));
+
+ if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("chksum report faill ! REG_MCUFWDL:0x%08x .\n",
+ value32));
+ goto exit;
+ }
+
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ ("Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32));
+
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ value32 |= MCUFWDL_RDY;
+ value32 &= ~WINTINI_RDY;
+ rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+
+ counter = 0;
+
+ do {
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ if (value32 & WINTINI_RDY) {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ ("Polling FW ready success!!"
+ " REG_MCUFWDL:0x%08x .\n",
+ value32));
+ err = 0;
+ goto exit;
+ }
+
+ mdelay(FW_8192C_POLLING_DELAY);
+
+ } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
+
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32));
+
+exit:
+ return err;
+}
+
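+/*
+ * Load the firmware file named by rtlpriv->cfg->fw_name, strip the optional
+ * rtl92c_firmware_header, write the image to the chip and wait for the
+ * firmware to come up.
+ */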
+int rtl92c_download_fw(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl92c_firmware_header *pfwheader;
+ u8 *pfwdata;
+ u32 fwsize;
+ int err;
+ enum version_8192c version = rtlhal->version;
+ const struct firmware *firmware;
+
+ printk(KERN_INFO "rtl8192c: Loading firmware file %s\n",
+ rtlpriv->cfg->fw_name);
+ err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
+ rtlpriv->io.dev);
+ if (err) {
+ printk(KERN_ERR "rtl8192c: Firmware loading failed\n");
+ return 1;
+ }
+
+ if (firmware->size > 0x4000) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Firmware is too big!\n"));
+ release_firmware(firmware);
+ return 1;
+ }
+
+ memcpy(rtlhal->pfirmware, firmware->data, firmware->size);
+ fwsize = firmware->size;
+ release_firmware(firmware);
+
+ pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+ pfwdata = (u8 *) rtlhal->pfirmware;
+
+ if (IS_FW_HEADER_EXIST(pfwheader)) {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+ ("Firmware Version(%d), Signature(%#x),Size(%d)\n",
+ pfwheader->version, pfwheader->signature,
+ (uint)sizeof(struct rtl92c_firmware_header)));
+
+ pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
+ fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+ }
+
+ _rtl92c_enable_fw_download(hw, true);
+ _rtl92c_write_fw(hw, version, pfwdata, fwsize);
+ _rtl92c_enable_fw_download(hw, false);
+
+ err = _rtl92c_fw_free_to_go(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Firmware is not ready to run!\n"));
+ } else {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ ("Firmware is ready to run!\n"));
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtl92c_download_fw);
+
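+/*
+ * Return true once the firmware has consumed the previous H2C command that
+ * was written to mailbox 'boxnum'.
+ */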
+static bool _rtl92c_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 val_hmetfr, val_mcutst_1;
+ bool result = false;
+
+ val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
+ val_mcutst_1 = rtl_read_byte(rtlpriv, (REG_MCUTST_1 + boxnum));
+
+ if (((val_hmetfr >> boxnum) & BIT(0)) == 0 && val_mcutst_1 == 0)
+ result = true;
+ return result;
+}
+
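+/*
+ * Write one host-to-MCU (H2C) command.  Access is serialized with
+ * h2c_lock/h2c_setinprogress; the command bytes are split between the
+ * HMEBOX register of the current mailbox and, for commands longer than
+ * three bytes, the matching HMEBOX_EXT register.  Mailboxes 0-3 are used
+ * in round-robin order.
+ */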
+static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 boxnum;
+ u16 box_reg = 0, box_extreg = 0;
+ u8 u1b_tmp;
+ bool isfw_read = false;
+ u8 buf_index = 0;
+ bool bwrite_sucess = false;
+ u8 wait_h2c_limmit = 100;
+ u8 wait_writeh2c_limmit = 100;
+ u8 boxcontent[4], boxextcontent[2];
+ u32 h2c_waitcounter = 0;
+ unsigned long flag;
+ u8 idx;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("come in\n"));
+
+ while (true) {
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ if (rtlhal->h2c_setinprogress) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("H2C set in progress! Wait to set.."
+ "element_id(%d).\n", element_id));
+
+ while (rtlhal->h2c_setinprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
+ flag);
+ h2c_waitcounter++;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("Wait 100 us (%d times)...\n",
+ h2c_waitcounter));
+ udelay(100);
+
+ if (h2c_waitcounter > 1000)
+ return;
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
+ flag);
+ }
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ } else {
+ rtlhal->h2c_setinprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ break;
+ }
+ }
+
+ while (!bwrite_sucess) {
+ wait_writeh2c_limmit--;
+ if (wait_writeh2c_limmit == 0) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Write H2C fail because no trigger "
+ "for FW INT!\n"));
+ break;
+ }
+
+ boxnum = rtlhal->last_hmeboxnum;
+ switch (boxnum) {
+ case 0:
+ box_reg = REG_HMEBOX_0;
+ box_extreg = REG_HMEBOX_EXT_0;
+ break;
+ case 1:
+ box_reg = REG_HMEBOX_1;
+ box_extreg = REG_HMEBOX_EXT_1;
+ break;
+ case 2:
+ box_reg = REG_HMEBOX_2;
+ box_extreg = REG_HMEBOX_EXT_2;
+ break;
+ case 3:
+ box_reg = REG_HMEBOX_3;
+ box_extreg = REG_HMEBOX_EXT_3;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+
+ isfw_read = _rtl92c_check_fw_read_last_h2c(hw, boxnum);
+ while (!isfw_read) {
+
+ wait_h2c_limmit--;
+ if (wait_h2c_limmit == 0) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("Wating too long for FW read "
+ "clear HMEBox(%d)!\n", boxnum));
+ break;
+ }
+
+ udelay(10);
+
+ isfw_read = _rtl92c_check_fw_read_last_h2c(hw, boxnum);
+ u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("Wating for FW read clear HMEBox(%d)!!! "
+ "0x1BF = %2x\n", boxnum, u1b_tmp));
+ }
+
+ if (!isfw_read) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("Write H2C register BOX[%d] fail!!!!! "
+ "Fw do not read.\n", boxnum));
+ break;
+ }
+
+ memset(boxcontent, 0, sizeof(boxcontent));
+ memset(boxextcontent, 0, sizeof(boxextcontent));
+ boxcontent[0] = element_id;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id));
+
+ switch (cmd_len) {
+ case 1:
+ boxcontent[0] &= ~(BIT(7));
+ memcpy((u8 *) (boxcontent) + 1,
+ p_cmdbuffer + buf_index, 1);
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ }
+ break;
+ case 2:
+ boxcontent[0] &= ~(BIT(7));
+ memcpy((u8 *) (boxcontent) + 1,
+ p_cmdbuffer + buf_index, 2);
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ }
+ break;
+ case 3:
+ boxcontent[0] &= ~(BIT(7));
+ memcpy((u8 *) (boxcontent) + 1,
+ p_cmdbuffer + buf_index, 3);
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ }
+ break;
+ case 4:
+ boxcontent[0] |= (BIT(7));
+ memcpy((u8 *) (boxextcontent),
+ p_cmdbuffer + buf_index, 2);
+ memcpy((u8 *) (boxcontent) + 1,
+ p_cmdbuffer + buf_index + 2, 2);
+
+ for (idx = 0; idx < 2; idx++) {
+ rtl_write_byte(rtlpriv, box_extreg + idx,
+ boxextcontent[idx]);
+ }
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ }
+ break;
+ case 5:
+ boxcontent[0] |= (BIT(7));
+ memcpy((u8 *) (boxextcontent),
+ p_cmdbuffer + buf_index, 2);
+ memcpy((u8 *) (boxcontent) + 1,
+ p_cmdbuffer + buf_index + 2, 3);
+
+ for (idx = 0; idx < 2; idx++) {
+ rtl_write_byte(rtlpriv, box_extreg + idx,
+ boxextcontent[idx]);
+ }
+
+ for (idx = 0; idx < 4; idx++) {
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ }
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+
+ bwrite_sucess = true;
+
+ rtlhal->last_hmeboxnum = boxnum + 1;
+ if (rtlhal->last_hmeboxnum == 4)
+ rtlhal->last_hmeboxnum = 0;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum));
+ }
+
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ rtlhal->h2c_setinprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n"));
+}
+
+void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 tmp_cmdbuf[2];
+
+ if (!rtlhal->fw_ready) {
+ RT_ASSERT(false, ("return H2C cmd because of FW "
+ "download failure!\n"));
+ return;
+ }
+
+ memset(tmp_cmdbuf, 0, sizeof(tmp_cmdbuf));
+ memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len);
+ _rtl92c_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf);
+
+ return;
+}
+EXPORT_SYMBOL(rtl92c_fill_h2c_cmd);
+
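+/*
+ * Trigger an 8051 firmware self-reset through REG_HMETFR and wait for
+ * BIT(2) of REG_SYS_FUNC_EN + 1 to clear.
+ */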
+void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
+{
+ u8 u1b_tmp;
+ u8 delay = 100;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+
+ while (u1b_tmp & BIT(2)) {
+ delay--;
+ if (delay == 0) {
+ RT_ASSERT(false, ("8051 reset fail.\n"));
+ break;
+ }
+ udelay(50);
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ }
+}
+EXPORT_SYMBOL(rtl92c_firmware_selfreset);
+
+void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 u1_h2c_set_pwrmode[3] = {0};
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("FW LPS mode = %d\n", mode));
+
+ SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
+ SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1);
+ SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
+ ppsc->reg_max_lps_awakeintvl);
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl92c_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
+ u1_h2c_set_pwrmode, 3);
+ rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
+}
+EXPORT_SYMBOL(rtl92c_set_fw_pwrmode_cmd);
+
+#define BEACON_PG 0 /*->1*/
+#define PSPOLL_PG 2
+#define NULL_PG 3
+#define PROBERSP_PG 4 /*->5*/
+
+#define TOTAL_RESERVED_PKT_LEN 768
+
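+/*
+ * Template for the reserved-page packets that are downloaded to the
+ * firmware: beacon (pages 0-1), ps-poll (page 2), null data (page 3) and
+ * probe response (pages 4-5), 128 bytes per page.
+ */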
+static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
+ /* page 0 beacon */
+ 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+ 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x50, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
+ 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
+ 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
+ 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
+ 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
+ 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
+ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
+ 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 1 beacon */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x10, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 2 ps-poll */
+ 0xA4, 0x10, 0x01, 0xC0, 0x00, 0x40, 0x10, 0x10,
+ 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 3 null */
+ 0x48, 0x01, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
+ 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+ 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x72, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 4 probe_resp */
+ 0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
+ 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+ 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
+ 0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00,
+ 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
+ 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
+ 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
+ 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
+ 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
+ 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
+ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
+ 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 5 probe_resp */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
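+/*
+ * Patch the reserved-page template with the current MAC address, BSSID and
+ * AID, hand it to the firmware through cmd_send_packet(), and then tell the
+ * firmware where each packet lives with an H2C_RSVDPAGE command.
+ */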
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct sk_buff *skb = NULL;
+
+ u32 totalpacketlen;
+ bool rtstatus;
+ u8 u1RsvdPageLoc[3] = {0};
+ bool b_dlok = false;
+
+ u8 *beacon;
+ u8 *p_pspoll;
+ u8 *nullfunc;
+ u8 *p_probersp;
+ /*---------------------------------------------------------
+ (1) beacon
+ ---------------------------------------------------------*/
+ beacon = &reserved_page_packet[BEACON_PG * 128];
+ SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
+
+ /*-------------------------------------------------------
+ (2) ps-poll
+ --------------------------------------------------------*/
+ p_pspoll = &reserved_page_packet[PSPOLL_PG * 128];
+ SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
+ SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
+ SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG);
+
+ /*--------------------------------------------------------
+ (3) null data
+ ---------------------------------------------------------*/
+ nullfunc = &reserved_page_packet[NULL_PG * 128];
+ SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
+ SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG);
+
+ /*---------------------------------------------------------
+ (4) probe response
+ ----------------------------------------------------------*/
+ p_probersp = &reserved_page_packet[PROBERSP_PG * 128];
+ SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
+ SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
+
+ SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG);
+
+ totalpacketlen = TOTAL_RESERVED_PKT_LEN;
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+ "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+ &reserved_page_packet[0], totalpacketlen);
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+ u1RsvdPageLoc, 3);
+
+ skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
+ memcpy((u8 *) skb_put(skb, totalpacketlen),
+ &reserved_page_packet, totalpacketlen);
+
+ rtstatus = rtlpriv->cfg->ops->cmd_send_packet(hw, skb);
+
+ if (rtstatus)
+ b_dlok = true;
+
+ if (b_dlok) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("Set RSVD page location to Fw.\n"));
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "H2C_RSVDPAGE:\n",
+ u1RsvdPageLoc, 3);
+ rtl92c_fill_h2c_cmd(hw, H2C_RSVDPAGE,
+ sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
+ } else
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
+}
+EXPORT_SYMBOL(rtl92c_set_fw_rsvdpagepkt);
+
+void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
+{
+ u8 u1_joinbssrpt_parm[1] = {0};
+
+ SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
+
+ rtl92c_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
+}
+EXPORT_SYMBOL(rtl92c_set_fw_joinbss_report_cmd);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
new file mode 100644
index 000000000000..3db33bd14666
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -0,0 +1,98 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C__FW__H__
+#define __RTL92C__FW__H__
+
+#define FW_8192C_SIZE 0x3000
+#define FW_8192C_START_ADDRESS 0x1000
+#define FW_8192C_END_ADDRESS 0x3FFF
+#define FW_8192C_PAGE_SIZE 4096
+#define FW_8192C_POLLING_DELAY 5
+#define FW_8192C_POLLING_TIMEOUT_COUNT 100
+
+#define IS_FW_HEADER_EXIST(_pfwhdr) \
+ ((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\
+ (_pfwhdr->signature&0xFFF0) == 0x88C0)
+
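+/* Optional header prepended to the firmware image; see IS_FW_HEADER_EXIST. */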
+struct rtl92c_firmware_header {
+ u16 signature;
+ u8 category;
+ u8 function;
+ u16 version;
+ u8 subversion;
+ u8 rsvd1;
+ u8 month;
+ u8 date;
+ u8 hour;
+ u8 minute;
+ u16 ramcodeSize;
+ u16 rsvd2;
+ u32 svnindex;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+};
+
+enum rtl8192c_h2c_cmd {
+ H2C_AP_OFFLOAD = 0,
+ H2C_SETPWRMODE = 1,
+ H2C_JOINBSSRPT = 2,
+ H2C_RSVDPAGE = 3,
+ H2C_RSSI_REPORT = 5,
+ H2C_RA_MASK = 6,
+ MAX_H2CCMD
+};
+
+#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
+
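+/*
+ * H2C command parameter layout: the set-power-mode command carries the mode
+ * in byte 0, the smart-PS setting in byte 1 and the beacon pass time in
+ * byte 2; the reserved-page command carries the probe-response, ps-poll and
+ * null-data page numbers in bytes 0-2.
+ */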
+#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+
+int rtl92c_download_fw(struct ieee80211_hw *hw);
+void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer);
+void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
new file mode 100644
index 000000000000..2f624fc27499
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
@@ -0,0 +1,39 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+
+
+MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Georgia <georgia@realtek.com>");
+MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n PCI wireless");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
new file mode 100644
index 000000000000..a70228278398
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -0,0 +1,2042 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../rtl8192ce/reg.h"
+#include "../rtl8192ce/def.h"
+#include "dm_common.h"
+#include "phy_common.h"
+
+/* Define macro to shorten lines */
+#define MCS_TXPWR mcs_txpwrlevel_origoffset
+
+u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 returnvalue, originalvalue, bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "bitmask(%#x)\n", regaddr,
+ bitmask));
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
+ "Addr[0x%x]=0x%x\n", bitmask,
+ regaddr, originalvalue));
+
+ return returnvalue;
+
+}
+EXPORT_SYMBOL(rtl92c_phy_query_bb_reg);
+
+void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 originalvalue, bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+ " data(%#x)\n", regaddr, bitmask,
+ data));
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+
+ rtl_write_dword(rtlpriv, regaddr, data);
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+ " data(%#x)\n", regaddr, bitmask,
+ data));
+}
+EXPORT_SYMBOL(rtl92c_phy_set_bb_reg);
+
+u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset)
+{
+ RT_ASSERT(false, ("deprecated!\n"));
+ return 0;
+}
+EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read);
+
+void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data)
+{
+ RT_ASSERT(false, ("deprecated!\n"));
+}
+EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write);
+
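+/*
+ * Read an RF register over the LSSI (3-wire) interface: program the read
+ * address into the HSSI parameter register, toggle the read edge, and pick
+ * up the result from the PI or SI readback register, depending on which
+ * mode is enabled.
+ */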
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+ u32 newoffset;
+ u32 tmplong, tmplong2;
+ u8 rfpi_enable = 0;
+ u32 retvalue;
+
+ offset &= 0x3f;
+ newoffset = offset;
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n"));
+ return 0xFFFFFFFF;
+ }
+ tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+ if (rfpath == RF90_PATH_A)
+ tmplong2 = tmplong;
+ else
+ tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+ tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+ (newoffset << 23) | BLSSIREADEDGE;
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong & (~BLSSIREADEDGE));
+ mdelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+ mdelay(1);
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong | BLSSIREADEDGE);
+ mdelay(1);
+ if (rfpath == RF90_PATH_A)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+ else if (rfpath == RF90_PATH_B)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+ BIT(8));
+ if (rfpi_enable)
+ retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+ BLSSIREADBACKDATA);
+ else
+ retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+ BLSSIREADBACKDATA);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rflssi_readback,
+ retvalue));
+ return retvalue;
+}
+EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read);
+
+void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data)
+{
+ u32 data_and_addr;
+ u32 newoffset;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ if (RT_CANNOT_IO(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n"));
+ return;
+ }
+ offset &= 0x3f;
+ newoffset = offset;
+ data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+ rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset,
+ data_and_addr));
+}
+EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);
+
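+/*
+ * Return the position of the least significant set bit in 'bitmask', i.e.
+ * the shift needed to right-align a masked register field.
+ */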
+u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++) {
+ if (((bitmask >> i) & 0x1) == 1)
+ break;
+ }
+ return i;
+}
+EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift);
+
+static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
+{
+ rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
+ rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
+ rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
+ rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
+ rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
+ rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
+ rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
+ rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
+ rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
+}
+
+bool rtl92c_phy_rf_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ return rtlpriv->cfg->ops->phy_rf6052_config(hw);
+}
+EXPORT_SYMBOL(rtl92c_phy_rf_config);
+
+bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ bool rtstatus;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
+ rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
+ return false;
+ }
+ if (rtlphy->rf_type == RF_1T2R) {
+ _rtl92c_phy_bb_config_1t(hw);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
+ }
+ if (!rtlefuse->autoload_failflag) {
+ rtlphy->pwrgroup_cnt = 0;
+ rtstatus = rtlpriv->cfg->ops->config_bb_with_pgheaderfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ }
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
+ return false;
+ }
+ rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_AGC_TAB);
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
+ return false;
+ }
+ rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER2,
+ 0x200));
+ return true;
+}
+EXPORT_SYMBOL(_rtl92c_phy_bb8192c_config_parafile);
+
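+/*
+ * Cache one row of per-rate TX power offsets (from the PHY_REG_PG table)
+ * into mcs_txpwrlevel_origoffset; the power-group counter advances once the
+ * last register of a group has been seen.
+ */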
+void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask,
+ u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (regaddr == RTXAGC_A_RATE18_06) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0]));
+ }
+ if (regaddr == RTXAGC_A_RATE54_24) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1]));
+ }
+ if (regaddr == RTXAGC_A_CCK1_MCS32) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6]));
+ }
+ if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7]));
+ }
+ if (regaddr == RTXAGC_A_MCS03_MCS00) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2]));
+ }
+ if (regaddr == RTXAGC_A_MCS07_MCS04) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3]));
+ }
+ if (regaddr == RTXAGC_A_MCS11_MCS08) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4]));
+ }
+ if (regaddr == RTXAGC_A_MCS15_MCS12) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5]));
+ }
+ if (regaddr == RTXAGC_B_RATE18_06) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8] = data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8]));
+ }
+ if (regaddr == RTXAGC_B_RATE54_24) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9]));
+ }
+
+ if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14]));
+ }
+
+ if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15]));
+ }
+
+ if (regaddr == RTXAGC_B_MCS03_MCS00) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10]));
+ }
+
+ if (regaddr == RTXAGC_B_MCS07_MCS04) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11]));
+ }
+
+ if (regaddr == RTXAGC_B_MCS11_MCS08) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12]));
+ }
+
+ if (regaddr == RTXAGC_B_MCS15_MCS12) {
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13] = data;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13]));
+
+ rtlphy->pwrgroup_cnt++;
+ }
+}
+EXPORT_SYMBOL(_rtl92c_store_pwrIndex_diffrate_offset);
+
+void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->default_initialgain[0] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[1] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[2] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[3] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Default initial gain (c50=0x%x, "
+ "c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]));
+
+ rtlphy->framesync = (u8) rtl_get_bbreg(hw,
+ ROFDM0_RXDETECTOR3, MASKBYTE0);
+ rtlphy->framesync_c34 = rtl_get_bbreg(hw,
+ ROFDM0_RXDETECTOR2, MASKDWORD);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync));
+}
+
+void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+ rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+ rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
+ RFPGA0_XA_LSSIPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
+ RFPGA0_XB_LSSIPARAMETER;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
+ RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
+ RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
+ RFPGA0_XCD_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
+ RFPGA0_XCD_SWITCHCONTROL;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
+ ROFDM0_XARXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
+ ROFDM0_XBRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
+ ROFDM0_XCRXIQIMBANLANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
+ ROFDM0_XDRXIQIMBALANCE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
+ ROFDM0_XATXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
+ ROFDM0_XBTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
+ ROFDM0_XCTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
+ ROFDM0_XDTXIQIMBALANCE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
+ RFPGA0_XA_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
+ RFPGA0_XB_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
+ RFPGA0_XC_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
+ RFPGA0_XD_LSSIREADBACK;
+
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
+ TRANSCEIVEA_HSPI_READBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
+ TRANSCEIVEB_HSPI_READBACK;
+
+}
+EXPORT_SYMBOL(_rtl92c_phy_init_bb_rf_register_definition);
+
+void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 txpwr_level;
+ long txpwr_dbm;
+
+ txpwr_level = rtlphy->cur_cck_txpwridx;
+ txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw,
+ WIRELESS_MODE_B, txpwr_level);
+ txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
+ rtlefuse->legacy_ht_txpowerdiff;
+ if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
+ WIRELESS_MODE_G,
+ txpwr_level) > txpwr_dbm)
+ txpwr_dbm =
+ _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+ txpwr_level);
+ txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+ if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
+ WIRELESS_MODE_N_24G,
+ txpwr_level) > txpwr_dbm)
+ txpwr_dbm =
+ _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+ txpwr_level);
+ *powerlevel = txpwr_dbm;
+}
+
+static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
+ u8 *cckpowerlevel, u8 *ofdmpowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 index = (channel - 1);
+
+ cckpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
+ cckpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
+ if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
+ ofdmpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
+ ofdmpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
+ } else if (get_rf_type(rtlphy) == RF_2T2R) {
+ ofdmpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
+ ofdmpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
+ }
+}
+
+static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
+ u8 channel, u8 *cckpowerlevel,
+ u8 *ofdmpowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
+ rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
+}
+
+void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+ u8 cckpowerlevel[2], ofdmpowerlevel[2];
+
+ if (!rtlefuse->txpwr_fromeprom)
+ return;
+ _rtl92c_get_txpower_index(hw, channel,
+ &cckpowerlevel[0], &ofdmpowerlevel[0]);
+ _rtl92c_ccxpower_index_check(hw,
+ channel, &cckpowerlevel[0],
+ &ofdmpowerlevel[0]);
+ rtlpriv->cfg->ops->phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
+ rtlpriv->cfg->ops->phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0],
+ channel);
+}
+EXPORT_SYMBOL(rtl92c_phy_set_txpower_level);
+
+bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 idx;
+ u8 rf_path;
+
+ u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
+ WIRELESS_MODE_B,
+ power_indbm);
+ u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
+ WIRELESS_MODE_N_24G,
+ power_indbm);
+ if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
+ ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
+ else
+ ofdmtxpwridx = 0;
+ RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
+ ("%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
+ power_indbm, ccktxpwridx, ofdmtxpwridx));
+ for (idx = 0; idx < 14; idx++) {
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
+ ofdmtxpwridx;
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
+ ofdmtxpwridx;
+ }
+ }
+ rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+ return true;
+}
+EXPORT_SYMBOL(rtl92c_phy_update_txpower_dbm);
+
+void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
+{
+}
+EXPORT_SYMBOL(rtl92c_phy_set_beacon_hw_reg);
+
+u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ long power_indbm)
+{
+ u8 txpwridx;
+ long offset;
+
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ offset = -7;
+ break;
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_N_24G:
+ offset = -8;
+ break;
+ default:
+ offset = -8;
+ break;
+ }
+
+ if ((power_indbm - offset) > 0)
+ txpwridx = (u8) ((power_indbm - offset) * 2);
+ else
+ txpwridx = 0;
+
+ if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
+ txpwridx = MAX_TXPWR_IDX_NMODE_92S;
+
+ return txpwridx;
+}
+EXPORT_SYMBOL(_rtl92c_phy_dbm_to_txpwr_Idx);
+
+long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx)
+{
+ long offset;
+ long pwrout_dbm;
+
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ offset = -7;
+ break;
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_N_24G:
+ offset = -8;
+ break;
+ default:
+ offset = -8;
+ break;
+ }
+ pwrout_dbm = txpwridx / 2 + offset;
+ return pwrout_dbm;
+}
+EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm);
+
+void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum io_type iotype;
+
+ if (!is_hal_stop(rtlhal)) {
+ switch (operation) {
+ case SCAN_OPT_BACKUP:
+ iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+
+ break;
+ case SCAN_OPT_RESTORE:
+ iotype = IO_CMD_RESUME_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Unknown Scan Backup operation.\n"));
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup);
+
+void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_bw = rtlphy->current_chan_bw;
+
+ if (rtlphy->set_bwmode_inprogress)
+ return;
+ rtlphy->set_bwmode_inprogress = true;
+ if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
+ rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw);
+ else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("FALSE driver sleep or unload\n"));
+ rtlphy->set_bwmode_inprogress = false;
+ rtlphy->current_chan_bw = tmp_bw;
+ }
+}
+EXPORT_SYMBOL(rtl92c_phy_set_bw_mode);
+
+void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 delay;
+
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+ ("switch to channel%d\n", rtlphy->current_channel));
+ if (is_hal_stop(rtlhal))
+ return;
+ do {
+ if (!rtlphy->sw_chnl_inprogress)
+ break;
+ if (!_rtl92c_phy_sw_chnl_step_by_step
+ (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
+ &rtlphy->sw_chnl_step, &delay)) {
+ if (delay > 0)
+ mdelay(delay);
+ else
+ continue;
+ } else
+ rtlphy->sw_chnl_inprogress = false;
+ break;
+ } while (true);
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+}
+EXPORT_SYMBOL(rtl92c_phy_sw_chnl_callback);
+
+u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (rtlphy->sw_chnl_inprogress)
+ return 0;
+ if (rtlphy->set_bwmode_inprogress)
+ return 0;
+ RT_ASSERT((rtlphy->current_channel <= 14),
+ ("WIRELESS_MODE_G but channel>14"));
+ rtlphy->sw_chnl_inprogress = true;
+ rtlphy->sw_chnl_stage = 0;
+ rtlphy->sw_chnl_step = 0;
+ if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+ rtl92c_phy_sw_chnl_callback(hw);
+ RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+ ("sw_chnl_inprogress false schdule workitem\n"));
+ rtlphy->sw_chnl_inprogress = false;
+ } else {
+ RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+ ("sw_chnl_inprogress false driver sleep or"
+ " unload\n"));
+ rtlphy->sw_chnl_inprogress = false;
+ }
+ return 1;
+}
+EXPORT_SYMBOL(rtl92c_phy_sw_chnl);
+
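+/*
+ * One step of the channel-switch state machine: stage 0 runs the pre-switch
+ * commands (TX power level), stage 1 programs RF_CHNLBW on every RF path,
+ * stage 2 runs the post-switch commands.  Returns true once the sequence is
+ * finished; otherwise *delay holds the ms to wait before the next step.
+ */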
+static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
+ u8 channel, u8 *stage, u8 *step,
+ u32 *delay)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
+ u32 precommoncmdcnt;
+ struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
+ u32 postcommoncmdcnt;
+ struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
+ u32 rfdependcmdcnt;
+ struct swchnlcmd *currentcmd = NULL;
+ u8 rfpath;
+ u8 num_total_rfpath = rtlphy->num_total_rfpath;
+
+ precommoncmdcnt = 0;
+ _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT,
+ CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
+ _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
+
+ postcommoncmdcnt = 0;
+
+ _rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
+ MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
+
+ rfdependcmdcnt = 0;
+
+ RT_ASSERT((channel >= 1 && channel <= 14),
+ ("illegal channel for Zebra: %d\n", channel));
+
+ _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
+ RF_CHNLBW, channel, 10);
+
+ _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0,
+ 0);
+
+ do {
+ switch (*stage) {
+ case 0:
+ currentcmd = &precommoncmd[*step];
+ break;
+ case 1:
+ currentcmd = &rfdependcmd[*step];
+ break;
+ case 2:
+ currentcmd = &postcommoncmd[*step];
+ break;
+ }
+
+ if (currentcmd->cmdid == CMDID_END) {
+ if ((*stage) == 2) {
+ return true;
+ } else {
+ (*stage)++;
+ (*step) = 0;
+ continue;
+ }
+ }
+
+ switch (currentcmd->cmdid) {
+ case CMDID_SET_TXPOWEROWER_LEVEL:
+ rtl92c_phy_set_txpower_level(hw, channel);
+ break;
+ case CMDID_WRITEPORT_ULONG:
+ rtl_write_dword(rtlpriv, currentcmd->para1,
+ currentcmd->para2);
+ break;
+ case CMDID_WRITEPORT_USHORT:
+ rtl_write_word(rtlpriv, currentcmd->para1,
+ (u16) currentcmd->para2);
+ break;
+ case CMDID_WRITEPORT_UCHAR:
+ rtl_write_byte(rtlpriv, currentcmd->para1,
+ (u8) currentcmd->para2);
+ break;
+ case CMDID_RF_WRITEREG:
+ for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] =
+ ((rtlphy->rfreg_chnlval[rfpath] &
+ 0xfffffc00) | currentcmd->para2);
+
+ rtl_set_rfreg(hw, (enum radio_path)rfpath,
+ currentcmd->para1,
+ RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[rfpath]);
+ }
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+
+ break;
+ } while (true);
+
+ (*delay) = currentcmd->msdelay;
+ (*step)++;
+ return false;
+}
+
+static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+ u32 cmdtableidx, u32 cmdtablesz,
+ enum swchnlcmd_id cmdid,
+ u32 para1, u32 para2, u32 msdelay)
+{
+ struct swchnlcmd *pcmd;
+
+ if (cmdtable == NULL) {
+ RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
+ return false;
+ }
+
+ if (cmdtableidx >= cmdtablesz)
+ return false;
+
+ pcmd = cmdtable + cmdtableidx;
+ pcmd->cmdid = cmdid;
+ pcmd->para1 = para1;
+ pcmd->para2 = para2;
+ pcmd->msdelay = msdelay;
+ return true;
+}
+
+bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
+{
+ return true;
+}
+EXPORT_SYMBOL(rtl8192_phy_check_is_legal_rfpath);
+
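+/*
+ * Run a one-shot IQ calibration for path A and check the result registers:
+ * bit 0 of the return value reports TX IQK success, bit 1 RX IQK success.
+ */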
+static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
+{
+ u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
+ u8 result = 0x00;
+
+ rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
+ rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
+ rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
+ config_pathb ? 0x28160202 : 0x28160502);
+
+ if (config_pathb) {
+ rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
+ }
+
+ rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
+
+ mdelay(IQK_DELAY_TIME);
+
+ reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+ reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
+ reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
+ reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
+
+ if (!(reg_eac & BIT(28)) &&
+ (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
+ (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+
+ if (!(reg_eac & BIT(27)) &&
+ (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((reg_eac & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ return result;
+}
+
+static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw)
+{
+ u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ u8 result = 0x00;
+
+ rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
+ rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
+ mdelay(IQK_DELAY_TIME);
+ reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+ reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
+ reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
+ reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
+ reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
+ if (!(reg_eac & BIT(31)) &&
+ (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
+ (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+
+ if (!(reg_eac & BIT(30)) &&
+ (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ return result;
+}
+
+static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok, long result[][8],
+ u8 final_candidate, bool btxonly)
+{
+ u32 oldval_0, x, tx0_a, reg;
+ long y, tx0_c;
+
+ if (final_candidate == 0xFF)
+ return;
+ else if (iqk_ok) {
+ oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD) >> 22) & 0x3FF;
+ x = result[final_candidate][0];
+ if ((x & 0x00000200) != 0)
+ x = x | 0xFFFFFC00;
+ tx0_a = (x * oldval_0) >> 8;
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
+ ((x * oldval_0 >> 7) & 0x1));
+ y = result[final_candidate][1];
+ if ((y & 0x00000200) != 0)
+ y = y | 0xFFFFFC00;
+ tx0_c = (y * oldval_0) >> 8;
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
+ ((tx0_c & 0x3C0) >> 6));
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
+ (tx0_c & 0x3F));
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
+ ((y * oldval_0 >> 7) & 0x1));
+ if (btxonly)
+ return;
+ reg = result[final_candidate][2];
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][3] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][3] >> 6) & 0xF;
+ rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
+ }
+}
+
+static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok, long result[][8],
+ u8 final_candidate, bool btxonly)
+{
+ u32 oldval_1, x, tx1_a, reg;
+ long y, tx1_c;
+
+ if (final_candidate == 0xFF)
+ return;
+ else if (iqk_ok) {
+ oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD) >> 22) & 0x3FF;
+ x = result[final_candidate][4];
+ if ((x & 0x00000200) != 0)
+ x = x | 0xFFFFFC00;
+ tx1_a = (x * oldval_1) >> 8;
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27),
+ ((x * oldval_1 >> 7) & 0x1));
+ y = result[final_candidate][5];
+ if ((y & 0x00000200) != 0)
+ y = y | 0xFFFFFC00;
+ tx1_c = (y * oldval_1) >> 8;
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000,
+ ((tx1_c & 0x3C0) >> 6));
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000,
+ (tx1_c & 0x3F));
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25),
+ ((y * oldval_1 >> 7) & 0x1));
+ if (btxonly)
+ return;
+ reg = result[final_candidate][6];
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][7] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][7] >> 6) & 0xF;
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
+ }
+}
+
+static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw,
+ u32 *addareg, u32 *addabackup,
+ u32 registernum)
+{
+ u32 i;
+
+ for (i = 0; i < registernum; i++)
+ addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
+}
+
+static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
+ macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
+}
+
+static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw,
+ u32 *addareg, u32 *addabackup,
+ u32 registernum)
+{
+ u32 i;
+
+ for (i = 0; i < registernum; i++)
+ rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
+}
+
+static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
+ rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
+}
+
+static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
+ u32 *addareg, bool is_patha_on, bool is2t)
+{
+ u32 pathOn;
+ u32 i;
+
+ pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
+ if (!is2t) {
+ pathOn = 0x0bdb25a0;
+ rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
+ } else {
+ rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
+ }
+
+ for (i = 1; i < IQK_ADDA_REG_NUM; i++)
+ rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
+}
+
+static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ rtl_write_byte(rtlpriv, macreg[0], 0x3F);
+
+ for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i],
+ (u8) (macbackup[i] & (~BIT(3))));
+ rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
+}
+
+static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw)
+{
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
+ rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+}
+
+static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
+{
+ u32 mode;
+
+ mode = pi_mode ? 0x01000100 : 0x01000000;
+ rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
+ rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
+}
+
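+/*
+ * Compare two IQK result candidates c1 and c2.  When all entries agree
+ * within MAX_TOLERANCE the candidates are treated as similar (true);
+ * otherwise whichever values can still be trusted are copied into
+ * result[3] and false is returned.
+ */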
+static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw,
+ long result[][8], u8 c1, u8 c2)
+{
+ u32 i, j, diff, simularity_bitmap, bound;
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ u8 final_candidate[2] = { 0xFF, 0xFF };
+ bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);
+
+ if (is2t)
+ bound = 8;
+ else
+ bound = 4;
+
+ simularity_bitmap = 0;
+
+ for (i = 0; i < bound; i++) {
+ diff = (result[c1][i] > result[c2][i]) ?
+ (result[c1][i] - result[c2][i]) :
+ (result[c2][i] - result[c1][i]);
+
+ if (diff > MAX_TOLERANCE) {
+ if ((i == 2 || i == 6) && !simularity_bitmap) {
+ if (result[c1][i] + result[c1][i + 1] == 0)
+ final_candidate[(i / 4)] = c2;
+ else if (result[c2][i] + result[c2][i + 1] == 0)
+ final_candidate[(i / 4)] = c1;
+ else
+ simularity_bitmap = simularity_bitmap |
+ (1 << i);
+ } else
+ simularity_bitmap =
+ simularity_bitmap | (1 << i);
+ }
+ }
+
+ if (simularity_bitmap == 0) {
+ for (i = 0; i < (bound / 4); i++) {
+ if (final_candidate[i] != 0xFF) {
+ for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+ result[3][j] =
+ result[final_candidate[i]][j];
+ bresult = false;
+ }
+ }
+ return bresult;
+ } else if (!(simularity_bitmap & 0x0F)) {
+ for (i = 0; i < 4; i++)
+ result[3][i] = result[c1][i];
+ return false;
+ } else if (!(simularity_bitmap & 0xF0) && is2t) {
+ for (i = 4; i < 8; i++)
+ result[3][i] = result[c1][i];
+ return false;
+ } else {
+ return false;
+ }
+
+}
+
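+/*
+ * One complete IQ calibration pass (pass index t).  On the first pass the
+ * ADDA, MAC and BB registers that the calibration clobbers are saved; the
+ * path A (and, for 2T parts, path B) IQK is then retried up to two times
+ * and the raw readings are stored in result[t][0..7].
+ */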
+static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
+ long result[][8], u8 t, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 i;
+ u8 patha_ok, pathb_ok;
+ u32 adda_reg[IQK_ADDA_REG_NUM] = {
+ 0x85c, 0xe6c, 0xe70, 0xe74,
+ 0xe78, 0xe7c, 0xe80, 0xe84,
+ 0xe88, 0xe8c, 0xed0, 0xed4,
+ 0xed8, 0xedc, 0xee0, 0xeec
+ };
+
+ u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+ 0x522, 0x550, 0x551, 0x040
+ };
+
+ const u32 retrycount = 2;
+
+ u32 bbvalue;
+
+ if (t == 0) {
+ bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
+
+ _rtl92c_phy_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup, 16);
+ _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ }
+ _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
+ if (t == 0) {
+ rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+ }
+ if (!rtlphy->rfpi_enable)
+ _rtl92c_phy_pi_mode_switch(hw, true);
+ if (t == 0) {
+ rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
+ rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
+ rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
+ }
+ rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
+ if (is2t) {
+ rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
+ rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
+ }
+ _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
+ if (is2t)
+ rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+ rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
+ rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
+ for (i = 0; i < retrycount; i++) {
+ patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t);
+ if (patha_ok == 0x03) {
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ break;
+ } else if (i == (retrycount - 1) && patha_ok == 0x01) {
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94,
+ MASKDWORD) & 0x3FF0000) >> 16;
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c,
+ MASKDWORD) & 0x3FF0000) >> 16;
+ }
+
+ }
+
+ if (is2t) {
+ _rtl92c_phy_path_a_standby(hw);
+ _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t);
+ for (i = 0; i < retrycount; i++) {
+ pathb_ok = _rtl92c_phy_path_b_iqk(hw);
+ if (pathb_ok == 0x03) {
+ result[t][4] = (rtl_get_bbreg(hw,
+ 0xeb4,
+ MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][5] =
+ (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][6] =
+ (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][7] =
+ (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ break;
+ } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
+ result[t][4] = (rtl_get_bbreg(hw,
+ 0xeb4,
+ MASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
+ 0x3FF0000) >> 16;
+ }
+ }
+ }
+ rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
+ rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
+ rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
+ rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
+ if (is2t)
+ rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
+ if (t != 0) {
+ if (!rtlphy->rfpi_enable)
+ _rtl92c_phy_pi_mode_switch(hw, false);
+ _rtl92c_phy_reload_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup, 16);
+ _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ }
+}
+
+static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
+ char delta, bool is2t)
+{
+ /* This routine is deliberately dummied out for later fixes */
+#if 0
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ u32 reg_d[PATH_NUM];
+ u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound;
+
+ u32 bb_backup[APK_BB_REG_NUM];
+ u32 bb_reg[APK_BB_REG_NUM] = {
+ 0x904, 0xc04, 0x800, 0xc08, 0x874
+ };
+ u32 bb_ap_mode[APK_BB_REG_NUM] = {
+ 0x00000020, 0x00a05430, 0x02040000,
+ 0x000800e4, 0x00204000
+ };
+ u32 bb_normal_ap_mode[APK_BB_REG_NUM] = {
+ 0x00000020, 0x00a05430, 0x02040000,
+ 0x000800e4, 0x22204000
+ };
+
+ u32 afe_backup[APK_AFE_REG_NUM];
+ u32 afe_reg[APK_AFE_REG_NUM] = {
+ 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78,
+ 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
+ 0xed0, 0xed4, 0xed8, 0xedc, 0xee0,
+ 0xeec
+ };
+
+ u32 mac_backup[IQK_MAC_REG_NUM];
+ u32 mac_reg[IQK_MAC_REG_NUM] = {
+ 0x522, 0x550, 0x551, 0x040
+ };
+
+ u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
+ {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
+ };
+
+ u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c},
+ {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
+ };
+
+ u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
+ {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
+ };
+
+ u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a},
+ {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
+ };
+
+ u32 afe_on_off[PATH_NUM] = {
+ 0x04db25a4, 0x0b1b25a4
+ };
+
+ u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c };
+
+ u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 };
+
+ u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 };
+
+ u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 };
+
+ const char apk_delta_mapping[APK_BB_REG_NUM][13] = {
+ {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
+ };
+
+ const u32 apk_normal_setting_value_1[13] = {
+ 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
+ 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
+ 0x12680000, 0x00880000, 0x00880000
+ };
+
+ const u32 apk_normal_setting_value_2[16] = {
+ 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
+ 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
+ 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
+ 0x00050006
+ };
+
+ u32 apk_result[PATH_NUM][APK_BB_REG_NUM];
+
+ long bb_offset, delta_v, delta_offset;
+
+ if (!is2t)
+ pathbound = 1;
+
+ for (index = 0; index < PATH_NUM; index++) {
+ apk_offset[index] = apk_normal_offset[index];
+ apk_value[index] = apk_normal_value[index];
+ afe_on_off[index] = 0x6fdb25a4;
+ }
+
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ for (path = 0; path < pathbound; path++) {
+ apk_rf_init_value[path][index] =
+ apk_normal_rf_init_value[path][index];
+ apk_rf_value_0[path][index] =
+ apk_normal_rf_value_0[path][index];
+ }
+ bb_ap_mode[index] = bb_normal_ap_mode[index];
+
+ apkbound = 6;
+ }
+
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index == 0)
+ continue;
+ bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD);
+ }
+
+ _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup);
+
+ _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16);
+
+ for (path = 0; path < pathbound; path++) {
+ if (path == RF90_PATH_A) {
+ offset = 0xb00;
+ for (index = 0; index < 11; index++) {
+ rtl_set_bbreg(hw, offset, MASKDWORD,
+ apk_normal_setting_value_1
+ [index]);
+
+ offset += 0x04;
+ }
+
+ rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
+
+ offset = 0xb68;
+ for (; index < 13; index++) {
+ rtl_set_bbreg(hw, offset, MASKDWORD,
+ apk_normal_setting_value_1
+ [index]);
+
+ offset += 0x04;
+ }
+
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
+
+ offset = 0xb00;
+ for (index = 0; index < 16; index++) {
+ rtl_set_bbreg(hw, offset, MASKDWORD,
+ apk_normal_setting_value_2
+ [index]);
+
+ offset += 0x04;
+ }
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
+ } else if (path == RF90_PATH_B) {
+ offset = 0xb70;
+ for (index = 0; index < 10; index++) {
+ rtl_set_bbreg(hw, offset, MASKDWORD,
+ apk_normal_setting_value_1
+ [index]);
+
+ offset += 0x04;
+ }
+ rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000);
+ rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
+
+ offset = 0xb68;
+ index = 11;
+ for (; index < 13; index++) {
+ rtl_set_bbreg(hw, offset, MASKDWORD,
+ apk_normal_setting_value_1
+ [index]);
+
+ offset += 0x04;
+ }
+
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
+
+ offset = 0xb60;
+ for (index = 0; index < 16; index++) {
+ rtl_set_bbreg(hw, offset, MASKDWORD,
+ apk_normal_setting_value_2
+ [index]);
+
+ offset += 0x04;
+ }
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
+ }
+
+ reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path,
+ 0xd, MASKDWORD);
+
+ for (index = 0; index < APK_AFE_REG_NUM; index++)
+ rtl_set_bbreg(hw, afe_reg[index], MASKDWORD,
+ afe_on_off[path]);
+
+ if (path == RF90_PATH_A) {
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index == 0)
+ continue;
+ rtl_set_bbreg(hw, bb_reg[index], MASKDWORD,
+ bb_ap_mode[index]);
+ }
+ }
+
+ _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup);
+
+ if (path == 0) {
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000);
+ } else {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD,
+ 0x10000);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
+ 0x1000f);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
+ 0x20103);
+ }
+
+ delta_offset = ((delta + 14) / 2);
+ if (delta_offset < 0)
+ delta_offset = 0;
+ else if (delta_offset > 12)
+ delta_offset = 12;
+
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index != 1)
+ continue;
+
+ tmpreg = apk_rf_init_value[path][index];
+
+ if (!rtlefuse->apk_thermalmeterignore) {
+ bb_offset = (tmpreg & 0xF0000) >> 16;
+
+ if (!(tmpreg & BIT(15)))
+ bb_offset = -bb_offset;
+
+ delta_v =
+ apk_delta_mapping[index][delta_offset];
+
+ bb_offset += delta_v;
+
+ if (bb_offset < 0) {
+ tmpreg = tmpreg & (~BIT(15));
+ bb_offset = -bb_offset;
+ } else {
+ tmpreg = tmpreg | BIT(15);
+ }
+
+ tmpreg =
+ (tmpreg & 0xFFF0FFFF) | (bb_offset << 16);
+ }
+
+ rtl_set_rfreg(hw, (enum radio_path)path, 0xc,
+ MASKDWORD, 0x8992e);
+ rtl_set_rfreg(hw, (enum radio_path)path, 0x0,
+ MASKDWORD, apk_rf_value_0[path][index]);
+ rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
+ MASKDWORD, tmpreg);
+
+ i = 0;
+ do {
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000);
+ rtl_set_bbreg(hw, apk_offset[path],
+ MASKDWORD, apk_value[0]);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("PHY_APCalibrate() offset 0x%x "
+ "value 0x%x\n",
+ apk_offset[path],
+ rtl_get_bbreg(hw, apk_offset[path],
+ MASKDWORD)));
+
+ mdelay(3);
+
+ rtl_set_bbreg(hw, apk_offset[path],
+ MASKDWORD, apk_value[1]);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("PHY_APCalibrate() offset 0x%x "
+ "value 0x%x\n",
+ apk_offset[path],
+ rtl_get_bbreg(hw, apk_offset[path],
+ MASKDWORD)));
+
+ mdelay(20);
+
+ rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
+
+ if (path == RF90_PATH_A)
+ tmpreg = rtl_get_bbreg(hw, 0xbd8,
+ 0x03E00000);
+ else
+ tmpreg = rtl_get_bbreg(hw, 0xbd8,
+ 0xF8000000);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("PHY_APCalibrate() offset "
+ "0xbd8[25:21] %x\n", tmpreg));
+
+ i++;
+
+ } while (tmpreg > apkbound && i < 4);
+
+ apk_result[path][index] = tmpreg;
+ }
+ }
+
+ _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup);
+
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index == 0)
+ continue;
+ rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]);
+ }
+
+ _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16);
+
+ for (path = 0; path < pathbound; path++) {
+ rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
+ MASKDWORD, reg_d[path]);
+
+ if (path == RF90_PATH_B) {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
+ 0x1000f);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
+ 0x20101);
+ }
+
+ if (apk_result[path][1] > 6)
+ apk_result[path][1] = 6;
+ }
+
+ for (path = 0; path < pathbound; path++) {
+ rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD,
+ ((apk_result[path][1] << 15) |
+ (apk_result[path][1] << 10) |
+ (apk_result[path][1] << 5) |
+ apk_result[path][1]));
+
+ if (path == RF90_PATH_A)
+ rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
+ ((apk_result[path][1] << 15) |
+ (apk_result[path][1] << 10) |
+ (0x00 << 5) | 0x05));
+ else
+ rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
+ ((apk_result[path][1] << 15) |
+ (apk_result[path][1] << 10) |
+ (0x02 << 5) | 0x05));
+
+ rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD,
+ ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) |
+ 0x08));
+
+ }
+
+ rtlphy->apk_done = true;
+#endif
+}
+
+static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
+ bool bmain, bool is2t)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (is_hal_stop(rtlhal)) {
+ rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
+ rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
+ }
+ if (is2t) {
+ if (bmain)
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(5) | BIT(6), 0x1);
+ else
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(5) | BIT(6), 0x2);
+ } else {
+ if (bmain)
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
+ else
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
+
+ }
+}
+
+#undef IQK_ADDA_REG_NUM
+#undef IQK_DELAY_TIME
+
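+/*
+ * Top-level IQ calibration: run up to three calibration passes, pick the
+ * first pair of passes that agree (or fall back to the merged values kept
+ * in result[3]), and write the winning candidate into the IQ imbalance
+ * correction registers.
+ */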
+void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ long result[4][8];
+ u8 i, final_candidate;
+ bool patha_ok, pathb_ok;
+ long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
+ reg_ecc, reg_tmp = 0;
+ bool is12simular, is13simular, is23simular;
+ bool start_conttx = false, singletone = false;
+ u32 iqk_bb_reg[10] = {
+ ROFDM0_XARXIQIMBALANCE,
+ ROFDM0_XBRXIQIMBALANCE,
+ ROFDM0_ECCATHRESHOLD,
+ ROFDM0_AGCRSSITABLE,
+ ROFDM0_XATXIQIMBALANCE,
+ ROFDM0_XBTXIQIMBALANCE,
+ ROFDM0_XCTXIQIMBALANCE,
+ ROFDM0_XCTXAFE,
+ ROFDM0_XDTXAFE,
+ ROFDM0_RXIQEXTANTA
+ };
+
+ if (recovery) {
+ _rtl92c_phy_reload_adda_registers(hw,
+ iqk_bb_reg,
+ rtlphy->iqk_bb_backup, 10);
+ return;
+ }
+ if (start_conttx || singletone)
+ return;
+ for (i = 0; i < 8; i++) {
+ result[0][i] = 0;
+ result[1][i] = 0;
+ result[2][i] = 0;
+ result[3][i] = 0;
+ }
+ final_candidate = 0xff;
+ patha_ok = false;
+ pathb_ok = false;
+ is12simular = false;
+ is23simular = false;
+ is13simular = false;
+ for (i = 0; i < 3; i++) {
+ if (IS_92C_SERIAL(rtlhal->version))
+ _rtl92c_phy_iq_calibrate(hw, result, i, true);
+ else
+ _rtl92c_phy_iq_calibrate(hw, result, i, false);
+ if (i == 1) {
+ is12simular = _rtl92c_phy_simularity_compare(hw,
+ result, 0,
+ 1);
+ if (is12simular) {
+ final_candidate = 0;
+ break;
+ }
+ }
+ if (i == 2) {
+ is13simular = _rtl92c_phy_simularity_compare(hw,
+ result, 0,
+ 2);
+ if (is13simular) {
+ final_candidate = 0;
+ break;
+ }
+ is23simular = _rtl92c_phy_simularity_compare(hw,
+ result, 1,
+ 2);
+ if (is23simular)
+ final_candidate = 1;
+ else {
+ for (i = 0; i < 8; i++)
+ reg_tmp += result[3][i];
+
+ if (reg_tmp != 0)
+ final_candidate = 3;
+ else
+ final_candidate = 0xFF;
+ }
+ }
+ }
+ for (i = 0; i < 4; i++) {
+ reg_e94 = result[i][0];
+ reg_e9c = result[i][1];
+ reg_ea4 = result[i][2];
+ reg_eac = result[i][3];
+ reg_eb4 = result[i][4];
+ reg_ebc = result[i][5];
+ reg_ec4 = result[i][6];
+ reg_ecc = result[i][7];
+ }
+ if (final_candidate != 0xff) {
+ rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
+ rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
+ reg_ea4 = result[final_candidate][2];
+ reg_eac = result[final_candidate][3];
+ rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
+ rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
+ reg_ec4 = result[final_candidate][6];
+ reg_ecc = result[final_candidate][7];
+ patha_ok = pathb_ok = true;
+ } else {
+ rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
+ rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
+ }
+ if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
+ _rtl92c_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
+ final_candidate,
+ (reg_ea4 == 0));
+ if (IS_92C_SERIAL(rtlhal->version)) {
+ if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */
+ _rtl92c_phy_path_b_fill_iqk_matrix(hw, pathb_ok,
+ result,
+ final_candidate,
+ (reg_ec4 == 0));
+ }
+ _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup, 10);
+}
+EXPORT_SYMBOL(rtl92c_phy_iq_calibrate);
+
+void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool start_conttx = false, singletone = false;
+
+ if (start_conttx || singletone)
+ return;
+ if (IS_92C_SERIAL(rtlhal->version))
+ rtlpriv->cfg->ops->phy_lc_calibrate(hw, true);
+ else
+ rtlpriv->cfg->ops->phy_lc_calibrate(hw, false);
+}
+EXPORT_SYMBOL(rtl92c_phy_lc_calibrate);
+
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (rtlphy->apk_done)
+ return;
+ if (IS_92C_SERIAL(rtlhal->version))
+ _rtl92c_phy_ap_calibrate(hw, delta, true);
+ else
+ _rtl92c_phy_ap_calibrate(hw, delta, false);
+}
+EXPORT_SYMBOL(rtl92c_phy_ap_calibrate);
+
+void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (IS_92C_SERIAL(rtlhal->version))
+ _rtl92c_phy_set_rfpath_switch(hw, bmain, true);
+ else
+ _rtl92c_phy_set_rfpath_switch(hw, bmain, false);
+}
+EXPORT_SYMBOL(rtl92c_phy_set_rfpath_switch);
+
+bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool postprocessing = false;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress));
+ do {
+ switch (iotype) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("[IO CMD] Resume DM after scan.\n"));
+ postprocessing = true;
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("[IO CMD] Pause DM before scan.\n"));
+ postprocessing = true;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ } while (false);
+ if (postprocessing && !rtlphy->set_io_inprogress) {
+ rtlphy->set_io_inprogress = true;
+ rtlphy->current_io_type = iotype;
+ } else {
+ return false;
+ }
+ rtl92c_phy_set_io(hw);
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
+ return true;
+}
+EXPORT_SYMBOL(rtl92c_phy_set_io_cmd);
+
+void rtl92c_phy_set_io(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress));
+ switch (rtlphy->current_io_type) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
+ rtl92c_dm_write_dig(hw);
+ rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
+ dm_digtable.cur_igvalue = 0x17;
+ rtl92c_dm_write_dig(hw);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ rtlphy->set_io_inprogress = false;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("<---(%#x)\n", rtlphy->current_io_type));
+}
+EXPORT_SYMBOL(rtl92c_phy_set_io);
+
+void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+EXPORT_SYMBOL(rtl92ce_phy_set_rf_on);
+
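+/*
+ * Put the RF section to sleep: pause TX, power down RF path A and retry
+ * the APSD power-down handshake up to five times before giving up and
+ * restoring the previous state.
+ */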
+void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw)
+{
+ u32 u4b_tmp;
+ u8 delay = 5;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+ u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+ while (u4b_tmp != 0 && delay > 0) {
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+ u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+ delay--;
+ }
+ if (delay == 0) {
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+ ("Switch RF timeout !!!.\n"));
+ return;
+ }
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+}
+EXPORT_SYMBOL(_rtl92c_phy_set_rf_sleep);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
new file mode 100644
index 000000000000..53ffb0981586
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
@@ -0,0 +1,246 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_PHY_H__
+#define __RTL92C_PHY_H__
+
+#define MAX_PRECMD_CNT 16
+#define MAX_RFDEPENDCMD_CNT 16
+#define MAX_POSTCMD_CNT 16
+
+#define MAX_DOZE_WAITING_TIMES_9x 64
+
+#define RT_CANNOT_IO(hw) false
+#define HIGHPOWER_RADIOA_ARRAYLEN 22
+
+#define MAX_TOLERANCE 5
+#define IQK_DELAY_TIME 1
+
+#define APK_BB_REG_NUM 5
+#define APK_AFE_REG_NUM 16
+#define APK_CURVE_REG_NUM 4
+#define PATH_NUM 2
+
+#define LOOP_LIMIT 5
+#define MAX_STALL_TIME 50
+#define AntennaDiversityValue 0x80
+#define MAX_TXPWR_IDX_NMODE_92S 63
+#define Reset_Cnt_Limit 3
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_MAC_REG_NUM 4
+
+#define RF90_PATH_MAX 2
+
+#define CT_OFFSET_MAC_ADDR 0X16
+
+#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
+#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
+#define CT_OFFSET_HT402S_TX_PWR_IDX_DIF 0x66
+#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
+#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
+
+#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
+#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
+
+#define CT_OFFSET_CHANNEL_PLAH 0x75
+#define CT_OFFSET_THERMAL_METER 0x78
+#define CT_OFFSET_RF_OPTION 0x79
+#define CT_OFFSET_VERSION 0x7E
+#define CT_OFFSET_CUSTOMER_ID 0x7F
+
+#define RTL92C_MAX_PATH_NUM 2
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
+enum swchnlcmd_id {
+ CMDID_END,
+ CMDID_SET_TXPOWEROWER_LEVEL,
+ CMDID_BBREGWRITE10,
+ CMDID_WRITEPORT_ULONG,
+ CMDID_WRITEPORT_USHORT,
+ CMDID_WRITEPORT_UCHAR,
+ CMDID_RF_WRITEREG,
+};
+
+struct swchnlcmd {
+ enum swchnlcmd_id cmdid;
+ u32 para1;
+ u32 para2;
+ u32 msdelay;
+};
+
+enum hw90_block_e {
+ HW90_BLOCK_MAC = 0,
+ HW90_BLOCK_PHY0 = 1,
+ HW90_BLOCK_PHY1 = 2,
+ HW90_BLOCK_RF = 3,
+ HW90_BLOCK_MAXIMUM = 4,
+};
+
+enum baseband_config_type {
+ BASEBAND_CONFIG_PHY_REG = 0,
+ BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum ra_offset_area {
+ RA_OFFSET_LEGACY_OFDM1,
+ RA_OFFSET_LEGACY_OFDM2,
+ RA_OFFSET_HT_OFDM1,
+ RA_OFFSET_HT_OFDM2,
+ RA_OFFSET_HT_OFDM3,
+ RA_OFFSET_HT_OFDM4,
+ RA_OFFSET_HT_CCK,
+};
+
+enum antenna_path {
+ ANTENNA_NONE,
+ ANTENNA_D,
+ ANTENNA_C,
+ ANTENNA_CD,
+ ANTENNA_B,
+ ANTENNA_BD,
+ ANTENNA_BC,
+ ANTENNA_BCD,
+ ANTENNA_A,
+ ANTENNA_AD,
+ ANTENNA_AC,
+ ANTENNA_ACD,
+ ANTENNA_AB,
+ ANTENNA_ABD,
+ ANTENNA_ABC,
+ ANTENNA_ABCD
+};
+
+struct r_antenna_select_ofdm {
+ u32 r_tx_antenna:4;
+ u32 r_ant_l:4;
+ u32 r_ant_non_ht:4;
+ u32 r_ant_ht1:4;
+ u32 r_ant_ht2:4;
+ u32 r_ant_ht_s1:4;
+ u32 r_ant_non_ht_s1:4;
+ u32 ofdm_txsc:2;
+ u32 reserved:2;
+};
+
+struct r_antenna_select_cck {
+ u8 r_cckrx_enable_2:2;
+ u8 r_cckrx_enable:2;
+ u8 r_ccktx_enable:4;
+};
+
+struct efuse_contents {
+ u8 mac_addr[ETH_ALEN];
+ u8 cck_tx_power_idx[6];
+ u8 ht40_1s_tx_power_idx[6];
+ u8 ht40_2s_tx_power_idx_diff[3];
+ u8 ht20_tx_power_idx_diff[3];
+ u8 ofdm_tx_power_idx_diff[3];
+ u8 ht40_max_power_offset[3];
+ u8 ht20_max_power_offset[3];
+ u8 channel_plan;
+ u8 thermal_meter;
+ u8 rf_option[5];
+ u8 version;
+ u8 oem_id;
+ u8 regulatory;
+};
+
+struct tx_power_struct {
+ u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_txpowerdiff;
+ u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 pwrgroup_cnt;
+ u32 mcs_original_offset[4][16];
+};
+
+extern u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+extern void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+extern void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
+ long *powerlevel);
+extern void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+extern bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
+ long power_indbm);
+extern void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
+ u8 operation);
+extern void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+extern void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+extern void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+extern u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
+extern void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+extern void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
+ u16 beaconinterval);
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
+bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
+ u32 rfpath);
+extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
+void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+void rtl92c_bb_block_on(struct ieee80211_hw *hw);
+u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ u8 txpwridx);
+u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
+ enum wireless_mode wirelessmode,
+ long power_indbm);
+void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
+static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+ u32 cmdtableidx, u32 cmdtablesz,
+ enum swchnlcmd_id cmdid, u32 para1,
+ u32 para2, u32 msdelay);
+static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
+ u8 channel, u8 *stage, u8 *step,
+ u32 *delay);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
new file mode 100644
index 000000000000..c0cb0cfe7d37
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
@@ -0,0 +1,13 @@
+rtl8192ce-objs := \
+ dm.o \
+ hw.o \
+ led.o \
+ phy.o \
+ rf.o \
+ sw.o \
+ table.o \
+ trx.o
+
+obj-$(CONFIG_RTL8192CE) += rtl8192ce.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
new file mode 100644
index 000000000000..2f577c8828fc
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -0,0 +1,401 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_DEF_H__
+#define __RTL92C_DEF_H__
+
+#define HAL_RETRY_LIMIT_INFRA 48
+#define HAL_RETRY_LIMIT_AP_ADHOC 7
+
+#define PHY_RSSI_SLID_WIN_MAX 100
+#define PHY_LINKQUALITY_SLID_WIN_MAX 20
+#define PHY_BEACON_RSSI_SLID_WIN_MAX 10
+
+#define RESET_DELAY_8185 20
+
+#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
+#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
+
+#define NUM_OF_FIRMWARE_QUEUE 10
+#define NUM_OF_PAGES_IN_FW 0x100
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
+
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
+
+#define MAX_LINES_HWCONFIG_TXT 1000
+#define MAX_BYTES_LINE_HWCONFIG_TXT 256
+
+#define SW_THREE_WIRE 0
+#define HW_THREE_WIRE 2
+
+#define BT_DEMO_BOARD 0
+#define BT_QA_BOARD 1
+#define BT_FPGA 2
+
+#define RX_SMOOTH_FACTOR 20
+
+#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
+#define HAL_PRIME_CHNL_OFFSET_LOWER 1
+#define HAL_PRIME_CHNL_OFFSET_UPPER 2
+
+#define MAX_H2C_QUEUE_NUM 10
+
+#define RX_MPDU_QUEUE 0
+#define RX_CMD_QUEUE 1
+#define RX_MAX_QUEUE 2
+#define AC2QUEUEID(_AC) (_AC)
+
+#define C2H_RX_CMD_HDR_LEN 8
+#define GET_C2H_CMD_CMD_LEN(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 0, 16)
+#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 16, 8)
+#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 24, 7)
+#define GET_C2H_CMD_CONTINUE(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 31, 1)
+#define GET_C2H_CMD_CONTENT(__prxhdr) \
+ ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN)
+
+#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8)
+#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8)
+#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16)
+#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5)
+#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1)
+#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5)
+#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1)
+#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
+#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
+
+#define CHIP_VER_B BIT(4)
+#define CHIP_92C_BITMASK BIT(0)
+#define CHIP_92C_1T2R 0x03
+#define CHIP_92C 0x01
+#define CHIP_88C 0x00
+
+/* Add vendor information into chip version definition.
+ * Add UMC B-Cut and RTL8723 chip info definition.
+ *
+ * BIT 7 Reserved
+ * BIT 6 UMC BCut
+ * BIT 5 Manufacturer(TSMC/UMC)
+ * BIT 4 TEST/NORMAL
+ * BIT 3 8723 Version
+ * BIT 2 8723?
+ * BIT 1 1T2R?
+ * BIT 0 88C/92C
+*/
+
+enum version_8192c {
+ VERSION_A_CHIP_92C = 0x01,
+ VERSION_A_CHIP_88C = 0x00,
+ VERSION_B_CHIP_92C = 0x11,
+ VERSION_B_CHIP_88C = 0x10,
+ VERSION_TEST_CHIP_88C = 0x00,
+ VERSION_TEST_CHIP_92C = 0x01,
+ VERSION_NORMAL_TSMC_CHIP_88C = 0x10,
+ VERSION_NORMAL_TSMC_CHIP_92C = 0x11,
+ VERSION_NORMAL_TSMC_CHIP_92C_1T2R = 0x13,
+ VERSION_NORMAL_UMC_CHIP_88C_A_CUT = 0x30,
+ VERSION_NORMAL_UMC_CHIP_92C_A_CUT = 0x31,
+ VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT = 0x33,
+ VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT = 0x34,
+ VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT = 0x3c,
+ VERSION_NORMAL_UMC_CHIP_88C_B_CUT = 0x70,
+ VERSION_NORMAL_UMC_CHIP_92C_B_CUT = 0x71,
+ VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT = 0x73,
+ VERSION_UNKNOWN = 0x88,
+};
+
+#define IS_CHIP_VER_B(version) (((version) & CHIP_VER_B) ? true : false)
+#define IS_92C_SERIAL(version) (((version) & CHIP_92C_BITMASK) ? true : false)
+
+enum rtl819x_loopback_e {
+ RTL819X_NO_LOOPBACK = 0,
+ RTL819X_MAC_LOOPBACK = 1,
+ RTL819X_DMA_LOOPBACK = 2,
+ RTL819X_CCK_LOOPBACK = 3,
+};
+
+enum rf_optype {
+ RF_OP_BY_SW_3WIRE = 0,
+ RF_OP_BY_FW,
+ RF_OP_MAX
+};
+
+enum rf_power_state {
+ RF_ON,
+ RF_OFF,
+ RF_SLEEP,
+ RF_SHUT_DOWN,
+};
+
+enum power_save_mode {
+ POWER_SAVE_MODE_ACTIVE,
+ POWER_SAVE_MODE_SAVE,
+};
+
+enum power_polocy_config {
+ POWERCFG_MAX_POWER_SAVINGS,
+ POWERCFG_GLOBAL_POWER_SAVINGS,
+ POWERCFG_LOCAL_POWER_SAVINGS,
+ POWERCFG_LENOVO,
+};
+
+enum interface_select_pci {
+ INTF_SEL1_MINICARD = 0,
+ INTF_SEL0_PCIE = 1,
+ INTF_SEL2_RSV = 2,
+ INTF_SEL3_RSV = 3,
+};
+
+enum hal_fw_c2h_cmd_id {
+ HAL_FW_C2H_CMD_Read_MACREG = 0,
+ HAL_FW_C2H_CMD_Read_BBREG = 1,
+ HAL_FW_C2H_CMD_Read_RFREG = 2,
+ HAL_FW_C2H_CMD_Read_EEPROM = 3,
+ HAL_FW_C2H_CMD_Read_EFUSE = 4,
+ HAL_FW_C2H_CMD_Read_CAM = 5,
+ HAL_FW_C2H_CMD_Get_BasicRate = 6,
+ HAL_FW_C2H_CMD_Get_DataRate = 7,
+ HAL_FW_C2H_CMD_Survey = 8,
+ HAL_FW_C2H_CMD_SurveyDone = 9,
+ HAL_FW_C2H_CMD_JoinBss = 10,
+ HAL_FW_C2H_CMD_AddSTA = 11,
+ HAL_FW_C2H_CMD_DelSTA = 12,
+ HAL_FW_C2H_CMD_AtimDone = 13,
+ HAL_FW_C2H_CMD_TX_Report = 14,
+ HAL_FW_C2H_CMD_CCX_Report = 15,
+ HAL_FW_C2H_CMD_DTM_Report = 16,
+ HAL_FW_C2H_CMD_TX_Rate_Statistics = 17,
+ HAL_FW_C2H_CMD_C2HLBK = 18,
+ HAL_FW_C2H_CMD_C2HDBG = 19,
+ HAL_FW_C2H_CMD_C2HFEEDBACK = 20,
+ HAL_FW_C2H_CMD_MAX
+};
+
+enum rtl_desc_qsel {
+ QSLT_BK = 0x2,
+ QSLT_BE = 0x0,
+ QSLT_VI = 0x5,
+ QSLT_VO = 0x7,
+ QSLT_BEACON = 0x10,
+ QSLT_HIGH = 0x11,
+ QSLT_MGNT = 0x12,
+ QSLT_CMD = 0x13,
+};
+
+enum rtl_desc92c_rate {
+ DESC92C_RATE1M = 0x00,
+ DESC92C_RATE2M = 0x01,
+ DESC92C_RATE5_5M = 0x02,
+ DESC92C_RATE11M = 0x03,
+
+ DESC92C_RATE6M = 0x04,
+ DESC92C_RATE9M = 0x05,
+ DESC92C_RATE12M = 0x06,
+ DESC92C_RATE18M = 0x07,
+ DESC92C_RATE24M = 0x08,
+ DESC92C_RATE36M = 0x09,
+ DESC92C_RATE48M = 0x0a,
+ DESC92C_RATE54M = 0x0b,
+
+ DESC92C_RATEMCS0 = 0x0c,
+ DESC92C_RATEMCS1 = 0x0d,
+ DESC92C_RATEMCS2 = 0x0e,
+ DESC92C_RATEMCS3 = 0x0f,
+ DESC92C_RATEMCS4 = 0x10,
+ DESC92C_RATEMCS5 = 0x11,
+ DESC92C_RATEMCS6 = 0x12,
+ DESC92C_RATEMCS7 = 0x13,
+ DESC92C_RATEMCS8 = 0x14,
+ DESC92C_RATEMCS9 = 0x15,
+ DESC92C_RATEMCS10 = 0x16,
+ DESC92C_RATEMCS11 = 0x17,
+ DESC92C_RATEMCS12 = 0x18,
+ DESC92C_RATEMCS13 = 0x19,
+ DESC92C_RATEMCS14 = 0x1a,
+ DESC92C_RATEMCS15 = 0x1b,
+ DESC92C_RATEMCS15_SG = 0x1c,
+ DESC92C_RATEMCS32 = 0x20,
+};
+
+struct phy_sts_cck_8192s_t {
+ u8 adc_pwdb_X[4];
+ u8 sq_rpt;
+ u8 cck_agc_rpt;
+};
+
+struct h2c_cmd_8192c {
+ u8 element_id;
+ u32 cmd_len;
+ u8 *p_cmdbuffer;
+};
+
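+/* Map a 2.4 GHz channel number to its TX power calibration group: 1-2, 3-8 or 9+. */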
+static inline u8 _rtl92c_get_chnl_group(u8 chnl)
+{
+ u8 group = 0;
+
+ if (chnl < 3)
+ group = 0;
+ else if (chnl < 9)
+ group = 1;
+ else
+ group = 2;
+
+ return group;
+}
+
+/* NOTE: reference to rtl8192c_rates struct */
+static inline int _rtl92c_rate_mapping(struct ieee80211_hw *hw, bool isHT,
+ u8 desc_rate, bool first_ampdu)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int rate_idx = 0;
+
+ if (first_ampdu) {
+ if (!isHT) {
+ switch (desc_rate) {
+ case DESC92C_RATE1M:
+ rate_idx = 0;
+ break;
+ case DESC92C_RATE2M:
+ rate_idx = 1;
+ break;
+ case DESC92C_RATE5_5M:
+ rate_idx = 2;
+ break;
+ case DESC92C_RATE11M:
+ rate_idx = 3;
+ break;
+ case DESC92C_RATE6M:
+ rate_idx = 4;
+ break;
+ case DESC92C_RATE9M:
+ rate_idx = 5;
+ break;
+ case DESC92C_RATE12M:
+ rate_idx = 6;
+ break;
+ case DESC92C_RATE18M:
+ rate_idx = 7;
+ break;
+ case DESC92C_RATE24M:
+ rate_idx = 8;
+ break;
+ case DESC92C_RATE36M:
+ rate_idx = 9;
+ break;
+ case DESC92C_RATE48M:
+ rate_idx = 10;
+ break;
+ case DESC92C_RATE54M:
+ rate_idx = 11;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
+ ("Rate %d is not support, set to "
+ "1M rate.\n", desc_rate));
+ rate_idx = 0;
+ break;
+ }
+ } else {
+ rate_idx = 11;
+ }
+ return rate_idx;
+ }
+ switch (desc_rate) {
+ case DESC92C_RATE1M:
+ rate_idx = 0;
+ break;
+ case DESC92C_RATE2M:
+ rate_idx = 1;
+ break;
+ case DESC92C_RATE5_5M:
+ rate_idx = 2;
+ break;
+ case DESC92C_RATE11M:
+ rate_idx = 3;
+ break;
+ case DESC92C_RATE6M:
+ rate_idx = 4;
+ break;
+ case DESC92C_RATE9M:
+ rate_idx = 5;
+ break;
+ case DESC92C_RATE12M:
+ rate_idx = 6;
+ break;
+ case DESC92C_RATE18M:
+ rate_idx = 7;
+ break;
+ case DESC92C_RATE24M:
+ rate_idx = 8;
+ break;
+ case DESC92C_RATE36M:
+ rate_idx = 9;
+ break;
+ case DESC92C_RATE48M:
+ rate_idx = 10;
+ break;
+ case DESC92C_RATE54M:
+ rate_idx = 11;
+ break;
+ /* TODO: How should MCS rates be mapped? */
+ /* NOTE: reference __ieee80211_rx */
+ default:
+ rate_idx = 11;
+ break;
+ }
+ return rate_idx;
+}
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
new file mode 100644
index 000000000000..7d76504df4d1
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
@@ -0,0 +1,113 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+
+void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long undecorated_smoothed_pwdb;
+
+ if (!rtlpriv->dm.dynamic_txpower_enable)
+ return;
+
+ if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+ return;
+ }
+
+ if ((mac->link_state < MAC80211_LINKED) &&
+ (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+ ("Not connected to any\n"));
+
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+
+ rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+ return;
+ }
+
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("AP Client PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb));
+ } else {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("STA Default Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb));
+ }
+ } else {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("AP Ext Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb));
+ }
+
+ if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
+ } else if ((undecorated_smoothed_pwdb <
+ (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
+ (undecorated_smoothed_pwdb >=
+ TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
+
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
+ } else if (undecorated_smoothed_pwdb <
+ (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("TXHIGHPWRLEVEL_NORMAL\n"));
+ }
+
+ if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel));
+ rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+ }
+
+ rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
new file mode 100644
index 000000000000..36302ebae4a3
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
@@ -0,0 +1,197 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_DM_H__
+#define __RTL92C_DM_H__
+
+#define HAL_DM_DIG_DISABLE BIT(0)
+#define HAL_DM_HIPWR_DISABLE BIT(1)
+
+#define OFDM_TABLE_LENGTH 37
+#define CCK_TABLE_LENGTH 33
+
+#define OFDM_TABLE_SIZE 37
+#define CCK_TABLE_SIZE 33
+
+#define BW_AUTO_SWITCH_HIGH_LOW 25
+#define BW_AUTO_SWITCH_LOW_HIGH 30
+
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+
+#define DM_FALSEALARM_THRESH_LOW 400
+#define DM_FALSEALARM_THRESH_HIGH 1000
+
+#define DM_DIG_MAX 0x3e
+#define DM_DIG_MIN 0x1e
+
+#define DM_DIG_FA_UPPER 0x32
+#define DM_DIG_FA_LOWER 0x20
+#define DM_DIG_FA_TH0 0x20
+#define DM_DIG_FA_TH1 0x100
+#define DM_DIG_FA_TH2 0x200
+
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+#define DM_DIG_BACKOFF_DEFAULT 10
+
+#define RXPATHSELECTION_SS_TH_lOW 30
+#define RXPATHSELECTION_DIFF_TH 18
+
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+#define CTS2SELF_THVAL 30
+#define REGC38_TH 20
+
+#define WAIOTTHVal 25
+
+#define TXHIGHPWRLEVEL_NORMAL 0
+#define TXHIGHPWRLEVEL_LEVEL1 1
+#define TXHIGHPWRLEVEL_LEVEL2 2
+#define TXHIGHPWRLEVEL_BT1 3
+#define TXHIGHPWRLEVEL_BT2 4
+
+#define DM_TYPE_BYFW 0
+#define DM_TYPE_BYDRIVER 1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+
+struct ps_t {
+ u8 pre_ccastate;
+ u8 cur_ccasate;
+ u8 pre_rfstate;
+ u8 cur_rfstate;
+ long rssi_val_min;
+};
+
+struct dig_t {
+ u8 dig_enable_flag;
+ u8 dig_ext_port_stage;
+ u32 rssi_lowthresh;
+ u32 rssi_highthresh;
+ u32 fa_lowthresh;
+ u32 fa_highthresh;
+ u8 cursta_connectctate;
+ u8 presta_connectstate;
+ u8 curmultista_connectstate;
+ u8 pre_igvalue;
+ u8 cur_igvalue;
+ char backoff_val;
+ char backoff_val_range_max;
+ char backoff_val_range_min;
+ u8 rx_gain_range_max;
+ u8 rx_gain_range_min;
+ u8 rssi_val_min;
+ u8 pre_cck_pd_state;
+ u8 cur_cck_pd_state;
+ u8 pre_cck_fa_state;
+ u8 cur_cck_fa_state;
+ u8 pre_ccastate;
+ u8 cur_ccasate;
+};
+
+struct swat_t {
+ u8 failure_cnt;
+ u8 try_flag;
+ u8 stop_trying;
+ long pre_rssi;
+ long trying_threshold;
+ u8 cur_antenna;
+ u8 pre_antenna;
+};
+
+enum tag_dynamic_init_gain_operation_type_definition {
+ DIG_TYPE_THRESH_HIGH = 0,
+ DIG_TYPE_THRESH_LOW = 1,
+ DIG_TYPE_BACKOFF = 2,
+ DIG_TYPE_RX_GAIN_MIN = 3,
+ DIG_TYPE_RX_GAIN_MAX = 4,
+ DIG_TYPE_ENABLE = 5,
+ DIG_TYPE_DISABLE = 6,
+ DIG_OP_TYPE_MAX
+};
+
+enum tag_cck_packet_detection_threshold_type_definition {
+ CCK_PD_STAGE_LowRssi = 0,
+ CCK_PD_STAGE_HighRssi = 1,
+ CCK_FA_STAGE_Low = 2,
+ CCK_FA_STAGE_High = 3,
+ CCK_PD_STAGE_MAX = 4,
+};
+
+enum dm_1r_cca_e {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+};
+
+enum dm_rf_e {
+ RF_SAVE = 0,
+ RF_NORMAL = 1,
+ RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch_e {
+ ANS_ANTENNA_B = 1,
+ ANS_ANTENNA_A = 2,
+ ANS_ANTENNA_MAX = 3,
+};
+
+enum dm_dig_ext_port_alg_e {
+ DIG_EXT_PORT_STAGE_0 = 0,
+ DIG_EXT_PORT_STAGE_1 = 1,
+ DIG_EXT_PORT_STAGE_2 = 2,
+ DIG_EXT_PORT_STAGE_3 = 3,
+ DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_connect_e {
+ DIG_STA_DISCONNECT = 0,
+ DIG_STA_CONNECT = 1,
+ DIG_STA_BEFORE_CONNECT = 2,
+ DIG_MULTISTA_DISCONNECT = 3,
+ DIG_MULTISTA_CONNECT = 4,
+ DIG_CONNECT_MAX
+};
+
+extern struct dig_t dm_digtable;
+void rtl92c_dm_init(struct ieee80211_hw *hw);
+void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
+void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
+void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
+void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
+void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
new file mode 100644
index 000000000000..05477f465a75
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -0,0 +1,2148 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "led.h"
+#include "hw.h"
+
+#define LLT_CONFIG 5
+
+static void _rtl92ce_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+ u8 set_bits, u8 clear_bits)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpci->reg_bcn_ctrl_val |= set_bits;
+ rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
+
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
+}
+
+static void _rtl92ce_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte &= ~(BIT(0));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl92ce_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte |= BIT(0);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl92ce_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(1));
+}
+
+static void _rtl92ce_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl92ce_set_bcn_ctrl_reg(hw, BIT(1), 0);
+}
+
+void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ switch (variable) {
+ case HW_VAR_RCR:
+ *((u32 *) (val)) = rtlpci->receive_config;
+ break;
+ case HW_VAR_RF_STATE:
+ *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+ break;
+ case HW_VAR_FWLPS_RF_ON:{
+ enum rf_pwrstate rfState;
+ u32 val_rcr;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw,
+ HW_VAR_RF_STATE,
+ (u8 *) (&rfState));
+ if (rfState == ERFOFF) {
+ *((bool *) (val)) = true;
+ } else {
+ val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ val_rcr &= 0x00070000;
+ if (val_rcr)
+ *((bool *) (val)) = false;
+ else
+ *((bool *) (val)) = true;
+ }
+ break;
+ }
+ case HW_VAR_FW_PSMODE_STATUS:
+ *((bool *) (val)) = ppsc->fw_current_inpsmode;
+ break;
+ case HW_VAR_CORRECT_TSF:{
+ u64 tsf;
+ u32 *ptsf_low = (u32 *)&tsf;
+ u32 *ptsf_high = ((u32 *)&tsf) + 1;
+
+ *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+ *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+ *((u64 *) (val)) = tsf;
+
+ break;
+ }
+ case HW_VAR_MGT_FILTER:
+ *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
+ break;
+ case HW_VAR_CTRL_FILTER:
+ *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
+ break;
+ case HW_VAR_DATA_FILTER:
+ *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+}
+
+void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u8 idx;
+
+ switch (variable) {
+ case HW_VAR_ETHER_ADDR:{
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_MACID + idx),
+ val[idx]);
+ }
+ break;
+ }
+ case HW_VAR_BASIC_RATE:{
+ u16 rate_cfg = ((u16 *) val)[0];
+ u8 rate_index = 0;
+ rate_cfg &= 0x15f;
+ rate_cfg |= 0x01;
+ rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
+ rtl_write_byte(rtlpriv, REG_RRSR + 1,
+ (rate_cfg >> 8)&0xff);
+ while (rate_cfg > 0x1) {
+ rate_cfg = (rate_cfg >> 1);
+ rate_index++;
+ }
+ rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
+ rate_index);
+ break;
+ }
+ case HW_VAR_BSSID:{
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_BSSID + idx),
+ val[idx]);
+ }
+ break;
+ }
+ case HW_VAR_SIFS:{
+ rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
+
+ rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+
+ if (!mac->ht_enable)
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ 0x0e0e);
+ else
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ *((u16 *) val));
+ break;
+ }
+ case HW_VAR_SLOT_TIME:{
+ u8 e_aci;
+
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("HW_VAR_SLOT_TIME %x\n", val[0]));
+
+ rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+
+ for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AC_PARAM,
+ (u8 *) (&e_aci));
+ }
+ break;
+ }
+ case HW_VAR_ACK_PREAMBLE:{
+ u8 reg_tmp;
+ u8 short_preamble = (bool) (*(u8 *) val);
+ reg_tmp = (mac->cur_40_prime_sc) << 5;
+ if (short_preamble)
+ reg_tmp |= 0x80;
+
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
+ break;
+ }
+ case HW_VAR_AMPDU_MIN_SPACE:{
+ u8 min_spacing_to_set;
+ u8 sec_min_space;
+
+ min_spacing_to_set = *((u8 *) val);
+ if (min_spacing_to_set <= 7) {
+ sec_min_space = 0;
+
+ if (min_spacing_to_set < sec_min_space)
+ min_spacing_to_set = sec_min_space;
+
+ mac->min_space_cfg = ((mac->min_space_cfg &
+ 0xf8) |
+ min_spacing_to_set);
+
+ *val = min_spacing_to_set;
+
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg));
+
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ }
+ break;
+ }
+ case HW_VAR_SHORTGI_DENSITY:{
+ u8 density_to_set;
+
+ density_to_set = *((u8 *) val);
+ mac->min_space_cfg |= (density_to_set << 3);
+
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg));
+
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+
+ break;
+ }
+ case HW_VAR_AMPDU_FACTOR:{
+ u8 regtoset_normal[4] = { 0x41, 0xa8, 0x72, 0xb9 };
+
+ u8 factor_toset;
+ u8 *p_regtoset = NULL;
+ u8 index = 0;
+
+ p_regtoset = regtoset_normal;
+
+ factor_toset = *((u8 *) val);
+ if (factor_toset <= 3) {
+ factor_toset = (1 << (factor_toset + 2));
+ if (factor_toset > 0xf)
+ factor_toset = 0xf;
+
+ for (index = 0; index < 4; index++) {
+ if ((p_regtoset[index] & 0xf0) >
+ (factor_toset << 4))
+ p_regtoset[index] =
+ (p_regtoset[index] & 0x0f) |
+ (factor_toset << 4);
+
+ if ((p_regtoset[index] & 0x0f) >
+ factor_toset)
+ p_regtoset[index] =
+ (p_regtoset[index] & 0xf0) |
+ (factor_toset);
+
+ rtl_write_byte(rtlpriv,
+ (REG_AGGLEN_LMT + index),
+ p_regtoset[index]);
+
+ }
+
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset));
+ }
+ break;
+ }
+ case HW_VAR_AC_PARAM:{
+ u8 e_aci = *((u8 *) val);
+ u32 u4b_ac_param;
+ u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
+ u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
+ u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op);
+
+ u4b_ac_param = (u32) mac->ac[e_aci].aifs;
+ u4b_ac_param |= ((u32)cw_min
+ & 0xF) << AC_PARAM_ECW_MIN_OFFSET;
+ u4b_ac_param |= ((u32)cw_max &
+ 0xF) << AC_PARAM_ECW_MAX_OFFSET;
+ u4b_ac_param |= (u32)tx_op << AC_PARAM_TXOP_OFFSET;
+
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("queue:%x, ac_param:%x\n", e_aci,
+ u4b_ac_param));
+
+ switch (e_aci) {
+ case AC1_BK:
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM,
+ u4b_ac_param);
+ break;
+ case AC0_BE:
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
+ u4b_ac_param);
+ break;
+ case AC2_VI:
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM,
+ u4b_ac_param);
+ break;
+ case AC3_VO:
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM,
+ u4b_ac_param);
+ break;
+ default:
+ RT_ASSERT(false,
+ ("SetHwReg8185(): invalid aci: %d !\n",
+ e_aci));
+ break;
+ }
+
+ if (rtlpci->acm_method != eAcmWay2_SW)
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_ACM_CTRL,
+ (u8 *) (&e_aci));
+ break;
+ }
+ case HW_VAR_ACM_CTRL:{
+ u8 e_aci = *((u8 *) val);
+ union aci_aifsn *p_aci_aifsn =
+ (union aci_aifsn *)(&(mac->ac[0].aifs));
+ u8 acm = p_aci_aifsn->f.acm;
+ u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+ acm_ctrl =
+ acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1);
+
+ if (acm) {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl |= AcmHw_BeqEn;
+ break;
+ case AC2_VI:
+ acm_ctrl |= AcmHw_ViqEn;
+ break;
+ case AC3_VO:
+ acm_ctrl |= AcmHw_VoqEn;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("HW_VAR_ACM_CTRL acm set "
+ "failed: eACI is %d\n", acm));
+ break;
+ }
+ } else {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl &= (~AcmHw_BeqEn);
+ break;
+ case AC2_VI:
+ acm_ctrl &= (~AcmHw_ViqEn);
+ break;
+ case AC3_VO:
+ acm_ctrl &= (~AcmHw_BeqEn);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ }
+
+ RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
+ ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
+ "Write 0x%X\n", acm_ctrl));
+ rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+ break;
+ }
+ case HW_VAR_RCR:{
+ rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
+ rtlpci->receive_config = ((u32 *) (val))[0];
+ break;
+ }
+ case HW_VAR_RETRY_LIMIT:{
+ u8 retry_limit = ((u8 *) (val))[0];
+
+ rtl_write_word(rtlpriv, REG_RL,
+ retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+ retry_limit << RETRY_LIMIT_LONG_SHIFT);
+ break;
+ }
+ case HW_VAR_DUAL_TSF_RST:
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+ break;
+ case HW_VAR_EFUSE_BYTES:
+ rtlefuse->efuse_usedbytes = *((u16 *) val);
+ break;
+ case HW_VAR_EFUSE_USAGE:
+ rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ break;
+ case HW_VAR_IO_CMD:
+ rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
+ break;
+ case HW_VAR_WPA_CONFIG:
+ rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ break;
+ case HW_VAR_SET_RPWM:{
+ u8 rpwm_val;
+
+ rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM);
+ udelay(1);
+
+ if (rpwm_val & BIT(7)) {
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
+ (*(u8 *) val));
+ } else {
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
+ ((*(u8 *) val) | BIT(7)));
+ }
+
+ break;
+ }
+ case HW_VAR_H2C_FW_PWRMODE:{
+ u8 psmode = (*(u8 *) val);
+
+ if ((psmode != FW_PS_ACTIVE_MODE) &&
+ (!IS_92C_SERIAL(rtlhal->version))) {
+ rtl92c_dm_rf_saving(hw, true);
+ }
+
+ rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+ break;
+ }
+ case HW_VAR_FW_PSMODE_STATUS:
+ ppsc->fw_current_inpsmode = *((bool *) val);
+ break;
+ case HW_VAR_H2C_FW_JOINBSSRPT:{
+ u8 mstatus = (*(u8 *) val);
+ u8 tmp_regcr, tmp_reg422;
+ bool recover = false;
+
+ if (mstatus == RT_MEDIA_CONNECT) {
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID,
+ NULL);
+
+ tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ (tmp_regcr | BIT(0)));
+
+ _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(3));
+ _rtl92ce_set_bcn_ctrl_reg(hw, BIT(4), 0);
+
+ tmp_reg422 =
+ rtl_read_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2);
+ if (tmp_reg422 & BIT(6))
+ recover = true;
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422 & (~BIT(6)));
+
+ rtl92c_set_fw_rsvdpagepkt(hw, 0);
+
+ _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
+ _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
+
+ if (recover) {
+ rtl_write_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422);
+ }
+
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ (tmp_regcr & ~(BIT(0))));
+ }
+ rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+
+ break;
+ }
+ case HW_VAR_AID:{
+ u16 u2btmp;
+ u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+ u2btmp &= 0xC000;
+ rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
+ mac->assoc_id));
+
+ break;
+ }
+ case HW_VAR_CORRECT_TSF:{
+ u8 btype_ibss = ((u8 *) (val))[0];
+
+ /*btype_ibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ?
+ 1 : 0;*/
+
+ if (btype_ibss)
+ _rtl92ce_stop_tx_beacon(hw);
+
+ _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(3));
+
+ rtl_write_dword(rtlpriv, REG_TSFTR,
+ (u32) (mac->tsf & 0xffffffff));
+ rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+ (u32) ((mac->tsf >> 32)&0xffffffff));
+
+ _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
+
+ if (btype_ibss)
+ _rtl92ce_resume_tx_beacon(hw);
+
+ break;
+
+ }
+ case HW_VAR_MGT_FILTER:
+ rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *) val);
+ break;
+ case HW_VAR_CTRL_FILTER:
+ rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *) val);
+ break;
+ case HW_VAR_DATA_FILTER:
+ rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *) val);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
+ "not process\n"));
+ break;
+ }
+}
+
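+/* Write one link-list table (LLT) entry and poll until the hardware clears
+ * the op field or POLLING_LLT_THRESHOLD iterations have elapsed. */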
+static bool _rtl92ce_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool status = true;
+ long count = 0;
+ u32 value = _LLT_INIT_ADDR(address) |
+ _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+
+ rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+
+ do {
+ value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+ if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+ break;
+
+ if (count > POLLING_LLT_THRESHOLD) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Failed to polling write LLT done at "
+ "address %d!\n", address));
+ status = false;
+ break;
+ }
+ } while (++count);
+
+ return status;
+}
+
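+/* Program the reserved-queue page numbers selected by LLT_CONFIG, then chain
+ * the TX packet-buffer pages below txpktbuf_bndy into a terminated list and
+ * the remaining pages into a ring. */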
+static bool _rtl92ce_llt_table_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned short i;
+ u8 txpktbuf_bndy;
+ u8 maxPage;
+ bool status;
+
+#if LLT_CONFIG == 1
+ maxPage = 255;
+ txpktbuf_bndy = 252;
+#elif LLT_CONFIG == 2
+ maxPage = 127;
+ txpktbuf_bndy = 124;
+#elif LLT_CONFIG == 3
+ maxPage = 255;
+ txpktbuf_bndy = 174;
+#elif LLT_CONFIG == 4
+ maxPage = 255;
+ txpktbuf_bndy = 246;
+#elif LLT_CONFIG == 5
+ maxPage = 255;
+ txpktbuf_bndy = 246;
+#endif
+
+#if LLT_CONFIG == 1
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x1c);
+ rtl_write_dword(rtlpriv, REG_RQPN, 0x80a71c1c);
+#elif LLT_CONFIG == 2
+ rtl_write_dword(rtlpriv, REG_RQPN, 0x845B1010);
+#elif LLT_CONFIG == 3
+ rtl_write_dword(rtlpriv, REG_RQPN, 0x84838484);
+#elif LLT_CONFIG == 4
+ rtl_write_dword(rtlpriv, REG_RQPN, 0x80bd1c1c);
+#elif LLT_CONFIG == 5
+ rtl_write_word(rtlpriv, REG_RQPN_NPQ, 0x0000);
+
+ rtl_write_dword(rtlpriv, REG_RQPN, 0x80b01c29);
+#endif
+
+ rtl_write_dword(rtlpriv, REG_TRXFF_BNDY, (0x27FF0000 | txpktbuf_bndy));
+ rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
+
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+
+ rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_PBP, 0x11);
+ rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
+
+ for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+ status = _rtl92ce_llt_write(hw, i, i + 1);
+ if (!status)
+ return status;
+ }
+
+ status = _rtl92ce_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
+ if (!status)
+ return status;
+
+ for (i = txpktbuf_bndy; i < maxPage; i++) {
+ status = _rtl92ce_llt_write(hw, i, (i + 1));
+ if (!status)
+ return status;
+ }
+
+ status = _rtl92ce_llt_write(hw, maxPage, txpktbuf_bndy);
+ if (!status)
+ return status;
+
+ return true;
+}
+
+static void _rtl92ce_gen_refresh_led_state(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+
+ if (rtlpci->up_first_time)
+ return;
+
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
+ rtl92ce_sw_led_on(hw, pLed0);
+ else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
+ rtl92ce_sw_led_on(hw, pLed0);
+ else
+ rtl92ce_sw_led_off(hw, pLed0);
+
+}
+
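+/* Bring the MAC out of reset: power sequencing through APS_FSMCO, LLT setup,
+ * RCR/TCR programming and per-queue TX/RX descriptor base addresses. */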
+static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ unsigned char bytetmp;
+ unsigned short wordtmp;
+ u16 retry;
+
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0F);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) | BIT(0);
+ udelay(2);
+
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp);
+ udelay(2);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
+ udelay(2);
+
+ retry = 0;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("reg0xec:%x:%x\n",
+ rtl_read_dword(rtlpriv, 0xEC),
+ bytetmp));
+
+ while ((bytetmp & BIT(0)) && retry < 1000) {
+ retry++;
+ udelay(50);
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("reg0xec:%x:%x\n",
+ rtl_read_dword(rtlpriv,
+ 0xEC),
+ bytetmp));
+ udelay(50);
+ }
+
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x1012);
+
+ rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL + 1, 0x82);
+ udelay(2);
+
+ rtl_write_word(rtlpriv, REG_CR, 0x2ff);
+
+ if (!_rtl92ce_llt_table_init(hw))
+ return false;
+
+ rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
+ rtl_write_byte(rtlpriv, REG_HISRE, 0xff);
+
+ rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x27ff);
+
+ wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL);
+ wordtmp &= 0xf;
+ wordtmp |= 0xF771;
+ rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);
+
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F);
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+ rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config);
+
+ rtl_write_byte(rtlpriv, 0x4d0, 0x0);
+
+ rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
+ ((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
+ DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_MGQ_DESA,
+ (u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
+ DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_VOQ_DESA,
+ (u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_VIQ_DESA,
+ (u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_BEQ_DESA,
+ (u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_BKQ_DESA,
+ (u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_HQ_DESA,
+ (u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
+ DMA_BIT_MASK(32));
+ rtl_write_dword(rtlpriv, REG_RX_DESA,
+ (u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma &
+ DMA_BIT_MASK(32));
+
+ if (IS_92C_SERIAL(rtlhal->version))
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x77);
+ else
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x22);
+
+ rtl_write_dword(rtlpriv, REG_INT_MIG, 0);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, bytetmp & ~BIT(6));
+ retry = 0; /* restart the poll count; reusing the stale value above looks unintended */
+ do {
+ retry++;
+ bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
+ } while ((retry < 200) && (bytetmp & BIT(7)));
+
+ _rtl92ce_gen_refresh_led_state(hw);
+
+ rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0);
+
+ return true;
+}
+
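+/* Static MAC register defaults: response rates, retry limits, SIFS/slot
+ * timings, aggregation limits and beacon control. */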
+static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 reg_bw_opmode;
+ u32 reg_ratr, reg_prsr;
+
+ reg_bw_opmode = BW_OPMODE_20MHZ;
+ reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
+ RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+ reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+
+ rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8);
+
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+
+ rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr);
+
+ rtl_write_byte(rtlpriv, REG_SLOT, 0x09);
+
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, 0x0);
+
+ rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F80);
+
+ rtl_write_word(rtlpriv, REG_RL, 0x0707);
+
+ rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x02012802);
+
+ rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
+
+ rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000);
+ rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504);
+ rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000);
+ rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504);
+
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb972a841);
+
+ rtl_write_byte(rtlpriv, REG_ATIMWND, 0x2);
+
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff);
+
+ rtlpci->reg_bcn_ctrl_val = 0x1f;
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, rtlpci->reg_bcn_ctrl_val);
+
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+
+ rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
+ rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+
+ rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
+
+ rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x086666);
+
+ rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
+
+ rtl_write_word(rtlpriv, REG_SPEC_SIFS, 0x1010);
+ rtl_write_word(rtlpriv, REG_MAC_SPEC_SIFS, 0x1010);
+
+ rtl_write_word(rtlpriv, REG_SIFS_CTX, 0x1010);
+
+ rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x1010);
+
+ rtl_write_dword(rtlpriv, REG_MAR, 0xffffffff);
+ rtl_write_dword(rtlpriv, REG_MAR + 4, 0xffffffff);
+
+}
+
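+/* Adjust PCIe ASPM behaviour through what appear to be vendor backdoor
+ * registers (0x349/0x34b/0x350/0x352); their semantics are undocumented. */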
+static void _rtl92ce_enable_aspm_back_door(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ rtl_write_byte(rtlpriv, 0x34b, 0x93);
+ rtl_write_word(rtlpriv, 0x350, 0x870c);
+ rtl_write_byte(rtlpriv, 0x352, 0x1);
+
+ if (ppsc->support_backdoor)
+ rtl_write_byte(rtlpriv, 0x349, 0x1b);
+ else
+ rtl_write_byte(rtlpriv, 0x349, 0x03);
+
+ rtl_write_word(rtlpriv, 0x350, 0x2718);
+ rtl_write_byte(rtlpriv, 0x352, 0x1);
+}
+
+void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 sec_reg_value;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm));
+
+ if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("not open "
+ "hw encryption\n"));
+ return;
+ }
+
+ sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
+
+ if (rtlpriv->sec.use_defaultkey) {
+ sec_reg_value |= SCR_TxUseDK;
+ sec_reg_value |= SCR_RxUseDK;
+ }
+
+ sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+
+ rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("The SECR-value %x\n", sec_reg_value));
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+
+}
+
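+/* Full hardware bring-up: init MAC, download firmware, configure MAC/BB/RF,
+ * run IQ/LC calibration and start the dynamic-mechanism (dm) machinery. */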
+int rtl92ce_hw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ static bool iqk_initialized; /* initialized to false */
+ bool rtstatus = true;
+ bool is92c;
+ int err;
+ u8 tmp_u1b;
+
+ rtlpci->being_init_adapter = true;
+ rtlpriv->intf_ops->disable_aspm(hw);
+ rtstatus = _rtl92ce_init_mac(hw);
+ if (!rtstatus) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Init MAC failed\n"));
+ err = 1;
+ return err;
+ }
+
+ err = rtl92c_download_fw(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Failed to download FW. Init HW "
+ "without FW now..\n"));
+ err = 1;
+ rtlhal->fw_ready = false;
+ return err;
+ } else {
+ rtlhal->fw_ready = true;
+ }
+
+ rtlhal->last_hmeboxnum = 0;
+ rtl92ce_phy_mac_config(hw);
+ rtl92ce_phy_bb_config(hw);
+ rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
+ rtl92c_phy_rf_config(hw);
+ rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
+ RF_CHNLBW, RFREG_OFFSET_MASK);
+ rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
+ RF_CHNLBW, RFREG_OFFSET_MASK);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
+ _rtl92ce_hw_configure(hw);
+ rtl_cam_reset_all_entry(hw);
+ rtl92ce_enable_hw_security_config(hw);
+ ppsc->rfpwr_state = ERFON;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+ _rtl92ce_enable_aspm_back_door(hw);
+ rtlpriv->intf_ops->enable_aspm(hw);
+ if (ppsc->rfpwr_state == ERFON) {
+ rtl92c_phy_set_rfpath_switch(hw, 1);
+ if (iqk_initialized)
+ rtl92c_phy_iq_calibrate(hw, true);
+ else {
+ rtl92c_phy_iq_calibrate(hw, false);
+ iqk_initialized = true;
+ }
+
+ rtl92c_dm_check_txpower_tracking(hw);
+ rtl92c_phy_lc_calibrate(hw);
+ }
+
+ is92c = IS_92C_SERIAL(rtlhal->version);
+ tmp_u1b = efuse_read_1byte(hw, 0x1FA);
+ if (!(tmp_u1b & BIT(0))) {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("PA BIAS path A\n"));
+ }
+
+ if (!(tmp_u1b & BIT(1)) && is92c) {
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0F, 0x05);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("PA BIAS path B\n"));
+ }
+
+ if (!(tmp_u1b & BIT(4))) {
+ tmp_u1b = rtl_read_byte(rtlpriv, 0x16);
+ tmp_u1b &= 0x0F;
+ rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
+ udelay(10);
+ rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("under 1.5V\n"));
+ }
+ rtl92c_dm_init(hw);
+ rtlpci->being_init_adapter = false;
+ return err;
+}
+
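+/* Read SYS_CFG to distinguish 88C/92C silicon and A/B cut, and derive the
+ * RF path configuration (1T1R/1T2R/2T2R) from it. */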
+static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ enum version_8192c version = VERSION_UNKNOWN;
+ u32 value32;
+
+ value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
+ if (value32 & TRP_VAUX_EN) {
+ version = (value32 & TYPE_ID) ? VERSION_A_CHIP_92C :
+ VERSION_A_CHIP_88C;
+ } else {
+ version = (value32 & TYPE_ID) ? VERSION_B_CHIP_92C :
+ VERSION_B_CHIP_88C;
+ }
+
+ switch (version) {
+ case VERSION_B_CHIP_92C:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_B_CHIP_92C.\n"));
+ break;
+ case VERSION_B_CHIP_88C:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_B_CHIP_88C.\n"));
+ break;
+ case VERSION_A_CHIP_92C:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_A_CHIP_92C.\n"));
+ break;
+ case VERSION_A_CHIP_88C:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_A_CHIP_88C.\n"));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Chip Version ID: Unknown. Bug?\n"));
+ break;
+ }
+
+ switch (version & 0x3) {
+ case CHIP_88C:
+ rtlphy->rf_type = RF_1T1R;
+ break;
+ case CHIP_92C:
+ rtlphy->rf_type = RF_2T2R;
+ break;
+ case CHIP_92C_1T2R:
+ rtlphy->rf_type = RF_1T2R;
+ break;
+ default:
+ rtlphy->rf_type = RF_1T1R;
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("ERROR RF_Type is set!!"));
+ break;
+ }
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+ "RF_2T2R" : "RF_1T1R"));
+
+ return version;
+}
+
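+/* Program the MSR network-type field and switch beacon TX on or off to match
+ * the new interface type; returns 1 for unsupported types. */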
+static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
+ enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+ bt_msr &= 0xfc;
+
+ if (type == NL80211_IFTYPE_UNSPECIFIED ||
+ type == NL80211_IFTYPE_STATION) {
+ _rtl92ce_stop_tx_beacon(hw);
+ _rtl92ce_enable_bcn_sub_func(hw);
+ } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) {
+ _rtl92ce_resume_tx_beacon(hw);
+ _rtl92ce_disable_bcn_sub_func(hw);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Set HW_VAR_MEDIA_STATUS: "
+ "No such media status(%x).\n", type));
+ }
+
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ bt_msr |= MSR_NOLINK;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to NO LINK!\n"));
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ bt_msr |= MSR_ADHOC;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to Ad Hoc!\n"));
+ break;
+ case NL80211_IFTYPE_STATION:
+ bt_msr |= MSR_INFRA;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to STA!\n"));
+ break;
+ case NL80211_IFTYPE_AP:
+ bt_msr |= MSR_AP;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to AP!\n"));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Network type %d not support!\n", type));
+ return 1;
+ break;
+
+ }
+
+ rtl_write_byte(rtlpriv, (MSR), bt_msr);
+ rtlpriv->cfg->ops->led_control(hw, ledaction);
+ if ((bt_msr & 0xfc) == MSR_AP)
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+ else
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+ return 0;
+}
+
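+/* Enable or disable BSSID filtering (RCR_CBSSID_*) depending on whether the
+ * interface type is expected to track a single associated BSS. */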
+static void _rtl92ce_set_check_bssid(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ u8 filterout_non_associated_bssid = false;
+
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ filterout_non_associated_bssid = true;
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_AP:
+ default:
+ break;
+ }
+
+ if (filterout_non_associated_bssid) {
+ reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *) (&reg_rcr));
+ _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
+ } else {
+ reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+ _rtl92ce_set_bcn_ctrl_reg(hw, BIT(4), 0);
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_RCR, (u8 *) (&reg_rcr));
+ }
+}
+
+int rtl92ce_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+ if (_rtl92ce_set_media_status(hw, type))
+ return -EOPNOTSUPP;
+ _rtl92ce_set_check_bssid(hw, type);
+ return 0;
+}
+
+void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u32 u4b_ac_param;
+ u16 cw_min = le16_to_cpu(mac->ac[aci].cw_min);
+ u16 cw_max = le16_to_cpu(mac->ac[aci].cw_max);
+ u16 tx_op = le16_to_cpu(mac->ac[aci].tx_op);
+
+ rtl92c_dm_init_edca_turbo(hw);
+ u4b_ac_param = (u32) mac->ac[aci].aifs;
+ u4b_ac_param |= (u32) ((cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET);
+ u4b_ac_param |= (u32) ((cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET);
+ u4b_ac_param |= (u32) (tx_op << AC_PARAM_TXOP_OFFSET);
+ RT_TRACE(rtlpriv, COMP_QOS, DBG_DMESG,
+ ("queue:%x, ac_param:%x aifs:%x cwmin:%x cwmax:%x txop:%x\n",
+ aci, u4b_ac_param, mac->ac[aci].aifs, cw_min,
+ cw_max, tx_op));
+ switch (aci) {
+ case AC1_BK:
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
+ break;
+ case AC0_BE:
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
+ break;
+ case AC2_VI:
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
+ break;
+ case AC3_VO:
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
+ break;
+ default:
+ RT_ASSERT(false, ("invalid aci: %d !\n", aci));
+ break;
+ }
+}
+
+void rtl92ce_enable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
+ rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ rtlpci->irq_enabled = true;
+}
+
+void rtl92ce_disable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
+ rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
+ rtlpci->irq_enabled = false;
+}
+
+static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 u1b_tmp;
+
+ rtlpriv->intf_ops->enable_aspm(hw);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE0);
+ if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready)
+ rtl92c_firmware_selfreset(hw);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x51);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+ rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00000000);
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL);
+ rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00FF0000 |
+ (u1b_tmp << 8));
+ rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, 0x0790);
+ rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0e);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, 0x10);
+}
+
+void rtl92ce_card_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum nl80211_iftype opmode;
+
+ mac->link_state = MAC80211_NOLINK;
+ opmode = NL80211_IFTYPE_UNSPECIFIED;
+ _rtl92ce_set_media_status(hw, opmode);
+ if (rtlpci->driver_is_goingto_unload ||
+ ppsc->rfoff_reason > RF_CHANGE_BY_PS)
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ _rtl92ce_poweroff_adapter(hw);
+}
+
+void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, *p_inta);
+
+ /*
+ * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+ * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
+ */
+}
+
+void rtl92ce_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval, atim_window;
+
+ bcn_interval = mac->beacon_interval;
+ atim_window = 2; /*FIX MERGE */
+ rtl92ce_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
+ rtl_write_byte(rtlpriv, 0x606, 0x30);
+ rtl92ce_enable_interrupt(hw);
+}
+
+void rtl92ce_set_beacon_interval(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval = mac->beacon_interval;
+
+ RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
+ ("beacon_interval:%d\n", bcn_interval));
+ rtl92ce_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl92ce_enable_interrupt(hw);
+}
+
+void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
+ ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr));
+ if (add_msr)
+ rtlpci->irq_mask[0] |= add_msr;
+ if (rm_msr)
+ rtlpci->irq_mask[0] &= (~rm_msr);
+ rtl92ce_disable_interrupt(hw);
+ rtl92ce_enable_interrupt(hw);
+}
+
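+/* Parse per-channel-group TX power data from the efuse/EEPROM shadow map,
+ * falling back to the EEPROM_DEFAULT_* values when autoload failed. */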
+static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail,
+ u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 rf_path, index, tempval;
+ u16 i;
+
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < 3; i++) {
+ if (!autoload_fail) {
+ rtlefuse->
+ eeprom_chnlarea_txpwr_cck[rf_path][i] =
+ hwinfo[EEPROM_TXPOWERCCK + rf_path * 3 + i];
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
+ hwinfo[EEPROM_TXPOWERHT40_1S + rf_path * 3 +
+ i];
+ } else {
+ rtlefuse->
+ eeprom_chnlarea_txpwr_cck[rf_path][i] =
+ EEPROM_DEFAULT_TXPOWERLEVEL;
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
+ EEPROM_DEFAULT_TXPOWERLEVEL;
+ }
+ }
+ }
+
+ for (i = 0; i < 3; i++) {
+ if (!autoload_fail)
+ tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
+ else
+ tempval = EEPROM_DEFAULT_HT40_2SDIFF;
+ rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] =
+ (tempval & 0xf);
+ rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] =
+ ((tempval & 0xf0) >> 4);
+ }
+
+ for (rf_path = 0; rf_path < 2; rf_path++)
+ for (i = 0; i < 3; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ ("RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
+ i,
+ rtlefuse->
+ eeprom_chnlarea_txpwr_cck[rf_path][i]));
+ for (rf_path = 0; rf_path < 2; rf_path++)
+ for (i = 0; i < 3; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ ("RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][i]));
+ for (rf_path = 0; rf_path < 2; rf_path++)
+ for (i = 0; i < 3; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ ("RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+ [i]));
+
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < 14; i++) {
+ index = _rtl92c_get_chnl_group((u8) i);
+
+ rtlefuse->txpwrlevel_cck[rf_path][i] =
+ rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][index];
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][index];
+
+ if ((rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index])
+ > 0) {
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path]
+ [index] -
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+ [index];
+ } else {
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
+ }
+ }
+
+ for (i = 0; i < 14; i++) {
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
+ "[0x%x / 0x%x / 0x%x]\n", rf_path, i,
+ rtlefuse->txpwrlevel_cck[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i]));
+ }
+ }
+
+ for (i = 0; i < 3; i++) {
+ if (!autoload_fail) {
+ rtlefuse->eeprom_pwrlimit_ht40[i] =
+ hwinfo[EEPROM_TXPWR_GROUP + i];
+ rtlefuse->eeprom_pwrlimit_ht20[i] =
+ hwinfo[EEPROM_TXPWR_GROUP + 3 + i];
+ } else {
+ rtlefuse->eeprom_pwrlimit_ht40[i] = 0;
+ rtlefuse->eeprom_pwrlimit_ht20[i] = 0;
+ }
+ }
+
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < 14; i++) {
+ index = _rtl92c_get_chnl_group((u8) i);
+
+ if (rf_path == RF90_PATH_A) {
+ rtlefuse->pwrgroup_ht20[rf_path][i] =
+ (rtlefuse->eeprom_pwrlimit_ht20[index]
+ & 0xf);
+ rtlefuse->pwrgroup_ht40[rf_path][i] =
+ (rtlefuse->eeprom_pwrlimit_ht40[index]
+ & 0xf);
+ } else if (rf_path == RF90_PATH_B) {
+ rtlefuse->pwrgroup_ht20[rf_path][i] =
+ ((rtlefuse->eeprom_pwrlimit_ht20[index]
+ & 0xf0) >> 4);
+ rtlefuse->pwrgroup_ht40[rf_path][i] =
+ ((rtlefuse->eeprom_pwrlimit_ht40[index]
+ & 0xf0) >> 4);
+ }
+
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-%d pwrgroup_ht20[%d] = 0x%x\n",
+ rf_path, i,
+ rtlefuse->pwrgroup_ht20[rf_path][i]));
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-%d pwrgroup_ht40[%d] = 0x%x\n",
+ rf_path, i,
+ rtlefuse->pwrgroup_ht40[rf_path][i]));
+ }
+ }
+
+ for (i = 0; i < 14; i++) {
+ index = _rtl92c_get_chnl_group((u8) i);
+
+ if (!autoload_fail)
+ tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index];
+ else
+ tempval = EEPROM_DEFAULT_HT20_DIFF;
+
+ rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
+ rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
+ ((tempval >> 4) & 0xF);
+
+ if (rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] & BIT(3))
+ rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] |= 0xF0;
+
+ if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3))
+ rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0;
+
+ index = _rtl92c_get_chnl_group((u8) i);
+
+ if (!autoload_fail)
+ tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index];
+ else
+ tempval = EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
+
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] = (tempval & 0xF);
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
+ ((tempval >> 4) & 0xF);
+ }
+
+ rtlefuse->legacy_ht_txpowerdiff =
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];
+
+ for (i = 0; i < 14; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
+ rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]));
+ for (i = 0; i < 14; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]));
+ for (i = 0; i < 14; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
+ rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]));
+ for (i = 0; i < 14; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]));
+
+ if (!autoload_fail)
+ rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
+ else
+ rtlefuse->eeprom_regulatory = 0;
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
+
+ if (!autoload_fail) {
+ rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
+ rtlefuse->eeprom_tssi[RF90_PATH_B] = hwinfo[EEPROM_TSSI_B];
+ } else {
+ rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
+ rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI;
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("TSSI_A = 0x%x, TSSI_B = 0x%x\n",
+ rtlefuse->eeprom_tssi[RF90_PATH_A],
+ rtlefuse->eeprom_tssi[RF90_PATH_B]));
+
+ if (!autoload_fail)
+ tempval = hwinfo[EEPROM_THERMAL_METER];
+ else
+ tempval = EEPROM_DEFAULT_THERMALMETER;
+ rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
+
+ if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
+ rtlefuse->apk_thermalmeterignore = true;
+
+ rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+}
+
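+/* Load the efuse shadow map, validate the EEPROM ID, and extract the MAC
+ * address, TX power tables, channel plan and OEM customisation IDs. */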
+static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u16 i, usvalue;
+ u8 hwinfo[HWSET_MAX_SIZE];
+ u16 eeprom_id;
+
+ if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ rtl_efuse_shadow_map_update(hw);
+
+ memcpy((void *)hwinfo,
+ (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ HWSET_MAX_SIZE);
+ } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("RTL819X Not boot from eeprom, check it !!"));
+ }
+
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
+ hwinfo, HWSET_MAX_SIZE);
+
+ eeprom_id = *((u16 *)&hwinfo[0]);
+ if (eeprom_id != RTL8190_EEPROM_ID) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+ rtlefuse->autoload_failflag = true;
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ rtlefuse->autoload_failflag = false;
+ }
+
+ if (rtlefuse->autoload_failflag == true)
+ return;
+
+ for (i = 0; i < 6; i += 2) {
+ usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
+ *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+ }
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ (MAC_FMT "\n", MAC_ARG(rtlefuse->dev_addr)));
+
+ _rtl92ce_read_txpower_info_from_hwpg(hw,
+ rtlefuse->autoload_failflag,
+ hwinfo);
+
+ rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+ rtlefuse->txpwr_fromeprom = true;
+ rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+
+ if (rtlhal->oem_id == RT_CID_DEFAULT) {
+ switch (rtlefuse->eeprom_oemid) {
+ case EEPROM_CID_DEFAULT:
+ if (rtlefuse->eeprom_did == 0x8176) {
+ if ((rtlefuse->eeprom_svid == 0x103C &&
+ rtlefuse->eeprom_smid == 0x1629))
+ rtlhal->oem_id = RT_CID_819x_HP;
+ else
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ } else {
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ }
+ break;
+ case EEPROM_CID_TOSHIBA:
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ break;
+ case EEPROM_CID_QMI:
+ rtlhal->oem_id = RT_CID_819x_QMI;
+ break;
+ case EEPROM_CID_WHQL:
+ default:
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ break;
+
+ }
+ }
+
+}
+
+static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ switch (rtlhal->oem_id) {
+ case RT_CID_819x_HP:
+ pcipriv->ledctl.led_opendrain = true;
+ break;
+ case RT_CID_819x_Lenovo:
+ case RT_CID_DEFAULT:
+ case RT_CID_TOSHIBA:
+ case RT_CID_CCX:
+ case RT_CID_819x_Acer:
+ case RT_CID_WHQL:
+ default:
+ break;
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("RT Customized ID: 0x%02X\n", rtlhal->oem_id));
+}
+
+void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_u1b;
+
+ rtlhal->version = _rtl92ce_read_chip_version(hw);
+ if (get_rf_type(rtlphy) == RF_1T1R)
+ rtlpriv->dm.rfpath_rxenable[0] = true;
+ else
+ rtlpriv->dm.rfpath_rxenable[0] =
+ rtlpriv->dm.rfpath_rxenable[1] = true;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
+ rtlhal->version));
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+ if (tmp_u1b & BIT(4)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EEPROM\n"));
+ rtlefuse->epromtype = EEPROM_93C46;
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EFUSE\n"));
+ rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
+ }
+ if (tmp_u1b & BIT(5)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ rtlefuse->autoload_failflag = false;
+ _rtl92ce_read_adapter_info(hw);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Autoload ERR!!\n"));
+ }
+
+ _rtl92ce_hal_customized_behavior(hw);
+}
+
+void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ u32 ratr_value = (u32) mac->basic_rates;
+ u8 *mcsrate = mac->mcs;
+ u8 ratr_index = 0;
+ u8 nmode = mac->ht_enable;
+ u8 mimo_ps = 1;
+ u16 shortgi_rate;
+ u32 tmp_ratr_value;
+ u8 curtxbw_40mhz = mac->bw_40;
+ u8 curshortgi_40mhz = mac->sgi_40;
+ u8 curshortgi_20mhz = mac->sgi_20;
+ enum wireless_mode wirelessmode = mac->mode;
+
+ ratr_value |= ((*(u16 *) (mcsrate))) << 12;
+
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ if (ratr_value & 0x0000000c)
+ ratr_value &= 0x0000000d;
+ else
+ ratr_value &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_value &= 0x00000FF5;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ nmode = 1;
+ if (mimo_ps == 0) {
+ ratr_value &= 0x0007F005;
+ } else {
+ u32 ratr_mask;
+
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_1T1R)
+ ratr_mask = 0x000ff005;
+ else
+ ratr_mask = 0x0f0ff005;
+
+ ratr_value &= ratr_mask;
+ }
+ break;
+ default:
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_value &= 0x000ff0ff;
+ else
+ ratr_value &= 0x0f0ff0ff;
+
+ break;
+ }
+
+ ratr_value &= 0x0FFFFFFF;
+
+ if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || (!curtxbw_40mhz &&
+ curshortgi_20mhz))) {
+
+ ratr_value |= 0x10000000;
+ tmp_ratr_value = (ratr_value >> 12);
+
+ for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+ if ((1 << shortgi_rate) & tmp_ratr_value)
+ break;
+ }
+
+ shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+ (shortgi_rate << 4) | (shortgi_rate);
+ }
+
+ rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ ("%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)));
+}
+
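+/* Build the 5-byte rate-mask block (rate bitmap, RATR index, macid and
+ * short-GI flag) and hand it to the firmware via the H2C_RA_MASK command. */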
+void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u32 ratr_bitmap = (u32) mac->basic_rates;
+ u8 *p_mcsrate = mac->mcs;
+ u8 ratr_index;
+ u8 curtxbw_40mhz = mac->bw_40;
+ u8 curshortgi_40mhz = mac->sgi_40;
+ u8 curshortgi_20mhz = mac->sgi_20;
+ enum wireless_mode wirelessmode = mac->mode;
+ bool shortgi = false;
+ u8 rate_mask[5];
+ u8 macid = 0;
+ u8 mimops = 1;
+
+ ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12);
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ ratr_index = RATR_INX_WIRELESS_B;
+ if (ratr_bitmap & 0x0000000c)
+ ratr_bitmap &= 0x0000000d;
+ else
+ ratr_bitmap &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_index = RATR_INX_WIRELESS_GB;
+
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00000f00;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x00000ff0;
+ else
+ ratr_bitmap &= 0x00000ff5;
+ break;
+ case WIRELESS_MODE_A:
+ ratr_index = RATR_INX_WIRELESS_A;
+ ratr_bitmap &= 0x00000ff0;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+
+ if (mimops == 0) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00070000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0007f000;
+ else
+ ratr_bitmap &= 0x0007f005;
+ } else {
+ if (rtlphy->rf_type == RF_1T2R ||
+ rtlphy->rf_type == RF_1T1R) {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff005;
+ }
+ } else {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff005;
+ }
+ }
+ }
+
+ if ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz)) {
+
+ if (macid == 0)
+ shortgi = true;
+ else if (macid == 1)
+ shortgi = false;
+ }
+ break;
+ default:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_bitmap &= 0x000ff0ff;
+ else
+ ratr_bitmap &= 0x0f0ff0ff;
+ break;
+ }
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ ("ratr_bitmap :%x\n", ratr_bitmap));
+ *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
+ (ratr_index << 28);
+ rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
+ "ratr_val:%x, %x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3],
+ rate_mask[4]));
+ rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
+}
+
+void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 sifs_timer;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+ (u8 *)&mac->slot_time);
+ if (!mac->ht_enable)
+ sifs_timer = 0x0a0a;
+ else
+ sifs_timer = 0x1010;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
+}
+
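+/* Sample the hardware RF-kill GPIO and update ppsc->hwradiooff accordingly;
+ * returns true while the radio is on, and sets *valid once the GPIO state
+ * has actually been sampled. */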
+bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ u8 u1tmp;
+ bool actuallyset = false;
+ unsigned long flag;
+
+ if ((rtlpci->up_first_time == 1) || (rtlpci->being_init_adapter))
+ return false;
+
+ if (ppsc->swrf_processing)
+ return false;
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ if (ppsc->rfchange_inprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ return false;
+ } else {
+ ppsc->rfchange_inprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ }
+
+ cur_rfstate = ppsc->rfpwr_state;
+
+ if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) {
+ rtlpriv->intf_ops->disable_aspm(hw);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+ }
+
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, rtl_read_byte(rtlpriv,
+ REG_MAC_PINMUX_CFG)&~(BIT(3)));
+
+ u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
+ e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
+
+ if ((ppsc->hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("GPIOChangeRF - HW Radio ON, RF ON\n"));
+
+ e_rfpowerstate_toset = ERFON;
+ ppsc->hwradiooff = false;
+ actuallyset = true;
+ } else if ((ppsc->hwradiooff == false)
+ && (e_rfpowerstate_toset == ERFOFF)) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("GPIOChangeRF - HW Radio OFF, RF OFF\n"));
+
+ e_rfpowerstate_toset = ERFOFF;
+ ppsc->hwradiooff = true;
+ actuallyset = true;
+ }
+
+ if (actuallyset) {
+ if (e_rfpowerstate_toset == ERFON) {
+ if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) {
+ rtlpriv->intf_ops->disable_aspm(hw);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+ }
+ }
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+
+ if (e_rfpowerstate_toset == ERFOFF) {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) {
+ rtlpriv->intf_ops->enable_aspm(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+ }
+ }
+
+ } else if (e_rfpowerstate_toset == ERFOFF || cur_rfstate == ERFOFF) {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) {
+ rtlpriv->intf_ops->enable_aspm(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+ }
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ } else {
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ }
+
+ *valid = 1;
+ return !ppsc->hwradiooff;
+
+}
+
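+/* Program (or clear) hardware CAM entries for WEP/TKIP/AES keys; default and
+ * group keys use the fixed addresses in the cam_const_* tables, pairwise keys
+ * use CAM_PAIRWISE_KEY_POSITION. */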
+void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 *macaddr = p_macaddr;
+ u32 entry_id = 0;
+ bool is_pairwise = false;
+
+ static u8 cam_const_addr[4][6] = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+ };
+ static u8 cam_const_broad[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ if (clear_all) {
+ u8 idx = 0;
+ u8 cam_offset = 0;
+ u8 clear_number = 5;
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
+
+ for (idx = 0; idx < clear_number; idx++) {
+ rtl_cam_mark_invalid(hw, cam_offset + idx);
+ rtl_cam_empty_entry(hw, cam_offset + idx);
+
+ if (idx < 5) {
+ memset(rtlpriv->sec.key_buf[idx], 0,
+ MAX_KEY_LEN);
+ rtlpriv->sec.key_len[idx] = 0;
+ }
+ }
+
+ } else {
+ switch (enc_algo) {
+ case WEP40_ENCRYPTION:
+ enc_algo = CAM_WEP40;
+ break;
+ case WEP104_ENCRYPTION:
+ enc_algo = CAM_WEP104;
+ break;
+ case TKIP_ENCRYPTION:
+ enc_algo = CAM_TKIP;
+ break;
+ case AESCCMP_ENCRYPTION:
+ enc_algo = CAM_AES;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
+ "not process\n"));
+ enc_algo = CAM_TKIP;
+ break;
+ }
+
+ if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+ macaddr = cam_const_addr[key_index];
+ entry_id = key_index;
+ } else {
+ if (is_group) {
+ macaddr = cam_const_broad;
+ entry_id = key_index;
+ } else {
+ key_index = PAIRWISE_KEYIDX;
+ entry_id = CAM_PAIRWISE_KEY_POSITION;
+ is_pairwise = true;
+ }
+ }
+
+ if (rtlpriv->sec.key_len[key_index] == 0) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("delete one entry\n"));
+ rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("The insert KEY length is %d\n",
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]));
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("add one entry\n"));
+ if (is_pairwise) {
+ RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
+ "Pairwiase Key content :",
+ rtlpriv->sec.pairwise_key,
+ rtlpriv->sec.
+ key_len[PAIRWISE_KEYIDX]);
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("set Pairwiase key\n"));
+
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.
+ key_buf[key_index]);
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("set group key\n"));
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_cam_add_one_entry(hw,
+ rtlefuse->dev_addr,
+ PAIRWISE_KEYIDX,
+ CAM_PAIRWISE_KEY_POSITION,
+ enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf
+ [entry_id]);
+ }
+
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[entry_id]);
+ }
+
+ }
+ }
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
new file mode 100644
index 000000000000..a3dfdb635168
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
@@ -0,0 +1,68 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CE_HW_H__
+#define __RTL92CE_HW_H__
+
+#define H2C_RA_MASK 6
+
+void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
+void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb);
+int rtl92ce_hw_init(struct ieee80211_hw *hw);
+void rtl92ce_card_disable(struct ieee80211_hw *hw);
+void rtl92ce_enable_interrupt(struct ieee80211_hw *hw);
+void rtl92ce_disable_interrupt(struct ieee80211_hw *hw);
+int rtl92ce_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci);
+void rtl92ce_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl92ce_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw);
+void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);
+void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw);
+void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+int rtl92c_download_fw(struct ieee80211_hw *hw);
+void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
+bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
new file mode 100644
index 000000000000..7b1da8d7508f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
@@ -0,0 +1,144 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "reg.h"
+#include "led.h"
+
+void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ u8 ledcfg;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+ ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+
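+	/* Read-modify-write REG_LEDCFG2 so only the selected LED pin's
+	 * bits change. */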
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ rtl_write_byte(rtlpriv,
+ REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6));
+ break;
+ case LED_PIN_LED1:
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ pled->ledon = true;
+}
+
+void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u8 ledcfg;
+
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+ ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ ledcfg &= 0xf0;
+ if (pcipriv->ledctl.led_opendrain)
+ rtl_write_byte(rtlpriv, REG_LEDCFG2,
+ (ledcfg | BIT(1) | BIT(5) | BIT(6)));
+ else
+ rtl_write_byte(rtlpriv, REG_LEDCFG2,
+ (ledcfg | BIT(3) | BIT(5) | BIT(6)));
+ break;
+ case LED_PIN_LED1:
+ ledcfg &= 0x0f;
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ pled->ledon = false;
+}
+
+void rtl92ce_init_sw_leds(struct ieee80211_hw *hw)
+{
+}
+
+void rtl92ce_deinit_sw_leds(struct ieee80211_hw *hw)
+{
+}
+
+void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ switch (ledaction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_LINK:
+ case LED_CTL_NO_LINK:
+ rtl92ce_sw_led_on(hw, pLed0);
+ break;
+ case LED_CTL_POWER_OFF:
+ rtl92ce_sw_led_off(hw, pLed0);
+ break;
+ default:
+ break;
+ }
+}
+
+void rtl92ce_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+ (ledaction == LED_CTL_TX ||
+ ledaction == LED_CTL_RX ||
+ ledaction == LED_CTL_SITE_SURVEY ||
+ ledaction == LED_CTL_LINK ||
+ ledaction == LED_CTL_NO_LINK ||
+ ledaction == LED_CTL_START_TO_LINK ||
+ ledaction == LED_CTL_POWER_ON)) {
+ return;
+ }
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d,\n",
+ ledaction));
+ _rtl92ce_sw_led_control(hw, ledaction);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.h b/drivers/net/wireless/rtlwifi/rtl8192ce/led.h
new file mode 100644
index 000000000000..10da3018f4b7
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/led.h
@@ -0,0 +1,41 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CE_LED_H__
+#define __RTL92CE_LED_H__
+
+void rtl92ce_init_sw_leds(struct ieee80211_hw *hw);
+void rtl92ce_deinit_sw_leds(struct ieee80211_hw *hw);
+void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl92ce_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
+void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
new file mode 100644
index 000000000000..d0541e8c6012
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -0,0 +1,627 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../ps.h"
+#include "reg.h"
+#include "def.h"
+#include "hw.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "table.h"
+
+u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, readback_value, bitshift;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ unsigned long flags;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask));
+
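+	/* RF access is serialized; reads go either through the hardware
+	 * serial interface or the firmware proxy, depending on rf_mode. */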
+ spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+ if (rtlphy->rf_mode != RF_OP_BY_FW) {
+ original_value = _rtl92c_phy_rf_serial_read(hw,
+ rfpath, regaddr);
+ } else {
+ original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ rfpath, regaddr);
+ }
+
+ bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ ("regaddr(%#x), rfpath(%#x), "
+ "bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value));
+
+ return readback_value;
+}
+
+void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 original_value, bitshift;
+ unsigned long flags;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath));
+
+ spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+ if (rtlphy->rf_mode != RF_OP_BY_FW) {
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92c_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+ bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+
+ _rtl92c_phy_rf_serial_write(hw, rfpath, regaddr, data);
+ } else {
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+ bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+ _rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
+ }
+
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "bitmask(%#x), data(%#x), "
+ "rfpath(%#x)\n", regaddr,
+ bitmask, data, rfpath));
+}
+
+bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool is92c = IS_92C_SERIAL(rtlhal->version);
+ bool rtstatus = _rtl92ce_phy_config_mac_with_headerfile(hw);
+
+ if (is92c)
+ rtl_write_byte(rtlpriv, 0x14, 0x71);
+ return rtstatus;
+}
+
+bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw)
+{
+ bool rtstatus = true;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 regval;
+ u32 regvaldw;
+ u8 reg_hwparafile = 1;
+
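+	/* Enable the BB/RF functional blocks and PLLs before loading the
+	 * baseband parameter file. */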
+ _rtl92c_phy_init_bb_rf_register_definition(hw);
+ regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
+ regval | BIT(13) | BIT(0) | BIT(1));
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN,
+ FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE |
+ FEN_BB_GLB_RSTn | FEN_BBRSTB);
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
+ regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
+ rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
+ if (reg_hwparafile == 1)
+ rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
+ return rtstatus;
+}
+
+bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+ u32 arraylength;
+ u32 *ptrarray;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Read Rtl819XMACPHY_Array\n"));
+ arraylength = MAC_2T_ARRAYLENGTH;
+ ptrarray = RTL8192CEMAC_2T_ARRAY;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Img:RTL8192CEMAC_2T_ARRAY\n"));
+ for (i = 0; i < arraylength; i = i + 2)
+ rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
+ return true;
+}
+
+bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ int i;
+ u32 *phy_regarray_table;
+ u32 *agctab_array_table;
+ u16 phy_reg_arraylen, agctab_arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (IS_92C_SERIAL(rtlhal->version)) {
+ agctab_arraylen = AGCTAB_2TARRAYLENGTH;
+ agctab_array_table = RTL8192CEAGCTAB_2TARRAY;
+ phy_reg_arraylen = PHY_REG_2TARRAY_LENGTH;
+ phy_regarray_table = RTL8192CEPHY_REG_2TARRAY;
+ } else {
+ agctab_arraylen = AGCTAB_1TARRAYLENGTH;
+ agctab_array_table = RTL8192CEAGCTAB_1TARRAY;
+ phy_reg_arraylen = PHY_REG_1TARRAY_LENGTH;
+ phy_regarray_table = RTL8192CEPHY_REG_1TARRAY;
+ }
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
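+		/* Addresses 0xfe-0xf9 in the table encode extra delays (ms/us)
+		 * before the next write. */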
+ for (i = 0; i < phy_reg_arraylen; i = i + 2) {
+ if (phy_regarray_table[i] == 0xfe)
+ mdelay(50);
+ else if (phy_regarray_table[i] == 0xfd)
+ mdelay(5);
+ else if (phy_regarray_table[i] == 0xfc)
+ mdelay(1);
+ else if (phy_regarray_table[i] == 0xfb)
+ udelay(50);
+ else if (phy_regarray_table[i] == 0xfa)
+ udelay(5);
+ else if (phy_regarray_table[i] == 0xf9)
+ udelay(1);
+ rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
+ phy_regarray_table[i + 1]);
+ udelay(1);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("The phy_regarray_table[0] is %x"
+ " Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]));
+ }
+ } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+ for (i = 0; i < agctab_arraylen; i = i + 2) {
+ rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
+ agctab_array_table[i + 1]);
+ udelay(1);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("The agctab_array_table[0] is "
+ "%x Rtl819XPHY_REGArray[1] is %x\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]));
+ }
+ }
+ return true;
+}
+
+bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i;
+ u32 *phy_regarray_table_pg;
+ u16 phy_regarray_pg_len;
+
+ phy_regarray_pg_len = PHY_REG_ARRAY_PGLENGTH;
+ phy_regarray_table_pg = RTL8192CEPHY_REG_ARRAY_PG;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
+ if (phy_regarray_table_pg[i] == 0xfe)
+ mdelay(50);
+ else if (phy_regarray_table_pg[i] == 0xfd)
+ mdelay(5);
+ else if (phy_regarray_table_pg[i] == 0xfc)
+ mdelay(1);
+ else if (phy_regarray_table_pg[i] == 0xfb)
+ udelay(50);
+ else if (phy_regarray_table_pg[i] == 0xfa)
+ udelay(5);
+ else if (phy_regarray_table_pg[i] == 0xf9)
+ udelay(1);
+
+ _rtl92c_store_pwrIndex_diffrate_offset(hw,
+ phy_regarray_table_pg[i],
+ phy_regarray_table_pg[i + 1],
+ phy_regarray_table_pg[i + 2]);
+ }
+ } else {
+
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ ("configtype != BaseBand_Config_PHY_REG\n"));
+ }
+ return true;
+}
+
+bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath)
+{
+
+ int i;
+ bool rtstatus = true;
+ u32 *radioa_array_table;
+ u32 *radiob_array_table;
+ u16 radioa_arraylen, radiob_arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (IS_92C_SERIAL(rtlhal->version)) {
+ radioa_arraylen = RADIOA_2TARRAYLENGTH;
+ radioa_array_table = RTL8192CERADIOA_2TARRAY;
+ radiob_arraylen = RADIOB_2TARRAYLENGTH;
+ radiob_array_table = RTL8192CE_RADIOB_2TARRAY;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio_A:RTL8192CERADIOA_2TARRAY\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio_B:RTL8192CE_RADIOB_2TARRAY\n"));
+ } else {
+ radioa_arraylen = RADIOA_1TARRAYLENGTH;
+ radioa_array_table = RTL8192CE_RADIOA_1TARRAY;
+ radiob_arraylen = RADIOB_1TARRAYLENGTH;
+ radiob_array_table = RTL8192CE_RADIOB_1TARRAY;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio_A:RTL8192CE_RADIOA_1TARRAY\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio_B:RTL8192CE_RADIOB_1TARRAY\n"));
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
+ rtstatus = true;
+ switch (rfpath) {
+ case RF90_PATH_A:
+ for (i = 0; i < radioa_arraylen; i = i + 2) {
+ if (radioa_array_table[i] == 0xfe)
+ mdelay(50);
+ else if (radioa_array_table[i] == 0xfd)
+ mdelay(5);
+ else if (radioa_array_table[i] == 0xfc)
+ mdelay(1);
+ else if (radioa_array_table[i] == 0xfb)
+ udelay(50);
+ else if (radioa_array_table[i] == 0xfa)
+ udelay(5);
+ else if (radioa_array_table[i] == 0xf9)
+ udelay(1);
+ else {
+ rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
+ udelay(1);
+ }
+ }
+ break;
+ case RF90_PATH_B:
+ for (i = 0; i < radiob_arraylen; i = i + 2) {
+ if (radiob_array_table[i] == 0xfe) {
+ mdelay(50);
+ } else if (radiob_array_table[i] == 0xfd)
+ mdelay(5);
+ else if (radiob_array_table[i] == 0xfc)
+ mdelay(1);
+ else if (radiob_array_table[i] == 0xfb)
+ udelay(50);
+ else if (radiob_array_table[i] == 0xfa)
+ udelay(5);
+ else if (radiob_array_table[i] == 0xf9)
+ udelay(1);
+ else {
+ rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
+ RFREG_OFFSET_MASK,
+ radiob_array_table[i + 1]);
+ udelay(1);
+ }
+ }
+ break;
+ case RF90_PATH_C:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ case RF90_PATH_D:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ return true;
+}
+
+void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 reg_bw_opmode;
+ u8 reg_prsr_rsc;
+
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+ ("Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz"))
+
+ if (is_hal_stop(rtlhal))
+ return;
+
+ reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
+ reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
+
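+	/* Program the MAC bandwidth mode first, then the matching
+	 * baseband/RF settings below. */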
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ reg_bw_opmode |= BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+ break;
+
+ case HT_CHANNEL_WIDTH_20_40:
+ reg_bw_opmode &= ~BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+
+ reg_prsr_rsc =
+ (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5);
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
+ break;
+
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ break;
+ }
+
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
+ rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
+ (mac->cur_40_prime_sc >> 1));
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);
+ rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
+ (mac->cur_40_prime_sc ==
+ HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ break;
+ }
+ rtl92c_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+ rtlphy->set_bwmode_inprogress = false;
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+}
+
+void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+ u8 tmpreg;
+ u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
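+	/* Pause TX (via 0xd03 or REG_TXPAUSE), kick the LC calibration
+	 * through RF register 0x18, wait 100 ms, then restore the old state. */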
+ tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+
+ if ((tmpreg & 0x70) != 0)
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+ else
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+ if ((tmpreg & 0x70) != 0) {
+ rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
+
+ if (is2t)
+ rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
+ MASK12BITS);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
+ (rf_a_mode & 0x8FFFF) | 0x10000);
+
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+ (rf_b_mode & 0x8FFFF) | 0x10000);
+ }
+ lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
+
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000);
+
+ mdelay(100);
+
+ if ((tmpreg & 0x70) != 0) {
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
+
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+ rf_b_mode);
+ } else {
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ }
+}
+
+static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool bresult = true;
+ u8 i, queue_id;
+ struct rtl8192_tx_ring *ring = NULL;
+
+ ppsc->set_rfpowerstate_inprogress = true;
+ switch (rfpwr_state) {
+ case ERFON:{
+ if ((ppsc->rfpwr_state == ERFOFF) &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+ bool rtstatus;
+ u32 InitializeCount = 0;
+ do {
+ InitializeCount++;
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("IPS Set eRf nic enable\n"));
+ rtstatus = rtl_ps_enable_nic(hw);
+ } while (!rtstatus && (InitializeCount < 10));
+ RT_CLEAR_PS_LEVEL(ppsc,
+ RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("Set ERFON sleeped:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->
+ last_sleep_jiffies)));
+ ppsc->last_awake_jiffies = jiffies;
+ rtl92ce_phy_set_rf_on(hw);
+ }
+ if (mac->link_state == MAC80211_LINKED) {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_LINK);
+ } else {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ }
+ break;
+ }
+ case ERFOFF:{
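+			/* Let the TX queues (except the beacon queue) drain
+			 * before the RF is switched off. */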
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0 ||
+ queue_id == BEACON_QUEUE) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("eRf Off/Sleep: %d times "
+ "TcbBusyQueue[%d] "
+ "=%d before doze!\n", (i + 1),
+ queue_id,
+ skb_queue_len(&ring->queue)));
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("\nERFOFF: %d times "
+ "TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue)));
+ break;
+ }
+ }
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("IPS Set eRf nic disable\n"));
+ rtl_ps_disable_nic(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ } else {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_POWER_OFF);
+ }
+ }
+ break;
+ }
+ case ERFSLEEP:{
+ if (ppsc->rfpwr_state == ERFOFF)
+ break;
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("eRf Off/Sleep: %d times "
+ "TcbBusyQueue[%d] =%d before "
+ "doze!\n", (i + 1), queue_id,
+ skb_queue_len(&ring->queue)));
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("\n ERFSLEEP: %d times "
+ "TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue)));
+ break;
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies)));
+ ppsc->last_sleep_jiffies = jiffies;
+ _rtl92c_phy_set_rf_sleep(hw);
+ break;
+ }
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ bresult = false;
+ break;
+ }
+ if (bresult)
+ ppsc->rfpwr_state = rfpwr_state;
+ ppsc->set_rfpowerstate_inprogress = false;
+ return bresult;
+}
+
+bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool bresult = false;
+
+ if (rfpwr_state == ppsc->rfpwr_state)
+ return bresult;
+ bresult = _rtl92ce_phy_set_rf_power_state(hw, rfpwr_state);
+ return bresult;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
new file mode 100644
index 000000000000..a37267e3fc22
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -0,0 +1,254 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_PHY_H__
+#define __RTL92C_PHY_H__
+
+#define MAX_PRECMD_CNT 16
+#define MAX_RFDEPENDCMD_CNT 16
+#define MAX_POSTCMD_CNT 16
+
+#define MAX_DOZE_WAITING_TIMES_9x 64
+
+#define RT_CANNOT_IO(hw) false
+#define HIGHPOWER_RADIOA_ARRAYLEN 22
+
+#define MAX_TOLERANCE 5
+#define IQK_DELAY_TIME 1
+
+#define APK_BB_REG_NUM 5
+#define APK_AFE_REG_NUM 16
+#define APK_CURVE_REG_NUM 4
+#define PATH_NUM 2
+
+#define LOOP_LIMIT 5
+#define MAX_STALL_TIME 50
+#define AntennaDiversityValue 0x80
+#define MAX_TXPWR_IDX_NMODE_92S 63
+#define Reset_Cnt_Limit 3
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_MAC_REG_NUM 4
+
+#define RF90_PATH_MAX 2
+
+#define CT_OFFSET_MAC_ADDR 0x16
+
+#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
+#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
+#define CT_OFFSET_HT402S_TX_PWR_IDX_DIF 0x66
+#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
+#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
+
+#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
+#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
+
+#define CT_OFFSET_CHANNEL_PLAH 0x75
+#define CT_OFFSET_THERMAL_METER 0x78
+#define CT_OFFSET_RF_OPTION 0x79
+#define CT_OFFSET_VERSION 0x7E
+#define CT_OFFSET_CUSTOMER_ID 0x7F
+
+#define RTL92C_MAX_PATH_NUM 2
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
+enum swchnlcmd_id {
+ CMDID_END,
+ CMDID_SET_TXPOWEROWER_LEVEL,
+ CMDID_BBREGWRITE10,
+ CMDID_WRITEPORT_ULONG,
+ CMDID_WRITEPORT_USHORT,
+ CMDID_WRITEPORT_UCHAR,
+ CMDID_RF_WRITEREG,
+};
+
+struct swchnlcmd {
+ enum swchnlcmd_id cmdid;
+ u32 para1;
+ u32 para2;
+ u32 msdelay;
+};
+
+enum hw90_block_e {
+ HW90_BLOCK_MAC = 0,
+ HW90_BLOCK_PHY0 = 1,
+ HW90_BLOCK_PHY1 = 2,
+ HW90_BLOCK_RF = 3,
+ HW90_BLOCK_MAXIMUM = 4,
+};
+
+enum baseband_config_type {
+ BASEBAND_CONFIG_PHY_REG = 0,
+ BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum ra_offset_area {
+ RA_OFFSET_LEGACY_OFDM1,
+ RA_OFFSET_LEGACY_OFDM2,
+ RA_OFFSET_HT_OFDM1,
+ RA_OFFSET_HT_OFDM2,
+ RA_OFFSET_HT_OFDM3,
+ RA_OFFSET_HT_OFDM4,
+ RA_OFFSET_HT_CCK,
+};
+
+enum antenna_path {
+ ANTENNA_NONE,
+ ANTENNA_D,
+ ANTENNA_C,
+ ANTENNA_CD,
+ ANTENNA_B,
+ ANTENNA_BD,
+ ANTENNA_BC,
+ ANTENNA_BCD,
+ ANTENNA_A,
+ ANTENNA_AD,
+ ANTENNA_AC,
+ ANTENNA_ACD,
+ ANTENNA_AB,
+ ANTENNA_ABD,
+ ANTENNA_ABC,
+ ANTENNA_ABCD
+};
+
+struct r_antenna_select_ofdm {
+ u32 r_tx_antenna:4;
+ u32 r_ant_l:4;
+ u32 r_ant_non_ht:4;
+ u32 r_ant_ht1:4;
+ u32 r_ant_ht2:4;
+ u32 r_ant_ht_s1:4;
+ u32 r_ant_non_ht_s1:4;
+ u32 ofdm_txsc:2;
+ u32 reserved:2;
+};
+
+struct r_antenna_select_cck {
+ u8 r_cckrx_enable_2:2;
+ u8 r_cckrx_enable:2;
+ u8 r_ccktx_enable:4;
+};
+
+struct efuse_contents {
+ u8 mac_addr[ETH_ALEN];
+ u8 cck_tx_power_idx[6];
+ u8 ht40_1s_tx_power_idx[6];
+ u8 ht40_2s_tx_power_idx_diff[3];
+ u8 ht20_tx_power_idx_diff[3];
+ u8 ofdm_tx_power_idx_diff[3];
+ u8 ht40_max_power_offset[3];
+ u8 ht20_max_power_offset[3];
+ u8 channel_plan;
+ u8 thermal_meter;
+ u8 rf_option[5];
+ u8 version;
+ u8 oem_id;
+ u8 regulatory;
+};
+
+struct tx_power_struct {
+ u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 legacy_ht_txpowerdiff;
+ u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+ u8 pwrgroup_cnt;
+ u32 mcs_original_offset[4][16];
+};
+
+extern u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+extern void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+extern void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
+ long *powerlevel);
+extern void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+extern bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
+ long power_indbm);
+extern void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
+ u8 operation);
+extern void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+extern void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+extern void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+extern u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
+extern void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+extern void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
+ u16 beaconinterval);
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
+bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
+ u32 rfpath);
+bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
+void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+void rtl92c_bb_block_on(struct ieee80211_hw *hw);
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
+void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data);
+void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask,
+ u32 data);
+void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset,
+ u32 data);
+bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
+bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
+void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
new file mode 100644
index 000000000000..b0868a613841
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -0,0 +1,2130 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_REG_H__
+#define __RTL92C_REG_H__
+
+#define REG_SYS_ISO_CTRL 0x0000
+#define REG_SYS_FUNC_EN 0x0002
+#define REG_APS_FSMCO 0x0004
+#define REG_SYS_CLKR 0x0008
+#define REG_9346CR 0x000A
+#define REG_EE_VPD 0x000C
+#define REG_AFE_MISC 0x0010
+#define REG_SPS0_CTRL 0x0011
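+	/* Ignore routine LED activity while the RF is off for a reason
+	 * stronger than power save. */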
+#define REG_SPS_OCP_CFG 0x0018
+#define REG_RSV_CTRL 0x001C
+#define REG_RF_CTRL 0x001F
+#define REG_LDOA15_CTRL 0x0020
+#define REG_LDOV12D_CTRL 0x0021
+#define REG_LDOHCI12_CTRL 0x0022
+#define REG_LPLDO_CTRL 0x0023
+#define REG_AFE_XTAL_CTRL 0x0024
+#define REG_AFE_PLL_CTRL 0x0028
+#define REG_EFUSE_CTRL 0x0030
+#define REG_EFUSE_TEST 0x0034
+#define REG_PWR_DATA 0x0038
+#define REG_CAL_TIMER 0x003C
+#define REG_ACLK_MON 0x003E
+#define REG_GPIO_MUXCFG 0x0040
+#define REG_GPIO_IO_SEL 0x0042
+#define REG_MAC_PINMUX_CFG 0x0043
+#define REG_GPIO_PIN_CTRL 0x0044
+#define REG_GPIO_INTM 0x0048
+#define REG_LEDCFG0 0x004C
+#define REG_LEDCFG1 0x004D
+#define REG_LEDCFG2 0x004E
+#define REG_LEDCFG3 0x004F
+#define REG_FSIMR 0x0050
+#define REG_FSISR 0x0054
+#define REG_HSIMR 0x0058
+#define REG_HSISR 0x005c
+
+/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */
+#define REG_GPIO_PIN_CTRL_2 0x0060
+/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
+#define REG_GPIO_IO_SEL_2 0x0062
+/* RTL8723 WIFI/BT/GPS Multi-Function control source. */
+#define REG_MULTI_FUNC_CTRL 0x0068
+#define REG_MCUFWDL 0x0080
+
+#define REG_HMEBOX_EXT_0 0x0088
+#define REG_HMEBOX_EXT_1 0x008A
+#define REG_HMEBOX_EXT_2 0x008C
+#define REG_HMEBOX_EXT_3 0x008E
+
+#define REG_BIST_SCAN 0x00D0
+#define REG_BIST_RPT 0x00D4
+#define REG_BIST_ROM_RPT 0x00D8
+#define REG_USB_SIE_INTF 0x00E0
+#define REG_PCIE_MIO_INTF 0x00E4
+#define REG_PCIE_MIO_INTD 0x00E8
+#define REG_HPON_FSM 0x00EC
+#define REG_SYS_CFG 0x00F0
+#define REG_GPIO_OUTSTS 0x00F4 /* For RTL8723 only.*/
+
+#define REG_CR 0x0100
+#define REG_PBP 0x0104
+#define REG_TRXDMA_CTRL 0x010C
+#define REG_TRXFF_BNDY 0x0114
+#define REG_TRXFF_STATUS 0x0118
+#define REG_RXFF_PTR 0x011C
+#define REG_HIMR 0x0120
+#define REG_HISR 0x0124
+#define REG_HIMRE 0x0128
+#define REG_HISRE 0x012C
+#define REG_CPWM 0x012F
+#define REG_FWIMR 0x0130
+#define REG_FWISR 0x0134
+#define REG_PKTBUF_DBG_CTRL 0x0140
+#define REG_PKTBUF_DBG_DATA_L 0x0144
+#define REG_PKTBUF_DBG_DATA_H 0x0148
+
+#define REG_TC0_CTRL 0x0150
+#define REG_TC1_CTRL 0x0154
+#define REG_TC2_CTRL 0x0158
+#define REG_TC3_CTRL 0x015C
+#define REG_TC4_CTRL 0x0160
+#define REG_TCUNIT_BASE 0x0164
+#define REG_MBIST_START 0x0174
+#define REG_MBIST_DONE 0x0178
+#define REG_MBIST_FAIL 0x017C
+#define REG_C2HEVT_MSG_NORMAL 0x01A0
+#define REG_C2HEVT_MSG_TEST 0x01B8
+#define REG_C2HEVT_CLEAR 0x01BF
+#define REG_MCUTST_1 0x01c0
+#define REG_FMETHR 0x01C8
+#define REG_HMETFR 0x01CC
+#define REG_HMEBOX_0 0x01D0
+#define REG_HMEBOX_1 0x01D4
+#define REG_HMEBOX_2 0x01D8
+#define REG_HMEBOX_3 0x01DC
+
+#define REG_LLT_INIT 0x01E0
+#define REG_BB_ACCEESS_CTRL 0x01E8
+#define REG_BB_ACCESS_DATA 0x01EC
+
+#define REG_RQPN 0x0200
+#define REG_FIFOPAGE 0x0204
+#define REG_TDECTRL 0x0208
+#define REG_TXDMA_OFFSET_CHK 0x020C
+#define REG_TXDMA_STATUS 0x0210
+#define REG_RQPN_NPQ 0x0214
+
+#define REG_RXDMA_AGG_PG_TH 0x0280
+#define REG_RXPKT_NUM 0x0284
+#define REG_RXDMA_STATUS 0x0288
+
+#define REG_PCIE_CTRL_REG 0x0300
+#define REG_INT_MIG 0x0304
+#define REG_BCNQ_DESA 0x0308
+#define REG_HQ_DESA 0x0310
+#define REG_MGQ_DESA 0x0318
+#define REG_VOQ_DESA 0x0320
+#define REG_VIQ_DESA 0x0328
+#define REG_BEQ_DESA 0x0330
+#define REG_BKQ_DESA 0x0338
+#define REG_RX_DESA 0x0340
+#define REG_DBI 0x0348
+#define REG_MDIO 0x0354
+#define REG_DBG_SEL 0x0360
+#define REG_PCIE_HRPWM 0x0361
+#define REG_PCIE_HCPWM 0x0363
+#define REG_UART_CTRL 0x0364
+#define REG_UART_TX_DESA 0x0370
+#define REG_UART_RX_DESA 0x0378
+
+#define REG_HDAQ_DESA_NODEF 0x0000
+#define REG_CMDQ_DESA_NODEF 0x0000
+
+#define REG_VOQ_INFORMATION 0x0400
+#define REG_VIQ_INFORMATION 0x0404
+#define REG_BEQ_INFORMATION 0x0408
+#define REG_BKQ_INFORMATION 0x040C
+#define REG_MGQ_INFORMATION 0x0410
+#define REG_HGQ_INFORMATION 0x0414
+#define REG_BCNQ_INFORMATION 0x0418
+
+#define REG_CPU_MGQ_INFORMATION 0x041C
+#define REG_FWHW_TXQ_CTRL 0x0420
+#define REG_HWSEQ_CTRL 0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
+#define REG_TXPKTBUF_MGQ_BDNY 0x0425
+#define REG_MULTI_BCNQ_EN 0x0426
+#define REG_MULTI_BCNQ_OFFSET 0x0427
+#define REG_SPEC_SIFS 0x0428
+#define REG_RL 0x042A
+#define REG_DARFRC 0x0430
+#define REG_RARFRC 0x0438
+#define REG_RRSR 0x0440
+#define REG_ARFR0 0x0444
+#define REG_ARFR1 0x0448
+#define REG_ARFR2 0x044C
+#define REG_ARFR3 0x0450
+#define REG_AGGLEN_LMT 0x0458
+#define REG_AMPDU_MIN_SPACE 0x045C
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
+#define REG_FAST_EDCA_CTRL 0x0460
+#define REG_RD_RESP_PKT_TH 0x0463
+#define REG_INIRTS_RATE_SEL 0x0480
+#define REG_INIDATA_RATE_SEL 0x0484
+#define REG_POWER_STATUS 0x04A4
+#define REG_POWER_STAGE1 0x04B4
+#define REG_POWER_STAGE2 0x04B8
+#define REG_PKT_LIFE_TIME 0x04C0
+#define REG_STBC_SETTING 0x04C4
+#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_BAR_MODE_CTRL 0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
+#define REG_NQOS_SEQ 0x04DC
+#define REG_QOS_SEQ 0x04DE
+#define REG_NEED_CPU_HANDLE 0x04E0
+#define REG_PKT_LOSE_RPT 0x04E1
+#define REG_PTCL_ERR_STATUS 0x04E2
+#define REG_DUMMY 0x04FC
+
+#define REG_EDCA_VO_PARAM 0x0500
+#define REG_EDCA_VI_PARAM 0x0504
+#define REG_EDCA_BE_PARAM 0x0508
+#define REG_EDCA_BK_PARAM 0x050C
+#define REG_BCNTCFG 0x0510
+#define REG_PIFS 0x0512
+#define REG_RDG_PIFS 0x0513
+#define REG_SIFS_CTX 0x0514
+#define REG_SIFS_TRX 0x0516
+#define REG_SIFS_CCK 0x0514
+#define REG_SIFS_OFDM 0x0516
+#define REG_AGGR_BREAK_TIME 0x051A
+#define REG_SLOT 0x051B
+#define REG_TX_PTCL_CTRL 0x0520
+#define REG_TXPAUSE 0x0522
+#define REG_DIS_TXREQ_CLR 0x0523
+#define REG_RD_CTRL 0x0524
+#define REG_TBTT_PROHIBIT 0x0540
+#define REG_RD_NAV_NXT 0x0544
+#define REG_NAV_PROT_LEN 0x0546
+#define REG_BCN_CTRL 0x0550
+#define REG_USTIME_TSF 0x0551
+#define REG_MBID_NUM 0x0552
+#define REG_DUAL_TSF_RST 0x0553
+#define REG_BCN_INTERVAL 0x0554
+#define REG_MBSSID_BCN_SPACE 0x0554
+#define REG_DRVERLYINT 0x0558
+#define REG_BCNDMATIM 0x0559
+#define REG_ATIMWND 0x055A
+#define REG_BCN_MAX_ERR 0x055D
+#define REG_RXTSF_OFFSET_CCK 0x055E
+#define REG_RXTSF_OFFSET_OFDM 0x055F
+#define REG_TSFTR 0x0560
+#define REG_INIT_TSFTR 0x0564
+#define REG_PSTIMER 0x0580
+#define REG_TIMER0 0x0584
+#define REG_TIMER1 0x0588
+#define REG_ACMHWCTRL 0x05C0
+#define REG_ACMRSTCTRL 0x05C1
+#define REG_ACMAVG 0x05C2
+#define REG_VO_ADMTIME 0x05C4
+#define REG_VI_ADMTIME 0x05C6
+#define REG_BE_ADMTIME 0x05C8
+#define REG_EDCA_RANDOM_GEN 0x05CC
+#define REG_SCH_TXCMD 0x05D0
+
+#define REG_APSD_CTRL 0x0600
+#define REG_BWOPMODE 0x0603
+#define REG_TCR 0x0604
+#define REG_RCR 0x0608
+#define REG_RX_PKT_LIMIT 0x060C
+#define REG_RX_DLK_TIME 0x060D
+#define REG_RX_DRVINFO_SZ 0x060F
+
+#define REG_MACID 0x0610
+#define REG_BSSID 0x0618
+#define REG_MAR 0x0620
+#define REG_MBIDCAMCFG 0x0628
+
+#define REG_USTIME_EDCA 0x0638
+#define REG_MAC_SPEC_SIFS 0x063A
+#define REG_RESP_SIFS_CCK 0x063C
+#define REG_RESP_SIFS_OFDM 0x063E
+/* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
+#define REG_R2T_SIFS 0x063C
+/* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
+#define REG_T2T_SIFS 0x063E
+#define REG_ACKTO 0x0640
+#define REG_CTS2TO 0x0641
+#define REG_EIFS 0x0642
+
+#define REG_NAV_CTRL 0x0650
+#define REG_BACAMCMD 0x0654
+#define REG_BACAMCONTENT 0x0658
+#define REG_LBDLY 0x0660
+#define REG_FWDLY 0x0661
+#define REG_RXERR_RPT 0x0664
+#define REG_WMAC_TRXPTCL_CTL 0x0668
+
+#define REG_CAMCMD 0x0670
+#define REG_CAMWRITE 0x0674
+#define REG_CAMREAD 0x0678
+#define REG_CAMDBG 0x067C
+#define REG_SECCFG 0x0680
+
+#define REG_WOW_CTRL 0x0690
+#define REG_PSSTATUS 0x0691
+#define REG_PS_RX_INFO 0x0692
+#define REG_LPNAV_CTRL 0x0694
+#define REG_WKFMCAM_CMD 0x0698
+#define REG_WKFMCAM_RWD 0x069C
+#define REG_RXFLTMAP0 0x06A0
+#define REG_RXFLTMAP1 0x06A2
+#define REG_RXFLTMAP2 0x06A4
+#define REG_BCN_PSR_RPT 0x06A8
+#define REG_CALB32K_CTRL 0x06AC
+#define REG_PKT_MON_CTRL 0x06B4
+#define REG_BT_COEX_TABLE 0x06C0
+#define REG_WMAC_RESP_TXINFO 0x06D8
+
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+#define REG_TEST_USB_TXQS 0xFE48
+#define REG_TEST_SIE_VID 0xFE60
+#define REG_TEST_SIE_PID 0xFE62
+#define REG_TEST_SIE_OPTIONAL 0xFE64
+#define REG_TEST_SIE_CHIRP_K 0xFE65
+#define REG_TEST_SIE_PHY 0xFE66
+#define REG_TEST_SIE_MAC_ADDR 0xFE70
+#define REG_TEST_SIE_STRING 0xFE80
+
+#define REG_NORMAL_SIE_VID 0xFE60
+#define REG_NORMAL_SIE_PID 0xFE62
+#define REG_NORMAL_SIE_OPTIONAL 0xFE64
+#define REG_NORMAL_SIE_EP 0xFE65
+#define REG_NORMAL_SIE_PHY 0xFE68
+#define REG_NORMAL_SIE_MAC_ADDR 0xFE70
+#define REG_NORMAL_SIE_STRING 0xFE80
+
+#define CR9346 REG_9346CR
+#define MSR (REG_CR + 2)
+#define ISR REG_HISR
+#define TSFR REG_TSFTR
+
+#define MACIDR0 REG_MACID
+#define MACIDR4 (REG_MACID + 4)
+
+#define PBP REG_PBP
+
+#define IDR0 MACIDR0
+#define IDR4 MACIDR4
+
+#define UNUSED_REGISTER 0x1BF
+#define DCAM UNUSED_REGISTER
+#define PSR UNUSED_REGISTER
+#define BBADDR UNUSED_REGISTER
+#define PHYDATAR UNUSED_REGISTER
+
+#define INVALID_BBRF_VALUE 0x12345678
+
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+#define CMDEEPROM_EN BIT(5)
+#define CMDEEPROM_SEL BIT(4)
+#define CMD9346CR_9356SEL BIT(4)
+#define AUTOLOAD_EEPROM (CMDEEPROM_EN|CMDEEPROM_SEL)
+#define AUTOLOAD_EFUSE CMDEEPROM_EN
+
+#define GPIOSEL_GPIO 0
+#define GPIOSEL_ENBT BIT(5)
+
+#define GPIO_IN REG_GPIO_PIN_CTRL
+#define GPIO_OUT (REG_GPIO_PIN_CTRL+1)
+#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2)
+#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
+
+#define MSR_NOLINK 0x00
+#define MSR_ADHOC 0x01
+#define MSR_INFRA 0x02
+#define MSR_AP 0x03
+
+#define RRSR_RSC_OFFSET 21
+#define RRSR_SHORT_OFFSET 23
+#define RRSR_RSC_BW_40M 0x600000
+#define RRSR_RSC_UPSUBCHNL 0x400000
+#define RRSR_RSC_LOWSUBCHNL 0x200000
+#define RRSR_SHORT 0x800000
+#define RRSR_1M BIT(0)
+#define RRSR_2M BIT(1)
+#define RRSR_5_5M BIT(2)
+#define RRSR_11M BIT(3)
+#define RRSR_6M BIT(4)
+#define RRSR_9M BIT(5)
+#define RRSR_12M BIT(6)
+#define RRSR_18M BIT(7)
+#define RRSR_24M BIT(8)
+#define RRSR_36M BIT(9)
+#define RRSR_48M BIT(10)
+#define RRSR_54M BIT(11)
+#define RRSR_MCS0 BIT(12)
+#define RRSR_MCS1 BIT(13)
+#define RRSR_MCS2 BIT(14)
+#define RRSR_MCS3 BIT(15)
+#define RRSR_MCS4 BIT(16)
+#define RRSR_MCS5 BIT(17)
+#define RRSR_MCS6 BIT(18)
+#define RRSR_MCS7 BIT(19)
+#define BRSR_ACKSHORTPMB BIT(23)
+
+#define RATR_1M 0x00000001
+#define RATR_2M 0x00000002
+#define RATR_55M 0x00000004
+#define RATR_11M 0x00000008
+#define RATR_6M 0x00000010
+#define RATR_9M 0x00000020
+#define RATR_12M 0x00000040
+#define RATR_18M 0x00000080
+#define RATR_24M 0x00000100
+#define RATR_36M 0x00000200
+#define RATR_48M 0x00000400
+#define RATR_54M 0x00000800
+#define RATR_MCS0 0x00001000
+#define RATR_MCS1 0x00002000
+#define RATR_MCS2 0x00004000
+#define RATR_MCS3 0x00008000
+#define RATR_MCS4 0x00010000
+#define RATR_MCS5 0x00020000
+#define RATR_MCS6 0x00040000
+#define RATR_MCS7 0x00080000
+#define RATR_MCS8 0x00100000
+#define RATR_MCS9 0x00200000
+#define RATR_MCS10 0x00400000
+#define RATR_MCS11 0x00800000
+#define RATR_MCS12 0x01000000
+#define RATR_MCS13 0x02000000
+#define RATR_MCS14 0x04000000
+#define RATR_MCS15 0x08000000
+
+#define RATE_1M BIT(0)
+#define RATE_2M BIT(1)
+#define RATE_5_5M BIT(2)
+#define RATE_11M BIT(3)
+#define RATE_6M BIT(4)
+#define RATE_9M BIT(5)
+#define RATE_12M BIT(6)
+#define RATE_18M BIT(7)
+#define RATE_24M BIT(8)
+#define RATE_36M BIT(9)
+#define RATE_48M BIT(10)
+#define RATE_54M BIT(11)
+#define RATE_MCS0 BIT(12)
+#define RATE_MCS1 BIT(13)
+#define RATE_MCS2 BIT(14)
+#define RATE_MCS3 BIT(15)
+#define RATE_MCS4 BIT(16)
+#define RATE_MCS5 BIT(17)
+#define RATE_MCS6 BIT(18)
+#define RATE_MCS7 BIT(19)
+#define RATE_MCS8 BIT(20)
+#define RATE_MCS9 BIT(21)
+#define RATE_MCS10 BIT(22)
+#define RATE_MCS11 BIT(23)
+#define RATE_MCS12 BIT(24)
+#define RATE_MCS13 BIT(25)
+#define RATE_MCS14 BIT(26)
+#define RATE_MCS15 BIT(27)
+
+#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
+#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M \
+ | RATR_24M | RATR_36M | RATR_48M | RATR_54M)
+#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | \
+ RATR_MCS3 | RATR_MCS4 | RATR_MCS5 | \
+ RATR_MCS6 | RATR_MCS7)
+#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | \
+ RATR_MCS11 | RATR_MCS12 | RATR_MCS13 | \
+ RATR_MCS14 | RATR_MCS15)
+
+#define BW_OPMODE_20MHZ BIT(2)
+#define BW_OPMODE_5G BIT(1)
+#define BW_OPMODE_11J BIT(0)
+
+#define CAM_VALID BIT(15)
+#define CAM_NOTVALID 0x0000
+#define CAM_USEDK BIT(5)
+
+#define CAM_NONE 0x0
+#define CAM_WEP40 0x01
+#define CAM_TKIP 0x02
+#define CAM_AES 0x04
+#define CAM_WEP104 0x05
+
+#define TOTAL_CAM_ENTRY 32
+#define HALF_CAM_ENTRY 16
+
+#define CAM_WRITE BIT(16)
+#define CAM_READ 0x00000000
+#define CAM_POLLINIG BIT(31)
+
+#define SCR_USEDK 0x01
+#define SCR_TXSEC_ENABLE 0x02
+#define SCR_RXSEC_ENABLE 0x04
+
+#define WOW_PMEN BIT(0)
+#define WOW_WOMEN BIT(1)
+#define WOW_MAGIC BIT(2)
+#define WOW_UWF BIT(3)
+
+#define IMR8190_DISABLED 0x0
+#define IMR_BCNDMAINT6 BIT(31)
+#define IMR_BCNDMAINT5 BIT(30)
+#define IMR_BCNDMAINT4 BIT(29)
+#define IMR_BCNDMAINT3 BIT(28)
+#define IMR_BCNDMAINT2 BIT(27)
+#define IMR_BCNDMAINT1 BIT(26)
+#define IMR_BCNDOK8 BIT(25)
+#define IMR_BCNDOK7 BIT(24)
+#define IMR_BCNDOK6 BIT(23)
+#define IMR_BCNDOK5 BIT(22)
+#define IMR_BCNDOK4 BIT(21)
+#define IMR_BCNDOK3 BIT(20)
+#define IMR_BCNDOK2 BIT(19)
+#define IMR_BCNDOK1 BIT(18)
+#define IMR_TIMEOUT2 BIT(17)
+#define IMR_TIMEOUT1 BIT(16)
+#define IMR_TXFOVW BIT(15)
+#define IMR_PSTIMEOUT BIT(14)
+#define IMR_BCNINT BIT(13)
+#define IMR_RXFOVW BIT(12)
+#define IMR_RDU BIT(11)
+#define IMR_ATIMEND BIT(10)
+#define IMR_BDOK BIT(9)
+#define IMR_HIGHDOK BIT(8)
+#define IMR_TBDOK BIT(7)
+#define IMR_MGNTDOK BIT(6)
+#define IMR_TBDER BIT(5)
+#define IMR_BKDOK BIT(4)
+#define IMR_BEDOK BIT(3)
+#define IMR_VIDOK BIT(2)
+#define IMR_VODOK BIT(1)
+#define IMR_ROK BIT(0)
+
+#define IMR_TXERR BIT(11)
+#define IMR_RXERR BIT(10)
+#define IMR_C2HCMD BIT(9)
+#define IMR_CPWM BIT(8)
+#define IMR_OCPINT BIT(1)
+#define IMR_WLANOFF BIT(0)
+
+#define HWSET_MAX_SIZE 128
+
+#define EEPROM_DEFAULT_TSSI 0x0
+#define EEPROM_DEFAULT_TXPOWERDIFF 0x0
+#define EEPROM_DEFAULT_CRYSTALCAP 0x5
+#define EEPROM_DEFAULT_BOARDTYPE 0x02
+#define EEPROM_DEFAULT_TXPOWER 0x1010
+#define EEPROM_DEFAULT_HT2T_TXPWR 0x10
+
+#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
+#define EEPROM_DEFAULT_THERMALMETER 0x12
+#define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0
+#define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5
+#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22
+#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
+#define EEPROM_DEFAULT_HT20_DIFF 2
+#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
+#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0
+
+#define RF_OPTION1 0x79
+#define RF_OPTION2 0x7A
+#define RF_OPTION3 0x7B
+#define RF_OPTION4 0x7C
+
+#define EEPROM_DEFAULT_PID 0x1234
+#define EEPROM_DEFAULT_VID 0x5678
+#define EEPROM_DEFAULT_CUSTOMERID 0xAB
+#define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD
+#define EEPROM_DEFAULT_VERSION 0
+
+#define EEPROM_CHANNEL_PLAN_FCC 0x0
+#define EEPROM_CHANNEL_PLAN_IC 0x1
+#define EEPROM_CHANNEL_PLAN_ETSI 0x2
+#define EEPROM_CHANNEL_PLAN_SPAIN 0x3
+#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
+#define EEPROM_CHANNEL_PLAN_MKK 0x5
+#define EEPROM_CHANNEL_PLAN_MKK1 0x6
+#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
+#define EEPROM_CHANNEL_PLAN_TELEC 0x8
+#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
+#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
+#define EEPROM_CHANNEL_PLAN_NCC 0xB
+#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
+
+#define EEPROM_CID_DEFAULT 0x0
+#define EEPROM_CID_TOSHIBA 0x4
+#define EEPROM_CID_CCX 0x10
+#define EEPROM_CID_QMI 0x0D
+#define EEPROM_CID_WHQL 0xFE
+
+#define RTL8192_EEPROM_ID 0x8129
+
+#define RTL8190_EEPROM_ID 0x8129
+#define EEPROM_HPON 0x02
+#define EEPROM_CLK 0x06
+#define EEPROM_TESTR 0x08
+
+#define EEPROM_VID 0x0A
+#define EEPROM_DID 0x0C
+#define EEPROM_SVID 0x0E
+#define EEPROM_SMID 0x10
+
+#define EEPROM_MAC_ADDR 0x16
+
+#define EEPROM_CCK_TX_PWR_INX 0x5A
+#define EEPROM_HT40_1S_TX_PWR_INX 0x60
+#define EEPROM_HT40_2S_TX_PWR_INX_DIFF 0x66
+#define EEPROM_HT20_TX_PWR_INX_DIFF 0x69
+#define EEPROM_OFDM_TX_PWR_INX_DIFF 0x6C
+#define EEPROM_HT40_MAX_PWR_OFFSET 0x6F
+#define EEPROM_HT20_MAX_PWR_OFFSET 0x72
+
+#define EEPROM_TSSI_A 0x76
+#define EEPROM_TSSI_B 0x77
+#define EEPROM_THERMAL_METER 0x78
+#define EEPROM_XTAL_K 0x78
+#define EEPROM_RF_OPT1 0x79
+#define EEPROM_RF_OPT2 0x7A
+#define EEPROM_RF_OPT3 0x7B
+#define EEPROM_RF_OPT4 0x7C
+#define EEPROM_CHANNEL_PLAN 0x7D
+#define EEPROM_VERSION 0x7E
+#define EEPROM_CUSTOMER_ID 0x7F
+
+#define EEPROM_PWRDIFF 0x54
+
+#define EEPROM_TXPOWERCCK 0x5A
+#define EEPROM_TXPOWERHT40_1S 0x60
+#define EEPROM_TXPOWERHT40_2SDIFF 0x66
+#define EEPROM_TXPOWERHT20DIFF 0x69
+#define EEPROM_TXPOWER_OFDMDIFF 0x6C
+
+#define EEPROM_TXPWR_GROUP 0x6F
+
+#define EEPROM_CHANNELPLAN 0x75
+
+#define STOPBECON BIT(6)
+#define STOPHIGHT BIT(5)
+#define STOPMGT BIT(4)
+#define STOPVO BIT(3)
+#define STOPVI BIT(2)
+#define STOPBE BIT(1)
+#define STOPBK BIT(0)
+
+#define RCR_APP_FCS BIT(31)
+#define RCR_APP_MIC BIT(30)
+#define RCR_APP_ICV BIT(29)
+#define RCR_APP_PHYSTS BIT(28)
+#define RCR_APP_PHYST_RXFF BIT(28)
+#define RCR_APP_BA_SSN BIT(27)
+#define RCR_ENMBID BIT(24)
+#define RCR_LSIGEN BIT(23)
+#define RCR_MFBEN BIT(22)
+#define RCR_HTC_LOC_CTRL BIT(14)
+#define RCR_AMF BIT(13)
+#define RCR_ACF BIT(12)
+#define RCR_ADF BIT(11)
+#define RCR_AICV BIT(9)
+#define RCR_ACRC32 BIT(8)
+#define RCR_CBSSID_BCN BIT(7)
+#define RCR_CBSSID_DATA BIT(6)
+#define RCR_CBSSID RCR_CBSSID_DATA
+#define RCR_APWRMGT BIT(5)
+#define RCR_ADD3 BIT(4)
+#define RCR_AB BIT(3)
+#define RCR_AM BIT(2)
+#define RCR_APM BIT(1)
+#define RCR_AAP BIT(0)
+#define RCR_MXDMA_OFFSET 8
+#define RCR_FIFO_OFFSET 13
+
+#define RSV_CTRL 0x001C
+#define RD_CTRL 0x0524
+
+#define REG_USB_VID 0xFE60
+#define REG_USB_PID 0xFE62
+#define REG_USB_OPTIONAL 0xFE64
+#define REG_USB_CHIRP_K 0xFE65
+#define REG_USB_PHY 0xFE66
+#define REG_USB_MAC_ADDR 0xFE70
+#define REG_USB_HRPWM 0xFE58
+#define REG_USB_HCPWM 0xFE57
+
+#define SW18_FPWM BIT(3)
+
+#define ISO_MD2PP BIT(0)
+#define ISO_UA2USB BIT(1)
+#define ISO_UD2CORE BIT(2)
+#define ISO_PA2PCIE BIT(3)
+#define ISO_PD2CORE BIT(4)
+#define ISO_IP2MAC BIT(5)
+#define ISO_DIOP BIT(6)
+#define ISO_DIOE BIT(7)
+#define ISO_EB2CORE BIT(8)
+#define ISO_DIOR BIT(9)
+
+#define PWC_EV25V BIT(14)
+#define PWC_EV12V BIT(15)
+
+#define FEN_BBRSTB BIT(0)
+#define FEN_BB_GLB_RSTn BIT(1)
+#define FEN_USBA BIT(2)
+#define FEN_UPLL BIT(3)
+#define FEN_USBD BIT(4)
+#define FEN_DIO_PCIE BIT(5)
+#define FEN_PCIEA BIT(6)
+#define FEN_PPLL BIT(7)
+#define FEN_PCIED BIT(8)
+#define FEN_DIOE BIT(9)
+#define FEN_CPUEN BIT(10)
+#define FEN_DCORE BIT(11)
+#define FEN_ELDR BIT(12)
+#define FEN_DIO_RF BIT(13)
+#define FEN_HWPDN BIT(14)
+#define FEN_MREGEN BIT(15)
+
+#define PFM_LDALL BIT(0)
+#define PFM_ALDN BIT(1)
+#define PFM_LDKP BIT(2)
+#define PFM_WOWL BIT(3)
+#define EnPDN BIT(4)
+#define PDN_PL BIT(5)
+#define APFM_ONMAC BIT(8)
+#define APFM_OFF BIT(9)
+#define APFM_RSM BIT(10)
+#define AFSM_HSUS BIT(11)
+#define AFSM_PCIE BIT(12)
+#define APDM_MAC BIT(13)
+#define APDM_HOST BIT(14)
+#define APDM_HPDN BIT(15)
+#define RDY_MACON BIT(16)
+#define SUS_HOST BIT(17)
+#define ROP_ALD BIT(20)
+#define ROP_PWR BIT(21)
+#define ROP_SPS BIT(22)
+#define SOP_MRST BIT(25)
+#define SOP_FUSE BIT(26)
+#define SOP_ABG BIT(27)
+#define SOP_AMB BIT(28)
+#define SOP_RCK BIT(29)
+#define SOP_A8M BIT(30)
+#define XOP_BTCK BIT(31)
+
+#define ANAD16V_EN BIT(0)
+#define ANA8M BIT(1)
+#define MACSLP BIT(4)
+#define LOADER_CLK_EN BIT(5)
+#define _80M_SSC_DIS BIT(7)
+#define _80M_SSC_EN_HO BIT(8)
+#define PHY_SSC_RSTB BIT(9)
+#define SEC_CLK_EN BIT(10)
+#define MAC_CLK_EN BIT(11)
+#define SYS_CLK_EN BIT(12)
+#define RING_CLK_EN BIT(13)
+
+#define BOOT_FROM_EEPROM BIT(4)
+#define EEPROM_EN BIT(5)
+#define EEPROMSEL BOOT_FROM_EEPROM
+
+#define AFE_BGEN BIT(0)
+#define AFE_MBEN BIT(1)
+#define MAC_ID_EN BIT(7)
+
+#define WLOCK_ALL BIT(0)
+#define WLOCK_00 BIT(1)
+#define WLOCK_04 BIT(2)
+#define WLOCK_08 BIT(3)
+#define WLOCK_40 BIT(4)
+#define R_DIS_PRST_0 BIT(5)
+#define R_DIS_PRST_1 BIT(6)
+#define LOCK_ALL_EN BIT(7)
+
+#define RF_EN BIT(0)
+#define RF_RSTB BIT(1)
+#define RF_SDMRSTB BIT(2)
+
+#define LDA15_EN BIT(0)
+#define LDA15_STBY BIT(1)
+#define LDA15_OBUF BIT(2)
+#define LDA15_REG_VOS BIT(3)
+#define _LDA15_VOADJ(x) (((x) & 0x7) << 4)
+
+#define LDV12_EN BIT(0)
+#define LDV12_SDBY BIT(1)
+#define LPLDO_HSM BIT(2)
+#define LPLDO_LSM_DIS BIT(3)
+#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
+
+#define XTAL_EN BIT(0)
+#define XTAL_BSEL BIT(1)
+#define _XTAL_BOSC(x) (((x) & 0x3) << 2)
+#define _XTAL_CADJ(x) (((x) & 0xF) << 4)
+#define XTAL_GATE_USB BIT(8)
+#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9)
+#define XTAL_GATE_AFE BIT(11)
+#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12)
+#define XTAL_RF_GATE BIT(14)
+#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15)
+#define XTAL_GATE_DIG BIT(17)
+#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18)
+#define XTAL_BT_GATE BIT(20)
+#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21)
+#define _XTAL_GPIO(x) (((x) & 0x7) << 23)
+
+#define CKDLY_AFE BIT(26)
+#define CKDLY_USB BIT(27)
+#define CKDLY_DIG BIT(28)
+#define CKDLY_BT BIT(29)
+
+#define APLL_EN BIT(0)
+#define APLL_320_EN BIT(1)
+#define APLL_FREF_SEL BIT(2)
+#define APLL_EDGE_SEL BIT(3)
+#define APLL_WDOGB BIT(4)
+#define APLL_LPFEN BIT(5)
+
+#define APLL_REF_CLK_13MHZ 0x1
+#define APLL_REF_CLK_19_2MHZ 0x2
+#define APLL_REF_CLK_20MHZ 0x3
+#define APLL_REF_CLK_25MHZ 0x4
+#define APLL_REF_CLK_26MHZ 0x5
+#define APLL_REF_CLK_38_4MHZ 0x6
+#define APLL_REF_CLK_40MHZ 0x7
+
+#define APLL_320EN BIT(14)
+#define APLL_80EN BIT(15)
+#define APLL_1MEN BIT(24)
+
+#define ALD_EN BIT(18)
+#define EF_PD BIT(19)
+#define EF_FLAG BIT(31)
+
+#define EF_TRPT BIT(7)
+#define LDOE25_EN BIT(31)
+
+#define RSM_EN BIT(0)
+#define Timer_EN BIT(4)
+
+#define TRSW0EN BIT(2)
+#define TRSW1EN BIT(3)
+#define EROM_EN BIT(4)
+#define EnBT BIT(5)
+#define EnUart BIT(8)
+#define Uart_910 BIT(9)
+#define EnPMAC BIT(10)
+#define SIC_SWRST BIT(11)
+#define EnSIC BIT(12)
+#define SIC_23 BIT(13)
+#define EnHDP BIT(14)
+#define SIC_LBK BIT(15)
+
+#define LED0PL BIT(4)
+#define LED1PL BIT(12)
+#define LED0DIS BIT(7)
+
+#define MCUFWDL_EN BIT(0)
+#define MCUFWDL_RDY BIT(1)
+#define FWDL_ChkSum_rpt BIT(2)
+#define MACINI_RDY BIT(3)
+#define BBINI_RDY BIT(4)
+#define RFINI_RDY BIT(5)
+#define WINTINI_RDY BIT(6)
+#define CPRST BIT(23)
+
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define TRP_B15V_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define BT_FUNC BIT(16)
+#define VENDOR_ID BIT(19)
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23)
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+
+/* REG_GPIO_OUTSTS (For RTL8723 only) */
+#define EFS_HCI_SEL (BIT(0)|BIT(1))
+#define PAD_HCI_SEL (BIT(2)|BIT(3))
+#define HCI_SEL (BIT(4)|BIT(5))
+#define PKG_SEL_HCI BIT(6)
+#define FEN_GPS BIT(7)
+#define FEN_BT BIT(8)
+#define FEN_WL BIT(9)
+#define FEN_PCI BIT(10)
+#define FEN_USB BIT(11)
+#define BTRF_HWPDN_N BIT(12)
+#define WLRF_HWPDN_N BIT(13)
+#define PDN_BT_N BIT(14)
+#define PDN_GPS_N BIT(15)
+#define BT_CTL_HWPDN BIT(16)
+#define GPS_CTL_HWPDN BIT(17)
+#define PPHY_SUSB BIT(20)
+#define UPHY_SUSB BIT(21)
+#define PCI_SUSEN BIT(22)
+#define USB_SUSEN BIT(23)
+#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28))
+
+#define CHIP_VER_RTL_MASK 0xF000
+#define CHIP_VER_RTL_SHIFT 12
+
+#define REG_LBMODE (REG_CR + 3)
+
+#define HCI_TXDMA_EN BIT(0)
+#define HCI_RXDMA_EN BIT(1)
+#define TXDMA_EN BIT(2)
+#define RXDMA_EN BIT(3)
+#define PROTOCOL_EN BIT(4)
+#define SCHEDULE_EN BIT(5)
+#define MACTXEN BIT(6)
+#define MACRXEN BIT(7)
+#define ENSWBCN BIT(8)
+#define ENSEC BIT(9)
+
+#define _NETTYPE(x) (((x) & 0x3) << 16)
+#define MASK_NETTYPE 0x30000
+#define NT_NO_LINK 0x0
+#define NT_LINK_AD_HOC 0x1
+#define NT_LINK_AP 0x2
+#define NT_AS_AP 0x3
+
+#define _LBMODE(x) (((x) & 0xF) << 24)
+#define MASK_LBMODE 0xF000000
+#define LOOPBACK_NORMAL 0x0
+#define LOOPBACK_IMMEDIATELY 0xB
+#define LOOPBACK_MAC_DELAY 0x3
+#define LOOPBACK_PHY 0x1
+#define LOOPBACK_DMA 0x7
+
+#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
+#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
+#define _PSRX_MASK 0xF
+#define _PSTX_MASK 0xF0
+#define _PSRX(x) (x)
+#define _PSTX(x) ((x) << 4)
+
+#define PBP_64 0x0
+#define PBP_128 0x1
+#define PBP_256 0x2
+#define PBP_512 0x3
+#define PBP_1024 0x4
+
+#define RXDMA_ARBBW_EN BIT(0)
+#define RXSHFT_EN BIT(1)
+#define RXDMA_AGG_EN BIT(2)
+#define QS_VO_QUEUE BIT(8)
+#define QS_VI_QUEUE BIT(9)
+#define QS_BE_QUEUE BIT(10)
+#define QS_BK_QUEUE BIT(11)
+#define QS_MANAGER_QUEUE BIT(12)
+#define QS_HIGH_QUEUE BIT(13)
+
+#define HQSEL_VOQ BIT(0)
+#define HQSEL_VIQ BIT(1)
+#define HQSEL_BEQ BIT(2)
+#define HQSEL_BKQ BIT(3)
+#define HQSEL_MGTQ BIT(4)
+#define HQSEL_HIQ BIT(5)
+
+#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
+#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
+#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
+#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8)
+#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6)
+#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4)
+
+#define QUEUE_LOW 1
+#define QUEUE_NORMAL 2
+#define QUEUE_HIGH 3
+
+#define _LLT_NO_ACTIVE 0x0
+#define _LLT_WRITE_ACCESS 0x1
+#define _LLT_READ_ACCESS 0x2
+
+#define _LLT_INIT_DATA(x) ((x) & 0xFF)
+#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
+#define _LLT_OP(x) (((x) & 0x3) << 30)
+#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
+
+#define BB_WRITE_READ_MASK (BIT(31) | BIT(30))
+#define BB_WRITE_EN BIT(30)
+#define BB_READ_EN BIT(31)
+
+#define _HPQ(x) ((x) & 0xFF)
+#define _LPQ(x) (((x) & 0xFF) << 8)
+#define _PUBQ(x) (((x) & 0xFF) << 16)
+#define _NPQ(x) ((x) & 0xFF)
+
+#define HPQ_PUBLIC_DIS BIT(24)
+#define LPQ_PUBLIC_DIS BIT(25)
+#define LD_RQPN BIT(31)
+
+#define BCN_VALID BIT(16)
+#define BCN_HEAD(x) (((x) & 0xFF) << 8)
+#define BCN_HEAD_MASK 0xFF00
+
+#define BLK_DESC_NUM_SHIFT 4
+#define BLK_DESC_NUM_MASK 0xF
+
+#define DROP_DATA_EN BIT(9)
+
+#define EN_AMPDU_RTY_NEW BIT(7)
+
+#define _INIRTSMCS_SEL(x) ((x) & 0x3F)
+
+#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
+#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
+
+#define RATE_REG_BITMAP_ALL 0xFFFFF
+
+#define _RRSC_BITMAP(x) ((x) & 0xFFFFF)
+
+#define _RRSR_RSC(x) (((x) & 0x3) << 21)
+#define RRSR_RSC_RESERVED 0x0
+#define RRSR_RSC_UPPER_SUBCHANNEL 0x1
+#define RRSR_RSC_LOWER_SUBCHANNEL 0x2
+#define RRSR_RSC_DUPLICATE_MODE 0x3
+
+#define USE_SHORT_G1 BIT(20)
+
+#define _AGGLMT_MCS0(x) ((x) & 0xF)
+#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4)
+#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8)
+#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12)
+#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16)
+#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20)
+#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24)
+#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28)
+
+#define RETRY_LIMIT_SHORT_SHIFT 8
+#define RETRY_LIMIT_LONG_SHIFT 0
+
+#define _DARF_RC1(x) ((x) & 0x1F)
+#define _DARF_RC2(x) (((x) & 0x1F) << 8)
+#define _DARF_RC3(x) (((x) & 0x1F) << 16)
+#define _DARF_RC4(x) (((x) & 0x1F) << 24)
+#define _DARF_RC5(x) ((x) & 0x1F)
+#define _DARF_RC6(x) (((x) & 0x1F) << 8)
+#define _DARF_RC7(x) (((x) & 0x1F) << 16)
+#define _DARF_RC8(x) (((x) & 0x1F) << 24)
+
+#define _RARF_RC1(x) ((x) & 0x1F)
+#define _RARF_RC2(x) (((x) & 0x1F) << 8)
+#define _RARF_RC3(x) (((x) & 0x1F) << 16)
+#define _RARF_RC4(x) (((x) & 0x1F) << 24)
+#define _RARF_RC5(x) ((x) & 0x1F)
+#define _RARF_RC6(x) (((x) & 0x1F) << 8)
+#define _RARF_RC7(x) (((x) & 0x1F) << 16)
+#define _RARF_RC8(x) (((x) & 0x1F) << 24)
+
+#define AC_PARAM_TXOP_OFFSET 16
+#define AC_PARAM_ECW_MAX_OFFSET 12
+#define AC_PARAM_ECW_MIN_OFFSET 8
+#define AC_PARAM_AIFS_OFFSET 0
+
+#define _AIFS(x) (x)
+#define _ECW_MAX_MIN(x) ((x) << 8)
+#define _TXOP_LIMIT(x) ((x) << 16)
+
+#define _BCNIFS(x) ((x) & 0xFF)
+#define _BCNECW(x) ((((x) & 0xF)) << 8)
+
+#define _LRL(x) ((x) & 0x3F)
+#define _SRL(x) (((x) & 0x3F) << 8)
+
+#define _SIFS_CCK_CTX(x) ((x) & 0xFF)
+#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8)
+
+#define _SIFS_OFDM_CTX(x) ((x) & 0xFF)
+#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8)
+
+#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8)
+
+#define DIS_EDCA_CNT_DWN BIT(11)
+
+#define EN_MBSSID BIT(1)
+#define EN_TXBCN_RPT BIT(2)
+#define EN_BCN_FUNCTION BIT(3)
+
+#define TSFTR_RST BIT(0)
+#define TSFTR1_RST BIT(1)
+
+#define STOP_BCNQ BIT(6)
+
+#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
+#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
+
+#define AcmHw_HwEn BIT(0)
+#define AcmHw_BeqEn BIT(1)
+#define AcmHw_ViqEn BIT(2)
+#define AcmHw_VoqEn BIT(3)
+#define AcmHw_BeqStatus BIT(4)
+#define AcmHw_ViqStatus BIT(5)
+#define AcmHw_VoqStatus BIT(6)
+
+#define APSDOFF BIT(6)
+#define APSDOFF_STATUS BIT(7)
+
+#define BW_20MHZ BIT(2)
+
+#define RATE_BITMAP_ALL 0xFFFFF
+
+#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
+
+#define TSFRST BIT(0)
+#define DIS_GCLK BIT(1)
+#define PAD_SEL BIT(2)
+#define PWR_ST BIT(6)
+#define PWRBIT_OW_EN BIT(7)
+#define ACRC BIT(8)
+#define CFENDFORM BIT(9)
+#define ICV BIT(10)
+
+#define AAP BIT(0)
+#define APM BIT(1)
+#define AM BIT(2)
+#define AB BIT(3)
+#define ADD3 BIT(4)
+#define APWRMGT BIT(5)
+#define CBSSID BIT(6)
+#define CBSSID_DATA BIT(6)
+#define CBSSID_BCN BIT(7)
+#define ACRC32 BIT(8)
+#define AICV BIT(9)
+#define ADF BIT(11)
+#define ACF BIT(12)
+#define AMF BIT(13)
+#define HTC_LOC_CTRL BIT(14)
+#define UC_DATA_EN BIT(16)
+#define BM_DATA_EN BIT(17)
+#define MFBEN BIT(22)
+#define LSIGEN BIT(23)
+#define EnMBID BIT(24)
+#define APP_BASSN BIT(27)
+#define APP_PHYSTS BIT(28)
+#define APP_ICV BIT(29)
+#define APP_MIC BIT(30)
+#define APP_FCS BIT(31)
+
+#define _MIN_SPACE(x) ((x) & 0x7)
+#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3)
+
+#define RXERR_TYPE_OFDM_PPDU 0
+#define RXERR_TYPE_OFDM_FALSE_ALARM 1
+#define RXERR_TYPE_OFDM_MPDU_OK 2
+#define RXERR_TYPE_OFDM_MPDU_FAIL 3
+#define RXERR_TYPE_CCK_PPDU 4
+#define RXERR_TYPE_CCK_FALSE_ALARM 5
+#define RXERR_TYPE_CCK_MPDU_OK 6
+#define RXERR_TYPE_CCK_MPDU_FAIL 7
+#define RXERR_TYPE_HT_PPDU 8
+#define RXERR_TYPE_HT_FALSE_ALARM 9
+#define RXERR_TYPE_HT_MPDU_TOTAL 10
+#define RXERR_TYPE_HT_MPDU_OK 11
+#define RXERR_TYPE_HT_MPDU_FAIL 12
+#define RXERR_TYPE_RX_FULL_DROP 15
+
+#define RXERR_COUNTER_MASK 0xFFFFF
+#define RXERR_RPT_RST BIT(27)
+#define _RXERR_RPT_SEL(type) ((type) << 28)
+
+#define SCR_TxUseDK BIT(0)
+#define SCR_RxUseDK BIT(1)
+#define SCR_TxEncEnable BIT(2)
+#define SCR_RxDecEnable BIT(3)
+#define SCR_SKByA2 BIT(4)
+#define SCR_NoSKMC BIT(5)
+#define SCR_TXBCUSEDK BIT(6)
+#define SCR_RXBCUSEDK BIT(7)
+
+#define USB_IS_HIGH_SPEED 0
+#define USB_IS_FULL_SPEED 1
+#define USB_SPEED_MASK BIT(5)
+
+#define USB_NORMAL_SIE_EP_MASK 0xF
+#define USB_NORMAL_SIE_EP_SHIFT 4
+
+#define USB_TEST_EP_MASK 0x30
+#define USB_TEST_EP_SHIFT 4
+
+#define USB_AGG_EN BIT(3)
+
+#define MAC_ADDR_LEN 6
+#define LAST_ENTRY_OF_TX_PKT_BUFFER 255
+
+#define POLLING_LLT_THRESHOLD 20
+#define POLLING_READY_TIMEOUT_COUNT 1000
+
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
+#define EPROM_CMD_CONFIG 0x3
+#define EPROM_CMD_LOAD 1
+
+#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE
+
+#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
+
+/* REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
+/* Enable GPIO[9] as WiFi HW PDn source */
+#define WL_HWPDN_EN BIT(0)
+/* WiFi HW PDn polarity control */
+#define WL_HWPDN_SL BIT(1)
+/* WiFi function enable */
+#define WL_FUNC_EN BIT(2)
+/* Enable GPIO[9] as WiFi RF HW PDn source */
+#define WL_HWROF_EN BIT(3)
+/* Enable GPIO[11] as BT HW PDn source */
+#define BT_HWPDN_EN BIT(16)
+/* BT HW PDn polarity control */
+#define BT_HWPDN_SL BIT(17)
+/* BT function enable */
+#define BT_FUNC_EN BIT(18)
+/* Enable GPIO[11] as BT/GPS RF HW PDn source */
+#define BT_HWROF_EN BIT(19)
+/* Enable GPIO[10] as GPS HW PDn source */
+#define GPS_HWPDN_EN BIT(20)
+/* GPS HW PDn polarity control */
+#define GPS_HWPDN_SL BIT(21)
+/* GPS function enable */
+#define GPS_FUNC_EN BIT(22)
+
+#define RPMAC_RESET 0x100
+#define RPMAC_TXSTART 0x104
+#define RPMAC_TXLEGACYSIG 0x108
+#define RPMAC_TXHTSIG1 0x10c
+#define RPMAC_TXHTSIG2 0x110
+#define RPMAC_PHYDEBUG 0x114
+#define RPMAC_TXPACKETNUM 0x118
+#define RPMAC_TXIDLE 0x11c
+#define RPMAC_TXMACHEADER0 0x120
+#define RPMAC_TXMACHEADER1 0x124
+#define RPMAC_TXMACHEADER2 0x128
+#define RPMAC_TXMACHEADER3 0x12c
+#define RPMAC_TXMACHEADER4 0x130
+#define RPMAC_TXMACHEADER5 0x134
+#define RPMAC_TXDADATYPE 0x138
+#define RPMAC_TXRANDOMSEED 0x13c
+#define RPMAC_CCKPLCPPREAMBLE 0x140
+#define RPMAC_CCKPLCPHEADER 0x144
+#define RPMAC_CCKCRC16 0x148
+#define RPMAC_OFDMRXCRC32OK 0x170
+#define RPMAC_OFDMRXCRC32Er 0x174
+#define RPMAC_OFDMRXPARITYER 0x178
+#define RPMAC_OFDMRXCRC8ER 0x17c
+#define RPMAC_CCKCRXRC16ER 0x180
+#define RPMAC_CCKCRXRC32ER 0x184
+#define RPMAC_CCKCRXRC32OK 0x188
+#define RPMAC_TXSTATUS 0x18c
+
+#define RFPGA0_RFMOD 0x800
+
+#define RFPGA0_TXINFO 0x804
+#define RFPGA0_PSDFUNCTION 0x808
+
+#define RFPGA0_TXGAINSTAGE 0x80c
+
+#define RFPGA0_RFTIMING1 0x810
+#define RFPGA0_RFTIMING2 0x814
+
+#define RFPGA0_XA_HSSIPARAMETER1 0x820
+#define RFPGA0_XA_HSSIPARAMETER2 0x824
+#define RFPGA0_XB_HSSIPARAMETER1 0x828
+#define RFPGA0_XB_HSSIPARAMETER2 0x82c
+
+#define RFPGA0_XA_LSSIPARAMETER 0x840
+#define RFPGA0_XB_LSSIPARAMETER 0x844
+
+#define RFPGA0_RFWAKEUPPARAMETER 0x850
+#define RFPGA0_RFSLEEPUPPARAMETER 0x854
+
+#define RFPGA0_XAB_SWITCHCONTROL 0x858
+#define RFPGA0_XCD_SWITCHCONTROL 0x85c
+
+#define RFPGA0_XA_RFINTERFACEOE 0x860
+#define RFPGA0_XB_RFINTERFACEOE 0x864
+
+#define RFPGA0_XAB_RFINTERFACESW 0x870
+#define RFPGA0_XCD_RFINTERFACESW 0x874
+
+#define rFPGA0_XAB_RFPARAMETER 0x878
+#define rFPGA0_XCD_RFPARAMETER 0x87c
+
+#define RFPGA0_ANALOGPARAMETER1 0x880
+#define RFPGA0_ANALOGPARAMETER2 0x884
+#define RFPGA0_ANALOGPARAMETER3 0x888
+#define RFPGA0_ANALOGPARAMETER4 0x88c
+
+#define RFPGA0_XA_LSSIREADBACK 0x8a0
+#define RFPGA0_XB_LSSIREADBACK 0x8a4
+#define RFPGA0_XC_LSSIREADBACK 0x8a8
+#define RFPGA0_XD_LSSIREADBACK 0x8ac
+
+#define RFPGA0_PSDREPORT 0x8b4
+#define TRANSCEIVEA_HSPI_READBACK 0x8b8
+#define TRANSCEIVEB_HSPI_READBACK 0x8bc
+#define RFPGA0_XAB_RFINTERFACERB 0x8e0
+#define RFPGA0_XCD_RFINTERFACERB 0x8e4
+
+#define RFPGA1_RFMOD 0x900
+
+#define RFPGA1_TXBLOCK 0x904
+#define RFPGA1_DEBUGSELECT 0x908
+#define RFPGA1_TXINFO 0x90c
+
+#define RCCK0_SYSTEM 0xa00
+
+#define RCCK0_AFESETTING 0xa04
+#define RCCK0_CCA 0xa08
+
+#define RCCK0_RXAGC1 0xa0c
+#define RCCK0_RXAGC2 0xa10
+
+#define RCCK0_RXHP 0xa14
+
+#define RCCK0_DSPPARAMETER1 0xa18
+#define RCCK0_DSPPARAMETER2 0xa1c
+
+#define RCCK0_TXFILTER1 0xa20
+#define RCCK0_TXFILTER2 0xa24
+#define RCCK0_DEBUGPORT 0xa28
+#define RCCK0_FALSEALARMREPORT 0xa2c
+#define RCCK0_TRSSIREPORT 0xa50
+#define RCCK0_RXREPORT 0xa54
+#define RCCK0_FACOUNTERLOWER 0xa5c
+#define RCCK0_FACOUNTERUPPER 0xa58
+
+#define ROFDM0_LSTF 0xc00
+
+#define ROFDM0_TRXPATHENABLE 0xc04
+#define ROFDM0_TRMUXPAR 0xc08
+#define ROFDM0_TRSWISOLATION 0xc0c
+
+#define ROFDM0_XARXAFE 0xc10
+#define ROFDM0_XARXIQIMBALANCE 0xc14
+#define ROFDM0_XBRXAFE 0xc18
+#define ROFDM0_XBRXIQIMBALANCE 0xc1c
+#define ROFDM0_XCRXAFE 0xc20
+#define ROFDM0_XCRXIQIMBANLANCE 0xc24
+#define ROFDM0_XDRXAFE 0xc28
+#define ROFDM0_XDRXIQIMBALANCE 0xc2c
+
+#define ROFDM0_RXDETECTOR1 0xc30
+#define ROFDM0_RXDETECTOR2 0xc34
+#define ROFDM0_RXDETECTOR3 0xc38
+#define ROFDM0_RXDETECTOR4 0xc3c
+
+#define ROFDM0_RXDSP 0xc40
+#define ROFDM0_CFOANDDAGC 0xc44
+#define ROFDM0_CCADROPTHRESHOLD 0xc48
+#define ROFDM0_ECCATHRESHOLD 0xc4c
+
+#define ROFDM0_XAAGCCORE1 0xc50
+#define ROFDM0_XAAGCCORE2 0xc54
+#define ROFDM0_XBAGCCORE1 0xc58
+#define ROFDM0_XBAGCCORE2 0xc5c
+#define ROFDM0_XCAGCCORE1 0xc60
+#define ROFDM0_XCAGCCORE2 0xc64
+#define ROFDM0_XDAGCCORE1 0xc68
+#define ROFDM0_XDAGCCORE2 0xc6c
+
+#define ROFDM0_AGCPARAMETER1 0xc70
+#define ROFDM0_AGCPARAMETER2 0xc74
+#define ROFDM0_AGCRSSITABLE 0xc78
+#define ROFDM0_HTSTFAGC 0xc7c
+
+#define ROFDM0_XATXIQIMBALANCE 0xc80
+#define ROFDM0_XATXAFE 0xc84
+#define ROFDM0_XBTXIQIMBALANCE 0xc88
+#define ROFDM0_XBTXAFE 0xc8c
+#define ROFDM0_XCTXIQIMBALANCE 0xc90
+#define ROFDM0_XCTXAFE 0xc94
+#define ROFDM0_XDTXIQIMBALANCE 0xc98
+#define ROFDM0_XDTXAFE 0xc9c
+
+#define ROFDM0_RXIQEXTANTA 0xca0
+
+#define ROFDM0_RXHPPARAMETER 0xce0
+#define ROFDM0_TXPSEUDONOISEWGT 0xce4
+#define ROFDM0_FRAMESYNC 0xcf0
+#define ROFDM0_DFSREPORT 0xcf4
+#define ROFDM0_TXCOEFF1 0xca4
+#define ROFDM0_TXCOEFF2 0xca8
+#define ROFDM0_TXCOEFF3 0xcac
+#define ROFDM0_TXCOEFF4 0xcb0
+#define ROFDM0_TXCOEFF5 0xcb4
+#define ROFDM0_TXCOEFF6 0xcb8
+
+#define ROFDM1_LSTF 0xd00
+#define ROFDM1_TRXPATHENABLE 0xd04
+
+#define ROFDM1_CF0 0xd08
+#define ROFDM1_CSI1 0xd10
+#define ROFDM1_SBD 0xd14
+#define ROFDM1_CSI2 0xd18
+#define ROFDM1_CFOTRACKING 0xd2c
+#define ROFDM1_TRXMESAURE1 0xd34
+#define ROFDM1_INTFDET 0xd3c
+#define ROFDM1_PSEUDONOISESTATEAB 0xd50
+#define ROFDM1_PSEUDONOISESTATECD 0xd54
+#define ROFDM1_RXPSEUDONOISEWGT 0xd58
+
+#define ROFDM_PHYCOUNTER1 0xda0
+#define ROFDM_PHYCOUNTER2 0xda4
+#define ROFDM_PHYCOUNTER3 0xda8
+
+#define ROFDM_SHORTCFOAB 0xdac
+#define ROFDM_SHORTCFOCD 0xdb0
+#define ROFDM_LONGCFOAB 0xdb4
+#define ROFDM_LONGCFOCD 0xdb8
+#define ROFDM_TAILCF0AB 0xdbc
+#define ROFDM_TAILCF0CD 0xdc0
+#define ROFDM_PWMEASURE1 0xdc4
+#define ROFDM_PWMEASURE2 0xdc8
+#define ROFDM_BWREPORT 0xdcc
+#define ROFDM_AGCREPORT 0xdd0
+#define ROFDM_RXSNR 0xdd4
+#define ROFDM_RXEVMCSI 0xdd8
+#define ROFDM_SIGREPORT 0xddc
+
+#define RTXAGC_A_RATE18_06 0xe00
+#define RTXAGC_A_RATE54_24 0xe04
+#define RTXAGC_A_CCK1_MCS32 0xe08
+#define RTXAGC_A_MCS03_MCS00 0xe10
+#define RTXAGC_A_MCS07_MCS04 0xe14
+#define RTXAGC_A_MCS11_MCS08 0xe18
+#define RTXAGC_A_MCS15_MCS12 0xe1c
+
+#define RTXAGC_B_RATE18_06 0x830
+#define RTXAGC_B_RATE54_24 0x834
+#define RTXAGC_B_CCK1_55_MCS32 0x838
+#define RTXAGC_B_MCS03_MCS00 0x83c
+#define RTXAGC_B_MCS07_MCS04 0x848
+#define RTXAGC_B_MCS11_MCS08 0x84c
+#define RTXAGC_B_MCS15_MCS12 0x868
+#define RTXAGC_B_CCK11_A_CCK2_11 0x86c
+
+#define RZEBRA1_HSSIENABLE 0x0
+#define RZEBRA1_TRXENABLE1 0x1
+#define RZEBRA1_TRXENABLE2 0x2
+#define RZEBRA1_AGC 0x4
+#define RZEBRA1_CHARGEPUMP 0x5
+#define RZEBRA1_CHANNEL 0x7
+
+#define RZEBRA1_TXGAIN 0x8
+#define RZEBRA1_TXLPF 0x9
+#define RZEBRA1_RXLPF 0xb
+#define RZEBRA1_RXHPFCORNER 0xc
+
+#define RGLOBALCTRL 0
+#define RRTL8256_TXLPF 19
+#define RRTL8256_RXLPF 11
+#define RRTL8258_TXLPF 0x11
+#define RRTL8258_RXLPF 0x13
+#define RRTL8258_RSSILPF 0xa
+
+#define RF_AC 0x00
+
+#define RF_IQADJ_G1 0x01
+#define RF_IQADJ_G2 0x02
+#define RF_POW_TRSW 0x05
+
+#define RF_GAIN_RX 0x06
+#define RF_GAIN_TX 0x07
+
+#define RF_TXM_IDAC 0x08
+#define RF_BS_IQGEN 0x0F
+
+#define RF_MODE1 0x10
+#define RF_MODE2 0x11
+
+#define RF_RX_AGC_HP 0x12
+#define RF_TX_AGC 0x13
+#define RF_BIAS 0x14
+#define RF_IPA 0x15
+#define RF_POW_ABILITY 0x17
+#define RF_MODE_AG 0x18
+#define RRFCHANNEL 0x18
+#define RF_CHNLBW 0x18
+#define RF_TOP 0x19
+
+#define RF_RX_G1 0x1A
+#define RF_RX_G2 0x1B
+
+#define RF_RX_BB2 0x1C
+#define RF_RX_BB1 0x1D
+
+#define RF_RCK1 0x1E
+#define RF_RCK2 0x1F
+
+#define RF_TX_G1 0x20
+#define RF_TX_G2 0x21
+#define RF_TX_G3 0x22
+
+#define RF_TX_BB1 0x23
+#define RF_T_METER 0x24
+
+#define RF_SYN_G1 0x25
+#define RF_SYN_G2 0x26
+#define RF_SYN_G3 0x27
+#define RF_SYN_G4 0x28
+#define RF_SYN_G5 0x29
+#define RF_SYN_G6 0x2A
+#define RF_SYN_G7 0x2B
+#define RF_SYN_G8 0x2C
+
+#define RF_RCK_OS 0x30
+#define RF_TXPA_G1 0x31
+#define RF_TXPA_G2 0x32
+#define RF_TXPA_G3 0x33
+
+#define BBBRESETB 0x100
+#define BGLOBALRESETB 0x200
+#define BOFDMTXSTART 0x4
+#define BCCKTXSTART 0x8
+#define BCRC32DEBUG 0x100
+#define BPMACLOOPBACK 0x10
+#define BTXLSIG 0xffffff
+#define BOFDMTXRATE 0xf
+#define BOFDMTXRESERVED 0x10
+#define BOFDMTXLENGTH 0x1ffe0
+#define BOFDMTXPARITY 0x20000
+#define BTXHTSIG1 0xffffff
+#define BTXHTMCSRATE 0x7f
+#define BTXHTBW 0x80
+#define BTXHTLENGTH 0xffff00
+#define BTXHTSIG2 0xffffff
+#define BTXHTSMOOTHING 0x1
+#define BTXHTSOUNDING 0x2
+#define BTXHTRESERVED 0x4
+#define BTXHTAGGREATION 0x8
+#define BTXHTSTBC 0x30
+#define BTXHTADVANCECODING 0x40
+#define BTXHTSHORTGI 0x80
+#define BTXHTNUMBERHT_LTF 0x300
+#define BTXHTCRC8 0x3fc00
+#define BCOUNTERRESET 0x10000
+#define BNUMOFOFDMTX 0xffff
+#define BNUMOFCCKTX 0xffff0000
+#define BTXIDLEINTERVAL 0xffff
+#define BOFDMSERVICE 0xffff0000
+#define BTXMACHEADER 0xffffffff
+#define BTXDATAINIT 0xff
+#define BTXHTMODE 0x100
+#define BTXDATATYPE 0x30000
+#define BTXRANDOMSEED 0xffffffff
+#define BCCKTXPREAMBLE 0x1
+#define BCCKTXSFD 0xffff0000
+#define BCCKTXSIG 0xff
+#define BCCKTXSERVICE 0xff00
+#define BCCKLENGTHEXT 0x8000
+#define BCCKTXLENGHT 0xffff0000
+#define BCCKTXCRC16 0xffff
+#define BCCKTXSTATUS 0x1
+#define BOFDMTXSTATUS 0x2
+#define IS_BB_REG_OFFSET_92S(_Offset) \
+ ((_Offset >= 0x800) && (_Offset <= 0xfff))
+
+#define BRFMOD 0x1
+#define BJAPANMODE 0x2
+#define BCCKTXSC 0x30
+#define BCCKEN 0x1000000
+#define BOFDMEN 0x2000000
+
+#define BOFDMRXADCPHASE 0x10000
+#define BOFDMTXDACPHASE 0x40000
+#define BXATXAGC 0x3f
+
+#define BXBTXAGC 0xf00
+#define BXCTXAGC 0xf000
+#define BXDTXAGC 0xf0000
+
+#define BPASTART 0xf0000000
+#define BTRSTART 0x00f00000
+#define BRFSTART 0x0000f000
+#define BBBSTART 0x000000f0
+#define BBBCCKSTART 0x0000000f
+#define BPAEND 0xf
+#define BTREND 0x0f000000
+#define BRFEND 0x000f0000
+#define BCCAMASK 0x000000f0
+#define BR2RCCAMASK 0x00000f00
+#define BHSSI_R2TDELAY 0xf8000000
+#define BHSSI_T2RDELAY 0xf80000
+#define BCONTXHSSI 0x400
+#define BIGFROMCCK 0x200
+#define BAGCADDRESS 0x3f
+#define BRXHPTX 0x7000
+#define BRXHP2RX 0x38000
+#define BRXHPCCKINI 0xc0000
+#define BAGCTXCODE 0xc00000
+#define BAGCRXCODE 0x300000
+
+#define B3WIREDATALENGTH 0x800
+#define B3WIREADDREAALENGTH 0x400
+
+#define B3WIRERFPOWERDOWN 0x1
+#define B5GPAPEPOLARITY 0x40000000
+#define B2GPAPEPOLARITY 0x80000000
+#define BRFSW_TXDEFAULTANT 0x3
+#define BRFSW_TXOPTIONANT 0x30
+#define BRFSW_RXDEFAULTANT 0x300
+#define BRFSW_RXOPTIONANT 0x3000
+#define BRFSI_3WIREDATA 0x1
+#define BRFSI_3WIRECLOCK 0x2
+#define BRFSI_3WIRELOAD 0x4
+#define BRFSI_3WIRERW 0x8
+#define BRFSI_3WIRE 0xf
+
+#define BRFSI_RFENV 0x10
+
+#define BRFSI_TRSW 0x20
+#define BRFSI_TRSWB 0x40
+#define BRFSI_ANTSW 0x100
+#define BRFSI_ANTSWB 0x200
+#define BRFSI_PAPE 0x400
+#define BRFSI_PAPE5G 0x800
+#define BBANDSELECT 0x1
+#define BHTSIG2_GI 0x80
+#define BHTSIG2_SMOOTHING 0x01
+#define BHTSIG2_SOUNDING 0x02
+#define BHTSIG2_AGGREATON 0x08
+#define BHTSIG2_STBC 0x30
+#define BHTSIG2_ADVCODING 0x40
+#define BHTSIG2_NUMOFHTLTF 0x300
+#define BHTSIG2_CRC8 0x3fc
+#define BHTSIG1_MCS 0x7f
+#define BHTSIG1_BANDWIDTH 0x80
+#define BHTSIG1_HTLENGTH 0xffff
+#define BLSIG_RATE 0xf
+#define BLSIG_RESERVED 0x10
+#define BLSIG_LENGTH 0x1fffe
+#define BLSIG_PARITY 0x20
+#define BCCKRXPHASE 0x4
+
+#define BLSSIREADADDRESS 0x7f800000
+#define BLSSIREADEDGE 0x80000000
+
+#define BLSSIREADBACKDATA 0xfffff
+
+#define BLSSIREADOKFLAG 0x1000
+#define BCCKSAMPLERATE 0x8
+#define BREGULATOR0STANDBY 0x1
+#define BREGULATORPLLSTANDBY 0x2
+#define BREGULATOR1STANDBY 0x4
+#define BPLLPOWERUP 0x8
+#define BDPLLPOWERUP 0x10
+#define BDA10POWERUP 0x20
+#define BAD7POWERUP 0x200
+#define BDA6POWERUP 0x2000
+#define BXTALPOWERUP 0x4000
+#define B40MDCLKPOWERUP 0x8000
+#define BDA6DEBUGMODE 0x20000
+#define BDA6SWING 0x380000
+
+#define BADCLKPHASE 0x4000000
+#define B80MCLKDELAY 0x18000000
+#define BAFEWATCHDOGENABLE 0x20000000
+
+#define BXTALCAP01 0xc0000000
+#define BXTALCAP23 0x3
+#define BXTALCAP92X 0x0f000000
+#define BXTALCAP 0x0f000000
+
+#define BINTDIFCLKENABLE 0x400
+#define BEXTSIGCLKENABLE 0x800
+#define BBANDGAP_MBIAS_POWERUP 0x10000
+#define BAD11SH_GAIN 0xc0000
+#define BAD11NPUT_RANGE 0x700000
+#define BAD110P_CURRENT 0x3800000
+#define BLPATH_LOOPBACK 0x4000000
+#define BQPATH_LOOPBACK 0x8000000
+#define BAFE_LOOPBACK 0x10000000
+#define BDA10_SWING 0x7e0
+#define BDA10_REVERSE 0x800
+#define BDA_CLK_SOURCE 0x1000
+#define BDA7INPUT_RANGE 0x6000
+#define BDA7_GAIN 0x38000
+#define BDA7OUTPUT_CM_MODE 0x40000
+#define BDA7INPUT_CM_MODE 0x380000
+#define BDA7CURRENT 0xc00000
+#define BREGULATOR_ADJUST 0x7000000
+#define BAD11POWERUP_ATTX 0x1
+#define BDA10PS_ATTX 0x10
+#define BAD11POWERUP_ATRX 0x100
+#define BDA10PS_ATRX 0x1000
+#define BCCKRX_AGC_FORMAT 0x200
+#define BPSDFFT_SAMPLE_POINT 0xc000
+#define BPSD_AVERAGE_NUM 0x3000
+#define BIQPATH_CONTROL 0xc00
+#define BPSD_FREQ 0x3ff
+#define BPSD_ANTENNA_PATH 0x30
+#define BPSD_IQ_SWITCH 0x40
+#define BPSD_RX_TRIGGER 0x400000
+#define BPSD_TX_TRIGGER 0x80000000
+#define BPSD_SINE_TONE_SCALE 0x7f000000
+#define BPSD_REPORT 0xffff
+
+#define BOFDM_TXSC 0x30000000
+#define BCCK_TXON 0x1
+#define BOFDM_TXON 0x2
+#define BDEBUG_PAGE 0xfff
+#define BDEBUG_ITEM 0xff
+#define BANTL 0x10
+#define BANT_NONHT 0x100
+#define BANT_HT1 0x1000
+#define BANT_HT2 0x10000
+#define BANT_HT1S1 0x100000
+#define BANT_NONHTS1 0x1000000
+
+#define BCCK_BBMODE 0x3
+#define BCCK_TXPOWERSAVING 0x80
+#define BCCK_RXPOWERSAVING 0x40
+
+#define BCCK_SIDEBAND 0x10
+
+#define BCCK_SCRAMBLE 0x8
+#define BCCK_ANTDIVERSITY 0x8000
+#define BCCK_CARRIER_RECOVERY 0x4000
+#define BCCK_TXRATE 0x3000
+#define BCCK_DCCANCEL 0x0800
+#define BCCK_ISICANCEL 0x0400
+#define BCCK_MATCH_FILTER 0x0200
+#define BCCK_EQUALIZER 0x0100
+#define BCCK_PREAMBLE_DETECT 0x800000
+#define BCCK_FAST_FALSECCA 0x400000
+#define BCCK_CH_ESTSTART 0x300000
+#define BCCK_CCA_COUNT 0x080000
+#define BCCK_CS_LIM 0x070000
+#define BCCK_BIST_MODE 0x80000000
+#define BCCK_CCAMASK 0x40000000
+#define BCCK_TX_DAC_PHASE 0x4
+#define BCCK_RX_ADC_PHASE 0x20000000
+#define BCCKR_CP_MODE 0x0100
+#define BCCK_TXDC_OFFSET 0xf0
+#define BCCK_RXDC_OFFSET 0xf
+#define BCCK_CCA_MODE 0xc000
+#define BCCK_FALSECS_LIM 0x3f00
+#define BCCK_CS_RATIO 0xc00000
+#define BCCK_CORGBIT_SEL 0x300000
+#define BCCK_PD_LIM 0x0f0000
+#define BCCK_NEWCCA 0x80000000
+#define BCCK_RXHP_OF_IG 0x8000
+#define BCCK_RXIG 0x7f00
+#define BCCK_LNA_POLARITY 0x800000
+#define BCCK_RX1ST_BAIN 0x7f0000
+#define BCCK_RF_EXTEND 0x20000000
+#define BCCK_RXAGC_SATLEVEL 0x1f000000
+#define BCCK_RXAGC_SATCOUNT 0xe0
+#define bCCKRxRFSettle 0x1f
+#define BCCK_FIXED_RXAGC 0x8000
+#define BCCK_ANTENNA_POLARITY 0x2000
+#define BCCK_TXFILTER_TYPE 0x0c00
+#define BCCK_RXAGC_REPORTTYPE 0x0300
+#define BCCK_RXDAGC_EN 0x80000000
+#define BCCK_RXDAGC_PERIOD 0x20000000
+#define BCCK_RXDAGC_SATLEVEL 0x1f000000
+#define BCCK_TIMING_RECOVERY 0x800000
+#define BCCK_TXC0 0x3f0000
+#define BCCK_TXC1 0x3f000000
+#define BCCK_TXC2 0x3f
+#define BCCK_TXC3 0x3f00
+#define BCCK_TXC4 0x3f0000
+#define BCCK_TXC5 0x3f000000
+#define BCCK_TXC6 0x3f
+#define BCCK_TXC7 0x3f00
+#define BCCK_DEBUGPORT 0xff0000
+#define BCCK_DAC_DEBUG 0x0f000000
+#define BCCK_FALSEALARM_ENABLE 0x8000
+#define BCCK_FALSEALARM_READ 0x4000
+#define BCCK_TRSSI 0x7f
+#define BCCK_RXAGC_REPORT 0xfe
+#define BCCK_RXREPORT_ANTSEL 0x80000000
+#define BCCK_RXREPORT_MFOFF 0x40000000
+#define BCCK_RXREPORT_SQLOSS 0x20000000
+#define BCCK_RXREPORT_PKTLOSS 0x10000000
+#define BCCK_RXREPORT_LOCKEDBIT 0x08000000
+#define BCCK_RXREPORT_RATEERROR 0x04000000
+#define BCCK_RXREPORT_RXRATE 0x03000000
+#define BCCK_RXFA_COUNTER_LOWER 0xff
+#define BCCK_RXFA_COUNTER_UPPER 0xff000000
+#define BCCK_RXHPAGC_START 0xe000
+#define BCCK_RXHPAGC_FINAL 0x1c00
+#define BCCK_RXFALSEALARM_ENABLE 0x8000
+#define BCCK_FACOUNTER_FREEZE 0x4000
+#define BCCK_TXPATH_SEL 0x10000000
+#define BCCK_DEFAULT_RXPATH 0xc000000
+#define BCCK_OPTION_RXPATH 0x3000000
+
+#define BNUM_OFSTF 0x3
+#define BSHIFT_L 0xc0
+#define BGI_TH 0xc
+#define BRXPATH_A 0x1
+#define BRXPATH_B 0x2
+#define BRXPATH_C 0x4
+#define BRXPATH_D 0x8
+#define BTXPATH_A 0x1
+#define BTXPATH_B 0x2
+#define BTXPATH_C 0x4
+#define BTXPATH_D 0x8
+#define BTRSSI_FREQ 0x200
+#define BADC_BACKOFF 0x3000
+#define BDFIR_BACKOFF 0xc000
+#define BTRSSI_LATCH_PHASE 0x10000
+#define BRX_LDC_OFFSET 0xff
+#define BRX_QDC_OFFSET 0xff00
+#define BRX_DFIR_MODE 0x1800000
+#define BRX_DCNF_TYPE 0xe000000
+#define BRXIQIMB_A 0x3ff
+#define BRXIQIMB_B 0xfc00
+#define BRXIQIMB_C 0x3f0000
+#define BRXIQIMB_D 0xffc00000
+#define BDC_DC_NOTCH 0x60000
+#define BRXNB_NOTCH 0x1f000000
+#define BPD_TH 0xf
+#define BPD_TH_OPT2 0xc000
+#define BPWED_TH 0x700
+#define BIFMF_WIN_L 0x800
+#define BPD_OPTION 0x1000
+#define BMF_WIN_L 0xe000
+#define BBW_SEARCH_L 0x30000
+#define BWIN_ENH_L 0xc0000
+#define BBW_TH 0x700000
+#define BED_TH2 0x3800000
+#define BBW_OPTION 0x4000000
+#define BRADIO_TH 0x18000000
+#define BWINDOW_L 0xe0000000
+#define BSBD_OPTION 0x1
+#define BFRAME_TH 0x1c
+#define BFS_OPTION 0x60
+#define BDC_SLOPE_CHECK 0x80
+#define BFGUARD_COUNTER_DC_L 0xe00
+#define BFRAME_WEIGHT_SHORT 0x7000
+#define BSUB_TUNE 0xe00000
+#define BFRAME_DC_LENGTH 0xe000000
+#define BSBD_START_OFFSET 0x30000000
+#define BFRAME_TH_2 0x7
+#define BFRAME_GI2_TH 0x38
+#define BGI2_SYNC_EN 0x40
+#define BSARCH_SHORT_EARLY 0x300
+#define BSARCH_SHORT_LATE 0xc00
+#define BSARCH_GI2_LATE 0x70000
+#define BCFOANTSUM 0x1
+#define BCFOACC 0x2
+#define BCFOSTARTOFFSET 0xc
+#define BCFOLOOPBACK 0x70
+#define BCFOSUMWEIGHT 0x80
+#define BDAGCENABLE 0x10000
+#define BTXIQIMB_A 0x3ff
+#define BTXIQIMB_b 0xfc00
+#define BTXIQIMB_C 0x3f0000
+#define BTXIQIMB_D 0xffc00000
+#define BTXIDCOFFSET 0xff
+#define BTXIQDCOFFSET 0xff00
+#define BTXDFIRMODE 0x10000
+#define BTXPESUDO_NOISEON 0x4000000
+#define BTXPESUDO_NOISE_A 0xff
+#define BTXPESUDO_NOISE_B 0xff00
+#define BTXPESUDO_NOISE_C 0xff0000
+#define BTXPESUDO_NOISE_D 0xff000000
+#define BCCA_DROPOPTION 0x20000
+#define BCCA_DROPTHRES 0xfff00000
+#define BEDCCA_H 0xf
+#define BEDCCA_L 0xf0
+#define BLAMBDA_ED 0x300
+#define BRX_INITIALGAIN 0x7f
+#define BRX_ANTDIV_EN 0x80
+#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00
+#define BRX_HIGHPOWER_FLOW 0x8000
+#define BRX_AGC_FREEZE_THRES 0xc0000
+#define BRX_FREEZESTEP_AGC1 0x300000
+#define BRX_FREEZESTEP_AGC2 0xc00000
+#define BRX_FREEZESTEP_AGC3 0x3000000
+#define BRX_FREEZESTEP_AGC0 0xc000000
+#define BRXRSSI_CMP_EN 0x10000000
+#define BRXQUICK_AGCEN 0x20000000
+#define BRXAGC_FREEZE_THRES_MODE 0x40000000
+#define BRX_OVERFLOW_CHECKTYPE 0x80000000
+#define BRX_AGCSHIFT 0x7f
+#define BTRSW_TRI_ONLY 0x80
+#define BPOWER_THRES 0x300
+#define BRXAGC_EN 0x1
+#define BRXAGC_TOGETHER_EN 0x2
+#define BRXAGC_MIN 0x4
+#define BRXHP_INI 0x7
+#define BRXHP_TRLNA 0x70
+#define BRXHP_RSSI 0x700
+#define BRXHP_BBP1 0x7000
+#define BRXHP_BBP2 0x70000
+#define BRXHP_BBP3 0x700000
+#define BRSSI_H 0x7f0000
+#define BRSSI_GEN 0x7f000000
+#define BRXSETTLE_TRSW 0x7
+#define BRXSETTLE_LNA 0x38
+#define BRXSETTLE_RSSI 0x1c0
+#define BRXSETTLE_BBP 0xe00
+#define BRXSETTLE_RXHP 0x7000
+#define BRXSETTLE_ANTSW_RSSI 0x38000
+#define BRXSETTLE_ANTSW 0xc0000
+#define BRXPROCESS_TIME_DAGC 0x300000
+#define BRXSETTLE_HSSI 0x400000
+#define BRXPROCESS_TIME_BBPPW 0x800000
+#define BRXANTENNA_POWER_SHIFT 0x3000000
+#define BRSSI_TABLE_SELECT 0xc000000
+#define BRXHP_FINAL 0x7000000
+#define BRXHPSETTLE_BBP 0x7
+#define BRXHTSETTLE_HSSI 0x8
+#define BRXHTSETTLE_RXHP 0x70
+#define BRXHTSETTLE_BBPPW 0x80
+#define BRXHTSETTLE_IDLE 0x300
+#define BRXHTSETTLE_RESERVED 0x1c00
+#define BRXHT_RXHP_EN 0x8000
+#define BRXAGC_FREEZE_THRES 0x30000
+#define BRXAGC_TOGETHEREN 0x40000
+#define BRXHTAGC_MIN 0x80000
+#define BRXHTAGC_EN 0x100000
+#define BRXHTDAGC_EN 0x200000
+#define BRXHT_RXHP_BBP 0x1c00000
+#define BRXHT_RXHP_FINAL 0xe0000000
+#define BRXPW_RADIO_TH 0x3
+#define BRXPW_RADIO_EN 0x4
+#define BRXMF_HOLD 0x3800
+#define BRXPD_DELAY_TH1 0x38
+#define BRXPD_DELAY_TH2 0x1c0
+#define BRXPD_DC_COUNT_MAX 0x600
+#define BRXPD_DELAY_TH 0x8000
+#define BRXPROCESS_DELAY 0xf0000
+#define BRXSEARCHRANGE_GI2_EARLY 0x700000
+#define BRXFRAME_FUARD_COUNTER_L 0x3800000
+#define BRXSGI_GUARD_L 0xc000000
+#define BRXSGI_SEARCH_L 0x30000000
+#define BRXSGI_TH 0xc0000000
+#define BDFSCNT0 0xff
+#define BDFSCNT1 0xff00
+#define BDFSFLAG 0xf0000
+#define BMF_WEIGHT_SUM 0x300000
+#define BMINIDX_TH 0x7f000000
+#define BDAFORMAT 0x40000
+#define BTXCH_EMU_ENABLE 0x01000000
+#define BTRSW_ISOLATION_A 0x7f
+#define BTRSW_ISOLATION_B 0x7f00
+#define BTRSW_ISOLATION_C 0x7f0000
+#define BTRSW_ISOLATION_D 0x7f000000
+#define BEXT_LNA_GAIN 0x7c00
+
+#define BSTBC_EN 0x4
+#define BANTENNA_MAPPING 0x10
+#define BNSS 0x20
+#define BCFO_ANTSUM_ID 0x200
+#define BPHY_COUNTER_RESET 0x8000000
+#define BCFO_REPORT_GET 0x4000000
+#define BOFDM_CONTINUE_TX 0x10000000
+#define BOFDM_SINGLE_CARRIER 0x20000000
+#define BOFDM_SINGLE_TONE 0x40000000
+#define BHT_DETECT 0x100
+#define BCFOEN 0x10000
+#define BCFOVALUE 0xfff00000
+#define BSIGTONE_RE 0x3f
+#define BSIGTONE_IM 0x7f00
+#define BCOUNTER_CCA 0xffff
+#define BCOUNTER_PARITYFAIL 0xffff0000
+#define BCOUNTER_RATEILLEGAL 0xffff
+#define BCOUNTER_CRC8FAIL 0xffff0000
+#define BCOUNTER_MCSNOSUPPORT 0xffff
+#define BCOUNTER_FASTSYNC 0xffff
+#define BSHORTCFO 0xfff
+#define BSHORTCFOT_LENGTH 12
+#define BSHORTCFOF_LENGTH 11
+#define BLONGCFO 0x7ff
+#define BLONGCFOT_LENGTH 11
+#define BLONGCFOF_LENGTH 11
+#define BTAILCFO 0x1fff
+#define BTAILCFOT_LENGTH 13
+#define BTAILCFOF_LENGTH 12
+#define BNOISE_EN_PWDB 0xffff
+#define BCC_POWER_DB 0xffff0000
+#define BMOISE_PWDB 0xffff
+#define BPOWERMEAST_LENGTH 10
+#define BPOWERMEASF_LENGTH 3
+#define BRX_HT_BW 0x1
+#define BRXSC 0x6
+#define BRX_HT 0x8
+#define BNB_INTF_DET_ON 0x1
+#define BINTF_WIN_LEN_CFG 0x30
+#define BNB_INTF_TH_CFG 0x1c0
+#define BRFGAIN 0x3f
+#define BTABLESEL 0x40
+#define BTRSW 0x80
+#define BRXSNR_A 0xff
+#define BRXSNR_B 0xff00
+#define BRXSNR_C 0xff0000
+#define BRXSNR_D 0xff000000
+#define BSNR_EVMT_LENGTH 8
+#define BSNR_EVMF_LENGTH 1
+#define BCSI1ST 0xff
+#define BCSI2ND 0xff00
+#define BRXEVM1ST 0xff0000
+#define BRXEVM2ND 0xff000000
+#define BSIGEVM 0xff
+#define BPWDB 0xff00
+#define BSGIEN 0x10000
+
+#define BSFACTOR_QMA1 0xf
+#define BSFACTOR_QMA2 0xf0
+#define BSFACTOR_QMA3 0xf00
+#define BSFACTOR_QMA4 0xf000
+#define BSFACTOR_QMA5 0xf0000
+#define BSFACTOR_QMA6 0xf0000
+#define BSFACTOR_QMA7 0xf00000
+#define BSFACTOR_QMA8 0xf000000
+#define BSFACTOR_QMA9 0xf0000000
+#define BCSI_SCHEME 0x100000
+
+#define BNOISE_LVL_TOP_SET 0x3
+#define BCHSMOOTH 0x4
+#define BCHSMOOTH_CFG1 0x38
+#define BCHSMOOTH_CFG2 0x1c0
+#define BCHSMOOTH_CFG3 0xe00
+#define BCHSMOOTH_CFG4 0x7000
+#define BMRCMODE 0x800000
+#define BTHEVMCFG 0x7000000
+
+#define BLOOP_FIT_TYPE 0x1
+#define BUPD_CFO 0x40
+#define BUPD_CFO_OFFDATA 0x80
+#define BADV_UPD_CFO 0x100
+#define BADV_TIME_CTRL 0x800
+#define BUPD_CLKO 0x1000
+#define BFC 0x6000
+#define BTRACKING_MODE 0x8000
+#define BPHCMP_ENABLE 0x10000
+#define BUPD_CLKO_LTF 0x20000
+#define BCOM_CH_CFO 0x40000
+#define BCSI_ESTI_MODE 0x80000
+#define BADV_UPD_EQZ 0x100000
+#define BUCHCFG 0x7000000
+#define BUPDEQZ 0x8000000
+
+#define BRX_PESUDO_NOISE_ON 0x20000000
+#define BRX_PESUDO_NOISE_A 0xff
+#define BRX_PESUDO_NOISE_B 0xff00
+#define BRX_PESUDO_NOISE_C 0xff0000
+#define BRX_PESUDO_NOISE_D 0xff000000
+#define BRX_PESUDO_NOISESTATE_A 0xffff
+#define BRX_PESUDO_NOISESTATE_B 0xffff0000
+#define BRX_PESUDO_NOISESTATE_C 0xffff
+#define BRX_PESUDO_NOISESTATE_D 0xffff0000
+
+#define BZEBRA1_HSSIENABLE 0x8
+#define BZEBRA1_TRXCONTROL 0xc00
+#define BZEBRA1_TRXGAINSETTING 0x07f
+#define BZEBRA1_RXCOUNTER 0xc00
+#define BZEBRA1_TXCHANGEPUMP 0x38
+#define BZEBRA1_RXCHANGEPUMP 0x7
+#define BZEBRA1_CHANNEL_NUM 0xf80
+#define BZEBRA1_TXLPFBW 0x400
+#define BZEBRA1_RXLPFBW 0x600
+
+#define BRTL8256REG_MODE_CTRL1 0x100
+#define BRTL8256REG_MODE_CTRL0 0x40
+#define BRTL8256REG_TXLPFBW 0x18
+#define BRTL8256REG_RXLPFBW 0x600
+
+#define BRTL8258_TXLPFBW 0xc
+#define BRTL8258_RXLPFBW 0xc00
+#define BRTL8258_RSSILPFBW 0xc0
+
+#define BBYTE0 0x1
+#define BBYTE1 0x2
+#define BBYTE2 0x4
+#define BBYTE3 0x8
+#define BWORD0 0x3
+#define BWORD1 0xc
+#define BWORD 0xf
+
+#define MASKBYTE0 0xff
+#define MASKBYTE1 0xff00
+#define MASKBYTE2 0xff0000
+#define MASKBYTE3 0xff000000
+#define MASKHWORD 0xffff0000
+#define MASKLWORD 0x0000ffff
+#define MASKDWORD 0xffffffff
+#define MASK12BITS 0xfff
+#define MASKH4BITS 0xf0000000
+#define MASKOFDM_D 0xffc00000
+#define MASKCCK 0x3f3f3f3f
+
+#define MASK4BITS 0x0f
+#define MASK20BITS 0xfffff
+#define RFREG_OFFSET_MASK 0xfffff
+
+#define BENABLE 0x1
+#define BDISABLE 0x0
+
+#define LEFT_ANTENNA 0x0
+#define RIGHT_ANTENNA 0x1
+
+#define TCHECK_TXSTATUS 500
+#define TUPDATE_RXCOUNTER 100
+
+#endif
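The field macros added above (for example the _LLT_* helpers) are meant to be composed into single register words by the rest of the driver. As a minimal illustration only, and not part of the patch itself, the sketch below shows how an LLT write command could be assembled and then polled for completion; REG_LLT_INIT and the rtl_read_dword()/rtl_write_dword() accessors are assumed to be provided elsewhere in the driver, and the function name is hypothetical.

static bool example_llt_write(struct rtl_priv *rtlpriv, u32 address, u32 data)
{
	int count = 0;
	/* Pack the address, data and opcode fields into one command word. */
	u32 value = _LLT_INIT_ADDR(address) |
		    _LLT_INIT_DATA(data) |
		    _LLT_OP(_LLT_WRITE_ACCESS);

	rtl_write_dword(rtlpriv, REG_LLT_INIT, value);

	/* Wait for the hardware to clear the opcode field back to idle. */
	do {
		value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
		if (_LLT_OP_VALUE(value) == _LLT_NO_ACTIVE)
			return true;
	} while (++count < POLLING_LLT_THRESHOLD);

	return false;
}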
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
new file mode 100644
index 000000000000..669b1168dbec
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
@@ -0,0 +1,523 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+
+static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
+
+void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ switch (bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+ 0xfffff3ff) | 0x0400);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[0]);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+ 0xfffff3ff));
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[0]);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", bandwidth));
+ break;
+ }
+}
+
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 tx_agc[2] = {0, 0}, tmpval;
+ bool turbo_scanoff = false;
+ u8 idx1, idx2;
+ u8 *ptr;
+
+ if (rtlefuse->eeprom_regulatory != 0)
+ turbo_scanoff = true;
+
+ if (mac->act_scanning == true) {
+ tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+ tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+
+ if (turbo_scanoff) {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ }
+ } else {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+
+ if (rtlefuse->eeprom_regulatory == 0) {
+ tmpval =
+ (rtlphy->mcs_txpwrlevel_origoffset[0][6]) +
+ (rtlphy->mcs_txpwrlevel_origoffset[0][7] <<
+ 8);
+ tx_agc[RF90_PATH_A] += tmpval;
+
+ tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) +
+ (rtlphy->mcs_txpwrlevel_origoffset[0][15] <<
+ 24);
+ tx_agc[RF90_PATH_B] += tmpval;
+ }
+ }
+
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ ptr = (u8 *) (&(tx_agc[idx1]));
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if (*ptr > RF6052_MAX_TX_PWR)
+ *ptr = RF6052_MAX_TX_PWR;
+ ptr++;
+ }
+ }
+
+ tmpval = tx_agc[RF90_PATH_A] & 0xff;
+ rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_A_CCK1_MCS32));
+
+ tmpval = tx_agc[RF90_PATH_A] >> 8;
+
+ if (mac->mode == WIRELESS_MODE_B)
+ tmpval = tmpval & 0xff00ffff;
+
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_A_CCK2_11));
+
+ tmpval = tx_agc[RF90_PATH_B] >> 24;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_A_CCK2_11));
+
+ tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK1_55_MCS32));
+}
+
+static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel,
+ u32 *ofdmbase, u32 *mcsbase)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 powerBase0, powerBase1;
+ u8 legacy_pwrdiff, ht20_pwrdiff;
+ u8 i, powerlevel[2];
+
+ for (i = 0; i < 2; i++) {
+ powerlevel[i] = ppowerlevel[i];
+ legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
+ powerBase0 = powerlevel[i] + legacy_pwrdiff;
+
+ powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
+ (powerBase0 << 8) | powerBase0;
+ *(ofdmbase + i) = powerBase0;
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ (" [OFDM power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
+ ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
+ powerlevel[i] += ht20_pwrdiff;
+ }
+ powerBase1 = powerlevel[i];
+ powerBase1 = (powerBase1 << 24) |
+ (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
+
+ *(mcsbase + i) = powerBase1;
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ (" [MCS power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
+ }
+}
+
+static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+ u8 channel, u8 index,
+ u32 *powerBase0,
+ u32 *powerBase1,
+ u32 *p_outwriteval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 i, chnlgroup, pwr_diff_limit[4];
+ u32 writeVal, customer_limit, rf;
+
+ for (rf = 0; rf < 2; rf++) {
+ switch (rtlefuse->eeprom_regulatory) {
+ case 0:
+ chnlgroup = 0;
+
+ writeVal =
+ rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index +
+ (rf ? 8 : 0)]
+ + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("RTK better performance, "
+ "writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ case 1:
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ writeVal = ((index < 2) ? powerBase0[rf] :
+ powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Realtek regulatory, 40MHz, "
+ "writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ } else {
+ if (rtlphy->pwrgroup_cnt == 1)
+ chnlgroup = 0;
+ if (rtlphy->pwrgroup_cnt >= 3) {
+ if (channel <= 3)
+ chnlgroup = 0;
+ else if (channel >= 4 && channel <= 9)
+ chnlgroup = 1;
+ else if (channel > 9)
+ chnlgroup = 2;
+ if (rtlphy->pwrgroup_cnt == 4)
+ chnlgroup++;
+ }
+
+ writeVal =
+ rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+ [index + (rf ? 8 : 0)] + ((index < 2) ?
+ powerBase0[rf] :
+ powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Realtek regulatory, 20MHz, "
+ "writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ }
+ break;
+ case 2:
+ writeVal =
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Better regulatory, "
+ "writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ case 3:
+ chnlgroup = 0;
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("customer's limit, 40MHz "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht40[rf][channel -
+ 1]));
+ } else {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("customer's limit, 20MHz "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht20[rf][channel -
+ 1]));
+ }
+ for (i = 0; i < 4; i++) {
+ pwr_diff_limit[i] =
+ (u8) ((rtlphy->mcs_txpwrlevel_origoffset
+ [chnlgroup][index +
+ (rf ? 8 : 0)] & (0x7f << (i * 8))) >>
+ (i * 8));
+
+ if (rtlphy->current_chan_bw ==
+ HT_CHANNEL_WIDTH_20_40) {
+ if (pwr_diff_limit[i] >
+ rtlefuse->
+ pwrgroup_ht40[rf][channel - 1])
+ pwr_diff_limit[i] =
+ rtlefuse->pwrgroup_ht40[rf]
+ [channel - 1];
+ } else {
+ if (pwr_diff_limit[i] >
+ rtlefuse->
+ pwrgroup_ht20[rf][channel - 1])
+ pwr_diff_limit[i] =
+ rtlefuse->pwrgroup_ht20[rf]
+ [channel - 1];
+ }
+ }
+
+ customer_limit = (pwr_diff_limit[3] << 24) |
+ (pwr_diff_limit[2] << 16) |
+ (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Customer's limit rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), customer_limit));
+
+ writeVal = customer_limit +
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Customer, writeVal rf(%c)= 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ default:
+ chnlgroup = 0;
+ writeVal =
+ rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+ [index + (rf ? 8 : 0)]
+ + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("RTK better performance, writeVal "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ }
+
+ if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+ writeVal = writeVal - 0x06060606;
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_BT2)
+ writeVal = writeVal - 0x0c0c0c0c;
+ *(p_outwriteval + rf) = writeVal;
+ }
+}
+
+static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
+ u8 index, u32 *pValue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ u16 regoffset_a[6] = {
+ RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
+ RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+ RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+ };
+ u16 regoffset_b[6] = {
+ RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
+ RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+ RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+ };
+ u8 i, rf, pwr_val[4];
+ u32 writeVal;
+ u16 regoffset;
+
+ for (rf = 0; rf < 2; rf++) {
+ writeVal = pValue[rf];
+ for (i = 0; i < 4; i++) {
+ pwr_val[i] = (u8) ((writeVal & (0x7f <<
+ (i * 8))) >> (i * 8));
+
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = RF6052_MAX_TX_PWR;
+ }
+ writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+ (pwr_val[1] << 8) | pwr_val[0];
+
+ if (rf == 0)
+ regoffset = regoffset_a[index];
+ else
+ regoffset = regoffset_b[index];
+ rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Set 0x%x = %08x\n", regoffset, writeVal));
+
+ if (((get_rf_type(rtlphy) == RF_2T2R) &&
+ (regoffset == RTXAGC_A_MCS15_MCS12 ||
+ regoffset == RTXAGC_B_MCS15_MCS12)) ||
+ ((get_rf_type(rtlphy) != RF_2T2R) &&
+ (regoffset == RTXAGC_A_MCS07_MCS04 ||
+ regoffset == RTXAGC_B_MCS07_MCS04))) {
+
+ writeVal = pwr_val[3];
+ if (regoffset == RTXAGC_A_MCS15_MCS12 ||
+ regoffset == RTXAGC_A_MCS07_MCS04)
+ regoffset = 0xc90;
+ if (regoffset == RTXAGC_B_MCS15_MCS12 ||
+ regoffset == RTXAGC_B_MCS07_MCS04)
+ regoffset = 0xc98;
+
+ for (i = 0; i < 3; i++) {
+ writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
+ rtl_write_byte(rtlpriv, (u32) (regoffset + i),
+ (u8) writeVal);
+ }
+ }
+ }
+}
+
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel)
+{
+ u32 writeVal[2], powerBase0[2], powerBase1[2];
+ u8 index;
+
+ rtl92c_phy_get_power_base(hw, ppowerlevel,
+ channel, &powerBase0[0], &powerBase1[0]);
+
+ for (index = 0; index < 6; index++) {
+ _rtl92c_get_txpower_writeval_by_regulatory(hw,
+ channel, index,
+ &powerBase0[0],
+ &powerBase1[0],
+ &writeVal[0]);
+
+ _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]);
+ }
+}
+
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (rtlphy->rf_type == RF_1T1R)
+ rtlphy->num_total_rfpath = 1;
+ else
+ rtlphy->num_total_rfpath = 2;
+
+ return _rtl92c_phy_rf6052_config_parafile(hw);
+}
+
+static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 u4_regvalue;
+ u8 rfpath;
+ bool rtstatus;
+ struct bb_reg_def *pphyreg;
+
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+
+ pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV << 16);
+ break;
+ }
+
+ rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+ udelay(1);
+
+ rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+ udelay(1);
+
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
+ B3WIREADDREAALENGTH, 0x0);
+ udelay(1);
+
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+ udelay(1);
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw,
+ (enum radio_path) rfpath);
+ break;
+ case RF90_PATH_B:
+ rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw,
+ (enum radio_path) rfpath);
+ break;
+ case RF90_PATH_C:
+ break;
+ case RF90_PATH_D:
+ break;
+ }
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ rtl_set_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV, u4_regvalue);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ rtl_set_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV << 16, u4_regvalue);
+ break;
+ }
+
+ if (rtstatus != true) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio[%d] Fail!!", rfpath));
+ return false;
+ }
+
+ }
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("<---\n"));
+ return rtstatus;
+}
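For orientation, the two exported TX-power entry points above are normally fed with per-path power indices derived from efuse data. The fragment below is only a sketch of such a caller: the array contents are placeholder values, not figures taken from this patch, and in practice they would come from the tables in struct rtl_efuse.

static void example_set_txpower(struct ieee80211_hw *hw, u8 channel)
{
	/*
	 * One TX AGC index per RF path; the routines above clamp each
	 * value to RF6052_MAX_TX_PWR before programming the registers.
	 */
	u8 cck_power[2] = { 0x20, 0x20 };
	u8 ht_power[2] = { 0x20, 0x20 };

	rtl92ce_phy_rf6052_set_cck_txpower(hw, &cck_power[0]);
	rtl92ce_phy_rf6052_set_ofdm_txpower(hw, &ht_power[0], channel);
}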
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
new file mode 100644
index 000000000000..3aa520c1c171
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
@@ -0,0 +1,47 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_RF_H__
+#define __RTL92C_RF_H__
+
+#define RF6052_MAX_TX_PWR 0x3F
+#define RF6052_MAX_REG 0x3F
+#define RF6052_MAX_PATH 2
+
+extern void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
+ u8 bandwidth);
+extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+
+#endif
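As a usage note (an illustrative sketch, not part of the patch), the RF6052 configuration helper declared above is expected to run once during hardware bring-up, before any bandwidth or power programming:

static int example_rf_bringup(struct ieee80211_hw *hw)
{
	/* Load both RF paths from the header-file register tables. */
	if (!rtl92ce_phy_rf6052_config(hw))
		return -EIO;

	/* Start in 20 MHz mode; the same helper switches to 40 MHz later. */
	rtl92c_phy_rf6052_set_bandwidth(hw, HT_CHANNEL_WIDTH_20);
	return 0;
}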
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
new file mode 100644
index 000000000000..b1cc4d44f534
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -0,0 +1,292 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include <linux/vmalloc.h>
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "hw.h"
+#include "rf.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+
+int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_flag = 0;
+ rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.thermalvalue = 0;
+ rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
+
+ rtlpci->receive_config = (RCR_APP_FCS |
+ RCR_AMF |
+ RCR_ADF |
+ RCR_APP_MIC |
+ RCR_APP_ICV |
+ RCR_AICV |
+ RCR_ACRC32 |
+ RCR_AB |
+ RCR_AM |
+ RCR_APM |
+ RCR_APP_PHYST_RXFF | RCR_HTC_LOC_CTRL | 0);
+
+ rtlpci->irq_mask[0] =
+ (u32) (IMR_ROK |
+ IMR_VODOK |
+ IMR_VIDOK |
+ IMR_BEDOK |
+ IMR_BKDOK |
+ IMR_MGNTDOK |
+ IMR_HIGHDOK | IMR_BDOK | IMR_RDU | IMR_RXFOVW | 0);
+
+ rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD | 0);
+
+ rtlpriv->rtlhal.pfirmware = (u8 *) vmalloc(0x4000);
+ if (!rtlpriv->rtlhal.pfirmware) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't alloc buffer for fw.\n"));
+ return 1;
+ }
+
+ return 0;
+}
+
+void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->rtlhal.pfirmware) {
+ vfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+ }
+}
+
+static struct rtl_hal_ops rtl8192ce_hal_ops = {
+ .init_sw_vars = rtl92c_init_sw_vars,
+ .deinit_sw_vars = rtl92c_deinit_sw_vars,
+ .read_eeprom_info = rtl92ce_read_eeprom_info,
+ .interrupt_recognized = rtl92ce_interrupt_recognized,
+ .hw_init = rtl92ce_hw_init,
+ .hw_disable = rtl92ce_card_disable,
+ .enable_interrupt = rtl92ce_enable_interrupt,
+ .disable_interrupt = rtl92ce_disable_interrupt,
+ .set_network_type = rtl92ce_set_network_type,
+ .set_qos = rtl92ce_set_qos,
+ .set_bcn_reg = rtl92ce_set_beacon_related_registers,
+ .set_bcn_intv = rtl92ce_set_beacon_interval,
+ .update_interrupt_mask = rtl92ce_update_interrupt_mask,
+ .get_hw_reg = rtl92ce_get_hw_reg,
+ .set_hw_reg = rtl92ce_set_hw_reg,
+ .update_rate_table = rtl92ce_update_hal_rate_table,
+ .update_rate_mask = rtl92ce_update_hal_rate_mask,
+ .fill_tx_desc = rtl92ce_tx_fill_desc,
+ .fill_tx_cmddesc = rtl92ce_tx_fill_cmddesc,
+ .query_rx_desc = rtl92ce_rx_query_desc,
+ .set_channel_access = rtl92ce_update_channel_access_setting,
+ .radio_onoff_checking = rtl92ce_gpio_radio_on_off_checking,
+ .set_bw_mode = rtl92c_phy_set_bw_mode,
+ .switch_channel = rtl92c_phy_sw_chnl,
+ .dm_watchdog = rtl92c_dm_watchdog,
+ .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+ .set_rf_power_state = rtl92ce_phy_set_rf_power_state,
+ .led_control = rtl92ce_led_control,
+ .set_desc = rtl92ce_set_desc,
+ .get_desc = rtl92ce_get_desc,
+ .tx_polling = rtl92ce_tx_polling,
+ .enable_hw_sec = rtl92ce_enable_hw_security_config,
+ .set_key = rtl92ce_set_key,
+ .init_sw_leds = rtl92ce_init_sw_leds,
+ .deinit_sw_leds = rtl92ce_deinit_sw_leds,
+ .get_bbreg = rtl92c_phy_query_bb_reg,
+ .set_bbreg = rtl92c_phy_set_bb_reg,
+ .get_rfreg = rtl92ce_phy_query_rf_reg,
+ .set_rfreg = rtl92ce_phy_set_rf_reg,
+ .cmd_send_packet = _rtl92c_cmd_send_packet,
+ .phy_rf6052_config = rtl92ce_phy_rf6052_config,
+ .phy_rf6052_set_cck_txpower = rtl92ce_phy_rf6052_set_cck_txpower,
+ .phy_rf6052_set_ofdm_txpower = rtl92ce_phy_rf6052_set_ofdm_txpower,
+ .config_bb_with_headerfile = _rtl92ce_phy_config_bb_with_headerfile,
+ .config_bb_with_pgheaderfile = _rtl92ce_phy_config_bb_with_pgheaderfile,
+ .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate,
+ .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback,
+ .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower,
+};
+
+static struct rtl_mod_params rtl92ce_mod_params = {
+ .sw_crypto = 0,
+};
+
+static struct rtl_hal_cfg rtl92ce_hal_cfg = {
+ .name = "rtl92c_pci",
+ .fw_name = "rtlwifi/rtl8192cfw.bin",
+ .ops = &rtl8192ce_hal_ops,
+ .mod_params = &rtl92ce_mod_params,
+
+ .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+ .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+ .maps[SYS_CLK] = REG_SYS_CLKR,
+ .maps[MAC_RCR_AM] = AM,
+ .maps[MAC_RCR_AB] = AB,
+ .maps[MAC_RCR_ACRC32] = ACRC32,
+ .maps[MAC_RCR_ACF] = ACF,
+ .maps[MAC_RCR_AAP] = AAP,
+
+ .maps[EFUSE_TEST] = REG_EFUSE_TEST,
+ .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_CLK] = 0,
+ .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+ .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+ .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
+ .maps[EFUSE_ANA8M] = EFUSE_ANA8M,
+ .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+
+ .maps[RWCAM] = REG_CAMCMD,
+ .maps[WCAMI] = REG_CAMWRITE,
+ .maps[RCAMO] = REG_CAMREAD,
+ .maps[CAMDBG] = REG_CAMDBG,
+ .maps[SECR] = REG_SECCFG,
+ .maps[SEC_CAM_NONE] = CAM_NONE,
+ .maps[SEC_CAM_WEP40] = CAM_WEP40,
+ .maps[SEC_CAM_TKIP] = CAM_TKIP,
+ .maps[SEC_CAM_AES] = CAM_AES,
+ .maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+ .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+ .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+ .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+ .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+ .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+ .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+ .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
+ .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+ .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+ .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+ .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+ .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+ .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+ .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+ .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
+ .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
+
+ .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+ .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+ .maps[RTL_IMR_BcnInt] = IMR_BCNINT,
+ .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+ .maps[RTL_IMR_RDU] = IMR_RDU,
+ .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+ .maps[RTL_IMR_BDOK] = IMR_BDOK,
+ .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+ .maps[RTL_IMR_TBDER] = IMR_TBDER,
+ .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+ .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+ .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+ .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+ .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+ .maps[RTL_IMR_VODOK] = IMR_VODOK,
+ .maps[RTL_IMR_ROK] = IMR_ROK,
+ .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
+
+ .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
+};
+
+static struct pci_device_id rtl92ce_pci_ids[] __devinitdata = {
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)},
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)},
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)},
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8176, rtl92ce_hal_cfg)},
+ {},
+};
+
+MODULE_DEVICE_TABLE(pci, rtl92ce_pci_ids);
+
+MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n PCI wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8192cfw.bin");
+
+module_param_named(swenc, rtl92ce_mod_params.sw_crypto, bool, 0444);
+MODULE_PARM_DESC(swenc, "Set to 1 to use software crypto (default 0 [hardware crypto])\n");
+
+static struct pci_driver rtl92ce_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rtl92ce_pci_ids,
+ .probe = rtl_pci_probe,
+ .remove = rtl_pci_disconnect,
+
+#ifdef CONFIG_PM
+ .suspend = rtl_pci_suspend,
+ .resume = rtl_pci_resume,
+#endif
+
+};
+
+static int __init rtl92ce_module_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&rtl92ce_driver);
+ if (ret)
+ RT_ASSERT(false, (": No device found\n"));
+
+ return ret;
+}
+
+static void __exit rtl92ce_module_exit(void)
+{
+ pci_unregister_driver(&rtl92ce_driver);
+}
+
+module_init(rtl92ce_module_init);
+module_exit(rtl92ce_module_exit);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
new file mode 100644
index 000000000000..36e657668c1e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
@@ -0,0 +1,51 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CE_SW_H__
+#define __RTL92CE_SW_H__
+
+int rtl92c_init_sw_vars(struct ieee80211_hw *hw);
+void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw);
+void rtl92c_init_var_map(struct ieee80211_hw *hw);
+bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
+ struct sk_buff *skb);
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype);
+bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype);
+void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
+u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr, u32 bitmask);
+void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.c b/drivers/net/wireless/rtlwifi/rtl8192ce/table.c
new file mode 100644
index 000000000000..ba938b91aa6f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/table.c
@@ -0,0 +1,1224 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on 2010/ 5/18, 1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "table.h"
+
+
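+/*
+ * Baseband (PHY) register initialization table for the two-TX-chain (2T)
+ * configuration, stored as flat (register address, 32-bit value) pairs.
+ */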
+u32 RTL8192CEPHY_REG_2TARRAY[PHY_REG_2TARRAY_LENGTH] = {
+ 0x024, 0x0011800f,
+ 0x028, 0x00ffdb83,
+ 0x800, 0x80040002,
+ 0x804, 0x00000003,
+ 0x808, 0x0000fc00,
+ 0x80c, 0x0000000a,
+ 0x810, 0x10005388,
+ 0x814, 0x020c3d10,
+ 0x818, 0x02200385,
+ 0x81c, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390004,
+ 0x828, 0x01000100,
+ 0x82c, 0x00390004,
+ 0x830, 0x27272727,
+ 0x834, 0x27272727,
+ 0x838, 0x27272727,
+ 0x83c, 0x27272727,
+ 0x840, 0x00010000,
+ 0x844, 0x00010000,
+ 0x848, 0x27272727,
+ 0x84c, 0x27272727,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569a569a,
+ 0x85c, 0x0c1b25a4,
+ 0x860, 0x66e60230,
+ 0x864, 0x061f0130,
+ 0x868, 0x27272727,
+ 0x86c, 0x2b2b2b27,
+ 0x870, 0x07000700,
+ 0x874, 0x22184000,
+ 0x878, 0x08080808,
+ 0x87c, 0x00000000,
+ 0x880, 0xc0083070,
+ 0x884, 0x000004d5,
+ 0x888, 0x00000000,
+ 0x88c, 0xcc0000c0,
+ 0x890, 0x00000800,
+ 0x894, 0xfffffffe,
+ 0x898, 0x40302010,
+ 0x89c, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90c, 0x81121313,
+ 0xa00, 0x00d047c8,
+ 0xa04, 0x80ff000c,
+ 0xa08, 0x8c838300,
+ 0xa0c, 0x2e68120f,
+ 0xa10, 0x9500bb78,
+ 0xa14, 0x11144028,
+ 0xa18, 0x00881117,
+ 0xa1c, 0x89140f00,
+ 0xa20, 0x1a1b0000,
+ 0xa24, 0x090e1317,
+ 0xa28, 0x00000204,
+ 0xa2c, 0x00d30000,
+ 0xa70, 0x101fbf00,
+ 0xa74, 0x00000007,
+ 0xc00, 0x48071d40,
+ 0xc04, 0x03a05633,
+ 0xc08, 0x000000e4,
+ 0xc0c, 0x6c6c6c6c,
+ 0xc10, 0x08800000,
+ 0xc14, 0x40000100,
+ 0xc18, 0x08800000,
+ 0xc1c, 0x40000100,
+ 0xc20, 0x00000000,
+ 0xc24, 0x00000000,
+ 0xc28, 0x00000000,
+ 0xc2c, 0x00000000,
+ 0xc30, 0x69e9ac44,
+ 0xc34, 0x469652cf,
+ 0xc38, 0x49795994,
+ 0xc3c, 0x0a97971c,
+ 0xc40, 0x1f7c403f,
+ 0xc44, 0x000100b7,
+ 0xc48, 0xec020107,
+ 0xc4c, 0x007f037f,
+ 0xc50, 0x69543420,
+ 0xc54, 0x43bc0094,
+ 0xc58, 0x69543420,
+ 0xc5c, 0x433c0094,
+ 0xc60, 0x00000000,
+ 0xc64, 0x5116848b,
+ 0xc68, 0x47c00bff,
+ 0xc6c, 0x00000036,
+ 0xc70, 0x2c7f000d,
+ 0xc74, 0x018610db,
+ 0xc78, 0x0000001f,
+ 0xc7c, 0x00b91612,
+ 0xc80, 0x40000100,
+ 0xc84, 0x20f60000,
+ 0xc88, 0x40000100,
+ 0xc8c, 0x20200000,
+ 0xc90, 0x00121820,
+ 0xc94, 0x00000000,
+ 0xc98, 0x00121820,
+ 0xc9c, 0x00007f7f,
+ 0xca0, 0x00000000,
+ 0xca4, 0x00000080,
+ 0xca8, 0x00000000,
+ 0xcac, 0x00000000,
+ 0xcb0, 0x00000000,
+ 0xcb4, 0x00000000,
+ 0xcb8, 0x00000000,
+ 0xcbc, 0x28000000,
+ 0xcc0, 0x00000000,
+ 0xcc4, 0x00000000,
+ 0xcc8, 0x00000000,
+ 0xccc, 0x00000000,
+ 0xcd0, 0x00000000,
+ 0xcd4, 0x00000000,
+ 0xcd8, 0x64b22427,
+ 0xcdc, 0x00766932,
+ 0xce0, 0x00222222,
+ 0xce4, 0x00000000,
+ 0xce8, 0x37644302,
+ 0xcec, 0x2f97d40c,
+ 0xd00, 0x00080740,
+ 0xd04, 0x00020403,
+ 0xd08, 0x0000907f,
+ 0xd0c, 0x20010201,
+ 0xd10, 0xa0633333,
+ 0xd14, 0x3333bc43,
+ 0xd18, 0x7a8f5b6b,
+ 0xd2c, 0xcc979975,
+ 0xd30, 0x00000000,
+ 0xd34, 0x80608000,
+ 0xd38, 0x00000000,
+ 0xd3c, 0x00027293,
+ 0xd40, 0x00000000,
+ 0xd44, 0x00000000,
+ 0xd48, 0x00000000,
+ 0xd4c, 0x00000000,
+ 0xd50, 0x6437140a,
+ 0xd54, 0x00000000,
+ 0xd58, 0x00000000,
+ 0xd5c, 0x30032064,
+ 0xd60, 0x4653de68,
+ 0xd64, 0x04518a3c,
+ 0xd68, 0x00002101,
+ 0xd6c, 0x2a201c16,
+ 0xd70, 0x1812362e,
+ 0xd74, 0x322c2220,
+ 0xd78, 0x000e3c24,
+ 0xe00, 0x2a2a2a2a,
+ 0xe04, 0x2a2a2a2a,
+ 0xe08, 0x03902a2a,
+ 0xe10, 0x2a2a2a2a,
+ 0xe14, 0x2a2a2a2a,
+ 0xe18, 0x2a2a2a2a,
+ 0xe1c, 0x2a2a2a2a,
+ 0xe28, 0x00000000,
+ 0xe30, 0x1000dc1f,
+ 0xe34, 0x10008c1f,
+ 0xe38, 0x02140102,
+ 0xe3c, 0x681604c2,
+ 0xe40, 0x01007c00,
+ 0xe44, 0x01004800,
+ 0xe48, 0xfb000000,
+ 0xe4c, 0x000028d1,
+ 0xe50, 0x1000dc1f,
+ 0xe54, 0x10008c1f,
+ 0xe58, 0x02140102,
+ 0xe5c, 0x28160d05,
+ 0xe60, 0x00000010,
+ 0xe68, 0x001b25a4,
+ 0xe6c, 0x63db25a4,
+ 0xe70, 0x63db25a4,
+ 0xe74, 0x0c1b25a4,
+ 0xe78, 0x0c1b25a4,
+ 0xe7c, 0x0c1b25a4,
+ 0xe80, 0x0c1b25a4,
+ 0xe84, 0x63db25a4,
+ 0xe88, 0x0c1b25a4,
+ 0xe8c, 0x63db25a4,
+ 0xed0, 0x63db25a4,
+ 0xed4, 0x63db25a4,
+ 0xed8, 0x63db25a4,
+ 0xedc, 0x001b25a4,
+ 0xee0, 0x001b25a4,
+ 0xeec, 0x6fdb25a4,
+ 0xf14, 0x00000003,
+ 0xf4c, 0x00000000,
+ 0xf00, 0x00000300,
+};
+
+u32 RTL8192CEPHY_REG_1TARRAY[PHY_REG_1TARRAY_LENGTH] = {
+ 0x024, 0x0011800f,
+ 0x028, 0x00ffdb83,
+ 0x800, 0x80040000,
+ 0x804, 0x00000001,
+ 0x808, 0x0000fc00,
+ 0x80c, 0x0000000a,
+ 0x810, 0x10005388,
+ 0x814, 0x020c3d10,
+ 0x818, 0x02200385,
+ 0x81c, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390004,
+ 0x828, 0x00000000,
+ 0x82c, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83c, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84c, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569a569a,
+ 0x85c, 0x001b25a4,
+ 0x860, 0x66e60230,
+ 0x864, 0x061f0130,
+ 0x868, 0x00000000,
+ 0x86c, 0x32323200,
+ 0x870, 0x07000700,
+ 0x874, 0x22004000,
+ 0x878, 0x00000808,
+ 0x87c, 0x00000000,
+ 0x880, 0xc0083070,
+ 0x884, 0x000004d5,
+ 0x888, 0x00000000,
+ 0x88c, 0xccc000c0,
+ 0x890, 0x00000800,
+ 0x894, 0xfffffffe,
+ 0x898, 0x40302010,
+ 0x89c, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90c, 0x81121111,
+ 0xa00, 0x00d047c8,
+ 0xa04, 0x80ff000c,
+ 0xa08, 0x8c838300,
+ 0xa0c, 0x2e68120f,
+ 0xa10, 0x9500bb78,
+ 0xa14, 0x11144028,
+ 0xa18, 0x00881117,
+ 0xa1c, 0x89140f00,
+ 0xa20, 0x1a1b0000,
+ 0xa24, 0x090e1317,
+ 0xa28, 0x00000204,
+ 0xa2c, 0x00d30000,
+ 0xa70, 0x101fbf00,
+ 0xa74, 0x00000007,
+ 0xc00, 0x48071d40,
+ 0xc04, 0x03a05611,
+ 0xc08, 0x000000e4,
+ 0xc0c, 0x6c6c6c6c,
+ 0xc10, 0x08800000,
+ 0xc14, 0x40000100,
+ 0xc18, 0x08800000,
+ 0xc1c, 0x40000100,
+ 0xc20, 0x00000000,
+ 0xc24, 0x00000000,
+ 0xc28, 0x00000000,
+ 0xc2c, 0x00000000,
+ 0xc30, 0x69e9ac44,
+ 0xc34, 0x469652cf,
+ 0xc38, 0x49795994,
+ 0xc3c, 0x0a97971c,
+ 0xc40, 0x1f7c403f,
+ 0xc44, 0x000100b7,
+ 0xc48, 0xec020107,
+ 0xc4c, 0x007f037f,
+ 0xc50, 0x69543420,
+ 0xc54, 0x43bc0094,
+ 0xc58, 0x69543420,
+ 0xc5c, 0x433c0094,
+ 0xc60, 0x00000000,
+ 0xc64, 0x5116848b,
+ 0xc68, 0x47c00bff,
+ 0xc6c, 0x00000036,
+ 0xc70, 0x2c7f000d,
+ 0xc74, 0x018610db,
+ 0xc78, 0x0000001f,
+ 0xc7c, 0x00b91612,
+ 0xc80, 0x40000100,
+ 0xc84, 0x20f60000,
+ 0xc88, 0x40000100,
+ 0xc8c, 0x20200000,
+ 0xc90, 0x00121820,
+ 0xc94, 0x00000000,
+ 0xc98, 0x00121820,
+ 0xc9c, 0x00007f7f,
+ 0xca0, 0x00000000,
+ 0xca4, 0x00000080,
+ 0xca8, 0x00000000,
+ 0xcac, 0x00000000,
+ 0xcb0, 0x00000000,
+ 0xcb4, 0x00000000,
+ 0xcb8, 0x00000000,
+ 0xcbc, 0x28000000,
+ 0xcc0, 0x00000000,
+ 0xcc4, 0x00000000,
+ 0xcc8, 0x00000000,
+ 0xccc, 0x00000000,
+ 0xcd0, 0x00000000,
+ 0xcd4, 0x00000000,
+ 0xcd8, 0x64b22427,
+ 0xcdc, 0x00766932,
+ 0xce0, 0x00222222,
+ 0xce4, 0x00000000,
+ 0xce8, 0x37644302,
+ 0xcec, 0x2f97d40c,
+ 0xd00, 0x00080740,
+ 0xd04, 0x00020401,
+ 0xd08, 0x0000907f,
+ 0xd0c, 0x20010201,
+ 0xd10, 0xa0633333,
+ 0xd14, 0x3333bc43,
+ 0xd18, 0x7a8f5b6b,
+ 0xd2c, 0xcc979975,
+ 0xd30, 0x00000000,
+ 0xd34, 0x80608000,
+ 0xd38, 0x00000000,
+ 0xd3c, 0x00027293,
+ 0xd40, 0x00000000,
+ 0xd44, 0x00000000,
+ 0xd48, 0x00000000,
+ 0xd4c, 0x00000000,
+ 0xd50, 0x6437140a,
+ 0xd54, 0x00000000,
+ 0xd58, 0x00000000,
+ 0xd5c, 0x30032064,
+ 0xd60, 0x4653de68,
+ 0xd64, 0x04518a3c,
+ 0xd68, 0x00002101,
+ 0xd6c, 0x2a201c16,
+ 0xd70, 0x1812362e,
+ 0xd74, 0x322c2220,
+ 0xd78, 0x000e3c24,
+ 0xe00, 0x2a2a2a2a,
+ 0xe04, 0x2a2a2a2a,
+ 0xe08, 0x03902a2a,
+ 0xe10, 0x2a2a2a2a,
+ 0xe14, 0x2a2a2a2a,
+ 0xe18, 0x2a2a2a2a,
+ 0xe1c, 0x2a2a2a2a,
+ 0xe28, 0x00000000,
+ 0xe30, 0x1000dc1f,
+ 0xe34, 0x10008c1f,
+ 0xe38, 0x02140102,
+ 0xe3c, 0x681604c2,
+ 0xe40, 0x01007c00,
+ 0xe44, 0x01004800,
+ 0xe48, 0xfb000000,
+ 0xe4c, 0x000028d1,
+ 0xe50, 0x1000dc1f,
+ 0xe54, 0x10008c1f,
+ 0xe58, 0x02140102,
+ 0xe5c, 0x28160d05,
+ 0xe60, 0x00000010,
+ 0xe68, 0x001b25a4,
+ 0xe6c, 0x631b25a0,
+ 0xe70, 0x631b25a0,
+ 0xe74, 0x081b25a0,
+ 0xe78, 0x081b25a0,
+ 0xe7c, 0x081b25a0,
+ 0xe80, 0x081b25a0,
+ 0xe84, 0x631b25a0,
+ 0xe88, 0x081b25a0,
+ 0xe8c, 0x631b25a0,
+ 0xed0, 0x631b25a0,
+ 0xed4, 0x631b25a0,
+ 0xed8, 0x631b25a0,
+ 0xedc, 0x001b25a0,
+ 0xee0, 0x001b25a0,
+ 0xeec, 0x6b1b25a0,
+ 0xf14, 0x00000003,
+ 0xf4c, 0x00000000,
+ 0xf00, 0x00000300,
+};
+
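+/*
+ * The _PG table differs from the plain register tables above: entries are
+ * (register address, bitmask, value) triples.
+ */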
+u32 RTL8192CEPHY_REG_ARRAY_PG[PHY_REG_ARRAY_PGLENGTH] = {
+ 0xe00, 0xffffffff, 0x0a0c0c0c,
+ 0xe04, 0xffffffff, 0x02040608,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x0a0c0d0e,
+ 0xe14, 0xffffffff, 0x02040608,
+ 0xe18, 0xffffffff, 0x0a0c0d0e,
+ 0xe1c, 0xffffffff, 0x02040608,
+ 0x830, 0xffffffff, 0x0a0c0c0c,
+ 0x834, 0xffffffff, 0x02040608,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x0a0c0d0e,
+ 0x848, 0xffffffff, 0x02040608,
+ 0x84c, 0xffffffff, 0x0a0c0d0e,
+ 0x868, 0xffffffff, 0x02040608,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x06060606,
+ 0xe14, 0xffffffff, 0x00020406,
+ 0xe18, 0xffffffff, 0x06060606,
+ 0xe1c, 0xffffffff, 0x00020406,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x06060606,
+ 0x848, 0xffffffff, 0x00020406,
+ 0x84c, 0xffffffff, 0x06060606,
+ 0x868, 0xffffffff, 0x00020406,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+};
+
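+/*
+ * RF (radio path) initialization tables, again as (register offset, data)
+ * pairs, one table per antenna path and chain configuration.
+ */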
+u32 RTL8192CERADIOA_2TARRAY[RADIOA_2TARRAYLENGTH] = {
+ 0x000, 0x00030159,
+ 0x001, 0x00031284,
+ 0x002, 0x00098000,
+ 0x003, 0x00018c63,
+ 0x004, 0x000210e7,
+ 0x009, 0x0002044f,
+ 0x00a, 0x0001adb0,
+ 0x00b, 0x00054867,
+ 0x00c, 0x0008992e,
+ 0x00d, 0x0000e52c,
+ 0x00e, 0x00039ce7,
+ 0x00f, 0x00000451,
+ 0x019, 0x00000000,
+ 0x01a, 0x00010255,
+ 0x01b, 0x00060a00,
+ 0x01c, 0x000fc378,
+ 0x01d, 0x000a1250,
+ 0x01e, 0x0004445f,
+ 0x01f, 0x00080001,
+ 0x020, 0x0000b614,
+ 0x021, 0x0006c000,
+ 0x022, 0x00000000,
+ 0x023, 0x00001558,
+ 0x024, 0x00000060,
+ 0x025, 0x00000483,
+ 0x026, 0x0004f000,
+ 0x027, 0x000ec7d9,
+ 0x028, 0x000977c0,
+ 0x029, 0x00004783,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00021334,
+ 0x02a, 0x00000000,
+ 0x02b, 0x00000054,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000c,
+ 0x02a, 0x00000002,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000003,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000004,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000005,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000006,
+ 0x02b, 0x00000709,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000007,
+ 0x02b, 0x00000709,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000008,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0004b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000009,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000a,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000b,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000c,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000d,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000e,
+ 0x02b, 0x0000050b,
+ 0x02b, 0x00066666,
+ 0x02c, 0x0000001a,
+ 0x02a, 0x000e0000,
+ 0x010, 0x0004000f,
+ 0x011, 0x000e31fc,
+ 0x010, 0x0006000f,
+ 0x011, 0x000ff9f8,
+ 0x010, 0x0002000f,
+ 0x011, 0x000203f9,
+ 0x010, 0x0003000f,
+ 0x011, 0x000ff500,
+ 0x010, 0x00000000,
+ 0x011, 0x00000000,
+ 0x010, 0x0008000f,
+ 0x011, 0x0003f100,
+ 0x010, 0x0009000f,
+ 0x011, 0x00023100,
+ 0x012, 0x00032000,
+ 0x012, 0x00071000,
+ 0x012, 0x000b0000,
+ 0x012, 0x000fc000,
+ 0x013, 0x000287af,
+ 0x013, 0x000244b7,
+ 0x013, 0x000204ab,
+ 0x013, 0x0001c49f,
+ 0x013, 0x00018493,
+ 0x013, 0x00014297,
+ 0x013, 0x00010295,
+ 0x013, 0x0000c298,
+ 0x013, 0x0000819c,
+ 0x013, 0x000040a8,
+ 0x013, 0x0000001c,
+ 0x014, 0x0001944c,
+ 0x014, 0x00059444,
+ 0x014, 0x0009944c,
+ 0x014, 0x000d9444,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x015, 0x000cf424,
+ 0x016, 0x000e0330,
+ 0x016, 0x000a0330,
+ 0x016, 0x00060330,
+ 0x016, 0x00020330,
+ 0x000, 0x00010159,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00044457,
+ 0x01f, 0x00080000,
+ 0x000, 0x00030159,
+};
+
+u32 RTL8192CE_RADIOB_2TARRAY[RADIOB_2TARRAYLENGTH] = {
+ 0x000, 0x00030159,
+ 0x001, 0x00031284,
+ 0x002, 0x00098000,
+ 0x003, 0x00018c63,
+ 0x004, 0x000210e7,
+ 0x009, 0x0002044f,
+ 0x00a, 0x0001adb0,
+ 0x00b, 0x00054867,
+ 0x00c, 0x0008992e,
+ 0x00d, 0x0000e52c,
+ 0x00e, 0x00039ce7,
+ 0x00f, 0x00000451,
+ 0x012, 0x00032000,
+ 0x012, 0x00071000,
+ 0x012, 0x000b0000,
+ 0x012, 0x000fc000,
+ 0x013, 0x000287af,
+ 0x013, 0x000244b7,
+ 0x013, 0x000204ab,
+ 0x013, 0x0001c49f,
+ 0x013, 0x00018493,
+ 0x013, 0x00014297,
+ 0x013, 0x00010295,
+ 0x013, 0x0000c298,
+ 0x013, 0x0000819c,
+ 0x013, 0x000040a8,
+ 0x013, 0x0000001c,
+ 0x014, 0x0001944c,
+ 0x014, 0x00059444,
+ 0x014, 0x0009944c,
+ 0x014, 0x000d9444,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x015, 0x000cf424,
+ 0x016, 0x000e0330,
+ 0x016, 0x000a0330,
+ 0x016, 0x00060330,
+ 0x016, 0x00020330,
+};
+
+u32 RTL8192CE_RADIOA_1TARRAY[RADIOA_1TARRAYLENGTH] = {
+ 0x000, 0x00030159,
+ 0x001, 0x00031284,
+ 0x002, 0x00098000,
+ 0x003, 0x00018c63,
+ 0x004, 0x000210e7,
+ 0x009, 0x0002044f,
+ 0x00a, 0x0001adb0,
+ 0x00b, 0x00054867,
+ 0x00c, 0x0008992e,
+ 0x00d, 0x0000e52c,
+ 0x00e, 0x00039ce7,
+ 0x00f, 0x00000451,
+ 0x019, 0x00000000,
+ 0x01a, 0x00010255,
+ 0x01b, 0x00060a00,
+ 0x01c, 0x000fc378,
+ 0x01d, 0x000a1250,
+ 0x01e, 0x0004445f,
+ 0x01f, 0x00080001,
+ 0x020, 0x0000b614,
+ 0x021, 0x0006c000,
+ 0x022, 0x00000000,
+ 0x023, 0x00001558,
+ 0x024, 0x00000060,
+ 0x025, 0x00000483,
+ 0x026, 0x0004f000,
+ 0x027, 0x000ec7d9,
+ 0x028, 0x000977c0,
+ 0x029, 0x00004783,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00021334,
+ 0x02a, 0x00000000,
+ 0x02b, 0x00000054,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000c,
+ 0x02a, 0x00000002,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000003,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000004,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000005,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000006,
+ 0x02b, 0x00000709,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000007,
+ 0x02b, 0x00000709,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000008,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0004b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000009,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000a,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000b,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000c,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000d,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000e,
+ 0x02b, 0x0000050b,
+ 0x02b, 0x00066666,
+ 0x02c, 0x0000001a,
+ 0x02a, 0x000e0000,
+ 0x010, 0x0004000f,
+ 0x011, 0x000e31fc,
+ 0x010, 0x0006000f,
+ 0x011, 0x000ff9f8,
+ 0x010, 0x0002000f,
+ 0x011, 0x000203f9,
+ 0x010, 0x0003000f,
+ 0x011, 0x000ff500,
+ 0x010, 0x00000000,
+ 0x011, 0x00000000,
+ 0x010, 0x0008000f,
+ 0x011, 0x0003f100,
+ 0x010, 0x0009000f,
+ 0x011, 0x00023100,
+ 0x012, 0x00032000,
+ 0x012, 0x00071000,
+ 0x012, 0x000b0000,
+ 0x012, 0x000fc000,
+ 0x013, 0x000287af,
+ 0x013, 0x000244b7,
+ 0x013, 0x000204ab,
+ 0x013, 0x0001c49f,
+ 0x013, 0x00018493,
+ 0x013, 0x00014297,
+ 0x013, 0x00010295,
+ 0x013, 0x0000c298,
+ 0x013, 0x0000819c,
+ 0x013, 0x000040a8,
+ 0x013, 0x0000001c,
+ 0x014, 0x0001944c,
+ 0x014, 0x00059444,
+ 0x014, 0x0009944c,
+ 0x014, 0x000d9444,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x015, 0x000cf424,
+ 0x016, 0x000e0330,
+ 0x016, 0x000a0330,
+ 0x016, 0x00060330,
+ 0x016, 0x00020330,
+ 0x000, 0x00010159,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00044457,
+ 0x01f, 0x00080000,
+ 0x000, 0x00030159,
+};
+
+u32 RTL8192CE_RADIOB_1TARRAY[RADIOB_1TARRAYLENGTH] = {
+ 0x0,
+};
+
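+/* MAC register initialization table, as (register address, value) pairs. */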
+u32 RTL8192CEMAC_2T_ARRAY[MAC_2T_ARRAYLENGTH] = {
+ 0x420, 0x00000080,
+ 0x423, 0x00000000,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000006,
+ 0x437, 0x00000007,
+ 0x438, 0x00000000,
+ 0x439, 0x00000000,
+ 0x43a, 0x00000000,
+ 0x43b, 0x00000001,
+ 0x43c, 0x00000004,
+ 0x43d, 0x00000005,
+ 0x43e, 0x00000006,
+ 0x43f, 0x00000007,
+ 0x440, 0x0000005d,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000015,
+ 0x445, 0x000000f0,
+ 0x446, 0x0000000f,
+ 0x447, 0x00000000,
+ 0x458, 0x00000041,
+ 0x459, 0x000000a8,
+ 0x45a, 0x00000072,
+ 0x45b, 0x000000b9,
+ 0x460, 0x00000088,
+ 0x461, 0x00000088,
+ 0x462, 0x00000006,
+ 0x463, 0x00000003,
+ 0x4c8, 0x00000004,
+ 0x4c9, 0x00000008,
+ 0x4cc, 0x00000002,
+ 0x4cd, 0x00000028,
+ 0x4ce, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000a2,
+ 0x502, 0x0000002f,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000a3,
+ 0x506, 0x0000005e,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002b,
+ 0x509, 0x000000a4,
+ 0x50a, 0x0000005e,
+ 0x50b, 0x00000000,
+ 0x50c, 0x0000004f,
+ 0x50d, 0x000000a4,
+ 0x50e, 0x00000000,
+ 0x50f, 0x00000000,
+ 0x512, 0x0000001c,
+ 0x514, 0x0000000a,
+ 0x515, 0x00000010,
+ 0x516, 0x0000000a,
+ 0x517, 0x00000010,
+ 0x51a, 0x00000016,
+ 0x524, 0x0000000f,
+ 0x525, 0x0000004f,
+ 0x546, 0x00000020,
+ 0x547, 0x00000000,
+ 0x559, 0x00000002,
+ 0x55a, 0x00000002,
+ 0x55d, 0x000000ff,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000e,
+ 0x609, 0x0000002a,
+ 0x652, 0x00000020,
+ 0x63c, 0x0000000a,
+ 0x63d, 0x0000000a,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70a, 0x00000065,
+ 0x70b, 0x00000087,
+};
+
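+/*
+ * AGC gain tables: every pair rewrites register 0xc78 with a different
+ * gain/index word.
+ */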
+u32 RTL8192CEAGCTAB_2TARRAY[AGCTAB_2TARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7a060001,
+ 0xc78, 0x79070001,
+ 0xc78, 0x78080001,
+ 0xc78, 0x77090001,
+ 0xc78, 0x760a0001,
+ 0xc78, 0x750b0001,
+ 0xc78, 0x740c0001,
+ 0xc78, 0x730d0001,
+ 0xc78, 0x720e0001,
+ 0xc78, 0x710f0001,
+ 0xc78, 0x70100001,
+ 0xc78, 0x6f110001,
+ 0xc78, 0x6e120001,
+ 0xc78, 0x6d130001,
+ 0xc78, 0x6c140001,
+ 0xc78, 0x6b150001,
+ 0xc78, 0x6a160001,
+ 0xc78, 0x69170001,
+ 0xc78, 0x68180001,
+ 0xc78, 0x67190001,
+ 0xc78, 0x661a0001,
+ 0xc78, 0x651b0001,
+ 0xc78, 0x641c0001,
+ 0xc78, 0x631d0001,
+ 0xc78, 0x621e0001,
+ 0xc78, 0x611f0001,
+ 0xc78, 0x60200001,
+ 0xc78, 0x49210001,
+ 0xc78, 0x48220001,
+ 0xc78, 0x47230001,
+ 0xc78, 0x46240001,
+ 0xc78, 0x45250001,
+ 0xc78, 0x44260001,
+ 0xc78, 0x43270001,
+ 0xc78, 0x42280001,
+ 0xc78, 0x41290001,
+ 0xc78, 0x402a0001,
+ 0xc78, 0x262b0001,
+ 0xc78, 0x252c0001,
+ 0xc78, 0x242d0001,
+ 0xc78, 0x232e0001,
+ 0xc78, 0x222f0001,
+ 0xc78, 0x21300001,
+ 0xc78, 0x20310001,
+ 0xc78, 0x06320001,
+ 0xc78, 0x05330001,
+ 0xc78, 0x04340001,
+ 0xc78, 0x03350001,
+ 0xc78, 0x02360001,
+ 0xc78, 0x01370001,
+ 0xc78, 0x00380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x7b400001,
+ 0xc78, 0x7b410001,
+ 0xc78, 0x7b420001,
+ 0xc78, 0x7b430001,
+ 0xc78, 0x7b440001,
+ 0xc78, 0x7b450001,
+ 0xc78, 0x7a460001,
+ 0xc78, 0x79470001,
+ 0xc78, 0x78480001,
+ 0xc78, 0x77490001,
+ 0xc78, 0x764a0001,
+ 0xc78, 0x754b0001,
+ 0xc78, 0x744c0001,
+ 0xc78, 0x734d0001,
+ 0xc78, 0x724e0001,
+ 0xc78, 0x714f0001,
+ 0xc78, 0x70500001,
+ 0xc78, 0x6f510001,
+ 0xc78, 0x6e520001,
+ 0xc78, 0x6d530001,
+ 0xc78, 0x6c540001,
+ 0xc78, 0x6b550001,
+ 0xc78, 0x6a560001,
+ 0xc78, 0x69570001,
+ 0xc78, 0x68580001,
+ 0xc78, 0x67590001,
+ 0xc78, 0x665a0001,
+ 0xc78, 0x655b0001,
+ 0xc78, 0x645c0001,
+ 0xc78, 0x635d0001,
+ 0xc78, 0x625e0001,
+ 0xc78, 0x615f0001,
+ 0xc78, 0x60600001,
+ 0xc78, 0x49610001,
+ 0xc78, 0x48620001,
+ 0xc78, 0x47630001,
+ 0xc78, 0x46640001,
+ 0xc78, 0x45650001,
+ 0xc78, 0x44660001,
+ 0xc78, 0x43670001,
+ 0xc78, 0x42680001,
+ 0xc78, 0x41690001,
+ 0xc78, 0x406a0001,
+ 0xc78, 0x266b0001,
+ 0xc78, 0x256c0001,
+ 0xc78, 0x246d0001,
+ 0xc78, 0x236e0001,
+ 0xc78, 0x226f0001,
+ 0xc78, 0x21700001,
+ 0xc78, 0x20710001,
+ 0xc78, 0x06720001,
+ 0xc78, 0x05730001,
+ 0xc78, 0x04740001,
+ 0xc78, 0x03750001,
+ 0xc78, 0x02760001,
+ 0xc78, 0x01770001,
+ 0xc78, 0x00780001,
+ 0xc78, 0x00790001,
+ 0xc78, 0x007a0001,
+ 0xc78, 0x007b0001,
+ 0xc78, 0x007c0001,
+ 0xc78, 0x007d0001,
+ 0xc78, 0x007e0001,
+ 0xc78, 0x007f0001,
+ 0xc78, 0x3800001e,
+ 0xc78, 0x3801001e,
+ 0xc78, 0x3802001e,
+ 0xc78, 0x3803001e,
+ 0xc78, 0x3804001e,
+ 0xc78, 0x3805001e,
+ 0xc78, 0x3806001e,
+ 0xc78, 0x3807001e,
+ 0xc78, 0x3808001e,
+ 0xc78, 0x3c09001e,
+ 0xc78, 0x3e0a001e,
+ 0xc78, 0x400b001e,
+ 0xc78, 0x440c001e,
+ 0xc78, 0x480d001e,
+ 0xc78, 0x4c0e001e,
+ 0xc78, 0x500f001e,
+ 0xc78, 0x5210001e,
+ 0xc78, 0x5611001e,
+ 0xc78, 0x5a12001e,
+ 0xc78, 0x5e13001e,
+ 0xc78, 0x6014001e,
+ 0xc78, 0x6015001e,
+ 0xc78, 0x6016001e,
+ 0xc78, 0x6217001e,
+ 0xc78, 0x6218001e,
+ 0xc78, 0x6219001e,
+ 0xc78, 0x621a001e,
+ 0xc78, 0x621b001e,
+ 0xc78, 0x621c001e,
+ 0xc78, 0x621d001e,
+ 0xc78, 0x621e001e,
+ 0xc78, 0x621f001e,
+};
+
+u32 RTL8192CEAGCTAB_1TARRAY[AGCTAB_1TARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7a060001,
+ 0xc78, 0x79070001,
+ 0xc78, 0x78080001,
+ 0xc78, 0x77090001,
+ 0xc78, 0x760a0001,
+ 0xc78, 0x750b0001,
+ 0xc78, 0x740c0001,
+ 0xc78, 0x730d0001,
+ 0xc78, 0x720e0001,
+ 0xc78, 0x710f0001,
+ 0xc78, 0x70100001,
+ 0xc78, 0x6f110001,
+ 0xc78, 0x6e120001,
+ 0xc78, 0x6d130001,
+ 0xc78, 0x6c140001,
+ 0xc78, 0x6b150001,
+ 0xc78, 0x6a160001,
+ 0xc78, 0x69170001,
+ 0xc78, 0x68180001,
+ 0xc78, 0x67190001,
+ 0xc78, 0x661a0001,
+ 0xc78, 0x651b0001,
+ 0xc78, 0x641c0001,
+ 0xc78, 0x631d0001,
+ 0xc78, 0x621e0001,
+ 0xc78, 0x611f0001,
+ 0xc78, 0x60200001,
+ 0xc78, 0x49210001,
+ 0xc78, 0x48220001,
+ 0xc78, 0x47230001,
+ 0xc78, 0x46240001,
+ 0xc78, 0x45250001,
+ 0xc78, 0x44260001,
+ 0xc78, 0x43270001,
+ 0xc78, 0x42280001,
+ 0xc78, 0x41290001,
+ 0xc78, 0x402a0001,
+ 0xc78, 0x262b0001,
+ 0xc78, 0x252c0001,
+ 0xc78, 0x242d0001,
+ 0xc78, 0x232e0001,
+ 0xc78, 0x222f0001,
+ 0xc78, 0x21300001,
+ 0xc78, 0x20310001,
+ 0xc78, 0x06320001,
+ 0xc78, 0x05330001,
+ 0xc78, 0x04340001,
+ 0xc78, 0x03350001,
+ 0xc78, 0x02360001,
+ 0xc78, 0x01370001,
+ 0xc78, 0x00380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x7b400001,
+ 0xc78, 0x7b410001,
+ 0xc78, 0x7b420001,
+ 0xc78, 0x7b430001,
+ 0xc78, 0x7b440001,
+ 0xc78, 0x7b450001,
+ 0xc78, 0x7a460001,
+ 0xc78, 0x79470001,
+ 0xc78, 0x78480001,
+ 0xc78, 0x77490001,
+ 0xc78, 0x764a0001,
+ 0xc78, 0x754b0001,
+ 0xc78, 0x744c0001,
+ 0xc78, 0x734d0001,
+ 0xc78, 0x724e0001,
+ 0xc78, 0x714f0001,
+ 0xc78, 0x70500001,
+ 0xc78, 0x6f510001,
+ 0xc78, 0x6e520001,
+ 0xc78, 0x6d530001,
+ 0xc78, 0x6c540001,
+ 0xc78, 0x6b550001,
+ 0xc78, 0x6a560001,
+ 0xc78, 0x69570001,
+ 0xc78, 0x68580001,
+ 0xc78, 0x67590001,
+ 0xc78, 0x665a0001,
+ 0xc78, 0x655b0001,
+ 0xc78, 0x645c0001,
+ 0xc78, 0x635d0001,
+ 0xc78, 0x625e0001,
+ 0xc78, 0x615f0001,
+ 0xc78, 0x60600001,
+ 0xc78, 0x49610001,
+ 0xc78, 0x48620001,
+ 0xc78, 0x47630001,
+ 0xc78, 0x46640001,
+ 0xc78, 0x45650001,
+ 0xc78, 0x44660001,
+ 0xc78, 0x43670001,
+ 0xc78, 0x42680001,
+ 0xc78, 0x41690001,
+ 0xc78, 0x406a0001,
+ 0xc78, 0x266b0001,
+ 0xc78, 0x256c0001,
+ 0xc78, 0x246d0001,
+ 0xc78, 0x236e0001,
+ 0xc78, 0x226f0001,
+ 0xc78, 0x21700001,
+ 0xc78, 0x20710001,
+ 0xc78, 0x06720001,
+ 0xc78, 0x05730001,
+ 0xc78, 0x04740001,
+ 0xc78, 0x03750001,
+ 0xc78, 0x02760001,
+ 0xc78, 0x01770001,
+ 0xc78, 0x00780001,
+ 0xc78, 0x00790001,
+ 0xc78, 0x007a0001,
+ 0xc78, 0x007b0001,
+ 0xc78, 0x007c0001,
+ 0xc78, 0x007d0001,
+ 0xc78, 0x007e0001,
+ 0xc78, 0x007f0001,
+ 0xc78, 0x3800001e,
+ 0xc78, 0x3801001e,
+ 0xc78, 0x3802001e,
+ 0xc78, 0x3803001e,
+ 0xc78, 0x3804001e,
+ 0xc78, 0x3805001e,
+ 0xc78, 0x3806001e,
+ 0xc78, 0x3807001e,
+ 0xc78, 0x3808001e,
+ 0xc78, 0x3c09001e,
+ 0xc78, 0x3e0a001e,
+ 0xc78, 0x400b001e,
+ 0xc78, 0x440c001e,
+ 0xc78, 0x480d001e,
+ 0xc78, 0x4c0e001e,
+ 0xc78, 0x500f001e,
+ 0xc78, 0x5210001e,
+ 0xc78, 0x5611001e,
+ 0xc78, 0x5a12001e,
+ 0xc78, 0x5e13001e,
+ 0xc78, 0x6014001e,
+ 0xc78, 0x6015001e,
+ 0xc78, 0x6016001e,
+ 0xc78, 0x6217001e,
+ 0xc78, 0x6218001e,
+ 0xc78, 0x6219001e,
+ 0xc78, 0x621a001e,
+ 0xc78, 0x621b001e,
+ 0xc78, 0x621c001e,
+ 0xc78, 0x621d001e,
+ 0xc78, 0x621e001e,
+ 0xc78, 0x621f001e,
+};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.h b/drivers/net/wireless/rtlwifi/rtl8192ce/table.h
new file mode 100644
index 000000000000..3a6e8b6aeee0
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/table.h
@@ -0,0 +1,58 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on 2010/ 5/18, 1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CE_TABLE__H_
+#define __RTL92CE_TABLE__H_
+
+#include <linux/types.h>
+
+#define PHY_REG_2TARRAY_LENGTH 374
+extern u32 RTL8192CEPHY_REG_2TARRAY[PHY_REG_2TARRAY_LENGTH];
+#define PHY_REG_1TARRAY_LENGTH 374
+extern u32 RTL8192CEPHY_REG_1TARRAY[PHY_REG_1TARRAY_LENGTH];
+#define PHY_REG_ARRAY_PGLENGTH 192
+extern u32 RTL8192CEPHY_REG_ARRAY_PG[PHY_REG_ARRAY_PGLENGTH];
+#define RADIOA_2TARRAYLENGTH 282
+extern u32 RTL8192CERADIOA_2TARRAY[RADIOA_2TARRAYLENGTH];
+#define RADIOB_2TARRAYLENGTH 78
+extern u32 RTL8192CE_RADIOB_2TARRAY[RADIOB_2TARRAYLENGTH];
+#define RADIOA_1TARRAYLENGTH 282
+extern u32 RTL8192CE_RADIOA_1TARRAY[RADIOA_1TARRAYLENGTH];
+#define RADIOB_1TARRAYLENGTH 1
+extern u32 RTL8192CE_RADIOB_1TARRAY[RADIOB_1TARRAYLENGTH];
+#define MAC_2T_ARRAYLENGTH 162
+extern u32 RTL8192CEMAC_2T_ARRAY[MAC_2T_ARRAYLENGTH];
+#define AGCTAB_2TARRAYLENGTH 320
+extern u32 RTL8192CEAGCTAB_2TARRAY[AGCTAB_2TARRAYLENGTH];
+#define AGCTAB_1TARRAYLENGTH 320
+extern u32 RTL8192CEAGCTAB_1TARRAY[AGCTAB_1TARRAYLENGTH];
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
new file mode 100644
index 000000000000..aa2b5815600f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -0,0 +1,1066 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "trx.h"
+#include "led.h"
+
+static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(__le16 fc,
+							   unsigned int skb_queue)
+{
+ enum rtl_desc_qsel qsel;
+
+ if (unlikely(ieee80211_is_beacon(fc))) {
+ qsel = QSLT_BEACON;
+ return qsel;
+ }
+
+ if (ieee80211_is_mgmt(fc)) {
+ qsel = QSLT_MGNT;
+ return qsel;
+ }
+
+ switch (skb_queue) {
+ case VO_QUEUE:
+ qsel = QSLT_VO;
+ break;
+ case VI_QUEUE:
+ qsel = QSLT_VI;
+ break;
+ case BE_QUEUE:
+ qsel = QSLT_BE;
+ break;
+ case BK_QUEUE:
+ qsel = QSLT_BK;
+ break;
+ default:
+ qsel = QSLT_BE;
+ RT_ASSERT(false, ("BE queue, skb_queue:%d,"
+ " set qsel = 0x%X\n", skb_queue, QSLT_BE));
+ break;
+ }
+ return qsel;
+}
+
+static int _rtl92ce_rate_mapping(bool isht, u8 desc_rate, bool first_ampdu)
+{
+ int rate_idx;
+
+ if (first_ampdu) {
+		if (!isht) {
+ switch (desc_rate) {
+ case DESC92C_RATE1M:
+ rate_idx = 0;
+ break;
+ case DESC92C_RATE2M:
+ rate_idx = 1;
+ break;
+ case DESC92C_RATE5_5M:
+ rate_idx = 2;
+ break;
+ case DESC92C_RATE11M:
+ rate_idx = 3;
+ break;
+ case DESC92C_RATE6M:
+ rate_idx = 4;
+ break;
+ case DESC92C_RATE9M:
+ rate_idx = 5;
+ break;
+ case DESC92C_RATE12M:
+ rate_idx = 6;
+ break;
+ case DESC92C_RATE18M:
+ rate_idx = 7;
+ break;
+ case DESC92C_RATE24M:
+ rate_idx = 8;
+ break;
+ case DESC92C_RATE36M:
+ rate_idx = 9;
+ break;
+ case DESC92C_RATE48M:
+ rate_idx = 10;
+ break;
+ case DESC92C_RATE54M:
+ rate_idx = 11;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ } else {
+ rate_idx = 11;
+ }
+
+ return rate_idx;
+ }
+
+ switch (desc_rate) {
+ case DESC92C_RATE1M:
+ rate_idx = 0;
+ break;
+ case DESC92C_RATE2M:
+ rate_idx = 1;
+ break;
+ case DESC92C_RATE5_5M:
+ rate_idx = 2;
+ break;
+ case DESC92C_RATE11M:
+ rate_idx = 3;
+ break;
+ case DESC92C_RATE6M:
+ rate_idx = 4;
+ break;
+ case DESC92C_RATE9M:
+ rate_idx = 5;
+ break;
+ case DESC92C_RATE12M:
+ rate_idx = 6;
+ break;
+ case DESC92C_RATE18M:
+ rate_idx = 7;
+ break;
+ case DESC92C_RATE24M:
+ rate_idx = 8;
+ break;
+ case DESC92C_RATE36M:
+ rate_idx = 9;
+ break;
+ case DESC92C_RATE48M:
+ rate_idx = 10;
+ break;
+ case DESC92C_RATE54M:
+ rate_idx = 11;
+ break;
+ default:
+ rate_idx = 11;
+ break;
+ }
+ return rate_idx;
+}
+
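+/*
+ * Map a signed antenna power reading in dBm to a 0..100 percentage:
+ * readings at or below -100 dBm (or an implausible >= 20 dBm) give 0,
+ * 0..19 dBm gives 100, and anything in between gives 100 + antpower.
+ */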
+static u8 _rtl92c_query_rxpwrpercentage(char antpower)
+{
+ if ((antpower <= -100) || (antpower >= 20))
+ return 0;
+ else if (antpower >= 0)
+ return 100;
+ else
+ return 100 + antpower;
+}
+
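+/*
+ * Convert an EVM report (0 to -33 dB) into a percentage: each dB counts
+ * for 3 percent, and a result of 99 is rounded up to 100.
+ */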
+static u8 _rtl92c_evm_db_to_percentage(char value)
+{
+ char ret_val;
+ ret_val = value;
+
+ if (ret_val >= 0)
+ ret_val = 0;
+
+ if (ret_val <= -33)
+ ret_val = -33;
+
+ ret_val = 0 - ret_val;
+ ret_val *= 3;
+
+ if (ret_val == 99)
+ ret_val = 100;
+
+ return ret_val;
+}
+
+static long _rtl92ce_translate_todbm(struct ieee80211_hw *hw,
+ u8 signal_strength_index)
+{
+ long signal_power;
+
+ signal_power = (long)((signal_strength_index + 1) >> 1);
+ signal_power -= 95;
+ return signal_power;
+}
+
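+/*
+ * Piecewise-linear rescaling of the 0..100 signal percentage into the
+ * value reported to the stack; stronger signal bands are compressed more.
+ */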
+static long _rtl92ce_signal_scale_mapping(struct ieee80211_hw *hw,
+ long currsig)
+{
+ long retsig;
+
+ if (currsig >= 61 && currsig <= 100)
+ retsig = 90 + ((currsig - 60) / 4);
+ else if (currsig >= 41 && currsig <= 60)
+ retsig = 78 + ((currsig - 40) / 2);
+ else if (currsig >= 31 && currsig <= 40)
+ retsig = 66 + (currsig - 30);
+ else if (currsig >= 21 && currsig <= 30)
+ retsig = 54 + (currsig - 20);
+ else if (currsig >= 5 && currsig <= 20)
+ retsig = 42 + (((currsig - 5) * 2) / 3);
+ else if (currsig == 4)
+ retsig = 36;
+ else if (currsig == 3)
+ retsig = 27;
+ else if (currsig == 2)
+ retsig = 18;
+ else if (currsig == 1)
+ retsig = 9;
+ else
+ retsig = currsig;
+
+ return retsig;
+}
+
+static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats,
+ struct rx_desc_92c *pdesc,
+ struct rx_fwinfo_92c *p_drvinfo,
+ bool packet_match_bssid,
+ bool packet_toself,
+ bool packet_beacon)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct phy_sts_cck_8192s_t *cck_buf;
+ s8 rx_pwr_all, rx_pwr[4];
+ u8 evm, pwdb_all, rf_rx_num = 0;
+ u8 i, max_spatial_stream;
+ u32 rssi, total_rssi = 0;
+ bool is_cck_rate;
+
+ is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
+ pstats->packet_matchbssid = packet_match_bssid;
+ pstats->packet_toself = packet_toself;
+ pstats->is_cck = is_cck_rate;
+ pstats->packet_beacon = packet_beacon;
+ pstats->rx_mimo_signalquality[0] = -1;
+ pstats->rx_mimo_signalquality[1] = -1;
+
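+	/*
+	 * CCK frames report power through the CCK AGC report: a 2-bit gain
+	 * index selects a base level from which the fine AGC value is
+	 * subtracted.  OFDM/HT frames use the per-path gain_trsw fields.
+	 */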
+ if (is_cck_rate) {
+ u8 report, cck_highpwr;
+ cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
+
+ cck_highpwr = (u8) rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER2,
+ BIT(9));
+ if (!cck_highpwr) {
+ u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+ report = cck_buf->cck_agc_rpt & 0xc0;
+ report = report >> 6;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
+ break;
+ }
+ } else {
+ u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+ report = p_drvinfo->cfosho[0] & 0x60;
+ report = report >> 5;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ }
+ }
+
+ pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pstats->rx_pwdb_all = pwdb_all;
+ pstats->recvsignalpower = rx_pwr_all;
+
+ if (packet_match_bssid) {
+ u8 sq;
+ if (pstats->rx_pwdb_all > 40)
+ sq = 100;
+ else {
+ sq = cck_buf->sq_rpt;
+ if (sq > 64)
+ sq = 0;
+ else if (sq < 20)
+ sq = 100;
+ else
+ sq = ((64 - sq) * 100) / 44;
+ }
+
+ pstats->signalquality = sq;
+ pstats->rx_mimo_signalquality[0] = sq;
+ pstats->rx_mimo_signalquality[1] = -1;
+ }
+ } else {
+ rtlpriv->dm.rfpath_rxenable[0] =
+ rtlpriv->dm.rfpath_rxenable[1] = true;
+ for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
+ if (rtlpriv->dm.rfpath_rxenable[i])
+ rf_rx_num++;
+
+ rx_pwr[i] =
+ ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
+ rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
+ total_rssi += rssi;
+ rtlpriv->stats.rx_snr_db[i] =
+ (long)(p_drvinfo->rxsnr[i] / 2);
+
+ if (packet_match_bssid)
+ pstats->rx_mimo_signalstrength[i] = (u8) rssi;
+ }
+
+ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
+ pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pstats->rx_pwdb_all = pwdb_all;
+ pstats->rxpower = rx_pwr_all;
+ pstats->recvsignalpower = rx_pwr_all;
+
+ if (pdesc->rxht && pdesc->rxmcs >= DESC92C_RATEMCS8 &&
+ pdesc->rxmcs <= DESC92C_RATEMCS15)
+ max_spatial_stream = 2;
+ else
+ max_spatial_stream = 1;
+
+ for (i = 0; i < max_spatial_stream; i++) {
+ evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+
+ if (packet_match_bssid) {
+ if (i == 0)
+ pstats->signalquality =
+ (u8) (evm & 0xff);
+ pstats->rx_mimo_signalquality[i] =
+ (u8) (evm & 0xff);
+ }
+ }
+ }
+
+ if (is_cck_rate)
+ pstats->signalstrength =
+ (u8) (_rtl92ce_signal_scale_mapping(hw, pwdb_all));
+ else if (rf_rx_num != 0)
+ pstats->signalstrength =
+ (u8) (_rtl92ce_signal_scale_mapping
+ (hw, total_rssi /= rf_rx_num));
+}
+
+static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 rfpath;
+ u32 last_rssi, tmpval;
+
+ if (pstats->packet_toself || pstats->packet_beacon) {
+ rtlpriv->stats.rssi_calculate_cnt++;
+
+ if (rtlpriv->stats.ui_rssi.total_num++ >=
+ PHY_RSSI_SLID_WIN_MAX) {
+ rtlpriv->stats.ui_rssi.total_num =
+ PHY_RSSI_SLID_WIN_MAX;
+ last_rssi =
+ rtlpriv->stats.ui_rssi.elements[rtlpriv->
+ stats.ui_rssi.index];
+ rtlpriv->stats.ui_rssi.total_val -= last_rssi;
+ }
+
+ rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
+ rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
+ index++] =
+ pstats->signalstrength;
+
+ if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
+ rtlpriv->stats.ui_rssi.index = 0;
+
+ tmpval = rtlpriv->stats.ui_rssi.total_val /
+ rtlpriv->stats.ui_rssi.total_num;
+ rtlpriv->stats.signal_strength =
+ _rtl92ce_translate_todbm(hw, (u8) tmpval);
+ pstats->rssi = rtlpriv->stats.signal_strength;
+ }
+
+ if (!pstats->is_cck && pstats->packet_toself) {
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+
+ if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
+ continue;
+
+ if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ pstats->rx_mimo_signalstrength[rfpath];
+
+ }
+
+ if (pstats->rx_mimo_signalstrength[rfpath] >
+ rtlpriv->stats.rx_rssi_percentage[rfpath]) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.
+ rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ rtlpriv->stats.rx_rssi_percentage[rfpath] +
+ 1;
+ } else {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.
+ rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+ }
+
+ }
+ }
+}
+
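+/*
+ * Maintain a running receive power estimate: each new sample is blended
+ * in with a weight of 1/6 and nudged slightly toward the direction of
+ * change.
+ */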
+static void _rtl92ce_update_rxsignalstatistics(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int weighting = 0;
+
+ if (rtlpriv->stats.recv_signal_power == 0)
+ rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
+
+ if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
+ weighting = 5;
+
+ else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
+ weighting = (-5);
+
+ rtlpriv->stats.recv_signal_power =
+ (rtlpriv->stats.recv_signal_power * 5 +
+ pstats->recvsignalpower + weighting) / 6;
+}
+
+static void _rtl92ce_process_pwdb(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long undecorated_smoothed_pwdb;
+
+	if (mac->opmode == NL80211_IFTYPE_ADHOC)
+		return;
+
+	undecorated_smoothed_pwdb = rtlpriv->dm.undecorated_smoothed_pwdb;
+
+ if (pstats->packet_toself || pstats->packet_beacon) {
+ if (undecorated_smoothed_pwdb < 0)
+ undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
+
+ if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
+ undecorated_smoothed_pwdb =
+ (((undecorated_smoothed_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+
+ undecorated_smoothed_pwdb = undecorated_smoothed_pwdb
+ + 1;
+ } else {
+ undecorated_smoothed_pwdb =
+ (((undecorated_smoothed_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ }
+
+ rtlpriv->dm.undecorated_smoothed_pwdb =
+ undecorated_smoothed_pwdb;
+ _rtl92ce_update_rxsignalstatistics(hw, pstats);
+ }
+}
+
+static void _rtl92ce_process_ui_link_quality(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 last_evm, n_spatialstream, tmpval;
+
+ if (pstats->signalquality != 0) {
+ if (pstats->packet_toself || pstats->packet_beacon) {
+
+ if (rtlpriv->stats.ui_link_quality.total_num++ >=
+ PHY_LINKQUALITY_SLID_WIN_MAX) {
+ rtlpriv->stats.ui_link_quality.total_num =
+ PHY_LINKQUALITY_SLID_WIN_MAX;
+ last_evm =
+ rtlpriv->stats.
+ ui_link_quality.elements[rtlpriv->
+ stats.ui_link_quality.
+ index];
+ rtlpriv->stats.ui_link_quality.total_val -=
+ last_evm;
+ }
+
+ rtlpriv->stats.ui_link_quality.total_val +=
+ pstats->signalquality;
+ rtlpriv->stats.ui_link_quality.elements[rtlpriv->stats.
+ ui_link_quality.
+ index++] =
+ pstats->signalquality;
+
+ if (rtlpriv->stats.ui_link_quality.index >=
+ PHY_LINKQUALITY_SLID_WIN_MAX)
+ rtlpriv->stats.ui_link_quality.index = 0;
+
+ tmpval = rtlpriv->stats.ui_link_quality.total_val /
+ rtlpriv->stats.ui_link_quality.total_num;
+ rtlpriv->stats.signal_quality = tmpval;
+
+ rtlpriv->stats.last_sigstrength_inpercent = tmpval;
+
+ for (n_spatialstream = 0; n_spatialstream < 2;
+ n_spatialstream++) {
+ if (pstats->
+ rx_mimo_signalquality[n_spatialstream] !=
+ -1) {
+ if (rtlpriv->stats.
+ rx_evm_percentage[n_spatialstream]
+ == 0) {
+ rtlpriv->stats.
+ rx_evm_percentage
+ [n_spatialstream] =
+ pstats->rx_mimo_signalquality
+ [n_spatialstream];
+ }
+
+ rtlpriv->stats.
+ rx_evm_percentage[n_spatialstream] =
+ ((rtlpriv->
+ stats.rx_evm_percentage
+ [n_spatialstream] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->
+ rx_mimo_signalquality
+ [n_spatialstream] * 1)) /
+ (RX_SMOOTH_FACTOR);
+ }
+ }
+ }
+	}
+}
+
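+/*
+ * Update the driver-wide RSSI, PWDB and link-quality statistics, but only
+ * for frames from our BSS or for beacons.
+ */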
+static void _rtl92ce_process_phyinfo(struct ieee80211_hw *hw,
+ u8 *buffer,
+ struct rtl_stats *pcurrent_stats)
+{
+
+ if (!pcurrent_stats->packet_matchbssid &&
+ !pcurrent_stats->packet_beacon)
+ return;
+
+ _rtl92ce_process_ui_rssi(hw, pcurrent_stats);
+ _rtl92ce_process_pwdb(hw, pcurrent_stats);
+ _rtl92ce_process_ui_link_quality(hw, pcurrent_stats);
+}
+
+static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_stats *pstats,
+ struct rx_desc_92c *pdesc,
+ struct rx_fwinfo_92c *p_drvinfo)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+
+ struct ieee80211_hdr *hdr;
+ u8 *tmp_buf;
+ u8 *praddr;
+ u8 *psaddr;
+ __le16 fc;
+ u16 type, c_fc;
+	bool packet_matchbssid, packet_toself, packet_beacon = false;
+
+ tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
+
+ hdr = (struct ieee80211_hdr *)tmp_buf;
+ fc = hdr->frame_control;
+ c_fc = le16_to_cpu(fc);
+ type = WLAN_FC_GET_TYPE(fc);
+ praddr = hdr->addr1;
+ psaddr = hdr->addr2;
+
+ packet_matchbssid =
+ ((IEEE80211_FTYPE_CTL != type) &&
+ (!compare_ether_addr(mac->bssid,
+ (c_fc & IEEE80211_FCTL_TODS) ?
+ hdr->addr1 : (c_fc & IEEE80211_FCTL_FROMDS) ?
+ hdr->addr2 : hdr->addr3)) &&
+ (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
+
+ packet_toself = packet_matchbssid &&
+ (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+
+ if (ieee80211_is_beacon(fc))
+ packet_beacon = true;
+
+ _rtl92ce_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
+ packet_matchbssid, packet_toself,
+ packet_beacon);
+
+ _rtl92ce_process_phyinfo(hw, tmp_buf, pstats);
+}
+
+bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *p_desc, struct sk_buff *skb)
+{
+ struct rx_fwinfo_92c *p_drvinfo;
+ struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
+
+ u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+ stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ RX_DRV_INFO_SIZE_UNIT;
+ stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
+ stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
+ stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ stats->hwerror = (stats->crc | stats->icv);
+ stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+ stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
+ stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+ stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
+ && (GET_RX_DESC_FAGGR(pdesc) == 1));
+ stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+ stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+
+ rx_status->freq = hw->conf.channel->center_freq;
+ rx_status->band = hw->conf.channel->band;
+
+ if (GET_RX_DESC_CRC32(pdesc))
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (!GET_RX_DESC_SWDEC(pdesc))
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+
+ if (GET_RX_DESC_BW(pdesc))
+ rx_status->flag |= RX_FLAG_40MHZ;
+
+ if (GET_RX_DESC_RXHT(pdesc))
+ rx_status->flag |= RX_FLAG_HT;
+
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+
+ if (stats->decrypted)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+
+ rx_status->rate_idx = _rtl92ce_rate_mapping((bool)
+ GET_RX_DESC_RXHT(pdesc),
+ (u8)
+ GET_RX_DESC_RXMCS(pdesc),
+ (bool)
+ GET_RX_DESC_PAGGR(pdesc));
+
+ rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
+	if (phystatus) {
+ p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
+ stats->rx_bufshift);
+
+ _rtl92ce_translate_rx_signal_stuff(hw,
+ skb, stats, pdesc,
+ p_drvinfo);
+ }
+
+ /*rx_status->qual = stats->signal; */
+ rx_status->signal = stats->rssi + 10;
+ /*rx_status->noise = -stats->noise; */
+
+ return true;
+}
+
+void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_tx_info *info, struct sk_buff *skb,
+ unsigned int queue_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool defaultadapter = true;
+ struct ieee80211_sta *sta;
+ u8 *pdesc = (u8 *) pdesc_tx;
+ struct rtl_tcb_desc tcb_desc;
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ u16 seq_number;
+ __le16 fc = hdr->frame_control;
+ u8 rate_flag = info->control.rates[0].flags;
+
+ enum rtl_desc_qsel fw_qsel =
+ _rtl92ce_map_hwqueue_to_fwqueue(fc, queue_index);
+
+ bool firstseg = ((hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
+
+ bool lastseg = ((hdr->frame_control &
+ cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
+
+ dma_addr_t mapping = pci_map_single(rtlpci->pdev,
+ skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+
+ rtl_get_tcb_desc(hw, info, skb, &tcb_desc);
+
+ CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c));
+
+ if (firstseg) {
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+
+ SET_TX_DESC_TX_RATE(pdesc, tcb_desc.hw_rate);
+
+ if (tcb_desc.use_shortgi || tcb_desc.use_shortpreamble)
+ SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
+
+ if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
+ info->flags & IEEE80211_TX_CTL_AMPDU) {
+ SET_TX_DESC_AGG_BREAK(pdesc, 1);
+ SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+ }
+ SET_TX_DESC_SEQ(pdesc, seq_number);
+
+		SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.rts_enable &&
+						!tcb_desc.cts_enable) ? 1 : 0));
+ SET_TX_DESC_HW_RTS_ENABLE(pdesc,
+ ((tcb_desc.rts_enable
+ || tcb_desc.cts_enable) ? 1 : 0));
+ SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.cts_enable) ? 1 : 0));
+ SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.rts_stbc) ? 1 : 0));
+
+ SET_TX_DESC_RTS_RATE(pdesc, tcb_desc.rts_rate);
+ SET_TX_DESC_RTS_BW(pdesc, 0);
+ SET_TX_DESC_RTS_SC(pdesc, tcb_desc.rts_sc);
+ SET_TX_DESC_RTS_SHORT(pdesc,
+ ((tcb_desc.rts_rate <= DESC92C_RATE54M) ?
+ (tcb_desc.rts_use_shortpreamble ? 1 : 0)
+ : (tcb_desc.rts_use_shortgi ? 1 : 0)));
+
+ if (mac->bw_40) {
+ if (tcb_desc.packet_bw) {
+ SET_TX_DESC_DATA_BW(pdesc, 1);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+ } else {
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+
+ if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc,
+ mac->cur_40_prime_sc);
+ }
+ }
+ } else {
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ }
+
+ SET_TX_DESC_LINIP(pdesc, 0);
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(mac->vif, mac->bssid);
+ if (sta) {
+ u8 ampdu_density = sta->ht_cap.ampdu_density;
+ SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ }
+ rcu_read_unlock();
+
+ if (info->control.hw_key) {
+ struct ieee80211_key_conf *keyconf =
+ info->control.hw_key;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ break;
+ default:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ break;
+
+ }
+ }
+
+ SET_TX_DESC_PKT_ID(pdesc, 0);
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+
+ SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
+ SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
+ SET_TX_DESC_DISABLE_FB(pdesc, 0);
+ SET_TX_DESC_USE_RATE(pdesc, tcb_desc.use_driver_rate ? 1 : 0);
+
+ if (ieee80211_is_data_qos(fc)) {
+ if (mac->rdg_en) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ ("Enable RDG function.\n"));
+ SET_TX_DESC_RDG_ENABLE(pdesc, 1);
+ SET_TX_DESC_HTC(pdesc, 1);
+ }
+ }
+ }
+
+ SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
+ SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
+
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
+
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+
+ if (rtlpriv->dm.useramask) {
+ SET_TX_DESC_RATE_ID(pdesc, tcb_desc.ratr_index);
+ SET_TX_DESC_MACID(pdesc, tcb_desc.mac_id);
+ } else {
+ SET_TX_DESC_RATE_ID(pdesc, 0xC + tcb_desc.ratr_index);
+ SET_TX_DESC_MACID(pdesc, tcb_desc.ratr_index);
+ }
+
+ if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
+ ppsc->fwctrl_lps) {
+ SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+ SET_TX_DESC_PKT_ID(pdesc, 8);
+
+ if (!defaultadapter)
+ SET_TX_DESC_QOS(pdesc, 1);
+ }
+
+ SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
+ SET_TX_DESC_BMC(pdesc, 1);
+ }
+
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, ("\n"));
+}
+
+void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool firstseg,
+ bool lastseg, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 fw_queue = QSLT_BEACON;
+
+ dma_addr_t mapping = pci_map_single(rtlpci->pdev,
+ skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+ __le16 fc = hdr->frame_control;
+
+ CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+
+ if (firstseg)
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+
+ SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+
+ SET_TX_DESC_SEQ(pdesc, 0);
+
+ SET_TX_DESC_LINIP(pdesc, 0);
+
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+
+ SET_TX_DESC_FIRST_SEG(pdesc, 1);
+ SET_TX_DESC_LAST_SEG(pdesc, 1);
+
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
+
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+
+ SET_TX_DESC_RATE_ID(pdesc, 7);
+ SET_TX_DESC_MACID(pdesc, 0);
+
+ SET_TX_DESC_OWN(pdesc, 1);
+
+ SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+
+ SET_TX_DESC_FIRST_SEG(pdesc, 1);
+ SET_TX_DESC_LAST_SEG(pdesc, 1);
+
+ SET_TX_DESC_OFFSET(pdesc, 0x20);
+
+ SET_TX_DESC_USE_RATE(pdesc, 1);
+
+ if (!ieee80211_is_data_qos(fc)) {
+ SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+ SET_TX_DESC_PKT_ID(pdesc, 8);
+ }
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C Tx Cmd Content\n",
+ pdesc, TX_DESC_SIZE);
+}
+
+void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+{
+	if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ SET_TX_DESC_OWN(pdesc, 1);
+ break;
+ case HW_DESC_TX_NEXTDESC_ADDR:
+ SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+ break;
+ default:
+			RT_ASSERT(false, ("ERR txdesc :%d"
+					  " not processed\n", desc_name));
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_RXOWN:
+ SET_RX_DESC_OWN(pdesc, 1);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val);
+ break;
+ case HW_DESC_RXERO:
+ SET_RX_DESC_EOR(pdesc, 1);
+ break;
+ default:
+			RT_ASSERT(false, ("ERR rxdesc :%d "
+					  "not processed\n", desc_name));
+ break;
+ }
+ }
+}
+
+u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
+{
+ u32 ret = 0;
+
+	if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = GET_TX_DESC_OWN(p_desc);
+ break;
+ case HW_DESC_TXBUFF_ADDR:
+ ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
+ break;
+ default:
+			RT_ASSERT(false, ("ERR txdesc :%d "
+					  "not processed\n", desc_name));
+ break;
+ }
+ } else {
+ struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = GET_RX_DESC_OWN(pdesc);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ ret = GET_RX_DESC_PKT_LEN(pdesc);
+ break;
+ default:
+			RT_ASSERT(false, ("ERR rxdesc :%d "
+					  "not processed\n", desc_name));
+ break;
+ }
+ }
+ return ret;
+}
+
+void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if (hw_queue == BEACON_QUEUE) {
+ rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
+ } else {
+ rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
+ BIT(0) << (hw_queue));
+ }
+}
+
+bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring;
+ struct rtl_tx_desc *pdesc;
+ u8 own;
+ unsigned long flags;
+ struct sk_buff *pskb = NULL;
+
+ ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+ pskb = __skb_dequeue(&ring->queue);
+ if (pskb)
+ kfree_skb(pskb);
+
+ pdesc = &ring->desc[0];
+ own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
+
+ rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
+
+ __skb_queue_tail(&ring->queue, skb);
+
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+ return true;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
new file mode 100644
index 000000000000..803adcc80c96
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -0,0 +1,740 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CE_TRX_H__
+#define __RTL92CE_TRX_H__
+
+#define TX_DESC_SIZE 64
+#define TX_DESC_AGGR_SUBFRAME_SIZE 32
+
+#define RX_DESC_SIZE 32
+#define RX_DRV_INFO_SIZE_UNIT 8
+
+#define TX_DESC_NEXT_DESC_OFFSET 40
+#define USB_HWDESC_HEADER_LEN 32
+#define CRCLENGTH 4
+
+/* Define a macro that takes a le32 word, converts it to host ordering,
+ * right shifts by a specified count, creates a mask of the specified
+ * bit count, and extracts that number of bits.
+ */
+
+#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
+ ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
+ BIT_LEN_MASK_32(__mask))
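+
+/* Usage sketch (illustrative values only, not part of the accessors below):
+ * reading the 8-bit OFFSET field from the first descriptor dword, exactly as
+ * GET_TX_DESC_OFFSET() does:
+ *
+ *	u8 offset = (u8) SHIFT_AND_MASK_LE(pdesc, 16, 8);
+ *
+ * For a dword holding 0x12345678 in host order this shifts right by 16 and
+ * masks with BIT_LEN_MASK_32(8), yielding 0x34.
+ */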
+
+/* Define a macro that clears a bit field in an le32 word and
+ * sets the specified value into that bit field. The resulting
+ * value remains in le32 ordering; however, it is properly converted
+ * to host ordering for the clear and set operations before conversion
+ * back to le32.
+ */
+
+#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
+ (*(__le32 *)(__pdesc) = \
+ (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
+ (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
+	 (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))))
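+
+/* Usage sketch (illustrative only): setting the OWN bit, i.e. bit 31 of the
+ * first descriptor dword, which is what SET_TX_DESC_OWN() below expands to:
+ *
+ *	SET_BITS_OFFSET_LE(pdesc, 31, 1, 1);
+ *
+ * The le32 word is converted to host order, bit 31 is cleared and then set
+ * to the new value, and the result is stored back in le32 order.
+ */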
+
+/* macros to read/write various fields in RX or TX descriptors */
+
+#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
+#define SET_TX_DESC_OFFSET(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
+#define SET_TX_DESC_BMC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val)
+#define SET_TX_DESC_HTC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val)
+#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
+#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
+#define SET_TX_DESC_LINIP(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
+#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
+#define SET_TX_DESC_GF(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
+#define SET_TX_DESC_OWN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
+
+#define GET_TX_DESC_PKT_SIZE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 0, 16)
+#define GET_TX_DESC_OFFSET(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 16, 8)
+#define GET_TX_DESC_BMC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 24, 1)
+#define GET_TX_DESC_HTC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 25, 1)
+#define GET_TX_DESC_LAST_SEG(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 26, 1)
+#define GET_TX_DESC_FIRST_SEG(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 27, 1)
+#define GET_TX_DESC_LINIP(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 28, 1)
+#define GET_TX_DESC_NO_ACM(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 29, 1)
+#define GET_TX_DESC_GF(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 30, 1)
+#define GET_TX_DESC_OWN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 31, 1)
+
+#define SET_TX_DESC_MACID(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val)
+#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val)
+#define SET_TX_DESC_BK(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val)
+#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val)
+#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val)
+#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val)
+#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val)
+#define SET_TX_DESC_PIFS(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val)
+#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val)
+#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val)
+#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val)
+#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val)
+#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 24, 8, __val)
+
+#define GET_TX_DESC_MACID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
+#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 5, 1)
+#define GET_TX_DESC_AGG_BREAK(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 6, 1)
+#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 7, 1)
+#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 8, 5)
+#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 13, 1)
+#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
+#define GET_TX_DESC_PIFS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
+#define GET_TX_DESC_RATE_ID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
+#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 20, 1)
+#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 21, 1)
+#define GET_TX_DESC_SEC_TYPE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 22, 2)
+#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 24, 8)
+
+#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val)
+#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val)
+#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val)
+#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val)
+#define SET_TX_DESC_RAW(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val)
+#define SET_TX_DESC_CCX(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val)
+#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val)
+#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val)
+#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val)
+#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val)
+#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val)
+#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val)
+
+#define GET_TX_DESC_RTS_RC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 0, 6)
+#define GET_TX_DESC_DATA_RC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 6, 6)
+#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 14, 2)
+#define GET_TX_DESC_MORE_FRAG(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 17, 1)
+#define GET_TX_DESC_RAW(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 18, 1)
+#define GET_TX_DESC_CCX(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 19, 1)
+#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 20, 3)
+#define GET_TX_DESC_ANTSEL_A(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 24, 1)
+#define GET_TX_DESC_ANTSEL_B(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 25, 1)
+#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 26, 2)
+#define GET_TX_DESC_TX_ANTL(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 28, 2)
+#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 30, 2)
+
+#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val)
+#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val)
+#define SET_TX_DESC_SEQ(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val)
+#define SET_TX_DESC_PKT_ID(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val)
+
+#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 0, 8)
+#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 8, 8)
+#define GET_TX_DESC_SEQ(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 16, 12)
+#define GET_TX_DESC_PKT_ID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 28, 4)
+
+#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val)
+#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val)
+#define SET_TX_DESC_QOS(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val)
+#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val)
+#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val)
+#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val)
+#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val)
+#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val)
+#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val)
+#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val)
+#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val)
+#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val)
+#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val)
+#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val)
+#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val)
+#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val)
+#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val)
+#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val)
+#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val)
+#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val)
+#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val)
+
+#define GET_TX_DESC_RTS_RATE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 0, 5)
+#define GET_TX_DESC_AP_DCFE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 5, 1)
+#define GET_TX_DESC_QOS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 6, 1)
+#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 7, 1)
+#define GET_TX_DESC_USE_RATE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 8, 1)
+#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 9, 1)
+#define GET_TX_DESC_DISABLE_FB(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 10, 1)
+#define GET_TX_DESC_CTS2SELF(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 11, 1)
+#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 12, 1)
+#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 13, 1)
+#define GET_TX_DESC_PORT_ID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 14, 1)
+#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 18, 1)
+#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 19, 1)
+#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 20, 2)
+#define GET_TX_DESC_TX_STBC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 22, 2)
+#define GET_TX_DESC_DATA_SHORT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 24, 1)
+#define GET_TX_DESC_DATA_BW(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 25, 1)
+#define GET_TX_DESC_RTS_SHORT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 26, 1)
+#define GET_TX_DESC_RTS_BW(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 27, 1)
+#define GET_TX_DESC_RTS_SC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 28, 2)
+#define GET_TX_DESC_RTS_STBC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 30, 2)
+
+#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
+#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
+#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val)
+#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val)
+#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val)
+
+#define GET_TX_DESC_TX_RATE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 0, 6)
+#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 6, 1)
+#define GET_TX_DESC_CCX_TAG(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 7, 1)
+#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 8, 5)
+#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 13, 4)
+#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 17, 1)
+#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 18, 6)
+#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 24, 8)
+
+#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val)
+#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val)
+#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val)
+#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val)
+#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val)
+#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val)
+#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val)
+#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val)
+
+#define GET_TX_DESC_TXAGC_A(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 0, 5)
+#define GET_TX_DESC_TXAGC_B(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 5, 5)
+#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 10, 1)
+#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 11, 5)
+#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 16, 4)
+#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 20, 4)
+#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 24, 4)
+#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 28, 4)
+
+#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val)
+#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val)
+#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val)
+#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val)
+#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val)
+
+#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+28, 0, 16)
+#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+28, 16, 4)
+#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+28, 20, 4)
+#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+28, 24, 4)
+#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+28, 28, 4)
+
+#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val)
+#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val)
+
+#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+32, 0, 32)
+#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+36, 0, 32)
+
+#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val)
+#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val)
+
+#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+40, 0, 32)
+#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+44, 0, 32)
+
+#define GET_RX_DESC_PKT_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 0, 14)
+#define GET_RX_DESC_CRC32(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 14, 1)
+#define GET_RX_DESC_ICV(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 15, 1)
+#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 16, 4)
+#define GET_RX_DESC_SECURITY(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 20, 3)
+#define GET_RX_DESC_QOS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 23, 1)
+#define GET_RX_DESC_SHIFT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 24, 2)
+#define GET_RX_DESC_PHYST(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 26, 1)
+#define GET_RX_DESC_SWDEC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 27, 1)
+#define GET_RX_DESC_LS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 28, 1)
+#define GET_RX_DESC_FS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 29, 1)
+#define GET_RX_DESC_EOR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 30, 1)
+#define GET_RX_DESC_OWN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 31, 1)
+
+#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
+#define SET_RX_DESC_EOR(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
+#define SET_RX_DESC_OWN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
+
+#define GET_RX_DESC_MACID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
+#define GET_RX_DESC_TID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 5, 4)
+#define GET_RX_DESC_HWRSVD(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 9, 5)
+#define GET_RX_DESC_PAGGR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
+#define GET_RX_DESC_FAGGR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
+#define GET_RX_DESC_A1_FIT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
+#define GET_RX_DESC_A2_FIT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 20, 4)
+#define GET_RX_DESC_PAM(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 24, 1)
+#define GET_RX_DESC_PWR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 25, 1)
+#define GET_RX_DESC_MD(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 26, 1)
+#define GET_RX_DESC_MF(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 27, 1)
+#define GET_RX_DESC_TYPE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 28, 2)
+#define GET_RX_DESC_MC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 30, 1)
+#define GET_RX_DESC_BC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 31, 1)
+#define GET_RX_DESC_SEQ(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 0, 12)
+#define GET_RX_DESC_FRAG(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 12, 4)
+#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 16, 14)
+#define GET_RX_DESC_NEXT_IND(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 30, 1)
+#define GET_RX_DESC_RSVD(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 31, 1)
+
+#define GET_RX_DESC_RXMCS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 0, 6)
+#define GET_RX_DESC_RXHT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 6, 1)
+#define GET_RX_DESC_SPLCP(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 8, 1)
+#define GET_RX_DESC_BW(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 9, 1)
+#define GET_RX_DESC_HTC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 10, 1)
+#define GET_RX_DESC_HWPC_ERR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 14, 1)
+#define GET_RX_DESC_HWPC_IND(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 15, 1)
+#define GET_RX_DESC_IV0(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 16, 16)
+
+#define GET_RX_DESC_IV1(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 0, 32)
+#define GET_RX_DESC_TSFL(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 0, 32)
+
+#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 0, 32)
+#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+28, 0, 32)
+
+#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val)
+#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
+
+#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size)		\
+do {								\
+	if ((_size) > TX_DESC_NEXT_DESC_OFFSET)			\
+		memset((void *)(__pdesc), 0, TX_DESC_NEXT_DESC_OFFSET); \
+	else							\
+		memset((void *)(__pdesc), 0, (_size));		\
+} while (0)
+
+#define RX_HAL_IS_CCK_RATE(_pdesc) \
+	((_pdesc)->rxmcs == DESC92C_RATE1M ||	\
+	 (_pdesc)->rxmcs == DESC92C_RATE2M ||	\
+	 (_pdesc)->rxmcs == DESC92C_RATE5_5M ||	\
+	 (_pdesc)->rxmcs == DESC92C_RATE11M)
+
+struct rx_fwinfo_92c {
+ u8 gain_trsw[4];
+ u8 pwdb_all;
+ u8 cfosho[4];
+ u8 cfotail[4];
+ char rxevm[2];
+ char rxsnr[4];
+ u8 pdsnr[2];
+ u8 csi_current[2];
+ u8 csi_target[2];
+ u8 sigevm;
+ u8 max_ex_pwr;
+ u8 ex_intf_flag:1;
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 reserve:4;
+} __packed;
+
+struct tx_desc_92c {
+ u32 pktsize:16;
+ u32 offset:8;
+ u32 bmc:1;
+ u32 htc:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 linip:1;
+ u32 noacm:1;
+ u32 gf:1;
+ u32 own:1;
+
+ u32 macid:5;
+ u32 agg_en:1;
+ u32 bk:1;
+ u32 rdg_en:1;
+ u32 queuesel:5;
+ u32 rd_nav_ext:1;
+ u32 lsig_txop_en:1;
+ u32 pifs:1;
+ u32 rateid:4;
+ u32 nav_usehdr:1;
+ u32 en_descid:1;
+ u32 sectype:2;
+ u32 pktoffset:8;
+
+ u32 rts_rc:6;
+ u32 data_rc:6;
+ u32 rsvd0:2;
+ u32 bar_retryht:2;
+ u32 rsvd1:1;
+ u32 morefrag:1;
+ u32 raw:1;
+ u32 ccx:1;
+ u32 ampdudensity:3;
+ u32 rsvd2:1;
+ u32 ant_sela:1;
+ u32 ant_selb:1;
+ u32 txant_cck:2;
+ u32 txant_l:2;
+ u32 txant_ht:2;
+
+ u32 nextheadpage:8;
+ u32 tailpage:8;
+ u32 seq:12;
+ u32 pktid:4;
+
+ u32 rtsrate:5;
+ u32 apdcfe:1;
+ u32 qos:1;
+ u32 hwseq_enable:1;
+ u32 userrate:1;
+ u32 dis_rtsfb:1;
+ u32 dis_datafb:1;
+ u32 cts2self:1;
+ u32 rts_en:1;
+ u32 hwrts_en:1;
+ u32 portid:1;
+ u32 rsvd3:3;
+ u32 waitdcts:1;
+ u32 cts2ap_en:1;
+ u32 txsc:2;
+ u32 stbc:2;
+ u32 txshort:1;
+ u32 txbw:1;
+ u32 rtsshort:1;
+ u32 rtsbw:1;
+ u32 rtssc:2;
+ u32 rtsstbc:2;
+
+ u32 txrate:6;
+ u32 shortgi:1;
+ u32 ccxt:1;
+ u32 txrate_fb_lmt:5;
+ u32 rtsrate_fb_lmt:4;
+ u32 retrylmt_en:1;
+ u32 txretrylmt:6;
+ u32 usb_txaggnum:8;
+
+ u32 txagca:5;
+ u32 txagcb:5;
+ u32 usemaxlen:1;
+ u32 maxaggnum:5;
+ u32 mcsg1maxlen:4;
+ u32 mcsg2maxlen:4;
+ u32 mcsg3maxlen:4;
+ u32 mcs7sgimaxlen:4;
+
+ u32 txbuffersize:16;
+ u32 mcsg4maxlen:4;
+ u32 mcsg5maxlen:4;
+ u32 mcsg6maxlen:4;
+ u32 mcsg15sgimaxlen:4;
+
+ u32 txbuffaddr;
+ u32 txbufferaddr64;
+ u32 nextdescaddress;
+ u32 nextdescaddress64;
+
+ u32 reserve_pass_pcie_mm_limit[4];
+} __packed;
+
+struct rx_desc_92c {
+ u32 length:14;
+ u32 crc32:1;
+ u32 icverror:1;
+ u32 drv_infosize:4;
+ u32 security:3;
+ u32 qos:1;
+ u32 shift:2;
+ u32 phystatus:1;
+ u32 swdec:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 eor:1;
+ u32 own:1;
+
+ u32 macid:5;
+ u32 tid:4;
+ u32 hwrsvd:5;
+ u32 paggr:1;
+ u32 faggr:1;
+ u32 a1_fit:4;
+ u32 a2_fit:4;
+ u32 pam:1;
+ u32 pwr:1;
+ u32 moredata:1;
+ u32 morefrag:1;
+ u32 type:2;
+ u32 mc:1;
+ u32 bc:1;
+
+ u32 seq:12;
+ u32 frag:4;
+ u32 nextpktlen:14;
+ u32 nextind:1;
+ u32 rsvd:1;
+
+ u32 rxmcs:6;
+ u32 rxht:1;
+ u32 amsdu:1;
+ u32 splcp:1;
+ u32 bandwidth:1;
+ u32 htc:1;
+ u32 tcpchk_rpt:1;
+ u32 ipcchk_rpt:1;
+ u32 tcpchk_valid:1;
+ u32 hwpcerr:1;
+ u32 hwpcind:1;
+ u32 iv0:16;
+
+ u32 iv1;
+
+ u32 tsfl;
+
+ u32 bufferaddress;
+ u32 bufferaddress64;
+
+} __packed;
+
+void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr,
+ u8 *pdesc, struct ieee80211_tx_info *info,
+ struct sk_buff *skb, unsigned int qsel);
+bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb);
+void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+u32 rtl92ce_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue);
+void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
+ bool b_firstseg, bool b_lastseg,
+ struct sk_buff *skb);
+bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
new file mode 100644
index 000000000000..ad2de6b839ef
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
@@ -0,0 +1,14 @@
+rtl8192cu-objs := \
+ dm.o \
+ hw.o \
+ led.o \
+ mac.o \
+ phy.o \
+ rf.o \
+ sw.o \
+ table.o \
+ trx.o
+
+obj-$(CONFIG_RTL8192CU) += rtl8192cu.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
new file mode 100644
index 000000000000..c54940ea72fe
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
@@ -0,0 +1,62 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/def.h"
+
+/*-------------------------------------------------------------------------
+ * Chip specific
+ *-------------------------------------------------------------------------*/
+#define CHIP_8723 BIT(2) /* RTL8723 With BT feature */
+#define CHIP_8723_DRV_REV BIT(3) /* RTL8723 Driver Revised */
+#define NORMAL_CHIP BIT(4)
+#define CHIP_VENDOR_UMC BIT(5)
+#define CHIP_VENDOR_UMC_B_CUT BIT(6)
+
+#define IS_NORMAL_CHIP(version) \
+ (((version) & NORMAL_CHIP) ? true : false)
+
+#define IS_8723_SERIES(version) \
+ (((version) & CHIP_8723) ? true : false)
+
+#define IS_92C_1T2R(version) \
+ (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
+
+#define IS_VENDOR_UMC(version) \
+ (((version) & CHIP_VENDOR_UMC) ? true : false)
+
+#define IS_VENDOR_UMC_A_CUT(version) \
+ (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6) | BIT(7))) ? \
+ false : true) : false)
+
+#define IS_VENDOR_8723_A_CUT(version) \
+ (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \
+ false : true) : false)
+
+#define CHIP_BONDING_92C_1T2R 0x1
+#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
new file mode 100644
index 000000000000..f311baee668d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
@@ -0,0 +1,113 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+
+void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long undecorated_smoothed_pwdb;
+
+ if (!rtlpriv->dm.dynamic_txpower_enable)
+ return;
+
+ if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+ return;
+ }
+
+ if ((mac->link_state < MAC80211_LINKED) &&
+ (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+ ("Not connected to any\n"));
+
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+
+ rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+ return;
+ }
+
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("AP Client PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb));
+ } else {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("STA Default Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb));
+ }
+ } else {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("AP Ext Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb));
+ }
+
+ if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
+ } else if ((undecorated_smoothed_pwdb <
+ (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
+ (undecorated_smoothed_pwdb >=
+ TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
+
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
+ } else if (undecorated_smoothed_pwdb <
+ (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("TXHIGHPWRLEVEL_NORMAL\n"));
+ }
+
+ if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel));
+ rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
+ }
+
+ rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
new file mode 100644
index 000000000000..7f966c666b5a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
@@ -0,0 +1,32 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/dm.h"
+
+void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
new file mode 100644
index 000000000000..9444e76838cf
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -0,0 +1,2504 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../usb.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "mac.h"
+#include "dm.h"
+#include "hw.h"
+#include "trx.h"
+#include "led.h"
+#include "table.h"
+
+static void _rtl92cu_phy_param_tab_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+
+ rtlphy->hwparam_tables[MAC_REG].length = RTL8192CUMAC_2T_ARRAYLENGTH;
+ rtlphy->hwparam_tables[MAC_REG].pdata = RTL8192CUMAC_2T_ARRAY;
+ if (IS_HIGHT_PA(rtlefuse->board_type)) {
+ rtlphy->hwparam_tables[PHY_REG_PG].length =
+ RTL8192CUPHY_REG_Array_PG_HPLength;
+ rtlphy->hwparam_tables[PHY_REG_PG].pdata =
+ RTL8192CUPHY_REG_Array_PG_HP;
+ } else {
+ rtlphy->hwparam_tables[PHY_REG_PG].length =
+ RTL8192CUPHY_REG_ARRAY_PGLENGTH;
+ rtlphy->hwparam_tables[PHY_REG_PG].pdata =
+ RTL8192CUPHY_REG_ARRAY_PG;
+ }
+ /* 2T */
+ rtlphy->hwparam_tables[PHY_REG_2T].length =
+ RTL8192CUPHY_REG_2TARRAY_LENGTH;
+ rtlphy->hwparam_tables[PHY_REG_2T].pdata =
+ RTL8192CUPHY_REG_2TARRAY;
+ rtlphy->hwparam_tables[RADIOA_2T].length =
+ RTL8192CURADIOA_2TARRAYLENGTH;
+ rtlphy->hwparam_tables[RADIOA_2T].pdata =
+ RTL8192CURADIOA_2TARRAY;
+ rtlphy->hwparam_tables[RADIOB_2T].length =
+ RTL8192CURADIOB_2TARRAYLENGTH;
+ rtlphy->hwparam_tables[RADIOB_2T].pdata =
+ RTL8192CU_RADIOB_2TARRAY;
+ rtlphy->hwparam_tables[AGCTAB_2T].length =
+ RTL8192CUAGCTAB_2TARRAYLENGTH;
+ rtlphy->hwparam_tables[AGCTAB_2T].pdata =
+ RTL8192CUAGCTAB_2TARRAY;
+ /* 1T */
+ if (IS_HIGHT_PA(rtlefuse->board_type)) {
+ rtlphy->hwparam_tables[PHY_REG_1T].length =
+ RTL8192CUPHY_REG_1T_HPArrayLength;
+ rtlphy->hwparam_tables[PHY_REG_1T].pdata =
+ RTL8192CUPHY_REG_1T_HPArray;
+ rtlphy->hwparam_tables[RADIOA_1T].length =
+ RTL8192CURadioA_1T_HPArrayLength;
+ rtlphy->hwparam_tables[RADIOA_1T].pdata =
+ RTL8192CURadioA_1T_HPArray;
+ rtlphy->hwparam_tables[RADIOB_1T].length =
+ RTL8192CURADIOB_1TARRAYLENGTH;
+ rtlphy->hwparam_tables[RADIOB_1T].pdata =
+ RTL8192CU_RADIOB_1TARRAY;
+ rtlphy->hwparam_tables[AGCTAB_1T].length =
+ RTL8192CUAGCTAB_1T_HPArrayLength;
+ rtlphy->hwparam_tables[AGCTAB_1T].pdata =
+ Rtl8192CUAGCTAB_1T_HPArray;
+ } else {
+ rtlphy->hwparam_tables[PHY_REG_1T].length =
+ RTL8192CUPHY_REG_1TARRAY_LENGTH;
+ rtlphy->hwparam_tables[PHY_REG_1T].pdata =
+ RTL8192CUPHY_REG_1TARRAY;
+ rtlphy->hwparam_tables[RADIOA_1T].length =
+ RTL8192CURADIOA_1TARRAYLENGTH;
+ rtlphy->hwparam_tables[RADIOA_1T].pdata =
+ RTL8192CU_RADIOA_1TARRAY;
+ rtlphy->hwparam_tables[RADIOB_1T].length =
+ RTL8192CURADIOB_1TARRAYLENGTH;
+ rtlphy->hwparam_tables[RADIOB_1T].pdata =
+ RTL8192CU_RADIOB_1TARRAY;
+ rtlphy->hwparam_tables[AGCTAB_1T].length =
+ RTL8192CUAGCTAB_1TARRAYLENGTH;
+ rtlphy->hwparam_tables[AGCTAB_1T].pdata =
+ RTL8192CUAGCTAB_1TARRAY;
+ }
+}
+
+static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail,
+ u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 rf_path, index, tempval;
+ u16 i;
+
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < 3; i++) {
+ if (!autoload_fail) {
+ rtlefuse->
+ eeprom_chnlarea_txpwr_cck[rf_path][i] =
+ hwinfo[EEPROM_TXPOWERCCK + rf_path * 3 + i];
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
+ hwinfo[EEPROM_TXPOWERHT40_1S + rf_path * 3 +
+ i];
+ } else {
+ rtlefuse->
+ eeprom_chnlarea_txpwr_cck[rf_path][i] =
+ EEPROM_DEFAULT_TXPOWERLEVEL;
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
+ EEPROM_DEFAULT_TXPOWERLEVEL;
+ }
+ }
+ }
+ for (i = 0; i < 3; i++) {
+ if (!autoload_fail)
+ tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
+ else
+ tempval = EEPROM_DEFAULT_HT40_2SDIFF;
+ rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] =
+ (tempval & 0xf);
+ rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] =
+ ((tempval & 0xf0) >> 4);
+ }
+ for (rf_path = 0; rf_path < 2; rf_path++)
+ for (i = 0; i < 3; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ ("RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
+ i, rtlefuse->
+ eeprom_chnlarea_txpwr_cck[rf_path][i]));
+ for (rf_path = 0; rf_path < 2; rf_path++)
+ for (i = 0; i < 3; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ ("RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][i]));
+ for (rf_path = 0; rf_path < 2; rf_path++)
+ for (i = 0; i < 3; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+ ("RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
+ rf_path, i,
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+ [i]));
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < 14; i++) {
+ index = _rtl92c_get_chnl_group((u8) i);
+ rtlefuse->txpwrlevel_cck[rf_path][i] =
+ rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][index];
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][index];
+ if ((rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index])
+ > 0) {
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
+ rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_1s[rf_path]
+ [index] - rtlefuse->
+ eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
+ [index];
+ } else {
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
+ }
+ }
+ for (i = 0; i < 14; i++) {
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
+ "[0x%x / 0x%x / 0x%x]\n", rf_path, i,
+ rtlefuse->txpwrlevel_cck[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i]));
+ }
+ }
+ for (i = 0; i < 3; i++) {
+ if (!autoload_fail) {
+ rtlefuse->eeprom_pwrlimit_ht40[i] =
+ hwinfo[EEPROM_TXPWR_GROUP + i];
+ rtlefuse->eeprom_pwrlimit_ht20[i] =
+ hwinfo[EEPROM_TXPWR_GROUP + 3 + i];
+ } else {
+ rtlefuse->eeprom_pwrlimit_ht40[i] = 0;
+ rtlefuse->eeprom_pwrlimit_ht20[i] = 0;
+ }
+ }
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ for (i = 0; i < 14; i++) {
+ index = _rtl92c_get_chnl_group((u8) i);
+ if (rf_path == RF90_PATH_A) {
+ rtlefuse->pwrgroup_ht20[rf_path][i] =
+ (rtlefuse->eeprom_pwrlimit_ht20[index]
+ & 0xf);
+ rtlefuse->pwrgroup_ht40[rf_path][i] =
+ (rtlefuse->eeprom_pwrlimit_ht40[index]
+ & 0xf);
+ } else if (rf_path == RF90_PATH_B) {
+ rtlefuse->pwrgroup_ht20[rf_path][i] =
+ ((rtlefuse->eeprom_pwrlimit_ht20[index]
+ & 0xf0) >> 4);
+ rtlefuse->pwrgroup_ht40[rf_path][i] =
+ ((rtlefuse->eeprom_pwrlimit_ht40[index]
+ & 0xf0) >> 4);
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-%d pwrgroup_ht20[%d] = 0x%x\n",
+ rf_path, i,
+ rtlefuse->pwrgroup_ht20[rf_path][i]));
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-%d pwrgroup_ht40[%d] = 0x%x\n",
+ rf_path, i,
+ rtlefuse->pwrgroup_ht40[rf_path][i]));
+ }
+ }
+ for (i = 0; i < 14; i++) {
+ index = _rtl92c_get_chnl_group((u8) i);
+ if (!autoload_fail)
+ tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index];
+ else
+ tempval = EEPROM_DEFAULT_HT20_DIFF;
+ rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
+ rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
+ ((tempval >> 4) & 0xF);
+ if (rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] & BIT(3))
+ rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] |= 0xF0;
+ if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3))
+ rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0;
+ index = _rtl92c_get_chnl_group((u8) i);
+ if (!autoload_fail)
+ tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index];
+ else
+ tempval = EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] = (tempval & 0xF);
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
+ ((tempval >> 4) & 0xF);
+ }
+ rtlefuse->legacy_ht_txpowerdiff =
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];
+ for (i = 0; i < 14; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
+ rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]));
+ for (i = 0; i < 14; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]));
+ for (i = 0; i < 14; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
+ rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]));
+ for (i = 0; i < 14; i++)
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
+ rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]));
+ if (!autoload_fail)
+ rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
+ else
+ rtlefuse->eeprom_regulatory = 0;
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
+ if (!autoload_fail) {
+ rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
+ rtlefuse->eeprom_tssi[RF90_PATH_B] = hwinfo[EEPROM_TSSI_B];
+ } else {
+ rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
+ rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI;
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("TSSI_A = 0x%x, TSSI_B = 0x%x\n",
+ rtlefuse->eeprom_tssi[RF90_PATH_A],
+ rtlefuse->eeprom_tssi[RF90_PATH_B]));
+ if (!autoload_fail)
+ tempval = hwinfo[EEPROM_THERMAL_METER];
+ else
+ tempval = EEPROM_DEFAULT_THERMALMETER;
+ rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
+ if (rtlefuse->eeprom_thermalmeter < 0x06 ||
+ rtlefuse->eeprom_thermalmeter > 0x1c)
+ rtlefuse->eeprom_thermalmeter = 0x12;
+ if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
+ rtlefuse->apk_thermalmeterignore = true;
+ rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+ RTPRINT(rtlpriv, FINIT, INIT_TxPower,
+ ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+}
+
+static void _rtl92cu_read_board_type(struct ieee80211_hw *hw, u8 *contents)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 boardType;
+
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+ boardType = ((contents[EEPROM_RF_OPT1]) &
+ BOARD_TYPE_NORMAL_MASK) >> 5; /*bit[7:5]*/
+ } else {
+ boardType = contents[EEPROM_RF_OPT4];
+ boardType &= BOARD_TYPE_TEST_MASK;
+ }
+ rtlefuse->board_type = boardType;
+ if (IS_HIGHT_PA(rtlefuse->board_type))
+ rtlefuse->external_pa = 1;
+ printk(KERN_INFO "rtl8192cu: Board Type %x\n", rtlefuse->board_type);
+
+#ifdef CONFIG_ANTENNA_DIVERSITY
+ /* Antenna Diversity setting. */
+ if (registry_par->antdiv_cfg == 2) /* 2: From Efuse */
+ rtl_efuse->antenna_cfg = (contents[EEPROM_RF_OPT1]&0x18)>>3;
+ else
+ rtl_efuse->antenna_cfg = registry_par->antdiv_cfg; /* 0:OFF, */
+
+ printk(KERN_INFO "rtl8192cu: Antenna Config %x\n",
+ rtl_efuse->antenna_cfg);
+#endif
+}
+
+#ifdef CONFIG_BT_COEXIST
+static void _update_bt_param(_adapter *padapter)
+{
+ struct btcoexist_priv *pbtpriv = &(padapter->halpriv.bt_coexist);
+ struct registry_priv *registry_par = &padapter->registrypriv;
+ if (2 != registry_par->bt_iso) {
+ /* 0:Low, 1:High, 2:From Efuse */
+ pbtpriv->BT_Ant_isolation = registry_par->bt_iso;
+ }
+ if (registry_par->bt_sco == 1) {
+ /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter, 4.Busy,
+ * 5.OtherBusy */
+ pbtpriv->BT_Service = BT_OtherAction;
+ } else if (registry_par->bt_sco == 2) {
+ pbtpriv->BT_Service = BT_SCO;
+ } else if (registry_par->bt_sco == 4) {
+ pbtpriv->BT_Service = BT_Busy;
+ } else if (registry_par->bt_sco == 5) {
+ pbtpriv->BT_Service = BT_OtherBusy;
+ } else {
+ pbtpriv->BT_Service = BT_Idle;
+ }
+ pbtpriv->BT_Ampdu = registry_par->bt_ampdu;
+ pbtpriv->bCOBT = _TRUE;
+ pbtpriv->BtEdcaUL = 0;
+ pbtpriv->BtEdcaDL = 0;
+ pbtpriv->BtRssiState = 0xff;
+ pbtpriv->bInitSet = _FALSE;
+ pbtpriv->bBTBusyTraffic = _FALSE;
+ pbtpriv->bBTTrafficModeSet = _FALSE;
+ pbtpriv->bBTNonTrafficModeSet = _FALSE;
+ pbtpriv->CurrentState = 0;
+ pbtpriv->PreviousState = 0;
+	printk(KERN_INFO "rtl8192cu: BT Coexistence = %s\n",
+	       (pbtpriv->BT_Coexist == _TRUE) ? "enable" : "disable");
+ if (pbtpriv->BT_Coexist) {
+ if (pbtpriv->BT_Ant_Num == Ant_x2)
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "Ant_Num = Antx2\n");
+ else if (pbtpriv->BT_Ant_Num == Ant_x1)
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "Ant_Num = Antx1\n");
+ switch (pbtpriv->BT_CoexistType) {
+ case BT_2Wire:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = BT_2Wire\n");
+ break;
+ case BT_ISSC_3Wire:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = BT_ISSC_3Wire\n");
+ break;
+ case BT_Accel:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = BT_Accel\n");
+ break;
+ case BT_CSR_BC4:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = BT_CSR_BC4\n");
+ break;
+ case BT_CSR_BC8:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = BT_CSR_BC8\n");
+ break;
+ case BT_RTL8756:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = BT_RTL8756\n");
+ break;
+ default:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
+ "CoexistType = Unknown\n");
+ break;
+ }
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_Ant_isolation = %d\n",
+ pbtpriv->BT_Ant_isolation);
+ switch (pbtpriv->BT_Service) {
+ case BT_OtherAction:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+ "BT_OtherAction\n");
+ break;
+ case BT_SCO:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+ "BT_SCO\n");
+ break;
+ case BT_Busy:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+ "BT_Busy\n");
+ break;
+ case BT_OtherBusy:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+ "BT_OtherBusy\n");
+ break;
+ default:
+ printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
+ "BT_Idle\n");
+ break;
+ }
+ printk(KERN_INFO "rtl8192cu: BT_RadioSharedType = 0x%x\n",
+ pbtpriv->BT_RadioSharedType);
+ }
+}
+
+#define GET_BT_COEXIST(priv) (&(priv)->bt_coexist)
+
+static void _rtl92cu_read_bluetooth_coexistInfo(struct ieee80211_hw *hw,
+ u8 *contents,
+ bool bautoloadfailed)
+{
+ HAL_DATA_TYPE *pHalData = GET_HAL_DATA(Adapter);
+ bool isNormal = IS_NORMAL_CHIP(pHalData->VersionID);
+ struct btcoexist_priv *pbtpriv = &pHalData->bt_coexist;
+ u8 rf_opt4;
+
+ _rtw_memset(pbtpriv, 0, sizeof(struct btcoexist_priv));
+ if (bautoloadfailed) {
+ pbtpriv->BT_Coexist = _FALSE;
+ pbtpriv->BT_CoexistType = BT_2Wire;
+ pbtpriv->BT_Ant_Num = Ant_x2;
+ pbtpriv->BT_Ant_isolation = 0;
+ pbtpriv->BT_RadioSharedType = BT_Radio_Shared;
+ return;
+ }
+ if (isNormal) {
+ if (pHalData->BoardType == BOARD_USB_COMBO)
+ pbtpriv->BT_Coexist = _TRUE;
+ else
+ pbtpriv->BT_Coexist = ((contents[EEPROM_RF_OPT3] &
+ 0x20) >> 5); /* bit[5] */
+ rf_opt4 = contents[EEPROM_RF_OPT4];
+ pbtpriv->BT_CoexistType = ((rf_opt4&0xe)>>1); /* bit [3:1] */
+ pbtpriv->BT_Ant_Num = (rf_opt4&0x1); /* bit [0] */
+ pbtpriv->BT_Ant_isolation = ((rf_opt4&0x10)>>4); /* bit [4] */
+ pbtpriv->BT_RadioSharedType = ((rf_opt4&0x20)>>5); /* bit [5] */
+ } else {
+ pbtpriv->BT_Coexist = (contents[EEPROM_RF_OPT4] >> 4) ?
+ _TRUE : _FALSE;
+ }
+ _update_bt_param(Adapter);
+}
+#endif
+
+static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u16 i, usvalue;
+ u8 hwinfo[HWSET_MAX_SIZE] = {0};
+ u16 eeprom_id;
+
+ if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ rtl_efuse_shadow_map_update(hw);
+ memcpy((void *)hwinfo,
+ (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ HWSET_MAX_SIZE);
+ } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("RTL819X Not boot from eeprom, check it !!"));
+ }
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
+ hwinfo, HWSET_MAX_SIZE);
+ eeprom_id = *((u16 *)&hwinfo[0]);
+ if (eeprom_id != RTL8190_EEPROM_ID) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+ rtlefuse->autoload_failflag = true;
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ rtlefuse->autoload_failflag = false;
+ }
+ if (rtlefuse->autoload_failflag == true)
+ return;
+ for (i = 0; i < 6; i += 2) {
+ usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
+ *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+ }
+ printk(KERN_INFO "rtl8192cu: MAC address: %pM\n", rtlefuse->dev_addr);
+ _rtl92cu_read_txpower_info_from_hwpg(hw,
+ rtlefuse->autoload_failflag, hwinfo);
+ rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
+ rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ (" VID = 0x%02x PID = 0x%02x\n",
+ rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
+ rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+ rtlefuse->txpwr_fromeprom = true;
+ rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+ if (rtlhal->oem_id == RT_CID_DEFAULT) {
+ switch (rtlefuse->eeprom_oemid) {
+ case EEPROM_CID_DEFAULT:
+ if (rtlefuse->eeprom_did == 0x8176) {
+ if ((rtlefuse->eeprom_svid == 0x103C &&
+ rtlefuse->eeprom_smid == 0x1629))
+ rtlhal->oem_id = RT_CID_819x_HP;
+ else
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ } else {
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ }
+ break;
+ case EEPROM_CID_TOSHIBA:
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ break;
+ case EEPROM_CID_QMI:
+ rtlhal->oem_id = RT_CID_819x_QMI;
+ break;
+ case EEPROM_CID_WHQL:
+ default:
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ break;
+ }
+ }
+ _rtl92cu_read_board_type(hw, hwinfo);
+#ifdef CONFIG_BT_COEXIST
+ _rtl92cu_read_bluetooth_coexistInfo(hw, hwinfo,
+ rtlefuse->autoload_failflag);
+#endif
+}
+
+static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ switch (rtlhal->oem_id) {
+ case RT_CID_819x_HP:
+ usb_priv->ledctl.led_opendrain = true;
+ break;
+ case RT_CID_819x_Lenovo:
+ case RT_CID_DEFAULT:
+ case RT_CID_TOSHIBA:
+ case RT_CID_CCX:
+ case RT_CID_819x_Acer:
+ case RT_CID_WHQL:
+ default:
+ break;
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("RT Customized ID: 0x%02X\n", rtlhal->oem_id));
+}
+
+void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw)
+{
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_u1b;
+
+ if (!IS_NORMAL_CHIP(rtlhal->version))
+ return;
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+ rtlefuse->epromtype = (tmp_u1b & EEPROMSEL) ?
+ EEPROM_93C46 : EEPROM_BOOT_EFUSE;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from %s\n",
+ (tmp_u1b & EEPROMSEL) ? "EEPROM" : "EFUSE"));
+ rtlefuse->autoload_failflag = (tmp_u1b & EEPROM_EN) ? false : true;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload %s\n",
+ (tmp_u1b & EEPROM_EN) ? "OK!!" : "ERR!!"));
+ _rtl92cu_read_adapter_info(hw);
+ _rtl92cu_hal_customized_behavior(hw);
+ return;
+}
+
+static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int status = 0;
+ u16 value16;
+ u8 value8;
+ /* polling autoload done. */
+ u32 pollingCount = 0;
+
+ do {
+ if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Autoload Done!\n"));
+ break;
+ }
+ if (pollingCount++ > 100) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+ ("Failed to polling REG_APS_FSMCO[PFM_ALDN]"
+ " done!\n"));
+ return -ENODEV;
+ }
+ } while (true);
+ /* 0. RSV_CTRL 0x1C[7:0] = 0 unlock ISO/CLK/Power control register */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
+ /* Power on when re-enter from IPS/Radio off/card disable */
+ /* enable SPS into PWM mode */
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+ udelay(100);
+ value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
+ if (0 == (value8 & LDV12_EN)) {
+ value8 |= LDV12_EN;
+ rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ (" power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x.\n",
+ value8));
+ udelay(100);
+ value8 = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL);
+ value8 &= ~ISO_MD2PP;
+ rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, value8);
+ }
+ /* auto enable WLAN */
+ pollingCount = 0;
+ value16 = rtl_read_word(rtlpriv, REG_APS_FSMCO);
+ value16 |= APFM_ONMAC;
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, value16);
+ do {
+ if (!(rtl_read_word(rtlpriv, REG_APS_FSMCO) & APFM_ONMAC)) {
+ printk(KERN_INFO "rtl8192cu: MAC auto ON okay!\n");
+ break;
+ }
+ if (pollingCount++ > 100) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+ ("Failed to polling REG_APS_FSMCO[APFM_ONMAC]"
+ " done!\n"));
+ return -ENODEV;
+ }
+ } while (true);
+ /* Enable Radio ,GPIO ,and LED function */
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x0812);
+ /* release RF digital isolation */
+ value16 = rtl_read_word(rtlpriv, REG_SYS_ISO_CTRL);
+ value16 &= ~ISO_DIOR;
+ rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, value16);
+ /* Reconsider when to do this operation after asking HWSD. */
+ pollingCount = 0;
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, (rtl_read_byte(rtlpriv,
+ REG_APSD_CTRL) & ~BIT(6)));
+ do {
+ pollingCount++;
+ } while ((pollingCount < 200) &&
+ (rtl_read_byte(rtlpriv, REG_APSD_CTRL) & BIT(7)));
+ /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
+ value16 = rtl_read_word(rtlpriv, REG_CR);
+ value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN |
+ PROTOCOL_EN | SCHEDULE_EN | MACTXEN | MACRXEN | ENSEC);
+ rtl_write_word(rtlpriv, REG_CR, value16);
+ return status;
+}
+
+static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 out_ep_num,
+ u8 queue_sel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool isChipN = IS_NORMAL_CHIP(rtlhal->version);
+ u32 outEPNum = (u32)out_ep_num;
+ u32 numHQ = 0;
+ u32 numLQ = 0;
+ u32 numNQ = 0;
+ u32 numPubQ;
+ u32 value32;
+ u8 value8;
+ u32 txQPageNum, txQPageUnit, txQRemainPage;
+
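+ /* Split the TX packet buffer pages between the public queue and the
+ * high/low/normal queues, based on the number of USB out-endpoints
+ * and the queue selection mask. */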
+ if (!wmm_enable) {
+ numPubQ = (isChipN) ? CHIP_B_PAGE_NUM_PUBQ :
+ CHIP_A_PAGE_NUM_PUBQ;
+ txQPageNum = TX_TOTAL_PAGE_NUMBER - numPubQ;
+
+ txQPageUnit = txQPageNum/outEPNum;
+ txQRemainPage = txQPageNum % outEPNum;
+ if (queue_sel & TX_SELE_HQ)
+ numHQ = txQPageUnit;
+ if (queue_sel & TX_SELE_LQ)
+ numLQ = txQPageUnit;
+ /* The HIGH priority queue is always present in a two out-EP
+ * configuration. Remaining pages are assigned to the High queue. */
+ if ((outEPNum > 1) && (txQRemainPage))
+ numHQ += txQRemainPage;
+ /* NOTE: This step is done before writing REG_RQPN. */
+ if (isChipN) {
+ if (queue_sel & TX_SELE_NQ)
+ numNQ = txQPageUnit;
+ value8 = (u8)_NPQ(numNQ);
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
+ }
+ } else {
+ /* For WMM, the number of out-EPs must be greater than or equal to 2! */
+ numPubQ = isChipN ? WMM_CHIP_B_PAGE_NUM_PUBQ :
+ WMM_CHIP_A_PAGE_NUM_PUBQ;
+ if (queue_sel & TX_SELE_HQ) {
+ numHQ = isChipN ? WMM_CHIP_B_PAGE_NUM_HPQ :
+ WMM_CHIP_A_PAGE_NUM_HPQ;
+ }
+ if (queue_sel & TX_SELE_LQ) {
+ numLQ = isChipN ? WMM_CHIP_B_PAGE_NUM_LPQ :
+ WMM_CHIP_A_PAGE_NUM_LPQ;
+ }
+ /* NOTE: This step is done before writing REG_RQPN. */
+ if (isChipN) {
+ if (queue_sel & TX_SELE_NQ)
+ numNQ = WMM_CHIP_B_PAGE_NUM_NPQ;
+ value8 = (u8)_NPQ(numNQ);
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
+ }
+ }
+ /* TX DMA */
+ value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
+ rtl_write_dword(rtlpriv, REG_RQPN, value32);
+}
+
+static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw, bool wmm_enable)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 txpktbuf_bndy;
+ u8 value8;
+
+ if (!wmm_enable)
+ txpktbuf_bndy = TX_PAGE_BOUNDARY;
+ else /* for WMM */
+ txpktbuf_bndy = (IS_NORMAL_CHIP(rtlhal->version))
+ ? WMM_CHIP_B_TX_PAGE_BOUNDARY
+ : WMM_CHIP_A_TX_PAGE_BOUNDARY;
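+ /* Program the same page boundary into the beacon, management and
+ * WMAC loopback queue boundary registers, then set the TX/RX FIFO
+ * boundary and the RX/TX page sizes. */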
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TDECTRL+1, txpktbuf_bndy);
+ rtl_write_word(rtlpriv, (REG_TRXFF_BNDY + 2), 0x27FF);
+ value8 = _PSRX(RX_PAGE_SIZE_REG_VALUE) | _PSTX(PBP_128);
+ rtl_write_byte(rtlpriv, REG_PBP, value8);
+}
+
+static void _rtl92c_init_chipN_reg_priority(struct ieee80211_hw *hw, u16 beQ,
+ u16 bkQ, u16 viQ, u16 voQ,
+ u16 mgtQ, u16 hiQ)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 value16 = (rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0x7);
+
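+ /* Preserve the low three bits of REG_TRXDMA_CTRL and map each of the
+ * BE/BK/VI/VO/MGT/HI queues onto the requested DMA queue. */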
+ value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) |
+ _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) |
+ _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ);
+ rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, value16);
+}
+
+static void _rtl92cu_init_chipN_one_out_ep_priority(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 queue_sel)
+{
+ u16 uninitialized_var(value);
+
+ switch (queue_sel) {
+ case TX_SELE_HQ:
+ value = QUEUE_HIGH;
+ break;
+ case TX_SELE_LQ:
+ value = QUEUE_LOW;
+ break;
+ case TX_SELE_NQ:
+ value = QUEUE_NORMAL;
+ break;
+ default:
+ WARN_ON(1); /* Shall not reach here! */
+ break;
+ }
+ _rtl92c_init_chipN_reg_priority(hw, value, value, value, value,
+ value, value);
+ printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92cu_init_chipN_two_out_ep_priority(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 queue_sel)
+{
+ u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+ u16 uninitialized_var(valueHi);
+ u16 uninitialized_var(valueLow);
+
+ switch (queue_sel) {
+ case (TX_SELE_HQ | TX_SELE_LQ):
+ valueHi = QUEUE_HIGH;
+ valueLow = QUEUE_LOW;
+ break;
+ case (TX_SELE_NQ | TX_SELE_LQ):
+ valueHi = QUEUE_NORMAL;
+ valueLow = QUEUE_LOW;
+ break;
+ case (TX_SELE_HQ | TX_SELE_NQ):
+ valueHi = QUEUE_HIGH;
+ valueLow = QUEUE_NORMAL;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ if (!wmm_enable) {
+ beQ = valueLow;
+ bkQ = valueLow;
+ viQ = valueHi;
+ voQ = valueHi;
+ mgtQ = valueHi;
+ hiQ = valueHi;
+ } else { /* for WMM, CONFIG_OUT_EP_WIFI_MODE */
+ beQ = valueHi;
+ bkQ = valueLow;
+ viQ = valueLow;
+ voQ = valueHi;
+ mgtQ = valueHi;
+ hiQ = valueHi;
+ }
+ _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+ printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 queue_sel)
+{
+ u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (!wmm_enable) { /* typical setting */
+ beQ = QUEUE_LOW;
+ bkQ = QUEUE_LOW;
+ viQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
+ mgtQ = QUEUE_HIGH;
+ hiQ = QUEUE_HIGH;
+ } else { /* for WMM */
+ beQ = QUEUE_LOW;
+ bkQ = QUEUE_NORMAL;
+ viQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
+ mgtQ = QUEUE_HIGH;
+ hiQ = QUEUE_HIGH;
+ }
+ _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+ ("Tx queue select :0x%02x..\n", queue_sel));
+}
+
+static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 out_ep_num,
+ u8 queue_sel)
+{
+ switch (out_ep_num) {
+ case 1:
+ _rtl92cu_init_chipN_one_out_ep_priority(hw, wmm_enable,
+ queue_sel);
+ break;
+ case 2:
+ _rtl92cu_init_chipN_two_out_ep_priority(hw, wmm_enable,
+ queue_sel);
+ break;
+ case 3:
+ _rtl92cu_init_chipN_three_out_ep_priority(hw, wmm_enable,
+ queue_sel);
+ break;
+ default:
+ WARN_ON(1); /* Shall not reach here! */
+ break;
+ }
+}
+
+static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 out_ep_num,
+ u8 queue_sel)
+{
+ u8 hq_sele = 0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (out_ep_num) {
+ case 2: /* (TX_SELE_HQ|TX_SELE_LQ) */
+ if (!wmm_enable) /* typical setting */
+ hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ |
+ HQSEL_HIQ;
+ else /* for WMM */
+ hq_sele = HQSEL_VOQ | HQSEL_BEQ | HQSEL_MGTQ |
+ HQSEL_HIQ;
+ break;
+ case 1:
+ if (TX_SELE_LQ == queue_sel) {
+ /* map all endpoints to the Low queue */
+ hq_sele = 0;
+ } else if (TX_SELE_HQ == queue_sel) {
+ /* map all endpoints to the High queue */
+ hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_BEQ |
+ HQSEL_BKQ | HQSEL_MGTQ | HQSEL_HIQ;
+ }
+ break;
+ default:
+ WARN_ON(1); /* Shall not reach here! */
+ break;
+ }
+ rtl_write_byte(rtlpriv, (REG_TRXDMA_CTRL+1), hq_sele);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+ ("Tx queue select :0x%02x..\n", hq_sele));
+}
+
+static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw,
+ bool wmm_enable,
+ u8 out_ep_num,
+ u8 queue_sel)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ if (IS_NORMAL_CHIP(rtlhal->version))
+ _rtl92cu_init_chipN_queue_priority(hw, wmm_enable, out_ep_num,
+ queue_sel);
+ else
+ _rtl92cu_init_chipT_queue_priority(hw, wmm_enable, out_ep_num,
+ queue_sel);
+}
+
+static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw)
+{
+}
+
+static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
+{
+ u16 value16;
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APP_FCS |
+ RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
+ RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
+ rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+ /* Accept all multicast address */
+ rtl_write_dword(rtlpriv, REG_MAR, 0xFFFFFFFF);
+ rtl_write_dword(rtlpriv, REG_MAR + 4, 0xFFFFFFFF);
+ /* Accept all management frames */
+ value16 = 0xFFFF;
+ rtl92c_set_mgt_filter(hw, value16);
+ /* Reject all control frame - default value is 0 */
+ rtl92c_set_ctrl_filter(hw, 0x0);
+ /* Accept all data frames */
+ value16 = 0xFFFF;
+ rtl92c_set_data_filter(hw, value16);
+}
+
+static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+ int err = 0;
+ u32 boundary = 0;
+ u8 wmm_enable = false; /* TODO */
+ u8 out_ep_nums = rtlusb->out_ep_nums;
+ u8 queue_sel = rtlusb->out_queue_sel;
+ err = _rtl92cu_init_power_on(hw);
+
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Failed to init power on!\n"));
+ return err;
+ }
+ if (!wmm_enable) {
+ boundary = TX_PAGE_BOUNDARY;
+ } else { /* for WMM */
+ boundary = (IS_NORMAL_CHIP(rtlhal->version))
+ ? WMM_CHIP_B_TX_PAGE_BOUNDARY
+ : WMM_CHIP_A_TX_PAGE_BOUNDARY;
+ }
+ if (false == rtl92c_init_llt_table(hw, boundary)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Failed to init LLT Table!\n"));
+ return -EINVAL;
+ }
+ _rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums,
+ queue_sel);
+ _rtl92c_init_trx_buffer(hw, wmm_enable);
+ _rtl92cu_init_queue_priority(hw, wmm_enable, out_ep_nums,
+ queue_sel);
+ /* Get Rx PHY status in order to report RSSI and others. */
+ rtl92c_init_driver_info_size(hw, RTL92C_DRIVER_INFO_SIZE);
+ rtl92c_init_interrupt(hw);
+ rtl92c_init_network_type(hw);
+ _rtl92cu_init_wmac_setting(hw);
+ rtl92c_init_adaptive_ctrl(hw);
+ rtl92c_init_edca(hw);
+ rtl92c_init_rate_fallback(hw);
+ rtl92c_init_retry_function(hw);
+ _rtl92cu_init_usb_aggregation(hw);
+ rtlpriv->cfg->ops->set_bw_mode(hw, NL80211_CHAN_HT20);
+ rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version));
+ rtl92c_init_beacon_parameters(hw, rtlhal->version);
+ rtl92c_init_ampdu_aggregation(hw);
+ rtl92c_init_beacon_max_error(hw, true);
+ return err;
+}
+
+void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 sec_reg_value = 0x0;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm));
+ if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("not open sw encryption\n"));
+ return;
+ }
+ sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
+ if (rtlpriv->sec.use_defaultkey) {
+ sec_reg_value |= SCR_TxUseDK;
+ sec_reg_value |= SCR_RxUseDK;
+ }
+ if (IS_NORMAL_CHIP(rtlhal->version))
+ sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+ rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("The SECR-value %x\n", sec_reg_value));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+}
+
+static void _rtl92cu_hw_configure(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ /* To Fix MAC loopback mode fail. */
+ rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
+ rtl_write_byte(rtlpriv, 0x15, 0xe9);
+ /* HW SEQ CTRL */
+ /* set 0x0 to 0xFF by tynli. Default enable HW SEQ NUM. */
+ rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
+ /* fixed USB interface interference issue */
+ rtl_write_byte(rtlpriv, 0xfe40, 0xe0);
+ rtl_write_byte(rtlpriv, 0xfe41, 0x8d);
+ rtl_write_byte(rtlpriv, 0xfe42, 0x80);
+ rtlusb->reg_bcn_ctrl_val = 0x18;
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlusb->reg_bcn_ctrl_val);
+}
+
+static void _InitPABias(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 pa_setting;
+
+ /* FIXED PA current issue */
+ pa_setting = efuse_read_1byte(hw, 0x1FA);
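+ /* Efuse offset 0x1FA: when bit 0 is clear, program the PA bias for
+ * RF path A; when bit 1 is clear (92C normal chips only), do the same
+ * for RF path B; when bit 4 is clear, adjust register 0x16. */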
+ if (!(pa_setting & BIT(0))) {
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x0F406);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x4F406);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x8F406);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0xCF406);
+ }
+ if (!(pa_setting & BIT(1)) && IS_NORMAL_CHIP(rtlhal->version) &&
+ IS_92C_SERIAL(rtlhal->version)) {
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x0F406);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x4F406);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x8F406);
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0xCF406);
+ }
+ if (!(pa_setting & BIT(4))) {
+ pa_setting = rtl_read_byte(rtlpriv, 0x16);
+ pa_setting &= 0x0F;
+ rtl_write_byte(rtlpriv, 0x16, pa_setting | 0x90);
+ }
+}
+
+static void _InitAntenna_Selection(struct ieee80211_hw *hw)
+{
+#ifdef CONFIG_ANTENNA_DIVERSITY
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (pHalData->AntDivCfg == 0)
+ return;
+
+ if (rtlphy->rf_type == RF_1T1R) {
+ rtl_write_dword(rtlpriv, REG_LEDCFG0,
+ rtl_read_dword(rtlpriv,
+ REG_LEDCFG0)|BIT(23));
+ rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
+ if (rtl_get_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300) ==
+ Antenna_A)
+ pHalData->CurAntenna = Antenna_A;
+ else
+ pHalData->CurAntenna = Antenna_B;
+ }
+#endif
+}
+
+static void _dump_registers(struct ieee80211_hw *hw)
+{
+}
+
+static void _update_mac_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR);
+ mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
+ mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
+ mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
+}
+
+int rtl92cu_hw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ int err = 0;
+ static bool iqk_initialized;
+
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
+ err = _rtl92cu_init_mac(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("init mac failed!\n"));
+ return err;
+ }
+ err = rtl92c_download_fw(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Failed to download FW. Init HW without FW now..\n"));
+ err = 1;
+ rtlhal->fw_ready = false;
+ return err;
+ } else {
+ rtlhal->fw_ready = true;
+ }
+ rtlhal->last_hmeboxnum = 0; /* h2c */
+ _rtl92cu_phy_param_tab_init(hw);
+ rtl92cu_phy_mac_config(hw);
+ rtl92cu_phy_bb_config(hw);
+ rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
+ rtl92c_phy_rf_config(hw);
+ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+ !IS_92C_SERIAL(rtlhal->version)) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00);
+ }
+ rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
+ RF_CHNLBW, RFREG_OFFSET_MASK);
+ rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
+ RF_CHNLBW, RFREG_OFFSET_MASK);
+ rtl92cu_bb_block_on(hw);
+ rtl_cam_reset_all_entry(hw);
+ rtl92cu_enable_hw_security_config(hw);
+ ppsc->rfpwr_state = ERFON;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+ if (ppsc->rfpwr_state == ERFON) {
+ rtl92c_phy_set_rfpath_switch(hw, 1);
+ if (iqk_initialized) {
+ rtl92c_phy_iq_calibrate(hw, false);
+ } else {
+ rtl92c_phy_iq_calibrate(hw, false);
+ iqk_initialized = true;
+ }
+ rtl92c_dm_check_txpower_tracking(hw);
+ rtl92c_phy_lc_calibrate(hw);
+ }
+ _rtl92cu_hw_configure(hw);
+ _InitPABias(hw);
+ _InitAntenna_Selection(hw);
+ _update_mac_setting(hw);
+ rtl92c_dm_init(hw);
+ _dump_registers(hw);
+ return err;
+}
+
+static void _DisableRFAFEAndResetBB(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+/**************************************
+a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue
+b. RF path 0 offset 0x00 = 0x00 disable RF
+c. APSD_CTRL 0x600[7:0] = 0x40
+d. SYS_FUNC_EN 0x02[7:0] = 0x16 reset BB state machine
+e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine
+***************************************/
+ u8 eRFPath = 0, value8 = 0;
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+ rtl_set_rfreg(hw, (enum radio_path)eRFPath, 0x0, MASKBYTE0, 0x0);
+
+ value8 |= APSDOFF;
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, value8); /*0x40*/
+ value8 = 0;
+ value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8);/*0x16*/
+ value8 &= (~FEN_BB_GLB_RSTn);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8); /*0x14*/
+}
+
+static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ if (rtlhal->fw_version <= 0x20) {
+ /*****************************
+ f. MCUFWDL 0x80[7:0]=0 reset MCU ready status
+ g. SYS_FUNC_EN 0x02[10]= 0 reset MCU reg, (8051 reset)
+ h. SYS_FUNC_EN 0x02[15-12]= 5 reset MAC reg, DCORE
+ i. SYS_FUNC_EN 0x02[10]= 1 enable MCU reg, (8051 enable)
+ ******************************/
+ u16 valu16 = 0;
+
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+ valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 &
+ (~FEN_CPUEN))); /* reset MCU ,8051 */
+ valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN)&0x0FFF;
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 |
+ (FEN_HWPDN|FEN_ELDR))); /* reset MAC */
+ valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 |
+ FEN_CPUEN)); /* enable MCU ,8051 */
+ } else {
+ u8 retry_cnts = 0;
+
+ /* IF fw in RAM code, do reset */
+ if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(1)) {
+ /* reset MCU ready status */
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+ if (rtlhal->fw_ready) {
+ /* 8051 reset by self */
+ rtl_write_byte(rtlpriv, REG_HMETFR+3, 0x20);
+ while ((retry_cnts++ < 100) &&
+ (FEN_CPUEN & rtl_read_word(rtlpriv,
+ REG_SYS_FUNC_EN))) {
+ udelay(50);
+ }
+ if (retry_cnts >= 100) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("#####=> 8051 reset failed!.."
+ ".......................\n"););
+ /* if 8051 reset fail, reset MAC. */
+ rtl_write_byte(rtlpriv,
+ REG_SYS_FUNC_EN + 1,
+ 0x50);
+ udelay(100);
+ }
+ }
+ }
+ /* Reset MAC and Enable 8051 */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x54);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+ }
+ if (bWithoutHWSM) {
+ /*****************************
+ Without HW auto state machine
+ g.SYS_CLKR 0x08[15:0] = 0x30A3 disable MAC clock
+ h.AFE_PLL_CTRL 0x28[7:0] = 0x80 disable AFE PLL
+ i.AFE_XTAL_CTRL 0x24[15:0] = 0x880F gated AFE DIG_CLOCK
+ j.SYS_ISO_CTRL 0x00[7:0] = 0xF9 isolated digital to PON
+ ******************************/
+ rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3);
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
+ rtl_write_word(rtlpriv, REG_AFE_XTAL_CTRL, 0x880F);
+ rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, 0xF9);
+ }
+}
+
+static void _ResetDigitalProcedure2(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+/*****************************
+k. SYS_FUNC_EN 0x03[7:0] = 0x44 disable ELDR function
+l. SYS_CLKR 0x08[15:0] = 0x3083 disable ELDR clock
+m. SYS_ISO_CTRL 0x01[7:0] = 0x83 isolated ELDR to PON
+******************************/
+ rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3);
+ rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL+1, 0x82);
+}
+
+static void _DisableGPIO(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+/***************************************
+j. GPIO_PIN_CTRL 0x44[31:0]=0x000
+k. Value = GPIO_PIN_CTRL[7:0]
+l. GPIO_PIN_CTRL 0x44[31:0] = 0x00FF0000 | (value <<8); write ext PIN level
+m. GPIO_MUXCFG 0x42 [15:0] = 0x0780
+n. LEDCFG 0x4C[15:0] = 0x8080
+***************************************/
+ u8 value8;
+ u16 value16;
+ u32 value32;
+
+ /* 1. Disable GPIO[7:0] */
+ rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, 0x0000);
+ value32 = rtl_read_dword(rtlpriv, REG_GPIO_PIN_CTRL) & 0xFFFF00FF;
+ value8 = (u8) (value32&0x000000FF);
+ value32 |= ((value8<<8) | 0x00FF0000);
+ rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, value32);
+ /* 2. Disable GPIO[10:8] */
+ rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG+3, 0x00);
+ value16 = rtl_read_word(rtlpriv, REG_GPIO_MUXCFG+2) & 0xFF0F;
+ value8 = (u8) (value16&0x000F);
+ value16 |= ((value8<<4) | 0x0780);
+ rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, value16);
+ /* 3. Disable LED0 & 1 */
+ rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
+}
+
+static void _DisableAnalog(struct ieee80211_hw *hw, bool bWithoutHWSM)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 value16 = 0;
+ u8 value8 = 0;
+
+ if (bWithoutHWSM) {
+ /*****************************
+ n. LDOA15_CTRL 0x20[7:0] = 0x04 disable A15 power
+ o. LDOV12D_CTRL 0x21[7:0] = 0x54 disable digital core power
+ r. When the driver calls disable, the ASIC will turn off the remaining
+ clocks automatically
+ ******************************/
+ rtl_write_byte(rtlpriv, REG_LDOA15_CTRL, 0x04);
+ value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
+ value8 &= (~LDV12_EN);
+ rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
+ }
+
+/*****************************
+h. SPS0_CTRL 0x11[7:0] = 0x23 enter PFM mode
+i. APS_FSMCO 0x04[15:0] = 0x4802 set USB suspend
+******************************/
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
+ value16 |= (APDM_HOST | AFSM_HSUS | PFM_ALDN);
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, (u16)value16);
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0E);
+}
+
+static void _CardDisableHWSM(struct ieee80211_hw *hw)
+{
+ /* ==== RF Off Sequence ==== */
+ _DisableRFAFEAndResetBB(hw);
+ /* ==== Reset digital sequence ====== */
+ _ResetDigitalProcedure1(hw, false);
+ /* ==== Pull GPIO PIN to balance level and LED control ====== */
+ _DisableGPIO(hw);
+ /* ==== Disable analog sequence === */
+ _DisableAnalog(hw, false);
+}
+
+static void _CardDisableWithoutHWSM(struct ieee80211_hw *hw)
+{
+ /*==== RF Off Sequence ==== */
+ _DisableRFAFEAndResetBB(hw);
+ /* ==== Reset digital sequence ====== */
+ _ResetDigitalProcedure1(hw, true);
+ /* ==== Pull GPIO PIN to balance level and LED control ====== */
+ _DisableGPIO(hw);
+ /* ==== Reset digital sequence ====== */
+ _ResetDigitalProcedure2(hw);
+ /* ==== Disable analog sequence === */
+ _DisableAnalog(hw, true);
+}
+
+static void _rtl92cu_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+ u8 set_bits, u8 clear_bits)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
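+ /* Update the cached copy of REG_BCN_CTRL and write the new value
+ * back to the hardware. */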
+ rtlusb->reg_bcn_ctrl_val |= set_bits;
+ rtlusb->reg_bcn_ctrl_val &= ~clear_bits;
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlusb->reg_bcn_ctrl_val);
+}
+
+static void _rtl92cu_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 tmp1byte = 0;
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp1byte & (~BIT(6)));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte &= ~(BIT(0));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+ } else {
+ rtl_write_byte(rtlpriv, REG_TXPAUSE,
+ rtl_read_byte(rtlpriv, REG_TXPAUSE) | BIT(6));
+ }
+}
+
+static void _rtl92cu_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 tmp1byte = 0;
+
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp1byte | BIT(6));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte |= BIT(0);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+ } else {
+ rtl_write_byte(rtlpriv, REG_TXPAUSE,
+ rtl_read_byte(rtlpriv, REG_TXPAUSE) & (~BIT(6)));
+ }
+}
+
+static void _rtl92cu_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ if (IS_NORMAL_CHIP(rtlhal->version))
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(1));
+ else
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+}
+
+static void _rtl92cu_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ if (IS_NORMAL_CHIP(rtlhal->version))
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(1), 0);
+ else
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+}
+
+static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
+ enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+
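+ /* Clear the two low network-type bits of MSR before OR-ing in the
+ * new mode below. */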
+ bt_msr &= 0xfc;
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
+ if (type == NL80211_IFTYPE_UNSPECIFIED || type ==
+ NL80211_IFTYPE_STATION) {
+ _rtl92cu_stop_tx_beacon(hw);
+ _rtl92cu_enable_bcn_sub_func(hw);
+ } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) {
+ _rtl92cu_resume_tx_beacon(hw);
+ _rtl92cu_disable_bcn_sub_func(hw);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("Set HW_VAR_MEDIA_"
+ "STATUS:No such media status(%x).\n", type));
+ }
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ bt_msr |= MSR_NOLINK;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to NO LINK!\n"));
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ bt_msr |= MSR_ADHOC;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to Ad Hoc!\n"));
+ break;
+ case NL80211_IFTYPE_STATION:
+ bt_msr |= MSR_INFRA;
+ ledaction = LED_CTL_LINK;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to STA!\n"));
+ break;
+ case NL80211_IFTYPE_AP:
+ bt_msr |= MSR_AP;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to AP!\n"));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Network type %d not support!\n", type));
+ goto error_out;
+ }
+ rtl_write_byte(rtlpriv, (MSR), bt_msr);
+ rtlpriv->cfg->ops->led_control(hw, ledaction);
+ if ((bt_msr & 0xfc) == MSR_AP)
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+ else
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+ return 0;
+error_out:
+ return 1;
+}
+
+void rtl92cu_card_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum nl80211_iftype opmode;
+
+ mac->link_state = MAC80211_NOLINK;
+ opmode = NL80211_IFTYPE_UNSPECIFIED;
+ _rtl92cu_set_media_status(hw, opmode);
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ if (rtlusb->disableHWSM)
+ _CardDisableHWSM(hw);
+ else
+ _CardDisableWithoutHWSM(hw);
+}
+
+void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+ /* dummy routine needed for callback from rtl_op_configure_filter() */
+}
+
+/*========================================================================== */
+
+static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 filterout_non_associated_bssid = false;
+
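+ /* BSSID filtering is only enabled for station and ad-hoc modes.
+ * While a scan pauses the dynamic mechanisms, the CBSSID filter and
+ * the TSF update are temporarily disabled again. */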
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ filterout_non_associated_bssid = true;
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_AP:
+ default:
+ break;
+ }
+ if (filterout_non_associated_bssid == true) {
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+ switch (rtlphy->current_io_type) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_RCR, (u8 *)(&reg_rcr));
+ /* enable update TSF */
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_RCR, (u8 *)(&reg_rcr));
+ /* disable update TSF */
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+ break;
+ }
+ } else {
+ reg_rcr |= (RCR_CBSSID);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *)(&reg_rcr));
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
+ }
+ } else if (filterout_non_associated_bssid == false) {
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+ reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *)(&reg_rcr));
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+ } else {
+ reg_rcr &= (~RCR_CBSSID);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+ (u8 *)(&reg_rcr));
+ _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
+ }
+ }
+}
+
+int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+ if (_rtl92cu_set_media_status(hw, type))
+ return -EOPNOTSUPP;
+ _rtl92cu_set_check_bssid(hw, type);
+ return 0;
+}
+
+static void _InitBeaconParameters(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1010);
+
+ /* TODO: Remove these magic numbers */
+ rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);
+ rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);
+ rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
+ /* Change the beacon AIFS to the largest number because the test chip
+ * does not perform contention before sending a beacon. */
+ if (IS_NORMAL_CHIP(rtlhal->version))
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
+ else
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
+}
+
+static void _beacon_function_enable(struct ieee80211_hw *hw, bool Enable,
+ bool Linked)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4) | BIT(3) | BIT(1)), 0x00);
+ rtl_write_byte(rtlpriv, REG_RD_CTRL+1, 0x6F);
+}
+
+void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval, atim_window;
+ u32 value32;
+
+ bcn_interval = mac->beacon_interval;
+ atim_window = 2; /*FIX MERGE */
+ rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ _InitBeaconParameters(hw);
+ rtl_write_byte(rtlpriv, REG_SLOT, 0x09);
+ /*
+ * Force beacon frame transmission even after receiving a beacon frame
+ * from another ad hoc STA.
+ *
+ * Reset the TSF timer to zero, added by Roger. 2008.06.24
+ */
+ value32 = rtl_read_dword(rtlpriv, REG_TCR);
+ value32 &= ~TSFRST;
+ rtl_write_dword(rtlpriv, REG_TCR, value32);
+ value32 |= TSFRST;
+ rtl_write_dword(rtlpriv, REG_TCR, value32);
+ RT_TRACE(rtlpriv, COMP_INIT|COMP_BEACON, DBG_LOUD,
+ ("SetBeaconRelatedRegisters8192CUsb(): Set TCR(%x)\n",
+ value32));
+ /* TODO: Modify later (Find the right parameters)
+ * NOTE: Fix test chip's bug (about contention windows's randomness) */
+ if ((mac->opmode == NL80211_IFTYPE_ADHOC) ||
+ (mac->opmode == NL80211_IFTYPE_AP)) {
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50);
+ }
+ _beacon_function_enable(hw, true, true);
+}
+
+void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval = mac->beacon_interval;
+
+ RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
+ ("beacon_interval:%d\n", bcn_interval));
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+}
+
+void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr)
+{
+}
+
+void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ switch (variable) {
+ case HW_VAR_RCR:
+ *((u32 *)(val)) = mac->rx_conf;
+ break;
+ case HW_VAR_RF_STATE:
+ *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+ break;
+ case HW_VAR_FWLPS_RF_ON:{
+ enum rf_pwrstate rfState;
+ u32 val_rcr;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
+ (u8 *)(&rfState));
+ if (rfState == ERFOFF) {
+ *((bool *) (val)) = true;
+ } else {
+ val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ val_rcr &= 0x00070000;
+ if (val_rcr)
+ *((bool *) (val)) = false;
+ else
+ *((bool *) (val)) = true;
+ }
+ break;
+ }
+ case HW_VAR_FW_PSMODE_STATUS:
+ *((bool *) (val)) = ppsc->fw_current_inpsmode;
+ break;
+ case HW_VAR_CORRECT_TSF:{
+ u64 tsf;
+ u32 *ptsf_low = (u32 *)&tsf;
+ u32 *ptsf_high = ((u32 *)&tsf) + 1;
+
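+ /* Read the 64-bit TSF as two 32-bit halves: high dword at
+ * REG_TSFTR + 4, low dword at REG_TSFTR. */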
+ *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+ *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+ *((u64 *)(val)) = tsf;
+ break;
+ }
+ case HW_VAR_MGT_FILTER:
+ *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
+ break;
+ case HW_VAR_CTRL_FILTER:
+ *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
+ break;
+ case HW_VAR_DATA_FILTER:
+ *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+}
+
+void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ enum wireless_mode wirelessmode = mac->mode;
+ u8 idx = 0;
+
+ switch (variable) {
+ case HW_VAR_ETHER_ADDR:{
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_MACID + idx),
+ val[idx]);
+ }
+ break;
+ }
+ case HW_VAR_BASIC_RATE:{
+ u16 rate_cfg = ((u16 *) val)[0];
+ u8 rate_index = 0;
+
+ rate_cfg &= 0x15f;
+ /* TODO */
+ /* if (mac->current_network.vender == HT_IOT_PEER_CISCO
+ * && ((rate_cfg & 0x150) == 0)) {
+ * rate_cfg |= 0x010;
+ * } */
+ rate_cfg |= 0x01;
+ rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
+ rtl_write_byte(rtlpriv, REG_RRSR + 1,
+ (rate_cfg >> 8) & 0xff);
+ while (rate_cfg > 0x1) {
+ rate_cfg >>= 1;
+ rate_index++;
+ }
+ rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
+ rate_index);
+ break;
+ }
+ case HW_VAR_BSSID:{
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_BSSID + idx),
+ val[idx]);
+ }
+ break;
+ }
+ case HW_VAR_SIFS:{
+ rtl_write_byte(rtlpriv, REG_SIFS_CCK + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_SIFS_OFDM + 1, val[1]);
+ rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_R2T_SIFS+1, val[0]);
+ rtl_write_byte(rtlpriv, REG_T2T_SIFS+1, val[0]);
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("HW_VAR_SIFS\n"));
+ break;
+ }
+ case HW_VAR_SLOT_TIME:{
+ u8 e_aci;
+ u8 QOS_MODE = 1;
+
+ rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("HW_VAR_SLOT_TIME %x\n", val[0]));
+ if (QOS_MODE) {
+ for (e_aci = 0; e_aci < AC_MAX; e_aci++)
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AC_PARAM,
+ (u8 *)(&e_aci));
+ } else {
+ u8 sifstime = 0;
+ u8 u1bAIFS;
+
+ if (IS_WIRELESS_MODE_A(wirelessmode) ||
+ IS_WIRELESS_MODE_N_24G(wirelessmode) ||
+ IS_WIRELESS_MODE_N_5G(wirelessmode))
+ sifstime = 16;
+ else
+ sifstime = 10;
+ u1bAIFS = sifstime + (2 * val[0]);
+ rtl_write_byte(rtlpriv, REG_EDCA_VO_PARAM,
+ u1bAIFS);
+ rtl_write_byte(rtlpriv, REG_EDCA_VI_PARAM,
+ u1bAIFS);
+ rtl_write_byte(rtlpriv, REG_EDCA_BE_PARAM,
+ u1bAIFS);
+ rtl_write_byte(rtlpriv, REG_EDCA_BK_PARAM,
+ u1bAIFS);
+ }
+ break;
+ }
+ case HW_VAR_ACK_PREAMBLE:{
+ u8 reg_tmp;
+ u8 short_preamble = (bool) (*(u8 *) val);
+ reg_tmp = 0;
+ if (short_preamble)
+ reg_tmp |= 0x80;
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
+ break;
+ }
+ case HW_VAR_AMPDU_MIN_SPACE:{
+ u8 min_spacing_to_set;
+ u8 sec_min_space;
+
+ min_spacing_to_set = *((u8 *) val);
+ if (min_spacing_to_set <= 7) {
+ switch (rtlpriv->sec.pairwise_enc_algorithm) {
+ case NO_ENCRYPTION:
+ case AESCCMP_ENCRYPTION:
+ sec_min_space = 0;
+ break;
+ case WEP40_ENCRYPTION:
+ case WEP104_ENCRYPTION:
+ case TKIP_ENCRYPTION:
+ sec_min_space = 6;
+ break;
+ default:
+ sec_min_space = 7;
+ break;
+ }
+ if (min_spacing_to_set < sec_min_space)
+ min_spacing_to_set = sec_min_space;
+ mac->min_space_cfg = ((mac->min_space_cfg &
+ 0xf8) |
+ min_spacing_to_set);
+ *val = min_spacing_to_set;
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg));
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ }
+ break;
+ }
+ case HW_VAR_SHORTGI_DENSITY:{
+ u8 density_to_set;
+
+ density_to_set = *((u8 *) val);
+ density_to_set &= 0x1f;
+ mac->min_space_cfg &= 0x07;
+ mac->min_space_cfg |= (density_to_set << 3);
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg));
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ break;
+ }
+ case HW_VAR_AMPDU_FACTOR:{
+ u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9};
+ u8 factor_toset;
+ u8 *p_regtoset = NULL;
+ u8 index = 0;
+
+ p_regtoset = regtoset_normal;
+ factor_toset = *((u8 *) val);
+ if (factor_toset <= 3) {
+ factor_toset = (1 << (factor_toset + 2));
+ if (factor_toset > 0xf)
+ factor_toset = 0xf;
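+ /* Clamp both nibbles of every REG_AGGLEN_LMT byte so that no
+ * per-queue aggregation limit exceeds the requested factor. */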
+ for (index = 0; index < 4; index++) {
+ if ((p_regtoset[index] & 0xf0) >
+ (factor_toset << 4))
+ p_regtoset[index] =
+ (p_regtoset[index] & 0x0f)
+ | (factor_toset << 4);
+ if ((p_regtoset[index] & 0x0f) >
+ factor_toset)
+ p_regtoset[index] =
+ (p_regtoset[index] & 0xf0)
+ | (factor_toset);
+ rtl_write_byte(rtlpriv,
+ (REG_AGGLEN_LMT + index),
+ p_regtoset[index]);
+ }
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset));
+ }
+ break;
+ }
+ case HW_VAR_AC_PARAM:{
+ u8 e_aci = *((u8 *) val);
+ u32 u4b_ac_param;
+ u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
+ u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
+ u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op);
+
+ u4b_ac_param = (u32) mac->ac[e_aci].aifs;
+ u4b_ac_param |= (u32) ((cw_min & 0xF) <<
+ AC_PARAM_ECW_MIN_OFFSET);
+ u4b_ac_param |= (u32) ((cw_max & 0xF) <<
+ AC_PARAM_ECW_MAX_OFFSET);
+ u4b_ac_param |= (u32) tx_op << AC_PARAM_TXOP_OFFSET;
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("queue:%x, ac_param:%x\n", e_aci,
+ u4b_ac_param));
+ switch (e_aci) {
+ case AC1_BK:
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM,
+ u4b_ac_param);
+ break;
+ case AC0_BE:
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
+ u4b_ac_param);
+ break;
+ case AC2_VI:
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM,
+ u4b_ac_param);
+ break;
+ case AC3_VO:
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM,
+ u4b_ac_param);
+ break;
+ default:
+ RT_ASSERT(false, ("SetHwReg8185(): invalid"
+ " aci: %d !\n", e_aci));
+ break;
+ }
+ if (rtlusb->acm_method != eAcmWay2_SW)
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_ACM_CTRL, (u8 *)(&e_aci));
+ break;
+ }
+ case HW_VAR_ACM_CTRL:{
+ u8 e_aci = *((u8 *) val);
+ union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
+ (&(mac->ac[0].aifs));
+ u8 acm = p_aci_aifsn->f.acm;
+ u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+ acm_ctrl =
+ acm_ctrl | ((rtlusb->acm_method == 2) ? 0x0 : 0x1);
+ if (acm) {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl |= AcmHw_BeqEn;
+ break;
+ case AC2_VI:
+ acm_ctrl |= AcmHw_ViqEn;
+ break;
+ case AC3_VO:
+ acm_ctrl |= AcmHw_VoqEn;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("HW_VAR_ACM_CTRL acm set "
+ "failed: eACI is %d\n", acm));
+ break;
+ }
+ } else {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl &= (~AcmHw_BeqEn);
+ break;
+ case AC2_VI:
+ acm_ctrl &= (~AcmHw_ViqEn);
+ break;
+ case AC3_VO:
+ acm_ctrl &= (~AcmHw_VoqEn);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
+ ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
+ "Write 0x%X\n", acm_ctrl));
+ rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+ break;
+ }
+ case HW_VAR_RCR:{
+ rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
+ mac->rx_conf = ((u32 *) (val))[0];
+ RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
+ ("### Set RCR(0x%08x) ###\n", mac->rx_conf));
+ break;
+ }
+ case HW_VAR_RETRY_LIMIT:{
+ u8 retry_limit = ((u8 *) (val))[0];
+
+ rtl_write_word(rtlpriv, REG_RL,
+ retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+ retry_limit << RETRY_LIMIT_LONG_SHIFT);
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_DMESG, ("Set HW_VAR_R"
+ "ETRY_LIMIT(0x%08x)\n", retry_limit));
+ break;
+ }
+ case HW_VAR_DUAL_TSF_RST:
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+ break;
+ case HW_VAR_EFUSE_BYTES:
+ rtlefuse->efuse_usedbytes = *((u16 *) val);
+ break;
+ case HW_VAR_EFUSE_USAGE:
+ rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ break;
+ case HW_VAR_IO_CMD:
+ rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
+ break;
+ case HW_VAR_WPA_CONFIG:
+ rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ break;
+ case HW_VAR_SET_RPWM:{
+ u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM);
+
+ if (rpwm_val & BIT(7))
+ rtl_write_byte(rtlpriv, REG_USB_HRPWM,
+ (*(u8 *)val));
+ else
+ rtl_write_byte(rtlpriv, REG_USB_HRPWM,
+ ((*(u8 *)val) | BIT(7)));
+ break;
+ }
+ case HW_VAR_H2C_FW_PWRMODE:{
+ u8 psmode = (*(u8 *) val);
+
+ if ((psmode != FW_PS_ACTIVE_MODE) &&
+ (!IS_92C_SERIAL(rtlhal->version)))
+ rtl92c_dm_rf_saving(hw, true);
+ rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+ break;
+ }
+ case HW_VAR_FW_PSMODE_STATUS:
+ ppsc->fw_current_inpsmode = *((bool *) val);
+ break;
+ case HW_VAR_H2C_FW_JOINBSSRPT:{
+ u8 mstatus = (*(u8 *) val);
+ u8 tmp_reg422;
+ bool recover = false;
+
+ if (mstatus == RT_MEDIA_CONNECT) {
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AID, NULL);
+ rtl_write_byte(rtlpriv, REG_CR + 1, 0x03);
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3));
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+ tmp_reg422 = rtl_read_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2);
+ if (tmp_reg422 & BIT(6))
+ recover = true;
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422 & (~BIT(6)));
+ rtl92c_set_fw_rsvdpagepkt(hw, 0);
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+ if (recover)
+ rtl_write_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422 | BIT(6));
+ rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+ }
+ rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ break;
+ }
+ case HW_VAR_AID:{
+ u16 u2btmp;
+
+ u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+ u2btmp &= 0xC000;
+ rtl_write_word(rtlpriv, REG_BCN_PSR_RPT,
+ (u2btmp | mac->assoc_id));
+ break;
+ }
+ case HW_VAR_CORRECT_TSF:{
+ u8 btype_ibss = ((u8 *) (val))[0];
+
+ if (btype_ibss == true)
+ _rtl92cu_stop_tx_beacon(hw);
+ _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3));
+ rtl_write_dword(rtlpriv, REG_TSFTR, (u32)(mac->tsf &
+ 0xffffffff));
+ rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+ (u32)((mac->tsf >> 32) & 0xffffffff));
+ _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
+ if (btype_ibss == true)
+ _rtl92cu_resume_tx_beacon(hw);
+ break;
+ }
+ case HW_VAR_MGT_FILTER:
+ rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val);
+ break;
+ case HW_VAR_CTRL_FILTER:
+ rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val);
+ break;
+ case HW_VAR_DATA_FILTER:
+ rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
+ "not process\n"));
+ break;
+ }
+}
+
+void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u32 ratr_value = (u32) mac->basic_rates;
+ u8 *mcsrate = mac->mcs;
+ u8 ratr_index = 0;
+ u8 nmode = mac->ht_enable;
+ u8 mimo_ps = 1;
+ u16 shortgi_rate = 0;
+ u32 tmp_ratr_value = 0;
+ u8 curtxbw_40mhz = mac->bw_40;
+ u8 curshortgi_40mhz = mac->sgi_40;
+ u8 curshortgi_20mhz = mac->sgi_20;
+ enum wireless_mode wirelessmode = mac->mode;
+
+ ratr_value |= ((*(u16 *) (mcsrate))) << 12;
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ if (ratr_value & 0x0000000c)
+ ratr_value &= 0x0000000d;
+ else
+ ratr_value &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_value &= 0x00000FF5;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ nmode = 1;
+ if (mimo_ps == 0) {
+ ratr_value &= 0x0007F005;
+ } else {
+ u32 ratr_mask;
+
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_1T1R)
+ ratr_mask = 0x000ff005;
+ else
+ ratr_mask = 0x0f0ff005;
+ if (curtxbw_40mhz)
+ ratr_mask |= 0x00000010;
+ ratr_value &= ratr_mask;
+ }
+ break;
+ default:
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_value &= 0x000ff0ff;
+ else
+ ratr_value &= 0x0f0ff0ff;
+ break;
+ }
+ ratr_value &= 0x0FFFFFFF;
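+ /* When short GI is usable for the current bandwidth, set bit 28 and
+ * find the highest enabled MCS index, replicated into each nibble of
+ * shortgi_rate. */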
+ if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz))) {
+ ratr_value |= 0x10000000;
+ tmp_ratr_value = (ratr_value >> 12);
+ for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+ if ((1 << shortgi_rate) & tmp_ratr_value)
+ break;
+ }
+ shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+ (shortgi_rate << 4) | (shortgi_rate);
+ }
+ rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("%x\n", rtl_read_dword(rtlpriv,
+ REG_ARFR0)));
+}
+
+void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u32 ratr_bitmap = (u32) mac->basic_rates;
+ u8 *p_mcsrate = mac->mcs;
+ u8 ratr_index = 0;
+ u8 curtxbw_40mhz = mac->bw_40;
+ u8 curshortgi_40mhz = mac->sgi_40;
+ u8 curshortgi_20mhz = mac->sgi_20;
+ enum wireless_mode wirelessmode = mac->mode;
+ bool shortgi = false;
+ u8 rate_mask[5];
+ u8 macid = 0;
+ u8 mimops = 1;
+
+ ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12);
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ ratr_index = RATR_INX_WIRELESS_B;
+ if (ratr_bitmap & 0x0000000c)
+ ratr_bitmap &= 0x0000000d;
+ else
+ ratr_bitmap &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_index = RATR_INX_WIRELESS_GB;
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00000f00;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x00000ff0;
+ else
+ ratr_bitmap &= 0x00000ff5;
+ break;
+ case WIRELESS_MODE_A:
+ ratr_index = RATR_INX_WIRELESS_A;
+ ratr_bitmap &= 0x00000ff0;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+ if (mimops == 0) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00070000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0007f000;
+ else
+ ratr_bitmap &= 0x0007f005;
+ } else {
+ if (rtlphy->rf_type == RF_1T2R ||
+ rtlphy->rf_type == RF_1T1R) {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff005;
+ }
+ } else {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff005;
+ }
+ }
+ }
+ if ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz)) {
+ if (macid == 0)
+ shortgi = true;
+ else if (macid == 1)
+ shortgi = false;
+ }
+ break;
+ default:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_bitmap &= 0x000ff0ff;
+ else
+ ratr_bitmap &= 0x0f0ff0ff;
+ break;
+ }
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("ratr_bitmap :%x\n",
+ ratr_bitmap));
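+ /* Build the 5-byte H2C rate-mask command: bytes 0-3 carry the rate
+ * bitmap with the RATR index in the top nibble, byte 4 carries the
+ * MAC ID plus the short-GI flag (0x20) and bit 0x80. */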
+ *(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) |
+ ratr_index << 28);
+ rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
+ "ratr_val:%x, %x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3],
+ rate_mask[4]));
+ rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
+}
+
+void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 sifs_timer;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+ (u8 *)&mac->slot_time);
+ if (!mac->ht_enable)
+ sifs_timer = 0x0a0a;
+ else
+ sifs_timer = 0x0e0e;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
+}
+
+bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ u8 u1tmp = 0;
+ bool actuallyset = false;
+ unsigned long flag = 0;
+ /* to do - usb autosuspend */
+ u8 usb_autosuspend = 0;
+
+ if (ppsc->swrf_processing)
+ return false;
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ if (ppsc->rfchange_inprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ return false;
+ } else {
+ ppsc->rfchange_inprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ }
+ cur_rfstate = ppsc->rfpwr_state;
+ if (usb_autosuspend) {
+		/* to do */
+ } else {
+ if (ppsc->pwrdown_mode) {
+ u1tmp = rtl_read_byte(rtlpriv, REG_HSISR);
+ e_rfpowerstate_toset = (u1tmp & BIT(7)) ?
+ ERFOFF : ERFON;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
+ ("pwrdown, 0x5c(BIT7)=%02x\n", u1tmp));
+ } else {
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG,
+ rtl_read_byte(rtlpriv,
+ REG_MAC_PINMUX_CFG) & ~(BIT(3)));
+ u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
+ e_rfpowerstate_toset = (u1tmp & BIT(3)) ?
+ ERFON : ERFOFF;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
+ ("GPIO_IN=%02x\n", u1tmp));
+ }
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("N-SS RF =%x\n",
+ e_rfpowerstate_toset));
+ }
+ if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF - HW "
+ "Radio ON, RF ON\n"));
+ ppsc->hwradiooff = false;
+ actuallyset = true;
+ } else if ((!ppsc->hwradiooff) && (e_rfpowerstate_toset ==
+ ERFOFF)) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF - HW"
+ " Radio OFF\n"));
+ ppsc->hwradiooff = true;
+ actuallyset = true;
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
+ ("pHalData->bHwRadioOff and eRfPowerStateToSet do not"
+ " match: pHalData->bHwRadioOff %x, eRfPowerStateToSet "
+ "%x\n", ppsc->hwradiooff, e_rfpowerstate_toset));
+ }
+ if (actuallyset) {
+ ppsc->hwradiooff = 1;
+ if (e_rfpowerstate_toset == ERFON) {
+ if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM))
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+ else if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
+ && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3))
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
+ }
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+		/* For the power-down module, we need to enable the register
+		 * block control register at 0x1c, then set the power-down
+		 * control bits BIT4 and BIT15 of register 0x04 to 1.
+		 */
+ if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) {
+ /* Enable register area 0x0-0xc. */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
+ if (IS_HARDWARE_TYPE_8723U(rtlhal)) {
+ /*
+ * We should configure HW PDn source for WiFi
+ * ONLY, and then our HW will be set in
+ * power-down mode if PDn source from all
+ * functions are configured.
+ */
+ u1tmp = rtl_read_byte(rtlpriv,
+ REG_MULTI_FUNC_CTRL);
+ rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL,
+ (u1tmp|WL_HWPDN_EN));
+ } else {
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
+ }
+ }
+ if (e_rfpowerstate_toset == ERFOFF) {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+ else if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
+ }
+ } else if (e_rfpowerstate_toset == ERFOFF || cur_rfstate == ERFOFF) {
+ /* Enter D3 or ASPM after GPIO had been done. */
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
+ else if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ } else {
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ }
+ *valid = 1;
+ return !ppsc->hwradiooff;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
new file mode 100644
index 000000000000..62af555bb61c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -0,0 +1,116 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_HW_H__
+#define __RTL92CU_HW_H__
+
+#define H2C_RA_MASK 6
+
+#define LLT_POLLING_LLT_THRESHOLD 20
+#define LLT_POLLING_READY_TIMEOUT_COUNT 100
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
+
+#define RX_PAGE_SIZE_REG_VALUE PBP_128
+/* Note: the pages are divided equally among each queue
+ * other than the public queue. */
+#define TX_TOTAL_PAGE_NUMBER 0xF8
+#define TX_PAGE_BOUNDARY (TX_TOTAL_PAGE_NUMBER + 1)
+
+
+#define CHIP_B_PAGE_NUM_PUBQ 0xE7
+
+/* For Test Chip Setting
+ * (HPQ + LPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */
+#define CHIP_A_PAGE_NUM_PUBQ 0x7E
+
+
+/* For Chip A Setting */
+#define WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER 0xF5
+#define WMM_CHIP_A_TX_PAGE_BOUNDARY \
+ (WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
+
+#define WMM_CHIP_A_PAGE_NUM_PUBQ 0xA3
+#define WMM_CHIP_A_PAGE_NUM_HPQ 0x29
+#define WMM_CHIP_A_PAGE_NUM_LPQ 0x29
+
+
+
+/* Note: for the Chip B setting; modify later */
+#define WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER 0xF5
+#define WMM_CHIP_B_TX_PAGE_BOUNDARY \
+ (WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
+
+#define WMM_CHIP_B_PAGE_NUM_PUBQ 0xB0
+#define WMM_CHIP_B_PAGE_NUM_HPQ 0x29
+#define WMM_CHIP_B_PAGE_NUM_LPQ 0x1C
+#define WMM_CHIP_B_PAGE_NUM_NPQ 0x1C
+
+#define BOARD_TYPE_NORMAL_MASK 0xE0
+#define BOARD_TYPE_TEST_MASK 0x0F
+
+/* should be renamed and moved to another file */
+enum _BOARD_TYPE_8192CUSB {
+ BOARD_USB_DONGLE = 0, /* USB dongle */
+ BOARD_USB_High_PA = 1, /* USB dongle - high power PA */
+ BOARD_MINICARD = 2, /* Minicard */
+ BOARD_USB_SOLO = 3, /* USB solo-Slim module */
+ BOARD_USB_COMBO = 4, /* USB Combo-Slim module */
+};
+
+#define IS_HIGHT_PA(boardtype) \
+ ((boardtype == BOARD_USB_High_PA) ? true : false)
+
+#define RTL92C_DRIVER_INFO_SIZE 4
+void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw);
+void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw);
+int rtl92cu_hw_init(struct ieee80211_hw *hw);
+void rtl92cu_card_disable(struct ieee80211_hw *hw);
+int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw);
+void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);
+
+void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
+void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+u8 _rtl92c_get_chnl_group(u8 chnl);
+int rtl92c_download_fw(struct ieee80211_hw *hw);
+void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished);
+void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
+bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
new file mode 100644
index 000000000000..332c74348a69
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
@@ -0,0 +1,142 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../usb.h"
+#include "reg.h"
+#include "led.h"
+
+static void _rtl92cu_init_led(struct ieee80211_hw *hw,
+ struct rtl_led *pled, enum rtl_led_pin ledpin)
+{
+ pled->hw = hw;
+ pled->ledpin = ledpin;
+ pled->ledon = false;
+}
+
+static void _rtl92cu_deInit_led(struct rtl_led *pled)
+{
+}
+
+void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ u8 ledcfg;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+ ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ rtl_write_byte(rtlpriv,
+ REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6));
+ break;
+ case LED_PIN_LED1:
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 ("switch case not processed\n"));
+ break;
+ }
+ pled->ledon = true;
+}
+
+void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
+ u8 ledcfg;
+
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+ ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ ledcfg &= 0xf0;
+		if (usbpriv->ledctl.led_opendrain)
+ rtl_write_byte(rtlpriv, REG_LEDCFG2,
+ (ledcfg | BIT(1) | BIT(5) | BIT(6)));
+ else
+ rtl_write_byte(rtlpriv, REG_LEDCFG2,
+ (ledcfg | BIT(3) | BIT(5) | BIT(6)));
+ break;
+ case LED_PIN_LED1:
+ ledcfg &= 0x0f;
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 ("switch case not processed\n"));
+ break;
+ }
+ pled->ledon = false;
+}
+
+void rtl92cu_init_sw_leds(struct ieee80211_hw *hw)
+{
+ struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
+ _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led0), LED_PIN_LED0);
+ _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led1), LED_PIN_LED1);
+}
+
+void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw)
+{
+ struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
+ _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led0));
+ _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led1));
+}
+
+static void _rtl92cu_sw_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+}
+
+void rtl92cu_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+ (ledaction == LED_CTL_TX ||
+ ledaction == LED_CTL_RX ||
+ ledaction == LED_CTL_SITE_SURVEY ||
+ ledaction == LED_CTL_LINK ||
+ ledaction == LED_CTL_NO_LINK ||
+ ledaction == LED_CTL_START_TO_LINK ||
+ ledaction == LED_CTL_POWER_ON)) {
+ return;
+ }
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d,\n",
+ ledaction));
+ _rtl92cu_sw_led_control(hw, ledaction);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h
new file mode 100644
index 000000000000..decaee4d1eb1
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_LED_H__
+#define __RTL92CU_LED_H__
+
+void rtl92cu_init_sw_leds(struct ieee80211_hw *hw);
+void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw);
+void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl92cu_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
new file mode 100644
index 000000000000..f8514cba17b6
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -0,0 +1,1144 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+****************************************************************************/
+#include <linux/module.h>
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../usb.h"
+#include "../ps.h"
+#include "../cam.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "mac.h"
+#include "trx.h"
+
+/* macro to shorten lines */
+
+#define LINK_Q ui_link_quality
+#define RX_EVM rx_evm_percentage
+#define RX_SIGQ rx_mimo_signalquality
+
+
+void rtl92c_read_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ enum version_8192c chip_version = VERSION_UNKNOWN;
+ u32 value32;
+
+ value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
+ if (value32 & TRP_VAUX_EN) {
+ chip_version = (value32 & TYPE_ID) ? VERSION_TEST_CHIP_92C :
+ VERSION_TEST_CHIP_88C;
+ } else {
+ /* Normal mass production chip. */
+ chip_version = NORMAL_CHIP;
+ chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
+ chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
+ /* RTL8723 with BT function. */
+ chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0);
+ if (IS_VENDOR_UMC(chip_version))
+ chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
+ CHIP_VENDOR_UMC_B_CUT : 0);
+ if (IS_92C_SERIAL(chip_version)) {
+ value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
+ chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
+ CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
+ } else if (IS_8723_SERIES(chip_version)) {
+ value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
+ chip_version |= ((value32 & RF_RL_ID) ?
+ CHIP_8723_DRV_REV : 0);
+ }
+ }
+ rtlhal->version = (enum version_8192c)chip_version;
+ switch (rtlhal->version) {
+ case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_B_CHIP_92C.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_92C:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_92C.\n"));
+ break;
+ case VERSION_NORMAL_TSMC_CHIP_88C:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_88C.\n"));
+ break;
+ case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_"
+			  "92C_1T2R_A_CUT.\n"));
+ break;
+ case VERSION_NORMAL_UMC_CHIP_92C_A_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_"
+ "92C_A_CUT.\n"));
+ break;
+ case VERSION_NORMAL_UMC_CHIP_88C_A_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+ "_88C_A_CUT.\n"));
+ break;
+ case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+ "_92C_1T2R_B_CUT.\n"));
+ break;
+ case VERSION_NORMAL_UMC_CHIP_92C_B_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+ "_92C_B_CUT.\n"));
+ break;
+ case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
+ "_88C_B_CUT.\n"));
+ break;
+ case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
+ "_8723_1T1R_A_CUT.\n"));
+ break;
+ case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
+ "_8723_1T1R_B_CUT.\n"));
+ break;
+ case VERSION_TEST_CHIP_92C:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_TEST_CHIP_92C.\n"));
+ break;
+ case VERSION_TEST_CHIP_88C:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: VERSION_TEST_CHIP_88C.\n"));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Chip Version ID: ???????????????.\n"));
+ break;
+ }
+ if (IS_92C_SERIAL(rtlhal->version))
+ rtlphy->rf_type =
+ (IS_92C_1T2R(rtlhal->version)) ? RF_1T2R : RF_2T2R;
+ else
+ rtlphy->rf_type = RF_1T1R;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+ "RF_2T2R" : "RF_1T1R"));
+ if (get_rf_type(rtlphy) == RF_1T1R)
+ rtlpriv->dm.rfpath_rxenable[0] = true;
+ else
+ rtlpriv->dm.rfpath_rxenable[0] =
+ rtlpriv->dm.rfpath_rxenable[1] = true;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
+ rtlhal->version));
+}
+
+/**
+ * rtl92c_llt_write - LLT table write access
+ * @hw: ieee80211 hardware context
+ * @address: LLT logical address.
+ * @data: LLT data content
+ *
+ * Realtek hardware access function.
+ *
+ */
+bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool status = true;
+ long count = 0;
+ u32 value = _LLT_INIT_ADDR(address) |
+ _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+
+ rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+ do {
+ value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+ if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+ break;
+ if (count > POLLING_LLT_THRESHOLD) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+				 ("Failed to poll for LLT write completion"
+				  " at address %d! _LLT_OP_VALUE(%x)\n",
+ address, _LLT_OP_VALUE(value)));
+ status = false;
+ break;
+ }
+ } while (++count);
+ return status;
+}
+/**
+ * rtl92c_init_llt_table - Init LLT table
+ * @hw: ieee80211 hardware context
+ * @boundary: page boundary of the normal TX packet buffer
+ *
+ * Realtek hardware access function.
+ *
+ */
+bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
+{
+ bool rst = true;
+ u32 i;
+
+ for (i = 0; i < (boundary - 1); i++) {
+		rst = rtl92c_llt_write(hw, i, i + 1);
+ if (true != rst) {
+ printk(KERN_ERR "===> %s #1 fail\n", __func__);
+ return rst;
+ }
+ }
+ /* end of list */
+ rst = rtl92c_llt_write(hw, (boundary - 1), 0xFF);
+ if (true != rst) {
+ printk(KERN_ERR "===> %s #2 fail\n", __func__);
+ return rst;
+ }
+	/* Make the other pages a ring buffer.
+	 * This ring buffer is used as the beacon buffer if we configure this
+	 * MAC for dual-MAC transfer; otherwise it is used as the local
+	 * loopback buffer.
+	 */
+ for (i = boundary; i < LLT_LAST_ENTRY_OF_TX_PKT_BUFFER; i++) {
+ rst = rtl92c_llt_write(hw, i, (i + 1));
+ if (true != rst) {
+ printk(KERN_ERR "===> %s #3 fail\n", __func__);
+ return rst;
+ }
+ }
+ /* Let last entry point to the start entry of ring buffer */
+ rst = rtl92c_llt_write(hw, LLT_LAST_ENTRY_OF_TX_PKT_BUFFER, boundary);
+ if (true != rst) {
+ printk(KERN_ERR "===> %s #4 fail\n", __func__);
+ return rst;
+ }
+ return rst;
+}
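The two loops above leave the LLT describing two chains: pages 0 .. boundary-2 linked in order and terminated at page boundary-1 with 0xFF, and pages boundary .. LLT_LAST_ENTRY_OF_TX_PKT_BUFFER linked as a ring that wraps back to boundary. A small host-side sketch of the resulting link table (illustrative only; the boundary value assumes the non-WMM TX_PAGE_BOUNDARY of 0xF9 defined in hw.h above):

#include <stdio.h>

#define LAST_ENTRY 255	/* mirrors LLT_LAST_ENTRY_OF_TX_PKT_BUFFER */

int main(void)
{
	unsigned int llt[LAST_ENTRY + 1];
	unsigned int boundary = 0xF9;	/* TX_PAGE_BOUNDARY in the non-WMM case */
	unsigned int i;

	for (i = 0; i < boundary - 1; i++)	/* normal TX pages: simple chain */
		llt[i] = i + 1;
	llt[boundary - 1] = 0xFF;		/* end of the TX packet list */
	for (i = boundary; i < LAST_ENTRY; i++)	/* remaining pages: ring buffer */
		llt[i] = i + 1;
	llt[LAST_ENTRY] = boundary;		/* the ring wraps back to boundary */

	printf("llt[%u]=0x%x llt[%u]=%u\n",
	       boundary - 1, llt[boundary - 1], LAST_ENTRY, llt[LAST_ENTRY]);
	return 0;
}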
+void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 *macaddr = p_macaddr;
+ u32 entry_id = 0;
+ bool is_pairwise = false;
+ static u8 cam_const_addr[4][6] = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+ };
+ static u8 cam_const_broad[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ if (clear_all) {
+ u8 idx = 0;
+ u8 cam_offset = 0;
+ u8 clear_number = 5;
+
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
+ for (idx = 0; idx < clear_number; idx++) {
+ rtl_cam_mark_invalid(hw, cam_offset + idx);
+ rtl_cam_empty_entry(hw, cam_offset + idx);
+ if (idx < 5) {
+ memset(rtlpriv->sec.key_buf[idx], 0,
+ MAX_KEY_LEN);
+ rtlpriv->sec.key_len[idx] = 0;
+ }
+ }
+ } else {
+ switch (enc_algo) {
+ case WEP40_ENCRYPTION:
+ enc_algo = CAM_WEP40;
+ break;
+ case WEP104_ENCRYPTION:
+ enc_algo = CAM_WEP104;
+ break;
+ case TKIP_ENCRYPTION:
+ enc_algo = CAM_TKIP;
+ break;
+ case AESCCMP_ENCRYPTION:
+ enc_algo = CAM_AES;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+				 ("illegal switch case\n"));
+ enc_algo = CAM_TKIP;
+ break;
+ }
+ if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+ macaddr = cam_const_addr[key_index];
+ entry_id = key_index;
+ } else {
+ if (is_group) {
+ macaddr = cam_const_broad;
+ entry_id = key_index;
+ } else {
+ key_index = PAIRWISE_KEYIDX;
+ entry_id = CAM_PAIRWISE_KEY_POSITION;
+ is_pairwise = true;
+ }
+ }
+ if (rtlpriv->sec.key_len[key_index] == 0) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("delete one entry\n"));
+ rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("The insert KEY length is %d\n",
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("add one entry\n"));
+ if (is_pairwise) {
+ RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
+					      "Pairwise Key content:",
+ rtlpriv->sec.pairwise_key,
+ rtlpriv->sec.
+ key_len[PAIRWISE_KEYIDX]);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+					 ("set Pairwise key\n"));
+
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.
+ key_buf[key_index]);
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("set group key\n"));
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_cam_add_one_entry(hw,
+ rtlefuse->dev_addr,
+ PAIRWISE_KEYIDX,
+ CAM_PAIRWISE_KEY_POSITION,
+ enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf
+ [entry_id]);
+ }
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[entry_id]);
+ }
+ }
+ }
+}
+
+u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ return rtl_read_dword(rtlpriv, REG_TXDMA_STATUS);
+}
+
+void rtl92c_enable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
+ rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] &
+ 0xFFFFFFFF);
+ rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] &
+ 0xFFFFFFFF);
+ rtlpci->irq_enabled = true;
+ } else {
+ rtl_write_dword(rtlpriv, REG_HIMR, rtlusb->irq_mask[0] &
+ 0xFFFFFFFF);
+ rtl_write_dword(rtlpriv, REG_HIMRE, rtlusb->irq_mask[1] &
+ 0xFFFFFFFF);
+ rtlusb->irq_enabled = true;
+ }
+}
+
+void rtl92c_init_interrupt(struct ieee80211_hw *hw)
+{
+ rtl92c_enable_interrupt(hw);
+}
+
+void rtl92c_disable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
+ rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
+ if (IS_HARDWARE_TYPE_8192CE(rtlhal))
+ rtlpci->irq_enabled = false;
+ else if (IS_HARDWARE_TYPE_8192CU(rtlhal))
+ rtlusb->irq_enabled = false;
+}
+
+void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u32 u4b_ac_param;
+
+ rtl92c_dm_init_edca_turbo(hw);
+ u4b_ac_param = (u32) mac->ac[aci].aifs;
+ u4b_ac_param |=
+ ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) <<
+ AC_PARAM_ECW_MIN_OFFSET;
+ u4b_ac_param |=
+ ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) <<
+ AC_PARAM_ECW_MAX_OFFSET;
+ u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
+ AC_PARAM_TXOP_OFFSET;
+ RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD,
+ ("queue:%x, ac_param:%x\n", aci, u4b_ac_param));
+ switch (aci) {
+ case AC1_BK:
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
+ break;
+ case AC0_BE:
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
+ break;
+ case AC2_VI:
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
+ break;
+ case AC3_VO:
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
+ break;
+ default:
+ RT_ASSERT(false, ("invalid aci: %d !\n", aci));
+ break;
+ }
+}
+
+/*-------------------------------------------------------------------------
+ * HW MAC Address
+ *-------------------------------------------------------------------------*/
+void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
+{
+ u32 i;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ for (i = 0 ; i < ETH_ALEN ; i++)
+ rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, ("MAC Address: %02X-%02X-%02X-"
+ "%02X-%02X-%02X\n",
+ rtl_read_byte(rtlpriv, REG_MACID),
+ rtl_read_byte(rtlpriv, REG_MACID+1),
+ rtl_read_byte(rtlpriv, REG_MACID+2),
+ rtl_read_byte(rtlpriv, REG_MACID+3),
+ rtl_read_byte(rtlpriv, REG_MACID+4),
+ rtl_read_byte(rtlpriv, REG_MACID+5)));
+}
+
+void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, size);
+}
+
+int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+ u8 value;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ value = NT_NO_LINK;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Set Network type to NO LINK!\n"));
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ value = NT_LINK_AD_HOC;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Set Network type to Ad Hoc!\n"));
+ break;
+ case NL80211_IFTYPE_STATION:
+ value = NT_LINK_AP;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Set Network type to STA!\n"));
+ break;
+ case NL80211_IFTYPE_AP:
+ value = NT_AS_AP;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Set Network type to AP!\n"));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+			 ("Network type %d not supported!\n", type));
+ return -EOPNOTSUPP;
+ }
+ rtl_write_byte(rtlpriv, (REG_CR + 2), value);
+ return 0;
+}
+
+void rtl92c_init_network_type(struct ieee80211_hw *hw)
+{
+ rtl92c_set_network_type(hw, NL80211_IFTYPE_UNSPECIFIED);
+}
+
+void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw)
+{
+ u16 value16;
+ u32 value32;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* Response Rate Set */
+ value32 = rtl_read_dword(rtlpriv, REG_RRSR);
+ value32 &= ~RATE_BITMAP_ALL;
+ value32 |= RATE_RRSR_CCK_ONLY_1M;
+ rtl_write_dword(rtlpriv, REG_RRSR, value32);
+ /* SIFS (used in NAV) */
+ value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
+ rtl_write_word(rtlpriv, REG_SPEC_SIFS, value16);
+ /* Retry Limit */
+ value16 = _LRL(0x30) | _SRL(0x30);
+ rtl_write_dword(rtlpriv, REG_RL, value16);
+}
+
+void rtl92c_init_rate_fallback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* Set Data Auto Rate Fallback Retry Count register. */
+ rtl_write_dword(rtlpriv, REG_DARFRC, 0x00000000);
+ rtl_write_dword(rtlpriv, REG_DARFRC+4, 0x10080404);
+ rtl_write_dword(rtlpriv, REG_RARFRC, 0x04030201);
+ rtl_write_dword(rtlpriv, REG_RARFRC+4, 0x08070605);
+}
+
+static void rtl92c_set_cck_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
+ u8 ctx_sifs)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_SIFS_CCK, trx_sifs);
+ rtl_write_byte(rtlpriv, (REG_SIFS_CCK + 1), ctx_sifs);
+}
+
+static void rtl92c_set_ofdm_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
+ u8 ctx_sifs)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_SIFS_OFDM, trx_sifs);
+ rtl_write_byte(rtlpriv, (REG_SIFS_OFDM + 1), ctx_sifs);
+}
+
+void rtl92c_init_edca_param(struct ieee80211_hw *hw,
+ u16 queue, u16 txop, u8 cw_min, u8 cw_max, u8 aifs)
+{
+	/* sequence: VO, VI, BE, BK ==> the same as the 92C hardware design.
+	 * reference: enum nl80211_txq_q or the ieee80211_set_wmm_default
+	 * function.
+	 */
+ u32 value;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ value = (u32)aifs;
+ value |= ((u32)cw_min & 0xF) << 8;
+ value |= ((u32)cw_max & 0xF) << 12;
+ value |= (u32)txop << 16;
+ /* 92C hardware register sequence is the same as queue number. */
+ rtl_write_dword(rtlpriv, (REG_EDCA_VO_PARAM + (queue * 4)), value);
+}
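Each EDCA register written above is one 32-bit word: AIFS in bits 0-7, ECWmin in bits 8-11, ECWmax in bits 12-15 and TXOP in bits 16-31. A standalone sketch of the packing (the sample values are chosen only so the result matches the BE default 0x005EA42B programmed in rtl92c_init_edca below):

#include <stdio.h>

/* Illustrative: packs one EDCA parameter word as the function above does. */
static unsigned int pack_edca(unsigned char aifs, unsigned char ecw_min,
			      unsigned char ecw_max, unsigned short txop)
{
	unsigned int value = aifs;

	value |= ((unsigned int)ecw_min & 0xF) << 8;
	value |= ((unsigned int)ecw_max & 0xF) << 12;
	value |= (unsigned int)txop << 16;
	return value;
}

int main(void)
{
	/* AIFS 0x2B, ECWmin 4, ECWmax 10, TXOP 0x5E -> 0x005EA42B */
	printf("0x%08x\n", pack_edca(0x2B, 0x4, 0xA, 0x005E));
	return 0;
}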
+
+void rtl92c_init_edca(struct ieee80211_hw *hw)
+{
+ u16 value16;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	/* disable EDCCA count down, to reduce collision and retry */
+ value16 = rtl_read_word(rtlpriv, REG_RD_CTRL);
+ value16 |= DIS_EDCA_CNT_DWN;
+ rtl_write_word(rtlpriv, REG_RD_CTRL, value16);
+	/* Update SIFS timing.
+	 * pHalData->SifsTime = 0x0e0e0a0a; */
+ rtl92c_set_cck_sifs(hw, 0xa, 0xa);
+ rtl92c_set_ofdm_sifs(hw, 0xe, 0xe);
+ /* Set CCK/OFDM SIFS to be 10us. */
+ rtl_write_word(rtlpriv, REG_SIFS_CCK, 0x0a0a);
+ rtl_write_word(rtlpriv, REG_SIFS_OFDM, 0x1010);
+ rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0204);
+ rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x014004);
+ /* TXOP */
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, 0x005EA42B);
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0x0000A44F);
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x005EA324);
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x002FA226);
+ /* PIFS */
+ rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
+ /* AGGR BREAK TIME Register */
+ rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+ rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040);
+ rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x02);
+ rtl_write_byte(rtlpriv, REG_ATIMWND, 0x02);
+}
+
+void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x99997631);
+ rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+	/* init AMPDU aggregation number, tuned for Tx throughput */
+ rtl_write_word(rtlpriv, 0x4CA, 0x0708);
+}
+
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
+}
+
+void rtl92c_init_rdg_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_RD_CTRL, 0xFF);
+ rtl_write_word(rtlpriv, REG_RD_NAV_NXT, 0x200);
+ rtl_write_byte(rtlpriv, REG_RD_RESP_PKT_TH, 0x05);
+}
+
+void rtl92c_init_retry_function(struct ieee80211_hw *hw)
+{
+ u8 value8;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ value8 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL);
+ value8 |= EN_AMPDU_RTY_NEW;
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL, value8);
+ /* Set ACK timeout */
+ rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
+}
+
+void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
+ enum version_8192c version)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);/* ms */
+ rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);/*ms*/
+ rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
+ if (IS_NORMAL_CHIP(rtlhal->version))
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
+ else
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
+}
+
+void rtl92c_disable_fast_edca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0);
+}
+
+void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value = is2T ? MAX_MSS_DENSITY_2T : MAX_MSS_DENSITY_1T;
+
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value);
+}
+
+u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ return rtl_read_word(rtlpriv, REG_RXFLTMAP0);
+}
+
+void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter);
+}
+
+u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ return rtl_read_word(rtlpriv, REG_RXFLTMAP1);
+}
+
+void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter);
+}
+
+u16 rtl92c_get_data_filter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ return rtl_read_word(rtlpriv, REG_RXFLTMAP2);
+}
+
+void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter);
+}
+/*==============================================================*/
+
+static u8 _rtl92c_query_rxpwrpercentage(char antpower)
+{
+ if ((antpower <= -100) || (antpower >= 20))
+ return 0;
+ else if (antpower >= 0)
+ return 100;
+ else
+ return 100 + antpower;
+}
+
+static u8 _rtl92c_evm_db_to_percentage(char value)
+{
+ char ret_val;
+
+ ret_val = value;
+ if (ret_val >= 0)
+ ret_val = 0;
+ if (ret_val <= -33)
+ ret_val = -33;
+ ret_val = 0 - ret_val;
+ ret_val *= 3;
+ if (ret_val == 99)
+ ret_val = 100;
+ return ret_val;
+}
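The helper above clamps the reported EVM (a negative dB figure) to the range [-33, 0], negates it and multiplies by 3, so roughly -33 dB maps to 100% and 0 dB to 0%. A quick standalone check of that mapping (illustrative only):

#include <stdio.h>

/* Same arithmetic as _rtl92c_evm_db_to_percentage, outside the driver. */
static int evm_db_to_percentage(signed char value)
{
	int ret = value;

	if (ret >= 0)
		ret = 0;
	if (ret <= -33)
		ret = -33;
	ret = -ret * 3;
	if (ret == 99)
		ret = 100;
	return ret;
}

int main(void)
{
	printf("-33 dB -> %d%%, -10 dB -> %d%%\n",
	       evm_db_to_percentage(-33), evm_db_to_percentage(-10));
	return 0;
}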
+
+static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
+ u8 signal_strength_index)
+{
+ long signal_power;
+
+ signal_power = (long)((signal_strength_index + 1) >> 1);
+ signal_power -= 95;
+ return signal_power;
+}
+
+static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
+ long currsig)
+{
+ long retsig;
+
+ if (currsig >= 61 && currsig <= 100)
+ retsig = 90 + ((currsig - 60) / 4);
+ else if (currsig >= 41 && currsig <= 60)
+ retsig = 78 + ((currsig - 40) / 2);
+ else if (currsig >= 31 && currsig <= 40)
+ retsig = 66 + (currsig - 30);
+ else if (currsig >= 21 && currsig <= 30)
+ retsig = 54 + (currsig - 20);
+ else if (currsig >= 5 && currsig <= 20)
+ retsig = 42 + (((currsig - 5) * 2) / 3);
+ else if (currsig == 4)
+ retsig = 36;
+ else if (currsig == 3)
+ retsig = 27;
+ else if (currsig == 2)
+ retsig = 18;
+ else if (currsig == 1)
+ retsig = 9;
+ else
+ retsig = currsig;
+ return retsig;
+}
+
+static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats,
+ struct rx_desc_92c *pdesc,
+ struct rx_fwinfo_92c *p_drvinfo,
+ bool packet_match_bssid,
+ bool packet_toself,
+ bool packet_beacon)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct phy_sts_cck_8192s_t *cck_buf;
+ s8 rx_pwr_all = 0, rx_pwr[4];
+ u8 rf_rx_num = 0, evm, pwdb_all;
+ u8 i, max_spatial_stream;
+ u32 rssi, total_rssi = 0;
+ bool in_powersavemode = false;
+ bool is_cck_rate;
+
+ is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
+ pstats->packet_matchbssid = packet_match_bssid;
+ pstats->packet_toself = packet_toself;
+ pstats->is_cck = is_cck_rate;
+ pstats->packet_beacon = packet_beacon;
+ pstats->RX_SIGQ[0] = -1;
+ pstats->RX_SIGQ[1] = -1;
+ if (is_cck_rate) {
+ u8 report, cck_highpwr;
+ cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
+ if (!in_powersavemode)
+ cck_highpwr = rtlphy->cck_high_power;
+ else
+ cck_highpwr = false;
+ if (!cck_highpwr) {
+ u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+ report = cck_buf->cck_agc_rpt & 0xc0;
+ report = report >> 6;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
+ break;
+ }
+ } else {
+ u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+ report = p_drvinfo->cfosho[0] & 0x60;
+ report = report >> 5;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ }
+ }
+ pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pstats->rx_pwdb_all = pwdb_all;
+ pstats->recvsignalpower = rx_pwr_all;
+ if (packet_match_bssid) {
+ u8 sq;
+ if (pstats->rx_pwdb_all > 40)
+ sq = 100;
+ else {
+ sq = cck_buf->sq_rpt;
+ if (sq > 64)
+ sq = 0;
+ else if (sq < 20)
+ sq = 100;
+ else
+ sq = ((64 - sq) * 100) / 44;
+ }
+ pstats->signalquality = sq;
+ pstats->RX_SIGQ[0] = sq;
+ pstats->RX_SIGQ[1] = -1;
+ }
+ } else {
+ rtlpriv->dm.rfpath_rxenable[0] =
+ rtlpriv->dm.rfpath_rxenable[1] = true;
+ for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
+ if (rtlpriv->dm.rfpath_rxenable[i])
+ rf_rx_num++;
+ rx_pwr[i] =
+ ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
+ rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
+ total_rssi += rssi;
+ rtlpriv->stats.rx_snr_db[i] =
+ (long)(p_drvinfo->rxsnr[i] / 2);
+
+ if (packet_match_bssid)
+ pstats->rx_mimo_signalstrength[i] = (u8) rssi;
+ }
+ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
+ pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
+ pstats->rx_pwdb_all = pwdb_all;
+ pstats->rxpower = rx_pwr_all;
+ pstats->recvsignalpower = rx_pwr_all;
+ if (GET_RX_DESC_RX_MCS(pdesc) &&
+ GET_RX_DESC_RX_MCS(pdesc) >= DESC92C_RATEMCS8 &&
+ GET_RX_DESC_RX_MCS(pdesc) <= DESC92C_RATEMCS15)
+ max_spatial_stream = 2;
+ else
+ max_spatial_stream = 1;
+ for (i = 0; i < max_spatial_stream; i++) {
+ evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+ if (packet_match_bssid) {
+ if (i == 0)
+ pstats->signalquality =
+ (u8) (evm & 0xff);
+ pstats->RX_SIGQ[i] =
+ (u8) (evm & 0xff);
+ }
+ }
+ }
+ if (is_cck_rate)
+ pstats->signalstrength =
+ (u8) (_rtl92c_signal_scale_mapping(hw, pwdb_all));
+ else if (rf_rx_num != 0)
+ pstats->signalstrength =
+ (u8) (_rtl92c_signal_scale_mapping
+ (hw, total_rssi /= rf_rx_num));
+}
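In the CCK branch of the function above, the AGC report byte encodes a gain range (bits 7:6 in the normal-power case) plus a fine value (bits 5:1); the two are combined into a raw power in dBm and then mapped to a 0-100 percentage by _rtl92c_query_rxpwrpercentage. A standalone sketch of the normal-power decode, for illustration only (the sample report byte is made up):

#include <stdio.h>

/* Illustrative: non-high-power CCK AGC report -> dBm, as decoded above. */
static int cck_agc_to_dbm(unsigned char agc_rpt)
{
	static const int base[4] = { 16, -12, -26, -46 };	/* report 0..3 */
	unsigned int report = (agc_rpt & 0xc0) >> 6;

	return base[report] - (agc_rpt & 0x3e);
}

static int pwr_to_percentage(int dbm)
{
	if (dbm <= -100 || dbm >= 20)
		return 0;
	if (dbm >= 0)
		return 100;
	return 100 + dbm;
}

int main(void)
{
	unsigned char rpt = 0xd4;	/* made-up report: range 3, fine value 0x14 */

	printf("%d dBm -> %d%%\n",
	       cck_agc_to_dbm(rpt), pwr_to_percentage(cck_agc_to_dbm(rpt)));
	return 0;
}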
+
+static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 rfpath;
+ u32 last_rssi, tmpval;
+
+ if (pstats->packet_toself || pstats->packet_beacon) {
+ rtlpriv->stats.rssi_calculate_cnt++;
+ if (rtlpriv->stats.ui_rssi.total_num++ >=
+ PHY_RSSI_SLID_WIN_MAX) {
+ rtlpriv->stats.ui_rssi.total_num =
+ PHY_RSSI_SLID_WIN_MAX;
+ last_rssi =
+ rtlpriv->stats.ui_rssi.elements[rtlpriv->
+ stats.ui_rssi.index];
+ rtlpriv->stats.ui_rssi.total_val -= last_rssi;
+ }
+ rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
+ rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
+ index++] = pstats->signalstrength;
+ if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
+ rtlpriv->stats.ui_rssi.index = 0;
+ tmpval = rtlpriv->stats.ui_rssi.total_val /
+ rtlpriv->stats.ui_rssi.total_num;
+ rtlpriv->stats.signal_strength =
+ _rtl92c_translate_todbm(hw, (u8) tmpval);
+ pstats->rssi = rtlpriv->stats.signal_strength;
+ }
+ if (!pstats->is_cck && pstats->packet_toself) {
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
+ continue;
+ if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ pstats->rx_mimo_signalstrength[rfpath];
+ }
+ if (pstats->rx_mimo_signalstrength[rfpath] >
+ rtlpriv->stats.rx_rssi_percentage[rfpath]) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.
+ rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ rtlpriv->stats.rx_rssi_percentage[rfpath] +
+ 1;
+ } else {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.
+ rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+ }
+ }
+ }
+}
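The per-path update above is an exponential moving average, new = (old * (RX_SMOOTH_FACTOR - 1) + sample) / RX_SMOOTH_FACTOR, with a +1 nudge when the sample is rising so integer truncation cannot keep the average from climbing. A hedged sketch of that filter (the factor of 20 is an assumption taken from the rtlwifi wifi.h definition of RX_SMOOTH_FACTOR):

#include <stdio.h>

#define SMOOTH_FACTOR 20	/* assumed to match RX_SMOOTH_FACTOR */

/* Illustrative exponential smoothing, as used for rx_rssi_percentage above. */
static unsigned int smooth_rssi(unsigned int old, unsigned int sample)
{
	unsigned int avg = (old * (SMOOTH_FACTOR - 1) + sample) / SMOOTH_FACTOR;

	if (sample > old)
		avg += 1;	/* counteract integer truncation on the way up */
	return avg;
}

int main(void)
{
	unsigned int rssi = 40, i;

	for (i = 0; i < 5; i++) {
		rssi = smooth_rssi(rssi, 80);	/* feed a stronger signal */
		printf("step %u: %u\n", i, rssi);
	}
	return 0;
}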
+
+static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int weighting = 0;
+
+ if (rtlpriv->stats.recv_signal_power == 0)
+ rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
+ if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
+ weighting = 5;
+ else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
+ weighting = (-5);
+ rtlpriv->stats.recv_signal_power =
+ (rtlpriv->stats.recv_signal_power * 5 +
+ pstats->recvsignalpower + weighting) / 6;
+}
+
+static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long undecorated_smoothed_pwdb = 0;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ return;
+ } else {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ }
+ if (pstats->packet_toself || pstats->packet_beacon) {
+ if (undecorated_smoothed_pwdb < 0)
+ undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
+ if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
+ undecorated_smoothed_pwdb =
+ (((undecorated_smoothed_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ undecorated_smoothed_pwdb = undecorated_smoothed_pwdb
+ + 1;
+ } else {
+ undecorated_smoothed_pwdb =
+ (((undecorated_smoothed_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ }
+ rtlpriv->dm.undecorated_smoothed_pwdb =
+ undecorated_smoothed_pwdb;
+ _rtl92c_update_rxsignalstatistics(hw, pstats);
+ }
+}
+
+static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 last_evm = 0, n_stream, tmpval;
+
+ if (pstats->signalquality != 0) {
+ if (pstats->packet_toself || pstats->packet_beacon) {
+ if (rtlpriv->stats.LINK_Q.total_num++ >=
+ PHY_LINKQUALITY_SLID_WIN_MAX) {
+ rtlpriv->stats.LINK_Q.total_num =
+ PHY_LINKQUALITY_SLID_WIN_MAX;
+ last_evm =
+ rtlpriv->stats.LINK_Q.elements
+ [rtlpriv->stats.LINK_Q.index];
+ rtlpriv->stats.LINK_Q.total_val -=
+ last_evm;
+ }
+ rtlpriv->stats.LINK_Q.total_val +=
+ pstats->signalquality;
+ rtlpriv->stats.LINK_Q.elements
+ [rtlpriv->stats.LINK_Q.index++] =
+ pstats->signalquality;
+ if (rtlpriv->stats.LINK_Q.index >=
+ PHY_LINKQUALITY_SLID_WIN_MAX)
+ rtlpriv->stats.LINK_Q.index = 0;
+ tmpval = rtlpriv->stats.LINK_Q.total_val /
+ rtlpriv->stats.LINK_Q.total_num;
+ rtlpriv->stats.signal_quality = tmpval;
+ rtlpriv->stats.last_sigstrength_inpercent = tmpval;
+ for (n_stream = 0; n_stream < 2;
+ n_stream++) {
+ if (pstats->RX_SIGQ[n_stream] != -1) {
+ if (!rtlpriv->stats.RX_EVM[n_stream]) {
+ rtlpriv->stats.RX_EVM[n_stream]
+ = pstats->RX_SIGQ[n_stream];
+ }
+ rtlpriv->stats.RX_EVM[n_stream] =
+ ((rtlpriv->stats.RX_EVM
+ [n_stream] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->RX_SIGQ
+ [n_stream] * 1)) /
+ (RX_SMOOTH_FACTOR);
+ }
+ }
+ }
+ } else {
+ ;
+ }
+}
+
+static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
+ u8 *buffer,
+ struct rtl_stats *pcurrent_stats)
+{
+ if (!pcurrent_stats->packet_matchbssid &&
+ !pcurrent_stats->packet_beacon)
+ return;
+ _rtl92c_process_ui_rssi(hw, pcurrent_stats);
+ _rtl92c_process_pwdb(hw, pcurrent_stats);
+ _rtl92c_process_LINK_Q(hw, pcurrent_stats);
+}
+
+void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_stats *pstats,
+ struct rx_desc_92c *pdesc,
+ struct rx_fwinfo_92c *p_drvinfo)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct ieee80211_hdr *hdr;
+ u8 *tmp_buf;
+ u8 *praddr;
+ u8 *psaddr;
+ __le16 fc;
+ u16 type, cpu_fc;
+	bool packet_matchbssid, packet_toself, packet_beacon = false;
+
+ tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
+ hdr = (struct ieee80211_hdr *)tmp_buf;
+ fc = hdr->frame_control;
+ cpu_fc = le16_to_cpu(fc);
+ type = WLAN_FC_GET_TYPE(fc);
+ praddr = hdr->addr1;
+ psaddr = hdr->addr2;
+ packet_matchbssid =
+ ((IEEE80211_FTYPE_CTL != type) &&
+ (!compare_ether_addr(mac->bssid,
+ (cpu_fc & IEEE80211_FCTL_TODS) ?
+ hdr->addr1 : (cpu_fc & IEEE80211_FCTL_FROMDS) ?
+ hdr->addr2 : hdr->addr3)) &&
+ (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
+
+ packet_toself = packet_matchbssid &&
+ (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+ if (ieee80211_is_beacon(fc))
+ packet_beacon = true;
+ _rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
+ packet_matchbssid, packet_toself,
+ packet_beacon);
+ _rtl92c_process_phyinfo(hw, tmp_buf, pstats);
+}
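The BSSID comparison above selects addr1, addr2 or addr3 depending on the ToDS/FromDS bits of the frame control field. A minimal illustration of that selection using the standard 802.11 FCTL bit values (constants redefined locally so the sketch stands alone):

#include <stdio.h>

#define FCTL_TODS	0x0100	/* mirrors IEEE80211_FCTL_TODS */
#define FCTL_FROMDS	0x0200	/* mirrors IEEE80211_FCTL_FROMDS */

/* Illustrative: which header address carries the BSSID, as chosen above. */
static const char *bssid_addr_name(unsigned short fc)
{
	if (fc & FCTL_TODS)
		return "addr1";	/* to the DS: the receiver is the BSSID */
	if (fc & FCTL_FROMDS)
		return "addr2";	/* from the DS: the transmitter is the BSSID */
	return "addr3";		/* neither: the BSSID sits in addr3 */
}

int main(void)
{
	printf("ToDS: %s, FromDS: %s, neither: %s\n",
	       bssid_addr_name(FCTL_TODS), bssid_addr_name(FCTL_FROMDS),
	       bssid_addr_name(0));
	return 0;
}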
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
new file mode 100644
index 000000000000..298fdb724aa5
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
@@ -0,0 +1,180 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92C_MAC_H__
+#define __RTL92C_MAC_H__
+
+#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
+#define DRIVER_EARLY_INT_TIME 0x05
+#define BCN_DMA_ATIME_INT_TIME 0x02
+
+void rtl92c_read_chip_version(struct ieee80211_hw *hw);
+bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data);
+bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary);
+void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+void rtl92c_enable_interrupt(struct ieee80211_hw *hw);
+void rtl92c_disable_interrupt(struct ieee80211_hw *hw);
+void rtl92c_set_qos(struct ieee80211_hw *hw, int aci);
+
+
+/*---------------------------------------------------------------
+ * Hardware init functions
+ *---------------------------------------------------------------*/
+void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr);
+void rtl92c_init_interrupt(struct ieee80211_hw *hw);
+void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size);
+
+int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl92c_init_network_type(struct ieee80211_hw *hw);
+void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw);
+void rtl92c_init_rate_fallback(struct ieee80211_hw *hw);
+
+void rtl92c_init_edca_param(struct ieee80211_hw *hw,
+ u16 queue,
+ u16 txop,
+ u8 ecwmax,
+ u8 ecwmin,
+ u8 aifs);
+
+void rtl92c_init_edca(struct ieee80211_hw *hw);
+void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw);
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode);
+void rtl92c_init_rdg_setting(struct ieee80211_hw *hw);
+void rtl92c_init_retry_function(struct ieee80211_hw *hw);
+
+void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
+ enum version_8192c version);
+
+void rtl92c_disable_fast_edca(struct ieee80211_hw *hw);
+void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T);
+
+/* For filter */
+u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw);
+void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter);
+u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw);
+void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter);
+u16 rtl92c_get_data_filter(struct ieee80211_hw *hw);
+void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter);
+
+
+u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw);
+
+#define RX_HAL_IS_CCK_RATE(_pdesc)\
+ (GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE1M ||\
+ GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE2M ||\
+ GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE5_5M ||\
+ GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE11M)
+
+struct rx_fwinfo_92c {
+ u8 gain_trsw[4];
+ u8 pwdb_all;
+ u8 cfosho[4];
+ u8 cfotail[4];
+ char rxevm[2];
+ char rxsnr[4];
+ u8 pdsnr[2];
+ u8 csi_current[2];
+ u8 csi_target[2];
+ u8 sigevm;
+ u8 max_ex_pwr;
+ u8 ex_intf_flag:1;
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 reserve:4;
+} __packed;
+
+struct rx_desc_92c {
+ u32 length:14;
+ u32 crc32:1;
+ u32 icverror:1;
+ u32 drv_infosize:4;
+ u32 security:3;
+ u32 qos:1;
+ u32 shift:2;
+ u32 phystatus:1;
+ u32 swdec:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 eor:1;
+ u32 own:1;
+ u32 macid:5; /* word 1 */
+ u32 tid:4;
+ u32 hwrsvd:5;
+ u32 paggr:1;
+ u32 faggr:1;
+ u32 a1_fit:4;
+ u32 a2_fit:4;
+ u32 pam:1;
+ u32 pwr:1;
+ u32 moredata:1;
+ u32 morefrag:1;
+ u32 type:2;
+ u32 mc:1;
+ u32 bc:1;
+ u32 seq:12; /* word 2 */
+ u32 frag:4;
+ u32 nextpktlen:14;
+ u32 nextind:1;
+ u32 rsvd:1;
+ u32 rxmcs:6; /* word 3 */
+ u32 rxht:1;
+ u32 amsdu:1;
+ u32 splcp:1;
+ u32 bandwidth:1;
+ u32 htc:1;
+ u32 tcpchk_rpt:1;
+ u32 ipcchk_rpt:1;
+ u32 tcpchk_valid:1;
+ u32 hwpcerr:1;
+ u32 hwpcind:1;
+ u32 iv0:16;
+ u32 iv1; /* word 4 */
+ u32 tsfl; /* word 5 */
+ u32 bufferaddress; /* word 6 */
+ u32 bufferaddress64; /* word 7 */
+} __packed;
+
+enum rtl_desc_qsel rtl92c_map_hwqueue_to_fwqueue(u16 fc,
+ unsigned int
+ skb_queue);
+void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_stats *pstats,
+ struct rx_desc_92c *pdesc,
+ struct rx_fwinfo_92c *p_drvinfo);
+
+/*---------------------------------------------------------------
+ * Card disable functions
+ *---------------------------------------------------------------*/
+
+
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
new file mode 100644
index 000000000000..4e020e654e6b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -0,0 +1,607 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../ps.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "table.h"
+
+u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, readback_value, bitshift;
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask));
+ if (rtlphy->rf_mode != RF_OP_BY_FW) {
+ original_value = _rtl92c_phy_rf_serial_read(hw,
+ rfpath, regaddr);
+ } else {
+ original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ rfpath, regaddr);
+ }
+ bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ ("regaddr(%#x), rfpath(%#x), "
+ "bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value));
+ return readback_value;
+}
+
+void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 original_value, bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath));
+ if (rtlphy->rf_mode != RF_OP_BY_FW) {
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92c_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+ bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+ _rtl92c_phy_rf_serial_write(hw, rfpath, regaddr, data);
+ } else {
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92c_phy_fw_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+ bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
+ data =
+ ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+ _rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
+ }
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath));
+}
+
+bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw)
+{
+ bool rtstatus;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool is92c = IS_92C_SERIAL(rtlhal->version);
+
+ rtstatus = _rtl92cu_phy_config_mac_with_headerfile(hw);
+ if (is92c && IS_HARDWARE_TYPE_8192CE(rtlhal))
+ rtl_write_byte(rtlpriv, 0x14, 0x71);
+ return rtstatus;
+}
+
+bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw)
+{
+ bool rtstatus = true;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u16 regval;
+ u8 b_reg_hwparafile = 1;
+
+ _rtl92c_phy_init_bb_rf_register_definition(hw);
+ regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, regval | BIT(13) |
+ BIT(0) | BIT(1));
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
+ if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_PPLL | FEN_PCIEA |
+ FEN_DIO_PCIE | FEN_BB_GLB_RSTn | FEN_BBRSTB);
+ } else if (IS_HARDWARE_TYPE_8192CU(rtlhal)) {
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD |
+ FEN_BB_GLB_RSTn | FEN_BBRSTB);
+ rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
+ }
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
+ if (b_reg_hwparafile == 1)
+ rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
+ return rtstatus;
+}
+
+bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 i;
+ u32 arraylength;
+ u32 *ptrarray;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Read Rtl819XMACPHY_Array\n"));
+	arraylength = rtlphy->hwparam_tables[MAC_REG].length;
+ ptrarray = rtlphy->hwparam_tables[MAC_REG].pdata;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Img:RTL8192CEMAC_2T_ARRAY\n"));
+ for (i = 0; i < arraylength; i = i + 2)
+ rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
+ return true;
+}
+
+bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ int i;
+ u32 *phy_regarray_table;
+ u32 *agctab_array_table;
+ u16 phy_reg_arraylen, agctab_arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (IS_92C_SERIAL(rtlhal->version)) {
+ agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_2T].length;
+ agctab_array_table = rtlphy->hwparam_tables[AGCTAB_2T].pdata;
+ phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_2T].length;
+ phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_2T].pdata;
+ } else {
+ agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_1T].length;
+ agctab_array_table = rtlphy->hwparam_tables[AGCTAB_1T].pdata;
+ phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_1T].length;
+ phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_1T].pdata;
+ }
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
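+		/*
+		 * Table entries whose address is 0xfe..0xf9 request a delay
+		 * before the next write: 0xfe=50ms, 0xfd=5ms, 0xfc=1ms,
+		 * 0xfb=50us, 0xfa=5us, 0xf9=1us.
+		 */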
+ for (i = 0; i < phy_reg_arraylen; i = i + 2) {
+ if (phy_regarray_table[i] == 0xfe)
+ mdelay(50);
+ else if (phy_regarray_table[i] == 0xfd)
+ mdelay(5);
+ else if (phy_regarray_table[i] == 0xfc)
+ mdelay(1);
+ else if (phy_regarray_table[i] == 0xfb)
+ udelay(50);
+ else if (phy_regarray_table[i] == 0xfa)
+ udelay(5);
+ else if (phy_regarray_table[i] == 0xf9)
+ udelay(1);
+ rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
+ phy_regarray_table[i + 1]);
+ udelay(1);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("The phy_regarray_table[0] is %x"
+ " Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]));
+ }
+ } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+ for (i = 0; i < agctab_arraylen; i = i + 2) {
+ rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
+ agctab_array_table[i + 1]);
+ udelay(1);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("The agctab_array_table[0] is "
+ "%x Rtl819XPHY_REGArray[1] is %x\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]));
+ }
+ }
+ return true;
+}
+
+bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ int i;
+ u32 *phy_regarray_table_pg;
+ u16 phy_regarray_pg_len;
+
+ rtlphy->pwrgroup_cnt = 0;
+ phy_regarray_pg_len = rtlphy->hwparam_tables[PHY_REG_PG].length;
+ phy_regarray_table_pg = rtlphy->hwparam_tables[PHY_REG_PG].pdata;
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
+ if (phy_regarray_table_pg[i] == 0xfe)
+ mdelay(50);
+ else if (phy_regarray_table_pg[i] == 0xfd)
+ mdelay(5);
+ else if (phy_regarray_table_pg[i] == 0xfc)
+ mdelay(1);
+ else if (phy_regarray_table_pg[i] == 0xfb)
+ udelay(50);
+ else if (phy_regarray_table_pg[i] == 0xfa)
+ udelay(5);
+ else if (phy_regarray_table_pg[i] == 0xf9)
+ udelay(1);
+ _rtl92c_store_pwrIndex_diffrate_offset(hw,
+ phy_regarray_table_pg[i],
+ phy_regarray_table_pg[i + 1],
+ phy_regarray_table_pg[i + 2]);
+ }
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ ("configtype != BaseBand_Config_PHY_REG\n"));
+ }
+ return true;
+}
+
+bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath)
+{
+ int i;
+ u32 *radioa_array_table;
+ u32 *radiob_array_table;
+ u16 radioa_arraylen, radiob_arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (IS_92C_SERIAL(rtlhal->version)) {
+ radioa_arraylen = rtlphy->hwparam_tables[RADIOA_2T].length;
+ radioa_array_table = rtlphy->hwparam_tables[RADIOA_2T].pdata;
+ radiob_arraylen = rtlphy->hwparam_tables[RADIOB_2T].length;
+ radiob_array_table = rtlphy->hwparam_tables[RADIOB_2T].pdata;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio_A:RTL8192CERADIOA_2TARRAY\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio_B:RTL8192CE_RADIOB_2TARRAY\n"));
+ } else {
+ radioa_arraylen = rtlphy->hwparam_tables[RADIOA_1T].length;
+ radioa_array_table = rtlphy->hwparam_tables[RADIOA_1T].pdata;
+ radiob_arraylen = rtlphy->hwparam_tables[RADIOB_1T].length;
+ radiob_array_table = rtlphy->hwparam_tables[RADIOB_1T].pdata;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio_A:RTL8192CE_RADIOA_1TARRAY\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio_B:RTL8192CE_RADIOB_1TARRAY\n"));
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
+ switch (rfpath) {
+ case RF90_PATH_A:
+ for (i = 0; i < radioa_arraylen; i = i + 2) {
+ if (radioa_array_table[i] == 0xfe)
+ mdelay(50);
+ else if (radioa_array_table[i] == 0xfd)
+ mdelay(5);
+ else if (radioa_array_table[i] == 0xfc)
+ mdelay(1);
+ else if (radioa_array_table[i] == 0xfb)
+ udelay(50);
+ else if (radioa_array_table[i] == 0xfa)
+ udelay(5);
+ else if (radioa_array_table[i] == 0xf9)
+ udelay(1);
+ else {
+ rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
+ udelay(1);
+ }
+ }
+ break;
+ case RF90_PATH_B:
+ for (i = 0; i < radiob_arraylen; i = i + 2) {
+ if (radiob_array_table[i] == 0xfe) {
+ mdelay(50);
+ } else if (radiob_array_table[i] == 0xfd)
+ mdelay(5);
+ else if (radiob_array_table[i] == 0xfc)
+ mdelay(1);
+ else if (radiob_array_table[i] == 0xfb)
+ udelay(50);
+ else if (radiob_array_table[i] == 0xfa)
+ udelay(5);
+ else if (radiob_array_table[i] == 0xf9)
+ udelay(1);
+ else {
+ rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
+ RFREG_OFFSET_MASK,
+ radiob_array_table[i + 1]);
+ udelay(1);
+ }
+ }
+ break;
+ case RF90_PATH_C:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 ("switch case not processed\n"));
+ break;
+ case RF90_PATH_D:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 ("switch case not processed\n"));
+ break;
+ }
+ return true;
+}
+
+void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u8 reg_bw_opmode;
+ u8 reg_prsr_rsc;
+
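+	/*
+	 * Bandwidth switch sequence: program the MAC BW opmode and RRSR
+	 * sub-channel fields, update the baseband RFMOD/CCK side-band
+	 * settings, then reprogram the RF6052 front end for the new
+	 * bandwidth.
+	 */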
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+ ("Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+		  "20MHz" : "40MHz"));
+ if (is_hal_stop(rtlhal)) {
+ rtlphy->set_bwmode_inprogress = false;
+ return;
+ }
+ reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
+ reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ reg_bw_opmode |= BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ reg_bw_opmode &= ~BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+ reg_prsr_rsc =
+ (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5);
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ break;
+ }
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
+ rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
+ (mac->cur_40_prime_sc >> 1));
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);
+ rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
+ (mac->cur_40_prime_sc ==
+ HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ break;
+ }
+ rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+ rtlphy->set_bwmode_inprogress = false;
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+}
+
+void rtl92cu_bb_block_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ mutex_lock(&rtlpriv->io.bb_mutex);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
+ mutex_unlock(&rtlpriv->io.bb_mutex);
+}
+
+void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+ u8 tmpreg;
+ u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
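+	/*
+	 * LC calibration: if 0xd03 has bits 4-6 set, clear them and put the
+	 * RF path(s) into a standby-like mode; otherwise pause TX via
+	 * REG_TXPAUSE. Kick the calibration by setting bit 15 of RF register
+	 * 0x18, wait 100 ms, then restore the original state.
+	 */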
+ tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+
+ if ((tmpreg & 0x70) != 0)
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+ else
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+ if ((tmpreg & 0x70) != 0) {
+ rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
+ if (is2t)
+ rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
+ MASK12BITS);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
+ (rf_a_mode & 0x8FFFF) | 0x10000);
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+ (rf_b_mode & 0x8FFFF) | 0x10000);
+ }
+ lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000);
+ mdelay(100);
+ if ((tmpreg & 0x70) != 0) {
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
+ if (is2t)
+ rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+ rf_b_mode);
+ } else {
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ }
+}
+
+bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool bresult = true;
+ u8 i, queue_id;
+ struct rtl8192_tx_ring *ring = NULL;
+
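+	/*
+	 * Note: the queue-drain loops below walk the PCI TX rings
+	 * (rtl_pcipriv/tx_ring), presumably carried over from the rtl8192ce
+	 * (PCI) version of this routine.
+	 */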
+ ppsc->set_rfpowerstate_inprogress = true;
+ switch (rfpwr_state) {
+ case ERFON:
+ if ((ppsc->rfpwr_state == ERFOFF) &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+ bool rtstatus;
+ u32 InitializeCount = 0;
+
+ do {
+ InitializeCount++;
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("IPS Set eRf nic enable\n"));
+ rtstatus = rtl_ps_enable_nic(hw);
+ } while ((rtstatus != true)
+ && (InitializeCount < 10));
+ RT_CLEAR_PS_LEVEL(ppsc,
+ RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+				 ("Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->
+ last_sleep_jiffies)));
+ ppsc->last_awake_jiffies = jiffies;
+ rtl92ce_phy_set_rf_on(hw);
+ }
+ if (mac->link_state == MAC80211_LINKED) {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_LINK);
+ } else {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ }
+ break;
+ case ERFOFF:
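+		/*
+		 * Before turning the RF off, wait for every non-beacon TX
+		 * queue to drain, polling in 10 us steps up to
+		 * MAX_DOZE_WAITING_TIMES_9x times.
+		 */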
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0 ||
+ queue_id == BEACON_QUEUE) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("eRf Off/Sleep: %d times "
+ "TcbBusyQueue[%d] "
+ "=%d before doze!\n", (i + 1),
+ queue_id,
+ skb_queue_len(&ring->queue)));
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("\nERFOFF: %d times "
+ "TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue)));
+ break;
+ }
+ }
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("IPS Set eRf nic disable\n"));
+ rtl_ps_disable_nic(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ } else {
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_POWER_OFF);
+ }
+ }
+ break;
+ case ERFSLEEP:
+ if (ppsc->rfpwr_state == ERFOFF)
+ break;
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0) {
+ queue_id++;
+ continue;
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("eRf Off/Sleep: %d times "
+ "TcbBusyQueue[%d] =%d before "
+ "doze!\n", (i + 1), queue_id,
+ skb_queue_len(&ring->queue)));
+ udelay(10);
+ i++;
+ }
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("\n ERFSLEEP: %d times "
+ "TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue)));
+ break;
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+			 ("Set ERFSLEEP awake:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies)));
+ ppsc->last_sleep_jiffies = jiffies;
+ _rtl92c_phy_set_rf_sleep(hw);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+			 ("switch case not processed\n"));
+ bresult = false;
+ break;
+ }
+ if (bresult)
+ ppsc->rfpwr_state = rfpwr_state;
+ ppsc->set_rfpowerstate_inprogress = false;
+ return bresult;
+}
+
+bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool bresult = false;
+
+ if (rfpwr_state == ppsc->rfpwr_state)
+ return bresult;
+ bresult = _rtl92cu_phy_set_rf_power_state(hw, rfpwr_state);
+ return bresult;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
new file mode 100644
index 000000000000..06299559ab68
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
@@ -0,0 +1,36 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/phy.h"
+
+void rtl92cu_bb_block_on(struct ieee80211_hw *hw);
+bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath);
+void rtl92c_phy_set_io(struct ieee80211_hw *hw);
+bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
+bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
new file mode 100644
index 000000000000..7f1be614c998
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../rtl8192ce/reg.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
new file mode 100644
index 000000000000..1c79c226f145
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -0,0 +1,493 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+
+static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
+
+void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
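+	/*
+	 * The bandwidth field of RF_CHNLBW (bits 10-11) is set to 0x0400
+	 * for 20 MHz operation and cleared for 40 MHz.
+	 */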
+ switch (bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+ 0xfffff3ff) | 0x0400);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[0]);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+ 0xfffff3ff));
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[0]);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", bandwidth));
+ break;
+ }
+}
+
+void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 tx_agc[2] = { 0, 0 }, tmpval = 0;
+ bool turbo_scanoff = false;
+ u8 idx1, idx2;
+ u8 *ptr;
+
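+	/*
+	 * tx_agc[] packs one byte of CCK TX power per rate for each RF
+	 * path; every byte is clamped to RF6052_MAX_TX_PWR before being
+	 * written to the AGC registers below.
+	 */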
+ if (rtlhal->interface == INTF_PCI) {
+ if (rtlefuse->eeprom_regulatory != 0)
+ turbo_scanoff = true;
+ } else {
+ if ((rtlefuse->eeprom_regulatory != 0) ||
+ (rtlefuse->external_pa))
+ turbo_scanoff = true;
+ }
+ if (mac->act_scanning == true) {
+ tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+ tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+ if (turbo_scanoff) {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ if (rtlhal->interface == INTF_USB) {
+ if (tx_agc[idx1] > 0x20 &&
+ rtlefuse->external_pa)
+ tx_agc[idx1] = 0x20;
+ }
+ }
+ }
+ } else {
+ if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL1) {
+ tx_agc[RF90_PATH_A] = 0x10101010;
+ tx_agc[RF90_PATH_B] = 0x10101010;
+ } else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+			   TXHIGHPWRLEVEL_LEVEL2) {
+ tx_agc[RF90_PATH_A] = 0x00000000;
+ tx_agc[RF90_PATH_B] = 0x00000000;
+ } else{
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ if (rtlefuse->eeprom_regulatory == 0) {
+ tmpval = (rtlphy->mcs_txpwrlevel_origoffset
+ [0][6]) +
+ (rtlphy->mcs_txpwrlevel_origoffset
+ [0][7] << 8);
+ tx_agc[RF90_PATH_A] += tmpval;
+ tmpval = (rtlphy->mcs_txpwrlevel_origoffset
+ [0][14]) +
+ (rtlphy->mcs_txpwrlevel_origoffset
+ [0][15] << 24);
+ tx_agc[RF90_PATH_B] += tmpval;
+ }
+ }
+ }
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ ptr = (u8 *) (&(tx_agc[idx1]));
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if (*ptr > RF6052_MAX_TX_PWR)
+ *ptr = RF6052_MAX_TX_PWR;
+ ptr++;
+ }
+ }
+ tmpval = tx_agc[RF90_PATH_A] & 0xff;
+ rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_A_CCK1_MCS32));
+
+ tmpval = tx_agc[RF90_PATH_A] >> 8;
+ if (mac->mode == WIRELESS_MODE_B)
+ tmpval = tmpval & 0xff00ffff;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_A_CCK2_11));
+ tmpval = tx_agc[RF90_PATH_B] >> 24;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_A_CCK2_11));
+ tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK1_55_MCS32));
+}
+
+static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel,
+ u32 *ofdmbase, u32 *mcsbase)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 powerBase0, powerBase1;
+ u8 legacy_pwrdiff = 0, ht20_pwrdiff = 0;
+ u8 i, powerlevel[2];
+
+ for (i = 0; i < 2; i++) {
+ powerlevel[i] = ppowerlevel[i];
+ legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
+ powerBase0 = powerlevel[i] + legacy_pwrdiff;
+ powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
+ (powerBase0 << 8) | powerBase0;
+ *(ofdmbase + i) = powerBase0;
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ (" [OFDM power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
+ }
+ for (i = 0; i < 2; i++) {
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
+ ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
+ powerlevel[i] += ht20_pwrdiff;
+ }
+ powerBase1 = powerlevel[i];
+ powerBase1 = (powerBase1 << 24) |
+ (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
+ *(mcsbase + i) = powerBase1;
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ (" [MCS power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
+ }
+}
+
+static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+ u8 channel, u8 index,
+ u32 *powerBase0,
+ u32 *powerBase1,
+ u32 *p_outwriteval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 i, chnlgroup = 0, pwr_diff_limit[4];
+ u32 writeVal, customer_limit, rf;
+
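+	/*
+	 * eeprom_regulatory selects how the per-rate offsets are combined
+	 * with the power base: 0 (and default) uses the Realtek-defined
+	 * offsets, 1 indexes the offsets by channel group, 2 uses the power
+	 * base alone, 3 applies the customer limits from the efuse pwrgroup
+	 * tables.
+	 */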
+ for (rf = 0; rf < 2; rf++) {
+ switch (rtlefuse->eeprom_regulatory) {
+ case 0:
+ chnlgroup = 0;
+ writeVal = rtlphy->mcs_txpwrlevel_origoffset
+ [chnlgroup][index + (rf ? 8 : 0)]
+ + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("RTK better performance,writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ case 1:
+ if (rtlphy->pwrgroup_cnt == 1)
+ chnlgroup = 0;
+ if (rtlphy->pwrgroup_cnt >= 3) {
+ if (channel <= 3)
+ chnlgroup = 0;
+ else if (channel >= 4 && channel <= 9)
+ chnlgroup = 1;
+ else if (channel > 9)
+ chnlgroup = 2;
+ if (rtlphy->current_chan_bw ==
+ HT_CHANNEL_WIDTH_20)
+ chnlgroup++;
+ else
+ chnlgroup += 4;
+ }
+ writeVal = rtlphy->mcs_txpwrlevel_origoffset
+ [chnlgroup][index +
+ (rf ? 8 : 0)] +
+ ((index < 2) ? powerBase0[rf] :
+ powerBase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Realtek regulatory, 20MHz, "
+ "writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ case 2:
+ writeVal = ((index < 2) ? powerBase0[rf] :
+ powerBase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Better regulatory,writeVal(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ case 3:
+ chnlgroup = 0;
+ if (rtlphy->current_chan_bw ==
+ HT_CHANNEL_WIDTH_20_40) {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("customer's limit, 40MHzrf(%c) = "
+ "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht40[rf]
+ [channel - 1]));
+ } else {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("customer's limit, 20MHz rf(%c) = "
+ "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht20[rf]
+ [channel - 1]));
+ }
+ for (i = 0; i < 4; i++) {
+ pwr_diff_limit[i] =
+ (u8) ((rtlphy->mcs_txpwrlevel_origoffset
+ [chnlgroup][index + (rf ? 8 : 0)]
+ & (0x7f << (i * 8))) >> (i * 8));
+ if (rtlphy->current_chan_bw ==
+ HT_CHANNEL_WIDTH_20_40) {
+ if (pwr_diff_limit[i] >
+ rtlefuse->pwrgroup_ht40[rf]
+ [channel - 1])
+ pwr_diff_limit[i] = rtlefuse->
+ pwrgroup_ht40[rf]
+ [channel - 1];
+ } else {
+ if (pwr_diff_limit[i] >
+ rtlefuse->pwrgroup_ht20[rf]
+ [channel - 1])
+ pwr_diff_limit[i] =
+ rtlefuse->pwrgroup_ht20[rf]
+ [channel - 1];
+ }
+ }
+ customer_limit = (pwr_diff_limit[3] << 24) |
+ (pwr_diff_limit[2] << 16) |
+ (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Customer's limit rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), customer_limit));
+ writeVal = customer_limit + ((index < 2) ?
+ powerBase0[rf] : powerBase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Customer, writeVal rf(%c)= 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ default:
+ chnlgroup = 0;
+ writeVal = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
+ [index + (rf ? 8 : 0)] + ((index < 2) ?
+ powerBase0[rf] : powerBase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR, ("RTK better "
+ "performance, writeValrf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeVal));
+ break;
+ }
+ if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL1)
+ writeVal = 0x14141414;
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_LEVEL2)
+ writeVal = 0x00000000;
+ if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+ writeVal = writeVal - 0x06060606;
+ else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+ TXHIGHPWRLEVEL_BT2)
+ writeVal = writeVal;
+ *(p_outwriteval + rf) = writeVal;
+ }
+}
+
+static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
+ u8 index, u32 *pValue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u16 regoffset_a[6] = {
+ RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
+ RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+ RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+ };
+ u16 regoffset_b[6] = {
+ RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
+ RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+ RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+ };
+ u8 i, rf, pwr_val[4];
+ u32 writeVal;
+ u16 regoffset;
+
+ for (rf = 0; rf < 2; rf++) {
+ writeVal = pValue[rf];
+ for (i = 0; i < 4; i++) {
+ pwr_val[i] = (u8)((writeVal & (0x7f << (i * 8))) >>
+ (i * 8));
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = RF6052_MAX_TX_PWR;
+ }
+ writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+ (pwr_val[1] << 8) | pwr_val[0];
+ if (rf == 0)
+ regoffset = regoffset_a[index];
+ else
+ regoffset = regoffset_b[index];
+ rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Set 0x%x = %08x\n", regoffset, writeVal));
+ if (((get_rf_type(rtlphy) == RF_2T2R) &&
+ (regoffset == RTXAGC_A_MCS15_MCS12 ||
+ regoffset == RTXAGC_B_MCS15_MCS12)) ||
+ ((get_rf_type(rtlphy) != RF_2T2R) &&
+ (regoffset == RTXAGC_A_MCS07_MCS04 ||
+ regoffset == RTXAGC_B_MCS07_MCS04))) {
+ writeVal = pwr_val[3];
+ if (regoffset == RTXAGC_A_MCS15_MCS12 ||
+ regoffset == RTXAGC_A_MCS07_MCS04)
+ regoffset = 0xc90;
+ if (regoffset == RTXAGC_B_MCS15_MCS12 ||
+ regoffset == RTXAGC_B_MCS07_MCS04)
+ regoffset = 0xc98;
+ for (i = 0; i < 3; i++) {
+ writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
+ rtl_write_byte(rtlpriv, (u32)(regoffset + i),
+ (u8)writeVal);
+ }
+ }
+ }
+}
+
+void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel)
+{
+ u32 writeVal[2], powerBase0[2], powerBase1[2];
+ u8 index = 0;
+
+ rtl92c_phy_get_power_base(hw, ppowerlevel,
+ channel, &powerBase0[0], &powerBase1[0]);
+ for (index = 0; index < 6; index++) {
+ _rtl92c_get_txpower_writeval_by_regulatory(hw,
+ channel, index,
+ &powerBase0[0],
+ &powerBase1[0],
+ &writeVal[0]);
+ _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]);
+ }
+}
+
+bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool rtstatus = true;
+ u8 b_reg_hwparafile = 1;
+
+ if (rtlphy->rf_type == RF_1T1R)
+ rtlphy->num_total_rfpath = 1;
+ else
+ rtlphy->num_total_rfpath = 2;
+ if (b_reg_hwparafile == 1)
+ rtstatus = _rtl92c_phy_rf6052_config_parafile(hw);
+ return rtstatus;
+}
+
+static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 u4_regvalue = 0;
+ u8 rfpath;
+ bool rtstatus = true;
+ struct bb_reg_def *pphyreg;
+
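+	/*
+	 * For each RF path: save the RFENV setting, switch the interface
+	 * over for serial RF programming, load the radio init table from
+	 * the header-file arrays, then restore RFENV.
+	 */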
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ pphyreg = &rtlphy->phyreg_def[rfpath];
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV << 16);
+ break;
+ }
+ rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+ udelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+ udelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
+ B3WIREADDREAALENGTH, 0x0);
+ udelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+ udelay(1);
+ switch (rfpath) {
+ case RF90_PATH_A:
+ rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
+ (enum radio_path) rfpath);
+ break;
+ case RF90_PATH_B:
+ rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw,
+ (enum radio_path) rfpath);
+ break;
+ case RF90_PATH_C:
+ break;
+ case RF90_PATH_D:
+ break;
+ }
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ rtl_set_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV, u4_regvalue);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ rtl_set_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV << 16, u4_regvalue);
+ break;
+ }
+ if (rtstatus != true) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio[%d] Fail!!", rfpath));
+ goto phy_rf_cfg_fail;
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("<---\n"));
+ return rtstatus;
+phy_rf_cfg_fail:
+ return rtstatus;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
new file mode 100644
index 000000000000..86c2728cfa00
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
@@ -0,0 +1,47 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_RF_H__
+#define __RTL92CU_RF_H__
+
+#define RF6052_MAX_TX_PWR 0x3F
+#define RF6052_MAX_REG 0x3F
+#define RF6052_MAX_PATH 2
+
+extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
+ u8 bandwidth);
+extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
new file mode 100644
index 000000000000..71244a38d49e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -0,0 +1,336 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../usb.h"
+#include "../efuse.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "mac.h"
+#include "dm.h"
+#include "rf.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+#include "hw.h"
+#include <linux/vmalloc.h>
+
+MODULE_AUTHOR("Georgia <georgia@realtek.com>");
+MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
+
+static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_flag = 0;
+ rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.thermalvalue = 0;
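+	/*
+	 * 16 KB buffer, presumably for the rtlwifi/rtl8192cufw.bin image
+	 * named in the hal config below.
+	 */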
+ rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
+ if (!rtlpriv->rtlhal.pfirmware) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't alloc buffer for fw.\n"));
+ return 1;
+ }
+ return 0;
+}
+
+static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->rtlhal.pfirmware) {
+ vfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+ }
+}
+
+static struct rtl_hal_ops rtl8192cu_hal_ops = {
+ .init_sw_vars = rtl92cu_init_sw_vars,
+ .deinit_sw_vars = rtl92cu_deinit_sw_vars,
+ .read_chip_version = rtl92c_read_chip_version,
+ .read_eeprom_info = rtl92cu_read_eeprom_info,
+ .enable_interrupt = rtl92c_enable_interrupt,
+ .disable_interrupt = rtl92c_disable_interrupt,
+ .hw_init = rtl92cu_hw_init,
+ .hw_disable = rtl92cu_card_disable,
+ .set_network_type = rtl92cu_set_network_type,
+ .set_chk_bssid = rtl92cu_set_check_bssid,
+ .set_qos = rtl92c_set_qos,
+ .set_bcn_reg = rtl92cu_set_beacon_related_registers,
+ .set_bcn_intv = rtl92cu_set_beacon_interval,
+ .update_interrupt_mask = rtl92cu_update_interrupt_mask,
+ .get_hw_reg = rtl92cu_get_hw_reg,
+ .set_hw_reg = rtl92cu_set_hw_reg,
+ .update_rate_table = rtl92cu_update_hal_rate_table,
+ .update_rate_mask = rtl92cu_update_hal_rate_mask,
+ .fill_tx_desc = rtl92cu_tx_fill_desc,
+ .fill_fake_txdesc = rtl92cu_fill_fake_txdesc,
+ .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc,
+ .cmd_send_packet = rtl92cu_cmd_send_packet,
+ .query_rx_desc = rtl92cu_rx_query_desc,
+ .set_channel_access = rtl92cu_update_channel_access_setting,
+ .radio_onoff_checking = rtl92cu_gpio_radio_on_off_checking,
+ .set_bw_mode = rtl92c_phy_set_bw_mode,
+ .switch_channel = rtl92c_phy_sw_chnl,
+ .dm_watchdog = rtl92c_dm_watchdog,
+ .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+ .set_rf_power_state = rtl92cu_phy_set_rf_power_state,
+ .led_control = rtl92cu_led_control,
+ .enable_hw_sec = rtl92cu_enable_hw_security_config,
+ .set_key = rtl92c_set_key,
+ .init_sw_leds = rtl92cu_init_sw_leds,
+ .deinit_sw_leds = rtl92cu_deinit_sw_leds,
+ .get_bbreg = rtl92c_phy_query_bb_reg,
+ .set_bbreg = rtl92c_phy_set_bb_reg,
+ .get_rfreg = rtl92cu_phy_query_rf_reg,
+ .set_rfreg = rtl92cu_phy_set_rf_reg,
+ .phy_rf6052_config = rtl92cu_phy_rf6052_config,
+ .phy_rf6052_set_cck_txpower = rtl92cu_phy_rf6052_set_cck_txpower,
+ .phy_rf6052_set_ofdm_txpower = rtl92cu_phy_rf6052_set_ofdm_txpower,
+ .config_bb_with_headerfile = _rtl92cu_phy_config_bb_with_headerfile,
+ .config_bb_with_pgheaderfile = _rtl92cu_phy_config_bb_with_pgheaderfile,
+ .phy_lc_calibrate = _rtl92cu_phy_lc_calibrate,
+ .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
+ .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
+};
+
+static struct rtl_mod_params rtl92cu_mod_params = {
+ .sw_crypto = 0,
+};
+
+static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
+ /* rx */
+ .in_ep_num = RTL92C_USB_BULK_IN_NUM,
+ .rx_urb_num = RTL92C_NUM_RX_URBS,
+ .rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER,
+ .usb_rx_hdl = rtl8192cu_rx_hdl,
+ .usb_rx_segregate_hdl = NULL, /* rtl8192c_rx_segregate_hdl; */
+ /* tx */
+ .usb_tx_cleanup = rtl8192c_tx_cleanup,
+ .usb_tx_post_hdl = rtl8192c_tx_post_hdl,
+ .usb_tx_aggregate_hdl = rtl8192c_tx_aggregate_hdl,
+ /* endpoint mapping */
+ .usb_endpoint_mapping = rtl8192cu_endpoint_mapping,
+ .usb_mq_to_hwq = rtl8192cu_mq_to_hwq,
+};
+
+static struct rtl_hal_cfg rtl92cu_hal_cfg = {
+ .name = "rtl92c_usb",
+ .fw_name = "rtlwifi/rtl8192cufw.bin",
+ .ops = &rtl8192cu_hal_ops,
+ .mod_params = &rtl92cu_mod_params,
+ .usb_interface_cfg = &rtl92cu_interface_cfg,
+
+ .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+ .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+ .maps[SYS_CLK] = REG_SYS_CLKR,
+ .maps[MAC_RCR_AM] = AM,
+ .maps[MAC_RCR_AB] = AB,
+ .maps[MAC_RCR_ACRC32] = ACRC32,
+ .maps[MAC_RCR_ACF] = ACF,
+ .maps[MAC_RCR_AAP] = AAP,
+
+ .maps[EFUSE_TEST] = REG_EFUSE_TEST,
+ .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_CLK] = 0,
+ .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+ .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+ .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
+ .maps[EFUSE_ANA8M] = EFUSE_ANA8M,
+ .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+ .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+ .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+
+ .maps[RWCAM] = REG_CAMCMD,
+ .maps[WCAMI] = REG_CAMWRITE,
+ .maps[RCAMO] = REG_CAMREAD,
+ .maps[CAMDBG] = REG_CAMDBG,
+ .maps[SECR] = REG_SECCFG,
+ .maps[SEC_CAM_NONE] = CAM_NONE,
+ .maps[SEC_CAM_WEP40] = CAM_WEP40,
+ .maps[SEC_CAM_TKIP] = CAM_TKIP,
+ .maps[SEC_CAM_AES] = CAM_AES,
+ .maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+ .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+ .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+ .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+ .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+ .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+ .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+ .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
+ .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+ .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+ .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+ .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+ .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+ .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+ .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+ .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
+ .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
+
+ .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+ .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+ .maps[RTL_IMR_BcnInt] = IMR_BCNINT,
+ .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+ .maps[RTL_IMR_RDU] = IMR_RDU,
+ .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+ .maps[RTL_IMR_BDOK] = IMR_BDOK,
+ .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+ .maps[RTL_IMR_TBDER] = IMR_TBDER,
+ .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+ .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+ .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+ .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+ .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+ .maps[RTL_IMR_VODOK] = IMR_VODOK,
+ .maps[RTL_IMR_ROK] = IMR_ROK,
+ .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
+
+ .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
+ .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
+};
+
+#define USB_VENDER_ID_REALTEK 0x0bda
+
+/* 2010-10-19 DID_USB_V3.4 */
+static struct usb_device_id rtl8192c_usb_ids[] = {
+
+ /*=== Realtek demoboard ===*/
+ /* Default ID */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
+
+ /****** 8188CU ********/
+ /* 8188CE-VAU USB minCard */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
+ /* 8188cu 1*1 dongle */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)},
+ /* 8188cu 1*1 dongle, (b/g mode only) */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
+ /* 8188cu Slim Solo */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)},
+ /* 8188cu Slim Combo */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+ /* 8188RU High-power USB Dongle */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
+ /* 8188CE-VAU USB minCard (b/g mode only) */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
+ /* 8188 Combo for BC4 */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
+
+ /****** 8192CU ********/
+ /* 8191cu 1*2 */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
+ /* 8192cu 2*2 */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+ /* 8192CE-VAU USB minCard */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
+
+ /*=== Customer ID ===*/
+ /****** 8188CU ********/
+ {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
+ {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
+ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+ /* HP - Lite-On ,8188CUS Slim Combo */
+ {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
+ {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
+ {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
+ {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
+ {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
+	{RTL_USB_DEVICE(0x13d3, 0x3358, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
+	/* Russian customer -Azwave (8188CE-VAU b/g mode only) */
+	{RTL_USB_DEVICE(0x13d3, 0x3359, rtl92cu_hal_cfg)},
+
+ /****** 8192CU ********/
+ {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
+ {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
+ {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
+ {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/
+ {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
+ {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids);
+
+static struct usb_driver rtl8192cu_driver = {
+ .name = "rtl8192cu",
+ .probe = rtl_usb_probe,
+ .disconnect = rtl_usb_disconnect,
+ .id_table = rtl8192c_usb_ids,
+
+#ifdef CONFIG_PM
+ /* .suspend = rtl_usb_suspend, */
+ /* .resume = rtl_usb_resume, */
+ /* .reset_resume = rtl8192c_resume, */
+#endif /* CONFIG_PM */
+#ifdef CONFIG_AUTOSUSPEND
+ .supports_autosuspend = 1,
+#endif
+};
+
+static int __init rtl8192cu_init(void)
+{
+ return usb_register(&rtl8192cu_driver);
+}
+
+static void __exit rtl8192cu_exit(void)
+{
+ usb_deregister(&rtl8192cu_driver);
+}
+
+module_init(rtl8192cu_init);
+module_exit(rtl8192cu_exit);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
new file mode 100644
index 000000000000..43b1177924ab
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -0,0 +1,53 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_SW_H__
+#define __RTL92CU_SW_H__
+
+#define EFUSE_MAX_SECTION 16
+
+void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *powerlevel);
+void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype);
+bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype);
+void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
+void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
+bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr, u32 bitmask);
+void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
new file mode 100644
index 000000000000..d57ef5e88a9e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
@@ -0,0 +1,1888 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "table.h"
+
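+/*
+ * The arrays below are flat (address, value) pairs consumed two entries at
+ * a time by the MAC/BB/RF header-file config routines in phy.c; addresses
+ * in the 0xfe..0xf9 range encode delays rather than register offsets.
+ */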
+u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = {
+ 0x024, 0x0011800f,
+ 0x028, 0x00ffdb83,
+ 0x800, 0x80040002,
+ 0x804, 0x00000003,
+ 0x808, 0x0000fc00,
+ 0x80c, 0x0000000a,
+ 0x810, 0x10005388,
+ 0x814, 0x020c3d10,
+ 0x818, 0x02200385,
+ 0x81c, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390004,
+ 0x828, 0x01000100,
+ 0x82c, 0x00390004,
+ 0x830, 0x27272727,
+ 0x834, 0x27272727,
+ 0x838, 0x27272727,
+ 0x83c, 0x27272727,
+ 0x840, 0x00010000,
+ 0x844, 0x00010000,
+ 0x848, 0x27272727,
+ 0x84c, 0x27272727,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569a569a,
+ 0x85c, 0x0c1b25a4,
+ 0x860, 0x66e60230,
+ 0x864, 0x061f0130,
+ 0x868, 0x27272727,
+ 0x86c, 0x2b2b2b27,
+ 0x870, 0x07000700,
+ 0x874, 0x22184000,
+ 0x878, 0x08080808,
+ 0x87c, 0x00000000,
+ 0x880, 0xc0083070,
+ 0x884, 0x000004d5,
+ 0x888, 0x00000000,
+ 0x88c, 0xcc0000c0,
+ 0x890, 0x00000800,
+ 0x894, 0xfffffffe,
+ 0x898, 0x40302010,
+ 0x89c, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90c, 0x81121313,
+ 0xa00, 0x00d047c8,
+ 0xa04, 0x80ff000c,
+ 0xa08, 0x8c838300,
+ 0xa0c, 0x2e68120f,
+ 0xa10, 0x9500bb78,
+ 0xa14, 0x11144028,
+ 0xa18, 0x00881117,
+ 0xa1c, 0x89140f00,
+ 0xa20, 0x1a1b0000,
+ 0xa24, 0x090e1317,
+ 0xa28, 0x00000204,
+ 0xa2c, 0x00d30000,
+ 0xa70, 0x101fbf00,
+ 0xa74, 0x00000007,
+ 0xc00, 0x48071d40,
+ 0xc04, 0x03a05633,
+ 0xc08, 0x000000e4,
+ 0xc0c, 0x6c6c6c6c,
+ 0xc10, 0x08800000,
+ 0xc14, 0x40000100,
+ 0xc18, 0x08800000,
+ 0xc1c, 0x40000100,
+ 0xc20, 0x00000000,
+ 0xc24, 0x00000000,
+ 0xc28, 0x00000000,
+ 0xc2c, 0x00000000,
+ 0xc30, 0x69e9ac44,
+ 0xc34, 0x469652cf,
+ 0xc38, 0x49795994,
+ 0xc3c, 0x0a97971c,
+ 0xc40, 0x1f7c403f,
+ 0xc44, 0x000100b7,
+ 0xc48, 0xec020107,
+ 0xc4c, 0x007f037f,
+ 0xc50, 0x6954341e,
+ 0xc54, 0x43bc0094,
+ 0xc58, 0x6954341e,
+ 0xc5c, 0x433c0094,
+ 0xc60, 0x00000000,
+ 0xc64, 0x5116848b,
+ 0xc68, 0x47c00bff,
+ 0xc6c, 0x00000036,
+ 0xc70, 0x2c7f000d,
+ 0xc74, 0x0186115b,
+ 0xc78, 0x0000001f,
+ 0xc7c, 0x00b99612,
+ 0xc80, 0x40000100,
+ 0xc84, 0x20f60000,
+ 0xc88, 0x40000100,
+ 0xc8c, 0x20200000,
+ 0xc90, 0x00121820,
+ 0xc94, 0x00000000,
+ 0xc98, 0x00121820,
+ 0xc9c, 0x00007f7f,
+ 0xca0, 0x00000000,
+ 0xca4, 0x00000080,
+ 0xca8, 0x00000000,
+ 0xcac, 0x00000000,
+ 0xcb0, 0x00000000,
+ 0xcb4, 0x00000000,
+ 0xcb8, 0x00000000,
+ 0xcbc, 0x28000000,
+ 0xcc0, 0x00000000,
+ 0xcc4, 0x00000000,
+ 0xcc8, 0x00000000,
+ 0xccc, 0x00000000,
+ 0xcd0, 0x00000000,
+ 0xcd4, 0x00000000,
+ 0xcd8, 0x64b22427,
+ 0xcdc, 0x00766932,
+ 0xce0, 0x00222222,
+ 0xce4, 0x00000000,
+ 0xce8, 0x37644302,
+ 0xcec, 0x2f97d40c,
+ 0xd00, 0x00080740,
+ 0xd04, 0x00020403,
+ 0xd08, 0x0000907f,
+ 0xd0c, 0x20010201,
+ 0xd10, 0xa0633333,
+ 0xd14, 0x3333bc43,
+ 0xd18, 0x7a8f5b6b,
+ 0xd2c, 0xcc979975,
+ 0xd30, 0x00000000,
+ 0xd34, 0x80608000,
+ 0xd38, 0x00000000,
+ 0xd3c, 0x00027293,
+ 0xd40, 0x00000000,
+ 0xd44, 0x00000000,
+ 0xd48, 0x00000000,
+ 0xd4c, 0x00000000,
+ 0xd50, 0x6437140a,
+ 0xd54, 0x00000000,
+ 0xd58, 0x00000000,
+ 0xd5c, 0x30032064,
+ 0xd60, 0x4653de68,
+ 0xd64, 0x04518a3c,
+ 0xd68, 0x00002101,
+ 0xd6c, 0x2a201c16,
+ 0xd70, 0x1812362e,
+ 0xd74, 0x322c2220,
+ 0xd78, 0x000e3c24,
+ 0xe00, 0x2a2a2a2a,
+ 0xe04, 0x2a2a2a2a,
+ 0xe08, 0x03902a2a,
+ 0xe10, 0x2a2a2a2a,
+ 0xe14, 0x2a2a2a2a,
+ 0xe18, 0x2a2a2a2a,
+ 0xe1c, 0x2a2a2a2a,
+ 0xe28, 0x00000000,
+ 0xe30, 0x1000dc1f,
+ 0xe34, 0x10008c1f,
+ 0xe38, 0x02140102,
+ 0xe3c, 0x681604c2,
+ 0xe40, 0x01007c00,
+ 0xe44, 0x01004800,
+ 0xe48, 0xfb000000,
+ 0xe4c, 0x000028d1,
+ 0xe50, 0x1000dc1f,
+ 0xe54, 0x10008c1f,
+ 0xe58, 0x02140102,
+ 0xe5c, 0x28160d05,
+ 0xe60, 0x00000010,
+ 0xe68, 0x001b25a4,
+ 0xe6c, 0x63db25a4,
+ 0xe70, 0x63db25a4,
+ 0xe74, 0x0c1b25a4,
+ 0xe78, 0x0c1b25a4,
+ 0xe7c, 0x0c1b25a4,
+ 0xe80, 0x0c1b25a4,
+ 0xe84, 0x63db25a4,
+ 0xe88, 0x0c1b25a4,
+ 0xe8c, 0x63db25a4,
+ 0xed0, 0x63db25a4,
+ 0xed4, 0x63db25a4,
+ 0xed8, 0x63db25a4,
+ 0xedc, 0x001b25a4,
+ 0xee0, 0x001b25a4,
+ 0xeec, 0x6fdb25a4,
+ 0xf14, 0x00000003,
+ 0xf4c, 0x00000000,
+ 0xf00, 0x00000300,
+};
+
+u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
+ 0x024, 0x0011800f,
+ 0x028, 0x00ffdb83,
+ 0x800, 0x80040000,
+ 0x804, 0x00000001,
+ 0x808, 0x0000fc00,
+ 0x80c, 0x0000000a,
+ 0x810, 0x10005388,
+ 0x814, 0x020c3d10,
+ 0x818, 0x02200385,
+ 0x81c, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390004,
+ 0x828, 0x00000000,
+ 0x82c, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83c, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84c, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569a569a,
+ 0x85c, 0x001b25a4,
+ 0x860, 0x66e60230,
+ 0x864, 0x061f0130,
+ 0x868, 0x00000000,
+ 0x86c, 0x32323200,
+ 0x870, 0x07000700,
+ 0x874, 0x22004000,
+ 0x878, 0x00000808,
+ 0x87c, 0x00000000,
+ 0x880, 0xc0083070,
+ 0x884, 0x000004d5,
+ 0x888, 0x00000000,
+ 0x88c, 0xccc000c0,
+ 0x890, 0x00000800,
+ 0x894, 0xfffffffe,
+ 0x898, 0x40302010,
+ 0x89c, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90c, 0x81121111,
+ 0xa00, 0x00d047c8,
+ 0xa04, 0x80ff000c,
+ 0xa08, 0x8c838300,
+ 0xa0c, 0x2e68120f,
+ 0xa10, 0x9500bb78,
+ 0xa14, 0x11144028,
+ 0xa18, 0x00881117,
+ 0xa1c, 0x89140f00,
+ 0xa20, 0x1a1b0000,
+ 0xa24, 0x090e1317,
+ 0xa28, 0x00000204,
+ 0xa2c, 0x00d30000,
+ 0xa70, 0x101fbf00,
+ 0xa74, 0x00000007,
+ 0xc00, 0x48071d40,
+ 0xc04, 0x03a05611,
+ 0xc08, 0x000000e4,
+ 0xc0c, 0x6c6c6c6c,
+ 0xc10, 0x08800000,
+ 0xc14, 0x40000100,
+ 0xc18, 0x08800000,
+ 0xc1c, 0x40000100,
+ 0xc20, 0x00000000,
+ 0xc24, 0x00000000,
+ 0xc28, 0x00000000,
+ 0xc2c, 0x00000000,
+ 0xc30, 0x69e9ac44,
+ 0xc34, 0x469652cf,
+ 0xc38, 0x49795994,
+ 0xc3c, 0x0a97971c,
+ 0xc40, 0x1f7c403f,
+ 0xc44, 0x000100b7,
+ 0xc48, 0xec020107,
+ 0xc4c, 0x007f037f,
+ 0xc50, 0x6954341e,
+ 0xc54, 0x43bc0094,
+ 0xc58, 0x6954341e,
+ 0xc5c, 0x433c0094,
+ 0xc60, 0x00000000,
+ 0xc64, 0x5116848b,
+ 0xc68, 0x47c00bff,
+ 0xc6c, 0x00000036,
+ 0xc70, 0x2c7f000d,
+ 0xc74, 0x018610db,
+ 0xc78, 0x0000001f,
+ 0xc7c, 0x00b91612,
+ 0xc80, 0x40000100,
+ 0xc84, 0x20f60000,
+ 0xc88, 0x40000100,
+ 0xc8c, 0x20200000,
+ 0xc90, 0x00121820,
+ 0xc94, 0x00000000,
+ 0xc98, 0x00121820,
+ 0xc9c, 0x00007f7f,
+ 0xca0, 0x00000000,
+ 0xca4, 0x00000080,
+ 0xca8, 0x00000000,
+ 0xcac, 0x00000000,
+ 0xcb0, 0x00000000,
+ 0xcb4, 0x00000000,
+ 0xcb8, 0x00000000,
+ 0xcbc, 0x28000000,
+ 0xcc0, 0x00000000,
+ 0xcc4, 0x00000000,
+ 0xcc8, 0x00000000,
+ 0xccc, 0x00000000,
+ 0xcd0, 0x00000000,
+ 0xcd4, 0x00000000,
+ 0xcd8, 0x64b22427,
+ 0xcdc, 0x00766932,
+ 0xce0, 0x00222222,
+ 0xce4, 0x00000000,
+ 0xce8, 0x37644302,
+ 0xcec, 0x2f97d40c,
+ 0xd00, 0x00080740,
+ 0xd04, 0x00020401,
+ 0xd08, 0x0000907f,
+ 0xd0c, 0x20010201,
+ 0xd10, 0xa0633333,
+ 0xd14, 0x3333bc43,
+ 0xd18, 0x7a8f5b6b,
+ 0xd2c, 0xcc979975,
+ 0xd30, 0x00000000,
+ 0xd34, 0x80608000,
+ 0xd38, 0x00000000,
+ 0xd3c, 0x00027293,
+ 0xd40, 0x00000000,
+ 0xd44, 0x00000000,
+ 0xd48, 0x00000000,
+ 0xd4c, 0x00000000,
+ 0xd50, 0x6437140a,
+ 0xd54, 0x00000000,
+ 0xd58, 0x00000000,
+ 0xd5c, 0x30032064,
+ 0xd60, 0x4653de68,
+ 0xd64, 0x04518a3c,
+ 0xd68, 0x00002101,
+ 0xd6c, 0x2a201c16,
+ 0xd70, 0x1812362e,
+ 0xd74, 0x322c2220,
+ 0xd78, 0x000e3c24,
+ 0xe00, 0x2a2a2a2a,
+ 0xe04, 0x2a2a2a2a,
+ 0xe08, 0x03902a2a,
+ 0xe10, 0x2a2a2a2a,
+ 0xe14, 0x2a2a2a2a,
+ 0xe18, 0x2a2a2a2a,
+ 0xe1c, 0x2a2a2a2a,
+ 0xe28, 0x00000000,
+ 0xe30, 0x1000dc1f,
+ 0xe34, 0x10008c1f,
+ 0xe38, 0x02140102,
+ 0xe3c, 0x681604c2,
+ 0xe40, 0x01007c00,
+ 0xe44, 0x01004800,
+ 0xe48, 0xfb000000,
+ 0xe4c, 0x000028d1,
+ 0xe50, 0x1000dc1f,
+ 0xe54, 0x10008c1f,
+ 0xe58, 0x02140102,
+ 0xe5c, 0x28160d05,
+ 0xe60, 0x00000008,
+ 0xe68, 0x001b25a4,
+ 0xe6c, 0x631b25a0,
+ 0xe70, 0x631b25a0,
+ 0xe74, 0x081b25a0,
+ 0xe78, 0x081b25a0,
+ 0xe7c, 0x081b25a0,
+ 0xe80, 0x081b25a0,
+ 0xe84, 0x631b25a0,
+ 0xe88, 0x081b25a0,
+ 0xe8c, 0x631b25a0,
+ 0xed0, 0x631b25a0,
+ 0xed4, 0x631b25a0,
+ 0xed8, 0x631b25a0,
+ 0xedc, 0x001b25a0,
+ 0xee0, 0x001b25a0,
+ 0xeec, 0x6b1b25a0,
+ 0xf14, 0x00000003,
+ 0xf4c, 0x00000000,
+ 0xf00, 0x00000300,
+};
+
+u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH] = {
+ 0xe00, 0xffffffff, 0x07090c0c,
+ 0xe04, 0xffffffff, 0x01020405,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x0b0c0c0e,
+ 0xe14, 0xffffffff, 0x01030506,
+ 0xe18, 0xffffffff, 0x0b0c0d0e,
+ 0xe1c, 0xffffffff, 0x01030509,
+ 0x830, 0xffffffff, 0x07090c0c,
+ 0x834, 0xffffffff, 0x01020405,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x0b0c0d0e,
+ 0x848, 0xffffffff, 0x01030509,
+ 0x84c, 0xffffffff, 0x0b0c0d0e,
+ 0x868, 0xffffffff, 0x01030509,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x06060606,
+ 0xe14, 0xffffffff, 0x00020406,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x06060606,
+ 0x848, 0xffffffff, 0x00020406,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+};
+
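+/* RF (radio path A) init table: flat (RF register, data) pairs. */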
+u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH] = {
+ 0x000, 0x00030159,
+ 0x001, 0x00031284,
+ 0x002, 0x00098000,
+ 0x003, 0x00018c63,
+ 0x004, 0x000210e7,
+ 0x009, 0x0002044f,
+ 0x00a, 0x0001adb1,
+ 0x00b, 0x00054867,
+ 0x00c, 0x0008992e,
+ 0x00d, 0x0000e52c,
+ 0x00e, 0x00039ce7,
+ 0x00f, 0x00000451,
+ 0x019, 0x00000000,
+ 0x01a, 0x00010255,
+ 0x01b, 0x00060a00,
+ 0x01c, 0x000fc378,
+ 0x01d, 0x000a1250,
+ 0x01e, 0x0004445f,
+ 0x01f, 0x00080001,
+ 0x020, 0x0000b614,
+ 0x021, 0x0006c000,
+ 0x022, 0x00000000,
+ 0x023, 0x00001558,
+ 0x024, 0x00000060,
+ 0x025, 0x00000483,
+ 0x026, 0x0004f000,
+ 0x027, 0x000ec7d9,
+ 0x028, 0x000577c0,
+ 0x029, 0x00004783,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00021334,
+ 0x02a, 0x00000000,
+ 0x02b, 0x00000054,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000c,
+ 0x02a, 0x00000002,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000003,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000004,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000005,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000006,
+ 0x02b, 0x00000709,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000007,
+ 0x02b, 0x00000709,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000008,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0004b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000009,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000a,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000b,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000c,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000d,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000e,
+ 0x02b, 0x0000050b,
+ 0x02b, 0x00066666,
+ 0x02c, 0x0000001a,
+ 0x02a, 0x000e0000,
+ 0x010, 0x0004000f,
+ 0x011, 0x000e31fc,
+ 0x010, 0x0006000f,
+ 0x011, 0x000ff9f8,
+ 0x010, 0x0002000f,
+ 0x011, 0x000203f9,
+ 0x010, 0x0003000f,
+ 0x011, 0x000ff500,
+ 0x010, 0x00000000,
+ 0x011, 0x00000000,
+ 0x010, 0x0008000f,
+ 0x011, 0x0003f100,
+ 0x010, 0x0009000f,
+ 0x011, 0x00023100,
+ 0x012, 0x00032000,
+ 0x012, 0x00071000,
+ 0x012, 0x000b0000,
+ 0x012, 0x000fc000,
+ 0x013, 0x000287af,
+ 0x013, 0x000244b7,
+ 0x013, 0x000204ab,
+ 0x013, 0x0001c49f,
+ 0x013, 0x00018493,
+ 0x013, 0x00014297,
+ 0x013, 0x00010295,
+ 0x013, 0x0000c298,
+ 0x013, 0x0000819c,
+ 0x013, 0x000040a8,
+ 0x013, 0x0000001c,
+ 0x014, 0x0001944c,
+ 0x014, 0x00059444,
+ 0x014, 0x0009944c,
+ 0x014, 0x000d9444,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x015, 0x000cf424,
+ 0x016, 0x000e0330,
+ 0x016, 0x000a0330,
+ 0x016, 0x00060330,
+ 0x016, 0x00020330,
+ 0x000, 0x00010159,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00044457,
+ 0x01f, 0x00080000,
+ 0x000, 0x00030159,
+};
+
+u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH] = {
+ 0x000, 0x00030159,
+ 0x001, 0x00031284,
+ 0x002, 0x00098000,
+ 0x003, 0x00018c63,
+ 0x004, 0x000210e7,
+ 0x009, 0x0002044f,
+ 0x00a, 0x0001adb1,
+ 0x00b, 0x00054867,
+ 0x00c, 0x0008992e,
+ 0x00d, 0x0000e52c,
+ 0x00e, 0x00039ce7,
+ 0x00f, 0x00000451,
+ 0x012, 0x00032000,
+ 0x012, 0x00071000,
+ 0x012, 0x000b0000,
+ 0x012, 0x000fc000,
+ 0x013, 0x000287af,
+ 0x013, 0x000244b7,
+ 0x013, 0x000204ab,
+ 0x013, 0x0001c49f,
+ 0x013, 0x00018493,
+ 0x013, 0x00014297,
+ 0x013, 0x00010295,
+ 0x013, 0x0000c298,
+ 0x013, 0x0000819c,
+ 0x013, 0x000040a8,
+ 0x013, 0x0000001c,
+ 0x014, 0x0001944c,
+ 0x014, 0x00059444,
+ 0x014, 0x0009944c,
+ 0x014, 0x000d9444,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x015, 0x000cf424,
+ 0x016, 0x000e0330,
+ 0x016, 0x000a0330,
+ 0x016, 0x00060330,
+ 0x016, 0x00020330,
+};
+
+u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH] = {
+ 0x000, 0x00030159,
+ 0x001, 0x00031284,
+ 0x002, 0x00098000,
+ 0x003, 0x00018c63,
+ 0x004, 0x000210e7,
+ 0x009, 0x0002044f,
+ 0x00a, 0x0001adb1,
+ 0x00b, 0x00054867,
+ 0x00c, 0x0008992e,
+ 0x00d, 0x0000e52c,
+ 0x00e, 0x00039ce7,
+ 0x00f, 0x00000451,
+ 0x019, 0x00000000,
+ 0x01a, 0x00010255,
+ 0x01b, 0x00060a00,
+ 0x01c, 0x000fc378,
+ 0x01d, 0x000a1250,
+ 0x01e, 0x0004445f,
+ 0x01f, 0x00080001,
+ 0x020, 0x0000b614,
+ 0x021, 0x0006c000,
+ 0x022, 0x00000000,
+ 0x023, 0x00001558,
+ 0x024, 0x00000060,
+ 0x025, 0x00000483,
+ 0x026, 0x0004f000,
+ 0x027, 0x000ec7d9,
+ 0x028, 0x000577c0,
+ 0x029, 0x00004783,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00021334,
+ 0x02a, 0x00000000,
+ 0x02b, 0x00000054,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000c,
+ 0x02a, 0x00000002,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000003,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000004,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000005,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000006,
+ 0x02b, 0x00000709,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000007,
+ 0x02b, 0x00000709,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000008,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0004b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000009,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000a,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000b,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000c,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000d,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000e,
+ 0x02b, 0x0000050b,
+ 0x02b, 0x00066666,
+ 0x02c, 0x0000001a,
+ 0x02a, 0x000e0000,
+ 0x010, 0x0004000f,
+ 0x011, 0x000e31fc,
+ 0x010, 0x0006000f,
+ 0x011, 0x000ff9f8,
+ 0x010, 0x0002000f,
+ 0x011, 0x000203f9,
+ 0x010, 0x0003000f,
+ 0x011, 0x000ff500,
+ 0x010, 0x00000000,
+ 0x011, 0x00000000,
+ 0x010, 0x0008000f,
+ 0x011, 0x0003f100,
+ 0x010, 0x0009000f,
+ 0x011, 0x00023100,
+ 0x012, 0x00032000,
+ 0x012, 0x00071000,
+ 0x012, 0x000b0000,
+ 0x012, 0x000fc000,
+ 0x013, 0x000287b3,
+ 0x013, 0x000244b7,
+ 0x013, 0x000204ab,
+ 0x013, 0x0001c49f,
+ 0x013, 0x00018493,
+ 0x013, 0x0001429b,
+ 0x013, 0x00010299,
+ 0x013, 0x0000c29c,
+ 0x013, 0x000081a0,
+ 0x013, 0x000040ac,
+ 0x013, 0x00000020,
+ 0x014, 0x0001944c,
+ 0x014, 0x00059444,
+ 0x014, 0x0009944c,
+ 0x014, 0x000d9444,
+ 0x015, 0x0000f405,
+ 0x015, 0x0004f405,
+ 0x015, 0x0008f405,
+ 0x015, 0x000cf405,
+ 0x016, 0x000e0330,
+ 0x016, 0x000a0330,
+ 0x016, 0x00060330,
+ 0x016, 0x00020330,
+ 0x000, 0x00010159,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00044457,
+ 0x01f, 0x00080000,
+ 0x000, 0x00030159,
+};
+
+u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH] = {
+ 0x0,
+};
+
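+/* MAC register init table: (offset, value) pairs; every value fits in one byte. */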
+u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH] = {
+ 0x420, 0x00000080,
+ 0x423, 0x00000000,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000006,
+ 0x437, 0x00000007,
+ 0x438, 0x00000000,
+ 0x439, 0x00000000,
+ 0x43a, 0x00000000,
+ 0x43b, 0x00000001,
+ 0x43c, 0x00000004,
+ 0x43d, 0x00000005,
+ 0x43e, 0x00000006,
+ 0x43f, 0x00000007,
+ 0x440, 0x0000005d,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000015,
+ 0x445, 0x000000f0,
+ 0x446, 0x0000000f,
+ 0x447, 0x00000000,
+ 0x458, 0x00000041,
+ 0x459, 0x000000a8,
+ 0x45a, 0x00000072,
+ 0x45b, 0x000000b9,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x462, 0x00000008,
+ 0x463, 0x00000003,
+ 0x4c8, 0x000000ff,
+ 0x4c9, 0x00000008,
+ 0x4cc, 0x000000ff,
+ 0x4cd, 0x000000ff,
+ 0x4ce, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000a2,
+ 0x502, 0x0000002f,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000a3,
+ 0x506, 0x0000005e,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002b,
+ 0x509, 0x000000a4,
+ 0x50a, 0x0000005e,
+ 0x50b, 0x00000000,
+ 0x50c, 0x0000004f,
+ 0x50d, 0x000000a4,
+ 0x50e, 0x00000000,
+ 0x50f, 0x00000000,
+ 0x512, 0x0000001c,
+ 0x514, 0x0000000a,
+ 0x515, 0x00000010,
+ 0x516, 0x0000000a,
+ 0x517, 0x00000010,
+ 0x51a, 0x00000016,
+ 0x524, 0x0000000f,
+ 0x525, 0x0000004f,
+ 0x546, 0x00000040,
+ 0x547, 0x00000000,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55a, 0x00000002,
+ 0x55d, 0x000000ff,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000e,
+ 0x609, 0x0000002a,
+ 0x652, 0x00000020,
+ 0x63c, 0x0000000a,
+ 0x63d, 0x0000000e,
+ 0x63e, 0x0000000a,
+ 0x63f, 0x0000000e,
+ 0x66e, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70a, 0x00000065,
+ 0x70b, 0x00000087,
+};
+
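+/* AGC gain table: each pair programs register 0xc78 with one gain-table word. */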
+u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7a060001,
+ 0xc78, 0x79070001,
+ 0xc78, 0x78080001,
+ 0xc78, 0x77090001,
+ 0xc78, 0x760a0001,
+ 0xc78, 0x750b0001,
+ 0xc78, 0x740c0001,
+ 0xc78, 0x730d0001,
+ 0xc78, 0x720e0001,
+ 0xc78, 0x710f0001,
+ 0xc78, 0x70100001,
+ 0xc78, 0x6f110001,
+ 0xc78, 0x6e120001,
+ 0xc78, 0x6d130001,
+ 0xc78, 0x6c140001,
+ 0xc78, 0x6b150001,
+ 0xc78, 0x6a160001,
+ 0xc78, 0x69170001,
+ 0xc78, 0x68180001,
+ 0xc78, 0x67190001,
+ 0xc78, 0x661a0001,
+ 0xc78, 0x651b0001,
+ 0xc78, 0x641c0001,
+ 0xc78, 0x631d0001,
+ 0xc78, 0x621e0001,
+ 0xc78, 0x611f0001,
+ 0xc78, 0x60200001,
+ 0xc78, 0x49210001,
+ 0xc78, 0x48220001,
+ 0xc78, 0x47230001,
+ 0xc78, 0x46240001,
+ 0xc78, 0x45250001,
+ 0xc78, 0x44260001,
+ 0xc78, 0x43270001,
+ 0xc78, 0x42280001,
+ 0xc78, 0x41290001,
+ 0xc78, 0x402a0001,
+ 0xc78, 0x262b0001,
+ 0xc78, 0x252c0001,
+ 0xc78, 0x242d0001,
+ 0xc78, 0x232e0001,
+ 0xc78, 0x222f0001,
+ 0xc78, 0x21300001,
+ 0xc78, 0x20310001,
+ 0xc78, 0x06320001,
+ 0xc78, 0x05330001,
+ 0xc78, 0x04340001,
+ 0xc78, 0x03350001,
+ 0xc78, 0x02360001,
+ 0xc78, 0x01370001,
+ 0xc78, 0x00380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x7b400001,
+ 0xc78, 0x7b410001,
+ 0xc78, 0x7b420001,
+ 0xc78, 0x7b430001,
+ 0xc78, 0x7b440001,
+ 0xc78, 0x7b450001,
+ 0xc78, 0x7a460001,
+ 0xc78, 0x79470001,
+ 0xc78, 0x78480001,
+ 0xc78, 0x77490001,
+ 0xc78, 0x764a0001,
+ 0xc78, 0x754b0001,
+ 0xc78, 0x744c0001,
+ 0xc78, 0x734d0001,
+ 0xc78, 0x724e0001,
+ 0xc78, 0x714f0001,
+ 0xc78, 0x70500001,
+ 0xc78, 0x6f510001,
+ 0xc78, 0x6e520001,
+ 0xc78, 0x6d530001,
+ 0xc78, 0x6c540001,
+ 0xc78, 0x6b550001,
+ 0xc78, 0x6a560001,
+ 0xc78, 0x69570001,
+ 0xc78, 0x68580001,
+ 0xc78, 0x67590001,
+ 0xc78, 0x665a0001,
+ 0xc78, 0x655b0001,
+ 0xc78, 0x645c0001,
+ 0xc78, 0x635d0001,
+ 0xc78, 0x625e0001,
+ 0xc78, 0x615f0001,
+ 0xc78, 0x60600001,
+ 0xc78, 0x49610001,
+ 0xc78, 0x48620001,
+ 0xc78, 0x47630001,
+ 0xc78, 0x46640001,
+ 0xc78, 0x45650001,
+ 0xc78, 0x44660001,
+ 0xc78, 0x43670001,
+ 0xc78, 0x42680001,
+ 0xc78, 0x41690001,
+ 0xc78, 0x406a0001,
+ 0xc78, 0x266b0001,
+ 0xc78, 0x256c0001,
+ 0xc78, 0x246d0001,
+ 0xc78, 0x236e0001,
+ 0xc78, 0x226f0001,
+ 0xc78, 0x21700001,
+ 0xc78, 0x20710001,
+ 0xc78, 0x06720001,
+ 0xc78, 0x05730001,
+ 0xc78, 0x04740001,
+ 0xc78, 0x03750001,
+ 0xc78, 0x02760001,
+ 0xc78, 0x01770001,
+ 0xc78, 0x00780001,
+ 0xc78, 0x00790001,
+ 0xc78, 0x007a0001,
+ 0xc78, 0x007b0001,
+ 0xc78, 0x007c0001,
+ 0xc78, 0x007d0001,
+ 0xc78, 0x007e0001,
+ 0xc78, 0x007f0001,
+ 0xc78, 0x3800001e,
+ 0xc78, 0x3801001e,
+ 0xc78, 0x3802001e,
+ 0xc78, 0x3803001e,
+ 0xc78, 0x3804001e,
+ 0xc78, 0x3805001e,
+ 0xc78, 0x3806001e,
+ 0xc78, 0x3807001e,
+ 0xc78, 0x3808001e,
+ 0xc78, 0x3c09001e,
+ 0xc78, 0x3e0a001e,
+ 0xc78, 0x400b001e,
+ 0xc78, 0x440c001e,
+ 0xc78, 0x480d001e,
+ 0xc78, 0x4c0e001e,
+ 0xc78, 0x500f001e,
+ 0xc78, 0x5210001e,
+ 0xc78, 0x5611001e,
+ 0xc78, 0x5a12001e,
+ 0xc78, 0x5e13001e,
+ 0xc78, 0x6014001e,
+ 0xc78, 0x6015001e,
+ 0xc78, 0x6016001e,
+ 0xc78, 0x6217001e,
+ 0xc78, 0x6218001e,
+ 0xc78, 0x6219001e,
+ 0xc78, 0x621a001e,
+ 0xc78, 0x621b001e,
+ 0xc78, 0x621c001e,
+ 0xc78, 0x621d001e,
+ 0xc78, 0x621e001e,
+ 0xc78, 0x621f001e,
+};
+
+u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7a060001,
+ 0xc78, 0x79070001,
+ 0xc78, 0x78080001,
+ 0xc78, 0x77090001,
+ 0xc78, 0x760a0001,
+ 0xc78, 0x750b0001,
+ 0xc78, 0x740c0001,
+ 0xc78, 0x730d0001,
+ 0xc78, 0x720e0001,
+ 0xc78, 0x710f0001,
+ 0xc78, 0x70100001,
+ 0xc78, 0x6f110001,
+ 0xc78, 0x6e120001,
+ 0xc78, 0x6d130001,
+ 0xc78, 0x6c140001,
+ 0xc78, 0x6b150001,
+ 0xc78, 0x6a160001,
+ 0xc78, 0x69170001,
+ 0xc78, 0x68180001,
+ 0xc78, 0x67190001,
+ 0xc78, 0x661a0001,
+ 0xc78, 0x651b0001,
+ 0xc78, 0x641c0001,
+ 0xc78, 0x631d0001,
+ 0xc78, 0x621e0001,
+ 0xc78, 0x611f0001,
+ 0xc78, 0x60200001,
+ 0xc78, 0x49210001,
+ 0xc78, 0x48220001,
+ 0xc78, 0x47230001,
+ 0xc78, 0x46240001,
+ 0xc78, 0x45250001,
+ 0xc78, 0x44260001,
+ 0xc78, 0x43270001,
+ 0xc78, 0x42280001,
+ 0xc78, 0x41290001,
+ 0xc78, 0x402a0001,
+ 0xc78, 0x262b0001,
+ 0xc78, 0x252c0001,
+ 0xc78, 0x242d0001,
+ 0xc78, 0x232e0001,
+ 0xc78, 0x222f0001,
+ 0xc78, 0x21300001,
+ 0xc78, 0x20310001,
+ 0xc78, 0x06320001,
+ 0xc78, 0x05330001,
+ 0xc78, 0x04340001,
+ 0xc78, 0x03350001,
+ 0xc78, 0x02360001,
+ 0xc78, 0x01370001,
+ 0xc78, 0x00380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x7b400001,
+ 0xc78, 0x7b410001,
+ 0xc78, 0x7b420001,
+ 0xc78, 0x7b430001,
+ 0xc78, 0x7b440001,
+ 0xc78, 0x7b450001,
+ 0xc78, 0x7a460001,
+ 0xc78, 0x79470001,
+ 0xc78, 0x78480001,
+ 0xc78, 0x77490001,
+ 0xc78, 0x764a0001,
+ 0xc78, 0x754b0001,
+ 0xc78, 0x744c0001,
+ 0xc78, 0x734d0001,
+ 0xc78, 0x724e0001,
+ 0xc78, 0x714f0001,
+ 0xc78, 0x70500001,
+ 0xc78, 0x6f510001,
+ 0xc78, 0x6e520001,
+ 0xc78, 0x6d530001,
+ 0xc78, 0x6c540001,
+ 0xc78, 0x6b550001,
+ 0xc78, 0x6a560001,
+ 0xc78, 0x69570001,
+ 0xc78, 0x68580001,
+ 0xc78, 0x67590001,
+ 0xc78, 0x665a0001,
+ 0xc78, 0x655b0001,
+ 0xc78, 0x645c0001,
+ 0xc78, 0x635d0001,
+ 0xc78, 0x625e0001,
+ 0xc78, 0x615f0001,
+ 0xc78, 0x60600001,
+ 0xc78, 0x49610001,
+ 0xc78, 0x48620001,
+ 0xc78, 0x47630001,
+ 0xc78, 0x46640001,
+ 0xc78, 0x45650001,
+ 0xc78, 0x44660001,
+ 0xc78, 0x43670001,
+ 0xc78, 0x42680001,
+ 0xc78, 0x41690001,
+ 0xc78, 0x406a0001,
+ 0xc78, 0x266b0001,
+ 0xc78, 0x256c0001,
+ 0xc78, 0x246d0001,
+ 0xc78, 0x236e0001,
+ 0xc78, 0x226f0001,
+ 0xc78, 0x21700001,
+ 0xc78, 0x20710001,
+ 0xc78, 0x06720001,
+ 0xc78, 0x05730001,
+ 0xc78, 0x04740001,
+ 0xc78, 0x03750001,
+ 0xc78, 0x02760001,
+ 0xc78, 0x01770001,
+ 0xc78, 0x00780001,
+ 0xc78, 0x00790001,
+ 0xc78, 0x007a0001,
+ 0xc78, 0x007b0001,
+ 0xc78, 0x007c0001,
+ 0xc78, 0x007d0001,
+ 0xc78, 0x007e0001,
+ 0xc78, 0x007f0001,
+ 0xc78, 0x3800001e,
+ 0xc78, 0x3801001e,
+ 0xc78, 0x3802001e,
+ 0xc78, 0x3803001e,
+ 0xc78, 0x3804001e,
+ 0xc78, 0x3805001e,
+ 0xc78, 0x3806001e,
+ 0xc78, 0x3807001e,
+ 0xc78, 0x3808001e,
+ 0xc78, 0x3c09001e,
+ 0xc78, 0x3e0a001e,
+ 0xc78, 0x400b001e,
+ 0xc78, 0x440c001e,
+ 0xc78, 0x480d001e,
+ 0xc78, 0x4c0e001e,
+ 0xc78, 0x500f001e,
+ 0xc78, 0x5210001e,
+ 0xc78, 0x5611001e,
+ 0xc78, 0x5a12001e,
+ 0xc78, 0x5e13001e,
+ 0xc78, 0x6014001e,
+ 0xc78, 0x6015001e,
+ 0xc78, 0x6016001e,
+ 0xc78, 0x6217001e,
+ 0xc78, 0x6218001e,
+ 0xc78, 0x6219001e,
+ 0xc78, 0x621a001e,
+ 0xc78, 0x621b001e,
+ 0xc78, 0x621c001e,
+ 0xc78, 0x621d001e,
+ 0xc78, 0x621e001e,
+ 0xc78, 0x621f001e,
+};
+
+u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength] = {
+ 0x024, 0x0011800f,
+ 0x028, 0x00ffdb83,
+ 0x040, 0x000c0004,
+ 0x800, 0x80040000,
+ 0x804, 0x00000001,
+ 0x808, 0x0000fc00,
+ 0x80c, 0x0000000a,
+ 0x810, 0x10005388,
+ 0x814, 0x020c3d10,
+ 0x818, 0x02200385,
+ 0x81c, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390204,
+ 0x828, 0x00000000,
+ 0x82c, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83c, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84c, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569a569a,
+ 0x85c, 0x001b25a4,
+ 0x860, 0x66e60230,
+ 0x864, 0x061f0130,
+ 0x868, 0x00000000,
+ 0x86c, 0x20202000,
+ 0x870, 0x03000300,
+ 0x874, 0x22004000,
+ 0x878, 0x00000808,
+ 0x87c, 0x00ffc3f1,
+ 0x880, 0xc0083070,
+ 0x884, 0x000004d5,
+ 0x888, 0x00000000,
+ 0x88c, 0xccc000c0,
+ 0x890, 0x00000800,
+ 0x894, 0xfffffffe,
+ 0x898, 0x40302010,
+ 0x89c, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90c, 0x81121111,
+ 0xa00, 0x00d047c8,
+ 0xa04, 0x80ff000c,
+ 0xa08, 0x8c838300,
+ 0xa0c, 0x2e68120f,
+ 0xa10, 0x9500bb78,
+ 0xa14, 0x11144028,
+ 0xa18, 0x00881117,
+ 0xa1c, 0x89140f00,
+ 0xa20, 0x15160000,
+ 0xa24, 0x070b0f12,
+ 0xa28, 0x00000104,
+ 0xa2c, 0x00d30000,
+ 0xa70, 0x101fbf00,
+ 0xa74, 0x00000007,
+ 0xc00, 0x48071d40,
+ 0xc04, 0x03a05611,
+ 0xc08, 0x000000e4,
+ 0xc0c, 0x6c6c6c6c,
+ 0xc10, 0x08800000,
+ 0xc14, 0x40000100,
+ 0xc18, 0x08800000,
+ 0xc1c, 0x40000100,
+ 0xc20, 0x00000000,
+ 0xc24, 0x00000000,
+ 0xc28, 0x00000000,
+ 0xc2c, 0x00000000,
+ 0xc30, 0x69e9ac44,
+ 0xc34, 0x469652cf,
+ 0xc38, 0x49795994,
+ 0xc3c, 0x0a97971c,
+ 0xc40, 0x1f7c403f,
+ 0xc44, 0x000100b7,
+ 0xc48, 0xec020107,
+ 0xc4c, 0x007f037f,
+ 0xc50, 0x6954342e,
+ 0xc54, 0x43bc0094,
+ 0xc58, 0x6954342f,
+ 0xc5c, 0x433c0094,
+ 0xc60, 0x00000000,
+ 0xc64, 0x5116848b,
+ 0xc68, 0x47c00bff,
+ 0xc6c, 0x00000036,
+ 0xc70, 0x2c46000d,
+ 0xc74, 0x018610db,
+ 0xc78, 0x0000001f,
+ 0xc7c, 0x00b91612,
+ 0xc80, 0x24000090,
+ 0xc84, 0x20f60000,
+ 0xc88, 0x24000090,
+ 0xc8c, 0x20200000,
+ 0xc90, 0x00121820,
+ 0xc94, 0x00000000,
+ 0xc98, 0x00121820,
+ 0xc9c, 0x00007f7f,
+ 0xca0, 0x00000000,
+ 0xca4, 0x00000080,
+ 0xca8, 0x00000000,
+ 0xcac, 0x00000000,
+ 0xcb0, 0x00000000,
+ 0xcb4, 0x00000000,
+ 0xcb8, 0x00000000,
+ 0xcbc, 0x28000000,
+ 0xcc0, 0x00000000,
+ 0xcc4, 0x00000000,
+ 0xcc8, 0x00000000,
+ 0xccc, 0x00000000,
+ 0xcd0, 0x00000000,
+ 0xcd4, 0x00000000,
+ 0xcd8, 0x64b22427,
+ 0xcdc, 0x00766932,
+ 0xce0, 0x00222222,
+ 0xce4, 0x00000000,
+ 0xce8, 0x37644302,
+ 0xcec, 0x2f97d40c,
+ 0xd00, 0x00080740,
+ 0xd04, 0x00020401,
+ 0xd08, 0x0000907f,
+ 0xd0c, 0x20010201,
+ 0xd10, 0xa0633333,
+ 0xd14, 0x3333bc43,
+ 0xd18, 0x7a8f5b6b,
+ 0xd2c, 0xcc979975,
+ 0xd30, 0x00000000,
+ 0xd34, 0x80608000,
+ 0xd38, 0x00000000,
+ 0xd3c, 0x00027293,
+ 0xd40, 0x00000000,
+ 0xd44, 0x00000000,
+ 0xd48, 0x00000000,
+ 0xd4c, 0x00000000,
+ 0xd50, 0x6437140a,
+ 0xd54, 0x00000000,
+ 0xd58, 0x00000000,
+ 0xd5c, 0x30032064,
+ 0xd60, 0x4653de68,
+ 0xd64, 0x04518a3c,
+ 0xd68, 0x00002101,
+ 0xd6c, 0x2a201c16,
+ 0xd70, 0x1812362e,
+ 0xd74, 0x322c2220,
+ 0xd78, 0x000e3c24,
+ 0xe00, 0x24242424,
+ 0xe04, 0x24242424,
+ 0xe08, 0x03902024,
+ 0xe10, 0x24242424,
+ 0xe14, 0x24242424,
+ 0xe18, 0x24242424,
+ 0xe1c, 0x24242424,
+ 0xe28, 0x00000000,
+ 0xe30, 0x1000dc1f,
+ 0xe34, 0x10008c1f,
+ 0xe38, 0x02140102,
+ 0xe3c, 0x681604c2,
+ 0xe40, 0x01007c00,
+ 0xe44, 0x01004800,
+ 0xe48, 0xfb000000,
+ 0xe4c, 0x000028d1,
+ 0xe50, 0x1000dc1f,
+ 0xe54, 0x10008c1f,
+ 0xe58, 0x02140102,
+ 0xe5c, 0x28160d05,
+ 0xe60, 0x00000008,
+ 0xe68, 0x001b25a4,
+ 0xe6c, 0x631b25a0,
+ 0xe70, 0x631b25a0,
+ 0xe74, 0x081b25a0,
+ 0xe78, 0x081b25a0,
+ 0xe7c, 0x081b25a0,
+ 0xe80, 0x081b25a0,
+ 0xe84, 0x631b25a0,
+ 0xe88, 0x081b25a0,
+ 0xe8c, 0x631b25a0,
+ 0xed0, 0x631b25a0,
+ 0xed4, 0x631b25a0,
+ 0xed8, 0x631b25a0,
+ 0xedc, 0x001b25a0,
+ 0xee0, 0x001b25a0,
+ 0xeec, 0x6b1b25a0,
+ 0xee8, 0x31555448,
+ 0xf14, 0x00000003,
+ 0xf4c, 0x00000000,
+ 0xf00, 0x00000300,
+};
+
+u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength] = {
+ 0xe00, 0xffffffff, 0x06080808,
+ 0xe04, 0xffffffff, 0x00040406,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x04060608,
+ 0xe14, 0xffffffff, 0x00020204,
+ 0xe18, 0xffffffff, 0x04060608,
+ 0xe1c, 0xffffffff, 0x00020204,
+ 0x830, 0xffffffff, 0x06080808,
+ 0x834, 0xffffffff, 0x00040406,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x04060608,
+ 0x848, 0xffffffff, 0x00020204,
+ 0x84c, 0xffffffff, 0x04060608,
+ 0x868, 0xffffffff, 0x00020204,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+};
+
+u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength] = {
+ 0x000, 0x00030159,
+ 0x001, 0x00031284,
+ 0x002, 0x00098000,
+ 0x003, 0x00018c63,
+ 0x004, 0x000210e7,
+ 0x009, 0x0002044f,
+ 0x00a, 0x0001adb0,
+ 0x00b, 0x00054867,
+ 0x00c, 0x0008992e,
+ 0x00d, 0x0000e529,
+ 0x00e, 0x00039ce7,
+ 0x00f, 0x00000451,
+ 0x019, 0x00000000,
+ 0x01a, 0x00000255,
+ 0x01b, 0x00060a00,
+ 0x01c, 0x000fc378,
+ 0x01d, 0x000a1250,
+ 0x01e, 0x0004445f,
+ 0x01f, 0x00080001,
+ 0x020, 0x0000b614,
+ 0x021, 0x0006c000,
+ 0x022, 0x0000083c,
+ 0x023, 0x00001558,
+ 0x024, 0x00000060,
+ 0x025, 0x00000483,
+ 0x026, 0x0004f000,
+ 0x027, 0x000ec7d9,
+ 0x028, 0x000977c0,
+ 0x029, 0x00004783,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00021334,
+ 0x02a, 0x00000000,
+ 0x02b, 0x00000054,
+ 0x02a, 0x00000001,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000c,
+ 0x02a, 0x00000002,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000003,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000004,
+ 0x02b, 0x00000808,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000005,
+ 0x02b, 0x00000808,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000006,
+ 0x02b, 0x00000709,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000007,
+ 0x02b, 0x00000709,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000008,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0004b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x00000009,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00053333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000a,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0005b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000b,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00063333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000c,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x0006b333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000d,
+ 0x02b, 0x0000060a,
+ 0x02b, 0x00073333,
+ 0x02c, 0x0000000d,
+ 0x02a, 0x0000000e,
+ 0x02b, 0x0000050b,
+ 0x02b, 0x00066666,
+ 0x02c, 0x0000001a,
+ 0x02a, 0x000e0000,
+ 0x010, 0x0004000f,
+ 0x011, 0x000e31fc,
+ 0x010, 0x0006000f,
+ 0x011, 0x000ff9f8,
+ 0x010, 0x0002000f,
+ 0x011, 0x000203f9,
+ 0x010, 0x0003000f,
+ 0x011, 0x000ff500,
+ 0x010, 0x00000000,
+ 0x011, 0x00000000,
+ 0x010, 0x0008000f,
+ 0x011, 0x0003f100,
+ 0x010, 0x0009000f,
+ 0x011, 0x00023100,
+ 0x012, 0x000d8000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x00028fb4,
+ 0x013, 0x00024fa8,
+ 0x013, 0x000207a4,
+ 0x013, 0x0001c798,
+ 0x013, 0x000183a4,
+ 0x013, 0x00014398,
+ 0x013, 0x000101a4,
+ 0x013, 0x0000c198,
+ 0x013, 0x000080a4,
+ 0x013, 0x00004098,
+ 0x013, 0x00000000,
+ 0x014, 0x0001944c,
+ 0x014, 0x00059444,
+ 0x014, 0x0009944c,
+ 0x014, 0x000d9444,
+ 0x015, 0x0000f405,
+ 0x015, 0x0004f405,
+ 0x015, 0x0008f405,
+ 0x015, 0x000cf405,
+ 0x016, 0x000e0330,
+ 0x016, 0x000a0330,
+ 0x016, 0x00060330,
+ 0x016, 0x00020330,
+ 0x000, 0x00010159,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00044457,
+ 0x01f, 0x00080000,
+ 0x000, 0x00030159,
+};
+
+u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7b060001,
+ 0xc78, 0x7b070001,
+ 0xc78, 0x7b080001,
+ 0xc78, 0x7a090001,
+ 0xc78, 0x790a0001,
+ 0xc78, 0x780b0001,
+ 0xc78, 0x770c0001,
+ 0xc78, 0x760d0001,
+ 0xc78, 0x750e0001,
+ 0xc78, 0x740f0001,
+ 0xc78, 0x73100001,
+ 0xc78, 0x72110001,
+ 0xc78, 0x71120001,
+ 0xc78, 0x70130001,
+ 0xc78, 0x6f140001,
+ 0xc78, 0x6e150001,
+ 0xc78, 0x6d160001,
+ 0xc78, 0x6c170001,
+ 0xc78, 0x6b180001,
+ 0xc78, 0x6a190001,
+ 0xc78, 0x691a0001,
+ 0xc78, 0x681b0001,
+ 0xc78, 0x671c0001,
+ 0xc78, 0x661d0001,
+ 0xc78, 0x651e0001,
+ 0xc78, 0x641f0001,
+ 0xc78, 0x63200001,
+ 0xc78, 0x62210001,
+ 0xc78, 0x61220001,
+ 0xc78, 0x60230001,
+ 0xc78, 0x46240001,
+ 0xc78, 0x45250001,
+ 0xc78, 0x44260001,
+ 0xc78, 0x43270001,
+ 0xc78, 0x42280001,
+ 0xc78, 0x41290001,
+ 0xc78, 0x402a0001,
+ 0xc78, 0x262b0001,
+ 0xc78, 0x252c0001,
+ 0xc78, 0x242d0001,
+ 0xc78, 0x232e0001,
+ 0xc78, 0x222f0001,
+ 0xc78, 0x21300001,
+ 0xc78, 0x20310001,
+ 0xc78, 0x06320001,
+ 0xc78, 0x05330001,
+ 0xc78, 0x04340001,
+ 0xc78, 0x03350001,
+ 0xc78, 0x02360001,
+ 0xc78, 0x01370001,
+ 0xc78, 0x00380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x7b400001,
+ 0xc78, 0x7b410001,
+ 0xc78, 0x7b420001,
+ 0xc78, 0x7b430001,
+ 0xc78, 0x7b440001,
+ 0xc78, 0x7b450001,
+ 0xc78, 0x7b460001,
+ 0xc78, 0x7b470001,
+ 0xc78, 0x7b480001,
+ 0xc78, 0x7a490001,
+ 0xc78, 0x794a0001,
+ 0xc78, 0x784b0001,
+ 0xc78, 0x774c0001,
+ 0xc78, 0x764d0001,
+ 0xc78, 0x754e0001,
+ 0xc78, 0x744f0001,
+ 0xc78, 0x73500001,
+ 0xc78, 0x72510001,
+ 0xc78, 0x71520001,
+ 0xc78, 0x70530001,
+ 0xc78, 0x6f540001,
+ 0xc78, 0x6e550001,
+ 0xc78, 0x6d560001,
+ 0xc78, 0x6c570001,
+ 0xc78, 0x6b580001,
+ 0xc78, 0x6a590001,
+ 0xc78, 0x695a0001,
+ 0xc78, 0x685b0001,
+ 0xc78, 0x675c0001,
+ 0xc78, 0x665d0001,
+ 0xc78, 0x655e0001,
+ 0xc78, 0x645f0001,
+ 0xc78, 0x63600001,
+ 0xc78, 0x62610001,
+ 0xc78, 0x61620001,
+ 0xc78, 0x60630001,
+ 0xc78, 0x46640001,
+ 0xc78, 0x45650001,
+ 0xc78, 0x44660001,
+ 0xc78, 0x43670001,
+ 0xc78, 0x42680001,
+ 0xc78, 0x41690001,
+ 0xc78, 0x406a0001,
+ 0xc78, 0x266b0001,
+ 0xc78, 0x256c0001,
+ 0xc78, 0x246d0001,
+ 0xc78, 0x236e0001,
+ 0xc78, 0x226f0001,
+ 0xc78, 0x21700001,
+ 0xc78, 0x20710001,
+ 0xc78, 0x06720001,
+ 0xc78, 0x05730001,
+ 0xc78, 0x04740001,
+ 0xc78, 0x03750001,
+ 0xc78, 0x02760001,
+ 0xc78, 0x01770001,
+ 0xc78, 0x00780001,
+ 0xc78, 0x00790001,
+ 0xc78, 0x007a0001,
+ 0xc78, 0x007b0001,
+ 0xc78, 0x007c0001,
+ 0xc78, 0x007d0001,
+ 0xc78, 0x007e0001,
+ 0xc78, 0x007f0001,
+ 0xc78, 0x3800001e,
+ 0xc78, 0x3801001e,
+ 0xc78, 0x3802001e,
+ 0xc78, 0x3803001e,
+ 0xc78, 0x3804001e,
+ 0xc78, 0x3805001e,
+ 0xc78, 0x3806001e,
+ 0xc78, 0x3807001e,
+ 0xc78, 0x3808001e,
+ 0xc78, 0x3c09001e,
+ 0xc78, 0x3e0a001e,
+ 0xc78, 0x400b001e,
+ 0xc78, 0x440c001e,
+ 0xc78, 0x480d001e,
+ 0xc78, 0x4c0e001e,
+ 0xc78, 0x500f001e,
+ 0xc78, 0x5210001e,
+ 0xc78, 0x5611001e,
+ 0xc78, 0x5a12001e,
+ 0xc78, 0x5e13001e,
+ 0xc78, 0x6014001e,
+ 0xc78, 0x6015001e,
+ 0xc78, 0x6016001e,
+ 0xc78, 0x6217001e,
+ 0xc78, 0x6218001e,
+ 0xc78, 0x6219001e,
+ 0xc78, 0x621a001e,
+ 0xc78, 0x621b001e,
+ 0xc78, 0x621c001e,
+ 0xc78, 0x621d001e,
+ 0xc78, 0x621e001e,
+ 0xc78, 0x621f001e,
+};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h
new file mode 100644
index 000000000000..c3d5cd826cfa
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h
@@ -0,0 +1,71 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_TABLE__H_
+#define __RTL92CU_TABLE__H_
+
+#include <linux/types.h>
+
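+/*
+ * Register init tables exported from table.c. They are flat arrays of
+ * (register, value) pairs, except the _PG tables, which use
+ * (register, bitmask, value) triplets.
+ */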
+#define RTL8192CUPHY_REG_2TARRAY_LENGTH 374
+extern u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH];
+#define RTL8192CUPHY_REG_1TARRAY_LENGTH 374
+extern u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH];
+
+#define RTL8192CUPHY_REG_ARRAY_PGLENGTH 336
+extern u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH];
+
+#define RTL8192CURADIOA_2TARRAYLENGTH 282
+extern u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH];
+#define RTL8192CURADIOB_2TARRAYLENGTH 78
+extern u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH];
+#define RTL8192CURADIOA_1TARRAYLENGTH 282
+extern u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH];
+#define RTL8192CURADIOB_1TARRAYLENGTH 1
+extern u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH];
+
+#define RTL8192CUMAC_2T_ARRAYLENGTH 172
+extern u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH];
+
+#define RTL8192CUAGCTAB_2TARRAYLENGTH 320
+extern u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH];
+#define RTL8192CUAGCTAB_1TARRAYLENGTH 320
+extern u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH];
+
+#define RTL8192CUPHY_REG_1T_HPArrayLength 378
+extern u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength];
+
+#define RTL8192CUPHY_REG_Array_PG_HPLength 336
+extern u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength];
+
+#define RTL8192CURadioA_1T_HPArrayLength 282
+extern u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength];
+#define RTL8192CUAGCTAB_1T_HPArrayLength 320
+extern u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength];
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
new file mode 100644
index 000000000000..d0b0d43b9a6d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -0,0 +1,687 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../usb.h"
+#include "../ps.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "mac.h"
+#include "trx.h"
+
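+/*
+ * Work out how many bulk-out endpoints a test (T) chip exposes and which TX
+ * queues they serve, based on REG_TEST_SIE_OPTIONAL; return -EINVAL if the
+ * result disagrees with the endpoint count found on the USB interface.
+ */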
+static int _ConfigVerTOutEP(struct ieee80211_hw *hw)
+{
+ u8 ep_cfg, txqsele;
+ u8 ep_nums = 0;
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+ rtlusb->out_queue_sel = 0;
+ ep_cfg = rtl_read_byte(rtlpriv, REG_TEST_SIE_OPTIONAL);
+ ep_cfg = (ep_cfg & USB_TEST_EP_MASK) >> USB_TEST_EP_SHIFT;
+ switch (ep_cfg) {
+ case 0: /* 2 bulk OUT, 1 bulk IN */
+ case 3:
+ rtlusb->out_queue_sel = TX_SELE_HQ | TX_SELE_LQ;
+ ep_nums = 2;
+ break;
+ case 1: /* 1 bulk IN/OUT => map all endpoints to the low queue */
+ case 2: /* 1 bulk IN, 1 bulk OUT => map all endpoints to the high queue */
+ txqsele = rtl_read_byte(rtlpriv, REG_TEST_USB_TXQS);
+ if (txqsele & 0x0F) /* map all endpoints to the high queue */
+ rtlusb->out_queue_sel = TX_SELE_HQ;
+ else if (txqsele & 0xF0) /* map all endpoints to the low queue */
+ rtlusb->out_queue_sel = TX_SELE_LQ;
+ ep_nums = 1;
+ break;
+ default:
+ break;
+ }
+ return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
+}
+
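+/* As above, but for normal (N) chips, using the REG_NORMAL_SIE_EP bitmap. */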
+static int _ConfigVerNOutEP(struct ieee80211_hw *hw)
+{
+ u8 ep_cfg;
+ u8 ep_nums = 0;
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+ rtlusb->out_queue_sel = 0;
+ /* Normal and High queue */
+ ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 1));
+ if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
+ rtlusb->out_queue_sel |= TX_SELE_HQ;
+ ep_nums++;
+ }
+ if ((ep_cfg >> USB_NORMAL_SIE_EP_SHIFT) & USB_NORMAL_SIE_EP_MASK) {
+ rtlusb->out_queue_sel |= TX_SELE_NQ;
+ ep_nums++;
+ }
+ /* Low queue */
+ ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 2));
+ if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
+ rtlusb->out_queue_sel |= TX_SELE_LQ;
+ ep_nums++;
+ }
+ return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
+}
+
+static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB,
+ bool bwificfg, struct rtl_ep_map *ep_map)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (bwificfg) { /* for WMM */
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("USB Chip-B & WMM Setting.....\n"));
+ ep_map->ep_mapping[RTL_TXQ_BE] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BK] = 3;
+ ep_map->ep_mapping[RTL_TXQ_VI] = 3;
+ ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+ ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+ ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ } else { /* typical setting */
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("USB typical Setting.....\n"));
+ ep_map->ep_mapping[RTL_TXQ_BE] = 3;
+ ep_map->ep_mapping[RTL_TXQ_BK] = 3;
+ ep_map->ep_mapping[RTL_TXQ_VI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+ ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+ ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ }
+}
+
+static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool bwificfg,
+ struct rtl_ep_map *ep_map)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if (bwificfg) { /* for WMM */
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("USB 3EP Setting for WMM.....\n"));
+ ep_map->ep_mapping[RTL_TXQ_BE] = 5;
+ ep_map->ep_mapping[RTL_TXQ_BK] = 3;
+ ep_map->ep_mapping[RTL_TXQ_VI] = 3;
+ ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+ ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+ ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ } else { /* typical setting */
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("USB 3EP Setting for typical.....\n"));
+ ep_map->ep_mapping[RTL_TXQ_BE] = 5;
+ ep_map->ep_mapping[RTL_TXQ_BK] = 5;
+ ep_map->ep_mapping[RTL_TXQ_VI] = 3;
+ ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+ ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+ ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ }
+}
+
+static void _OneOutEpMapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
+{
+ ep_map->ep_mapping[RTL_TXQ_BE] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BK] = 2;
+ ep_map->ep_mapping[RTL_TXQ_VI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_VO] = 2;
+ ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
+ ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+}
+
+static int _out_ep_mapping(struct ieee80211_hw *hw)
+{
+ int err = 0;
+ bool bIsChipN, bwificfg = false;
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+ struct rtl_ep_map *ep_map = &(rtlusb->ep_map);
+
+ bIsChipN = IS_NORMAL_CHIP(rtlhal->version);
+ switch (rtlusb->out_ep_nums) {
+ case 2:
+ _TwoOutEpMapping(hw, bIsChipN, bwificfg, ep_map);
+ break;
+ case 3:
+ /* Test chip doesn't support three out EPs. */
+ if (!bIsChipN) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ _ThreeOutEpMapping(hw, bIsChipN, ep_map);
+ break;
+ case 1:
+ _OneOutEpMapping(hw, ep_map);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+err_out:
+ return err;
+}
+/* endpoint mapping */
+int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ int error = 0;
+ if (likely(IS_NORMAL_CHIP(rtlhal->version)))
+ error = _ConfigVerNOutEP(hw);
+ else
+ error = _ConfigVerTOutEP(hw);
+ if (error)
+ goto err_out;
+ error = _out_ep_mapping(hw);
+ if (error)
+ goto err_out;
+err_out:
+ return error;
+}
+
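+/*
+ * Map a mac80211 queue index (0 = VO, 1 = VI, 2 = BE, 3 = BK), or the frame
+ * type for beacon/management frames, to a hardware TX queue.
+ */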
+u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index)
+{
+ u16 hw_queue_index;
+
+ if (unlikely(ieee80211_is_beacon(fc))) {
+ hw_queue_index = RTL_TXQ_BCN;
+ goto out;
+ }
+ if (ieee80211_is_mgmt(fc)) {
+ hw_queue_index = RTL_TXQ_MGT;
+ goto out;
+ }
+ switch (mac80211_queue_index) {
+ case 0:
+ hw_queue_index = RTL_TXQ_VO;
+ break;
+ case 1:
+ hw_queue_index = RTL_TXQ_VI;
+ break;
+ case 2:
+ hw_queue_index = RTL_TXQ_BE;
+ break;
+ case 3:
+ hw_queue_index = RTL_TXQ_BK;
+ break;
+ default:
+ hw_queue_index = RTL_TXQ_BE;
+ RT_ASSERT(false, ("QSLT_BE queue, skb_queue:%d\n",
+ mac80211_queue_index));
+ break;
+ }
+out:
+ return hw_queue_index;
+}
+
+static enum rtl_desc_qsel _rtl8192cu_mq_to_descq(struct ieee80211_hw *hw,
+ __le16 fc, u16 mac80211_queue_index)
+{
+ enum rtl_desc_qsel qsel;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (unlikely(ieee80211_is_beacon(fc))) {
+ qsel = QSLT_BEACON;
+ goto out;
+ }
+ if (ieee80211_is_mgmt(fc)) {
+ qsel = QSLT_MGNT;
+ goto out;
+ }
+ switch (mac80211_queue_index) {
+ case 0: /* VO */
+ qsel = QSLT_VO;
+ RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+ ("VO queue, set qsel = 0x%x\n", QSLT_VO));
+ break;
+ case 1: /* VI */
+ qsel = QSLT_VI;
+ RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+ ("VI queue, set qsel = 0x%x\n", QSLT_VI));
+ break;
+ case 3: /* BK */
+ qsel = QSLT_BK;
+ RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+ ("BK queue, set qsel = 0x%x\n", QSLT_BK));
+ break;
+ case 2: /* BE */
+ default:
+ qsel = QSLT_BE;
+ RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
+ ("BE queue, set qsel = 0x%x\n", QSLT_BE));
+ break;
+ }
+out:
+ return qsel;
+}
+
+/* =============================================================== */
+
+/*----------------------------------------------------------------------
+ *
+ * Rx handler
+ *
+ *---------------------------------------------------------------------- */
+bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *p_desc, struct sk_buff *skb)
+{
+ struct rx_fwinfo_92c *p_drvinfo;
+ struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
+ u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc);
+
+ stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+ stats->rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(pdesc) *
+ RX_DRV_INFO_SIZE_UNIT;
+ stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
+ stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
+ stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ stats->hwerror = (stats->crc | stats->icv);
+ stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+ stats->rate = (u8) GET_RX_DESC_RX_MCS(pdesc);
+ stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+ stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
+ && (GET_RX_DESC_FAGGR(pdesc) == 1));
+ stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+ stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ rx_status->freq = hw->conf.channel->center_freq;
+ rx_status->band = hw->conf.channel->band;
+ if (GET_RX_DESC_CRC32(pdesc))
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (!GET_RX_DESC_SWDEC(pdesc))
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (GET_RX_DESC_BW(pdesc))
+ rx_status->flag |= RX_FLAG_40MHZ;
+ if (GET_RX_DESC_RX_HT(pdesc))
+ rx_status->flag |= RX_FLAG_HT;
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+ if (stats->decrypted)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ rx_status->rate_idx = _rtl92c_rate_mapping(hw,
+ (bool)GET_RX_DESC_RX_HT(pdesc),
+ (u8)GET_RX_DESC_RX_MCS(pdesc),
+ (bool)GET_RX_DESC_PAGGR(pdesc));
+ rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
+ if (phystatus) {
+ p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
+ rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+ p_drvinfo);
+ }
+ /*rx_status->qual = stats->signal; */
+ rx_status->signal = stats->rssi + 10;
+ /*rx_status->noise = -stats->noise; */
+ return true;
+}
+
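+/* The RX descriptor reports the driver-info length in units of 8 bytes. */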
+#define RTL_RX_DRV_INFO_UNIT 8
+
+static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *rx_status =
+ (struct ieee80211_rx_status *)IEEE80211_SKB_RXCB(skb);
+ u32 skb_len, pkt_len, drvinfo_len;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 *rxdesc;
+ struct rtl_stats stats = {
+ .signal = 0,
+ .noise = -98,
+ .rate = 0,
+ };
+ struct rx_fwinfo_92c *p_drvinfo;
+ bool bv;
+ __le16 fc;
+ struct ieee80211_hdr *hdr;
+
+ memset(rx_status, 0, sizeof(*rx_status));
+ rxdesc = skb->data;
+ skb_len = skb->len;
+ drvinfo_len = (GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RTL_RX_DRV_INFO_UNIT);
+ pkt_len = GET_RX_DESC_PKT_LEN(rxdesc);
+ /* TODO: Error recovery. drop this skb or something. */
+ WARN_ON(skb_len < (pkt_len + RTL_RX_DESC_SIZE + drvinfo_len));
+ stats.length = (u16) GET_RX_DESC_PKT_LEN(rxdesc);
+ stats.rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(rxdesc) *
+ RX_DRV_INFO_SIZE_UNIT;
+ stats.rx_bufshift = (u8) (GET_RX_DESC_SHIFT(rxdesc) & 0x03);
+ stats.icv = (u16) GET_RX_DESC_ICV(rxdesc);
+ stats.crc = (u16) GET_RX_DESC_CRC32(rxdesc);
+ stats.hwerror = (stats.crc | stats.icv);
+ stats.decrypted = !GET_RX_DESC_SWDEC(rxdesc);
+ stats.rate = (u8) GET_RX_DESC_RX_MCS(rxdesc);
+ stats.shortpreamble = (u16) GET_RX_DESC_SPLCP(rxdesc);
+ stats.isampdu = (bool) ((GET_RX_DESC_PAGGR(rxdesc) == 1)
+ && (GET_RX_DESC_FAGGR(rxdesc) == 1));
+ stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc);
+ stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc);
+ /* TODO: is center_freq changed when doing scan? */
+ /* TODO: Shall we add protection or just skip those two step? */
+ rx_status->freq = hw->conf.channel->center_freq;
+ rx_status->band = hw->conf.channel->band;
+ if (GET_RX_DESC_CRC32(rxdesc))
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (!GET_RX_DESC_SWDEC(rxdesc))
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (GET_RX_DESC_BW(rxdesc))
+ rx_status->flag |= RX_FLAG_40MHZ;
+ if (GET_RX_DESC_RX_HT(rxdesc))
+ rx_status->flag |= RX_FLAG_HT;
+ /* Data rate */
+ rx_status->rate_idx = _rtl92c_rate_mapping(hw,
+ (bool)GET_RX_DESC_RX_HT(rxdesc),
+ (u8)GET_RX_DESC_RX_MCS(rxdesc),
+ (bool)GET_RX_DESC_PAGGR(rxdesc)
+ );
+ /* There is a phy status after this rx descriptor. */
+ if (GET_RX_DESC_PHY_STATUS(rxdesc)) {
+ p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE);
+ rtl92c_translate_rx_signal_stuff(hw, skb, &stats,
+ (struct rx_desc_92c *)rxdesc, p_drvinfo);
+ }
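+ /* Strip the RX descriptor and driver info so skb->data starts at the
+ * 802.11 header. */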
+ skb_pull(skb, (drvinfo_len + RTL_RX_DESC_SIZE));
+ hdr = (struct ieee80211_hdr *)(skb->data);
+ fc = hdr->frame_control;
+ bv = ieee80211_is_probe_resp(fc);
+ if (bv)
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Got probe response frame.\n"));
+ if (ieee80211_is_beacon(fc))
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Got beacon frame.\n"));
+ if (ieee80211_is_data(fc))
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Got data frame.\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Fram: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:"
+ "0x%02X\n", fc, (u32)hdr->addr1[0], (u32)hdr->addr1[1],
+ (u32)hdr->addr1[2], (u32)hdr->addr1[3], (u32)hdr->addr1[4],
+ (u32)hdr->addr1[5]));
+ /* rx_status already points at IEEE80211_SKB_RXCB(skb); no copy back is needed. */
+ ieee80211_rx_irqsafe(hw, skb);
+}
+
+void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ _rtl_rx_process(hw, skb);
+}
+
+void rtl8192c_rx_segregate_hdl(
+ struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct sk_buff_head *skb_list)
+{
+}
+
+/*----------------------------------------------------------------------
+ *
+ * Tx handler
+ *
+ *---------------------------------------------------------------------- */
+void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+}
+
+int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
+struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *hw,
+ struct sk_buff_head *list)
+{
+ return skb_dequeue(list);
+}
+
+/*======================================== trx ===============================*/
+
+static void _rtl_fill_usb_tx_desc(u8 *txdesc)
+{
+ SET_TX_DESC_OWN(txdesc, 1);
+ SET_TX_DESC_LAST_SEG(txdesc, 1);
+ SET_TX_DESC_FIRST_SEG(txdesc, 1);
+}
+
+/*
+ * TX descriptor checksum used by the hardware for recovery: XOR of the first
+ * 16 16-bit words of the descriptor, computed with the checksum field cleared.
+ */
+static void _rtl_tx_desc_checksum(u8 *txdesc)
+{
+ u16 *ptr = (u16 *)txdesc;
+ u16 checksum = 0;
+ u32 index;
+
+ /* Clear first */
+ SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
+ for (index = 0; index < 16; index++)
+ checksum = checksum ^ (*(ptr + index));
+ SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
+}
+
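+/*
+ * Build the USB TX descriptor: push RTL_TX_HEADER_SIZE bytes in front of the
+ * frame and fill in the rate, bandwidth, aggregation, security and queue
+ * fields derived from the mac80211 control info.
+ */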
+void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_tx_info *info, struct sk_buff *skb,
+ unsigned int queue_index)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool defaultadapter = true;
+ struct ieee80211_sta *sta;
+ struct rtl_tcb_desc tcb_desc;
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ u16 seq_number;
+ __le16 fc = hdr->frame_control;
+ u8 rate_flag = info->control.rates[0].flags;
+ u16 pktlen = skb->len;
+ enum rtl_desc_qsel fw_qsel = _rtl8192cu_mq_to_descq(hw, fc,
+ skb_get_queue_mapping(skb));
+ u8 *txdesc;
+
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+ rtl_get_tcb_desc(hw, info, skb, &tcb_desc);
+ txdesc = (u8 *)skb_push(skb, RTL_TX_HEADER_SIZE);
+ memset(txdesc, 0, RTL_TX_HEADER_SIZE);
+ SET_TX_DESC_PKT_SIZE(txdesc, pktlen);
+ SET_TX_DESC_LINIP(txdesc, 0);
+ SET_TX_DESC_PKT_OFFSET(txdesc, RTL_DUMMY_OFFSET);
+ SET_TX_DESC_OFFSET(txdesc, RTL_TX_HEADER_SIZE);
+ SET_TX_DESC_TX_RATE(txdesc, tcb_desc.hw_rate);
+ if (tcb_desc.use_shortgi || tcb_desc.use_shortpreamble)
+ SET_TX_DESC_DATA_SHORTGI(txdesc, 1);
+ if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
+ info->flags & IEEE80211_TX_CTL_AMPDU) {
+ SET_TX_DESC_AGG_ENABLE(txdesc, 1);
+ SET_TX_DESC_MAX_AGG_NUM(txdesc, 0x14);
+ } else {
+ SET_TX_DESC_AGG_BREAK(txdesc, 1);
+ }
+ SET_TX_DESC_SEQ(txdesc, seq_number);
+ SET_TX_DESC_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable &&
+ !tcb_desc.cts_enable) ? 1 : 0));
+ SET_TX_DESC_HW_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable ||
+ tcb_desc.cts_enable) ? 1 : 0));
+ SET_TX_DESC_CTS2SELF(txdesc, ((tcb_desc.cts_enable) ? 1 : 0));
+ SET_TX_DESC_RTS_STBC(txdesc, ((tcb_desc.rts_stbc) ? 1 : 0));
+ SET_TX_DESC_RTS_RATE(txdesc, tcb_desc.rts_rate);
+ SET_TX_DESC_RTS_BW(txdesc, 0);
+ SET_TX_DESC_RTS_SC(txdesc, tcb_desc.rts_sc);
+ SET_TX_DESC_RTS_SHORT(txdesc,
+ ((tcb_desc.rts_rate <= DESC92C_RATE54M) ?
+ (tcb_desc.rts_use_shortpreamble ? 1 : 0)
+ : (tcb_desc.rts_use_shortgi ? 1 : 0)));
+ if (mac->bw_40) {
+ if (tcb_desc.packet_bw) {
+ SET_TX_DESC_DATA_BW(txdesc, 1);
+ SET_TX_DESC_DATA_SC(txdesc, 3);
+ } else {
+ SET_TX_DESC_DATA_BW(txdesc, 0);
+ if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
+ SET_TX_DESC_DATA_SC(txdesc,
+ mac->cur_40_prime_sc);
+ }
+ } else {
+ SET_TX_DESC_DATA_BW(txdesc, 0);
+ SET_TX_DESC_DATA_SC(txdesc, 0);
+ }
+ rcu_read_lock();
+ sta = ieee80211_find_sta(mac->vif, mac->bssid);
+ if (sta) {
+ u8 ampdu_density = sta->ht_cap.ampdu_density;
+ SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density);
+ }
+ rcu_read_unlock();
+ if (info->control.hw_key) {
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ SET_TX_DESC_SEC_TYPE(txdesc, 0x1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ SET_TX_DESC_SEC_TYPE(txdesc, 0x3);
+ break;
+ default:
+ SET_TX_DESC_SEC_TYPE(txdesc, 0x0);
+ break;
+ }
+ }
+ SET_TX_DESC_PKT_ID(txdesc, 0);
+ SET_TX_DESC_QUEUE_SEL(txdesc, fw_qsel);
+ SET_TX_DESC_DATA_RATE_FB_LIMIT(txdesc, 0x1F);
+ SET_TX_DESC_RTS_RATE_FB_LIMIT(txdesc, 0xF);
+ SET_TX_DESC_DISABLE_FB(txdesc, 0);
+ SET_TX_DESC_USE_RATE(txdesc, tcb_desc.use_driver_rate ? 1 : 0);
+ if (ieee80211_is_data_qos(fc)) {
+ if (mac->rdg_en) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ ("Enable RDG function.\n"));
+ SET_TX_DESC_RDG_ENABLE(txdesc, 1);
+ SET_TX_DESC_HTC(txdesc, 1);
+ }
+ }
+ if (rtlpriv->dm.useramask) {
+ SET_TX_DESC_RATE_ID(txdesc, tcb_desc.ratr_index);
+ SET_TX_DESC_MACID(txdesc, tcb_desc.mac_id);
+ } else {
+ SET_TX_DESC_RATE_ID(txdesc, 0xC + tcb_desc.ratr_index);
+ SET_TX_DESC_MACID(txdesc, tcb_desc.ratr_index);
+ }
+ if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
+ ppsc->fwctrl_lps) {
+ SET_TX_DESC_HWSEQ_EN(txdesc, 1);
+ SET_TX_DESC_PKT_ID(txdesc, 8);
+ if (!defaultadapter)
+ SET_TX_DESC_QOS(txdesc, 1);
+ }
+ if (ieee80211_has_morefrags(fc))
+ SET_TX_DESC_MORE_FRAG(txdesc, 1);
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ SET_TX_DESC_BMC(txdesc, 1);
+ _rtl_fill_usb_tx_desc(txdesc);
+ _rtl_tx_desc_checksum(txdesc);
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, (" %s ==>\n", __func__));
+}
+
+void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pDesc,
+ u32 buffer_len, bool bIsPsPoll)
+{
+ /* Clear all status */
+ memset(pDesc, 0, RTL_TX_HEADER_SIZE);
+ SET_TX_DESC_FIRST_SEG(pDesc, 1); /* bFirstSeg; */
+ SET_TX_DESC_LAST_SEG(pDesc, 1); /* bLastSeg; */
+ SET_TX_DESC_OFFSET(pDesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */
+ SET_TX_DESC_PKT_SIZE(pDesc, buffer_len); /* Buffer size + command hdr */
+ SET_TX_DESC_QUEUE_SEL(pDesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */
+	/* Set NAVUSEHDR to prevent the Ps-poll AID field from being changed
+	 * to an erroneous value by Hw. */
+ if (bIsPsPoll) {
+ SET_TX_DESC_NAV_USE_HDR(pDesc, 1);
+ } else {
+ SET_TX_DESC_HWSEQ_EN(pDesc, 1); /* Hw set sequence number */
+ SET_TX_DESC_PKT_ID(pDesc, 0x100); /* set bit3 to 1. */
+ }
+ SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */
+ SET_TX_DESC_OWN(pDesc, 1);
+ SET_TX_DESC_TX_RATE(pDesc, DESC92C_RATE1M);
+ _rtl_tx_desc_checksum(pDesc);
+}
+
+void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool firstseg,
+ bool lastseg, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 fw_queue = QSLT_BEACON;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+ __le16 fc = hdr->frame_control;
+
+ memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE);
+ if (firstseg)
+ SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE);
+ SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+ SET_TX_DESC_SEQ(pdesc, 0);
+ SET_TX_DESC_LINIP(pdesc, 0);
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+ SET_TX_DESC_FIRST_SEG(pdesc, 1);
+ SET_TX_DESC_LAST_SEG(pdesc, 1);
+ SET_TX_DESC_RATE_ID(pdesc, 7);
+ SET_TX_DESC_MACID(pdesc, 0);
+ SET_TX_DESC_OWN(pdesc, 1);
+ SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_FIRST_SEG(pdesc, 1);
+ SET_TX_DESC_LAST_SEG(pdesc, 1);
+ SET_TX_DESC_OFFSET(pdesc, 0x20);
+ SET_TX_DESC_USE_RATE(pdesc, 1);
+ if (!ieee80211_is_data_qos(fc)) {
+ SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+ SET_TX_DESC_PKT_ID(pdesc, 8);
+ }
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n",
+ pdesc, RTL_TX_DESC_SIZE);
+}
+
+bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ return true;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
new file mode 100644
index 000000000000..b396d46edbb7
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -0,0 +1,430 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CU_TRX_H__
+#define __RTL92CU_TRX_H__
+
+#define RTL92C_USB_BULK_IN_NUM 1
+#define RTL92C_NUM_RX_URBS 8
+#define RTL92C_NUM_TX_URBS 32
+
+#define RTL92C_SIZE_MAX_RX_BUFFER 15360 /* 8192 */
+#define RX_DRV_INFO_SIZE_UNIT 8
+
+enum usb_rx_agg_mode {
+ USB_RX_AGG_DISABLE,
+ USB_RX_AGG_DMA,
+ USB_RX_AGG_USB,
+ USB_RX_AGG_DMA_USB
+};
+
+#define TX_SELE_HQ BIT(0) /* High Queue */
+#define TX_SELE_LQ BIT(1) /* Low Queue */
+#define TX_SELE_NQ BIT(2) /* Normal Queue */
+
+#define RTL_USB_TX_AGG_NUM_DESC 5
+
+#define RTL_USB_RX_AGG_PAGE_NUM 4
+#define RTL_USB_RX_AGG_PAGE_TIMEOUT 3
+
+#define RTL_USB_RX_AGG_BLOCK_NUM 5
+#define RTL_USB_RX_AGG_BLOCK_TIMEOUT 3
+
+/*======================== rx status =========================================*/
+
+struct rx_drv_info_92c {
+	/*
+	 * Driver info contains PHY status and other variable-size info.
+	 * The PHY status content is laid out as below.
+	 */
+
+ /* DWORD 0 */
+ u8 gain_trsw[4];
+
+ /* DWORD 1 */
+ u8 pwdb_all;
+ u8 cfosho[4];
+
+ /* DWORD 2 */
+ u8 cfotail[4];
+
+ /* DWORD 3 */
+ s8 rxevm[2];
+ s8 rxsnr[4];
+
+ /* DWORD 4 */
+ u8 pdsnr[2];
+
+ /* DWORD 5 */
+ u8 csi_current[2];
+ u8 csi_target[2];
+
+ /* DWORD 6 */
+ u8 sigevm;
+ u8 max_ex_pwr;
+ u8 ex_intf_flag:1;
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 reserve:4;
+} __packed;
+
+/* Define a macro that takes a le32 word, converts it to host ordering,
+ * right shifts by a specified count, creates a mask of the specified
+ * bit count, and extracts that number of bits.
+ */
+
+#define SHIFT_AND_MASK_LE(__pdesc, __shift, __bits) \
+ ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
+ BIT_LEN_MASK_32(__bits))
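+
+/*
+ * For example, GET_RX_DESC_PKT_LEN() below expands to
+ * SHIFT_AND_MASK_LE(__rxdesc, 0, 14) and so returns the low 14 bits of the
+ * first little-endian DWORD of the RX descriptor (assuming BIT_LEN_MASK_32(n)
+ * yields an n-bit wide mask).
+ */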
+
+/* Define a macro that clears a bit field in an le32 word and
+ * sets the specified value into that bit field. The resulting
+ * value remains in le32 ordering; however, it is properly converted
+ * to host ordering for the clear and set operations before conversion
+ * back to le32.
+ */
+
+#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
+ (*(__le32 *)(__pdesc) = \
+ (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
+ (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
+	 (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))))
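+
+/*
+ * For example, SET_TX_DESC_QUEUE_SEL(txdesc, qsel) below expands to
+ * SET_BITS_OFFSET_LE(txdesc + 4, 8, 5, qsel): it reads DWORD1 of the TX
+ * descriptor, clears bits 8-12, ORs in the low 5 bits of qsel and writes the
+ * DWORD back in little-endian order.
+ */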
+
+/* macros to read various fields in RX descriptor */
+
+/* DWORD 0 */
+#define GET_RX_DESC_PKT_LEN(__rxdesc) \
+ SHIFT_AND_MASK_LE((__rxdesc), 0, 14)
+#define GET_RX_DESC_CRC32(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 14, 1)
+#define GET_RX_DESC_ICV(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 15, 1)
+#define GET_RX_DESC_DRVINFO_SIZE(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 16, 4)
+#define GET_RX_DESC_SECURITY(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 20, 3)
+#define GET_RX_DESC_QOS(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 23, 1)
+#define GET_RX_DESC_SHIFT(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 24, 2)
+#define GET_RX_DESC_PHY_STATUS(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 26, 1)
+#define GET_RX_DESC_SWDEC(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 27, 1)
+#define GET_RX_DESC_LAST_SEG(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 28, 1)
+#define GET_RX_DESC_FIRST_SEG(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 29, 1)
+#define GET_RX_DESC_EOR(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 30, 1)
+#define GET_RX_DESC_OWN(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc, 31, 1)
+
+/* DWORD 1 */
+#define GET_RX_DESC_MACID(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 0, 5)
+#define GET_RX_DESC_TID(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 5, 4)
+#define GET_RX_DESC_PAGGR(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 14, 1)
+#define GET_RX_DESC_FAGGR(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 15, 1)
+#define GET_RX_DESC_A1_FIT(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 16, 4)
+#define GET_RX_DESC_A2_FIT(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 20, 4)
+#define GET_RX_DESC_PAM(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 24, 1)
+#define GET_RX_DESC_PWR(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 25, 1)
+#define GET_RX_DESC_MORE_DATA(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 26, 1)
+#define GET_RX_DESC_MORE_FRAG(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 27, 1)
+#define GET_RX_DESC_TYPE(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 28, 2)
+#define GET_RX_DESC_MC(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 30, 1)
+#define GET_RX_DESC_BC(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+4, 31, 1)
+
+/* DWORD 2 */
+#define GET_RX_DESC_SEQ(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+8, 0, 12)
+#define GET_RX_DESC_FRAG(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+8, 12, 4)
+#define GET_RX_DESC_USB_AGG_PKTNUM(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+8, 16, 8)
+#define GET_RX_DESC_NEXT_IND(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+8, 30, 1)
+
+/* DWORD 3 */
+#define GET_RX_DESC_RX_MCS(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 0, 6)
+#define GET_RX_DESC_RX_HT(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 6, 1)
+#define GET_RX_DESC_AMSDU(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 7, 1)
+#define GET_RX_DESC_SPLCP(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 8, 1)
+#define GET_RX_DESC_BW(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 9, 1)
+#define GET_RX_DESC_HTC(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 10, 1)
+#define GET_RX_DESC_TCP_CHK_RPT(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 11, 1)
+#define GET_RX_DESC_IP_CHK_RPT(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 12, 1)
+#define GET_RX_DESC_TCP_CHK_VALID(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 13, 1)
+#define GET_RX_DESC_HWPC_ERR(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 14, 1)
+#define GET_RX_DESC_HWPC_IND(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 15, 1)
+#define GET_RX_DESC_IV0(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+12, 16, 16)
+
+/* DWORD 4 */
+#define GET_RX_DESC_IV1(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+16, 0, 32)
+
+/* DWORD 5 */
+#define GET_RX_DESC_TSFL(__rxdesc) \
+ SHIFT_AND_MASK_LE(__rxdesc+20, 0, 32)
+
+/*======================= tx desc ============================================*/
+
+/* macros to set various fields in TX descriptor */
+
+/* Dword 0 */
+#define SET_TX_DESC_PKT_SIZE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 0, 16, __value)
+#define SET_TX_DESC_OFFSET(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 16, 8, __value)
+#define SET_TX_DESC_BMC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 24, 1, __value)
+#define SET_TX_DESC_HTC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 25, 1, __value)
+#define SET_TX_DESC_LAST_SEG(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 26, 1, __value)
+#define SET_TX_DESC_FIRST_SEG(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 27, 1, __value)
+#define SET_TX_DESC_LINIP(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 28, 1, __value)
+#define SET_TX_DESC_NO_ACM(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 29, 1, __value)
+#define SET_TX_DESC_GF(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 30, 1, __value)
+#define SET_TX_DESC_OWN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc, 31, 1, __value)
+
+
+/* Dword 1 */
+#define SET_TX_DESC_MACID(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 0, 5, __value)
+#define SET_TX_DESC_AGG_ENABLE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 5, 1, __value)
+#define SET_TX_DESC_AGG_BREAK(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 6, 1, __value)
+#define SET_TX_DESC_RDG_ENABLE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 7, 1, __value)
+#define SET_TX_DESC_QUEUE_SEL(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 8, 5, __value)
+#define SET_TX_DESC_RDG_NAV_EXT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 13, 1, __value)
+#define SET_TX_DESC_LSIG_TXOP_EN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 14, 1, __value)
+#define SET_TX_DESC_PIFS(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 15, 1, __value)
+#define SET_TX_DESC_RATE_ID(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
+#define SET_TX_DESC_RA_BRSR_ID(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
+#define SET_TX_DESC_NAV_USE_HDR(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 20, 1, __value)
+#define SET_TX_DESC_EN_DESC_ID(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 21, 1, __value)
+#define SET_TX_DESC_SEC_TYPE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 22, 2, __value)
+#define SET_TX_DESC_PKT_OFFSET(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+4, 26, 5, __value)
+
+/* Dword 2 */
+#define SET_TX_DESC_RTS_RC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 0, 6, __value)
+#define SET_TX_DESC_DATA_RC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 6, 6, __value)
+#define SET_TX_DESC_BAR_RTY_TH(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 14, 2, __value)
+#define SET_TX_DESC_MORE_FRAG(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 17, 1, __value)
+#define SET_TX_DESC_RAW(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 18, 1, __value)
+#define SET_TX_DESC_CCX(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 19, 1, __value)
+#define SET_TX_DESC_AMPDU_DENSITY(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 20, 3, __value)
+#define SET_TX_DESC_ANTSEL_A(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 24, 1, __value)
+#define SET_TX_DESC_ANTSEL_B(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 25, 1, __value)
+#define SET_TX_DESC_TX_ANT_CCK(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 26, 2, __value)
+#define SET_TX_DESC_TX_ANTL(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 28, 2, __value)
+#define SET_TX_DESC_TX_ANT_HT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+8, 30, 2, __value)
+
+/* Dword 3 */
+#define SET_TX_DESC_NEXT_HEAP_PAGE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+12, 0, 8, __value)
+#define SET_TX_DESC_TAIL_PAGE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+12, 8, 8, __value)
+#define SET_TX_DESC_SEQ(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+12, 16, 12, __value)
+#define SET_TX_DESC_PKT_ID(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+12, 28, 4, __value)
+
+/* Dword 4 */
+#define SET_TX_DESC_RTS_RATE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 0, 5, __value)
+#define SET_TX_DESC_AP_DCFE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 5, 1, __value)
+#define SET_TX_DESC_QOS(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 6, 1, __value)
+#define SET_TX_DESC_HWSEQ_EN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 7, 1, __value)
+#define SET_TX_DESC_USE_RATE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 8, 1, __value)
+#define SET_TX_DESC_DISABLE_RTS_FB(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 9, 1, __value)
+#define SET_TX_DESC_DISABLE_FB(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 10, 1, __value)
+#define SET_TX_DESC_CTS2SELF(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 11, 1, __value)
+#define SET_TX_DESC_RTS_ENABLE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 12, 1, __value)
+#define SET_TX_DESC_HW_RTS_ENABLE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 13, 1, __value)
+#define SET_TX_DESC_WAIT_DCTS(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 18, 1, __value)
+#define SET_TX_DESC_CTS2AP_EN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 19, 1, __value)
+#define SET_TX_DESC_DATA_SC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 20, 2, __value)
+#define SET_TX_DESC_DATA_STBC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 22, 2, __value)
+#define SET_TX_DESC_DATA_SHORT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 24, 1, __value)
+#define SET_TX_DESC_DATA_BW(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 25, 1, __value)
+#define SET_TX_DESC_RTS_SHORT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 26, 1, __value)
+#define SET_TX_DESC_RTS_BW(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 27, 1, __value)
+#define SET_TX_DESC_RTS_SC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 28, 2, __value)
+#define SET_TX_DESC_RTS_STBC(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+16, 30, 2, __value)
+
+/* Dword 5 */
+#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
+#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
+#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+20, 8, 5, __value)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+20, 13, 4, __value)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+20, 17, 1, __value)
+#define SET_TX_DESC_DATA_RETRY_LIMIT(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+20, 18, 6, __value)
+#define SET_TX_DESC_USB_TXAGG_NUM(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+20, 24, 8, __value)
+
+/* Dword 6 */
+#define SET_TX_DESC_TXAGC_A(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 0, 5, __value)
+#define SET_TX_DESC_TXAGC_B(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 5, 5, __value)
+#define SET_TX_DESC_USB_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 10, 1, __value)
+#define SET_TX_DESC_MAX_AGG_NUM(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 11, 5, __value)
+#define SET_TX_DESC_MCSG1_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 16, 4, __value)
+#define SET_TX_DESC_MCSG2_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 20, 4, __value)
+#define SET_TX_DESC_MCSG3_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 24, 4, __value)
+#define SET_TX_DESC_MCSG7_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+24, 28, 4, __value)
+
+/* Dword 7 */
+#define SET_TX_DESC_TX_DESC_CHECKSUM(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+28, 0, 16, __value)
+#define SET_TX_DESC_MCSG4_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+28, 16, 4, __value)
+#define SET_TX_DESC_MCSG5_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+28, 20, 4, __value)
+#define SET_TX_DESC_MCSG6_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+28, 24, 4, __value)
+#define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value) \
+ SET_BITS_OFFSET_LE(__txdesc+28, 28, 4, __value)
+
+
+int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw);
+u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index);
+bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *p_desc, struct sk_buff *skb);
+void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rtl8192c_rx_segregate_hdl(struct ieee80211_hw *, struct sk_buff *,
+ struct sk_buff_head *);
+void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb);
+int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
+ struct sk_buff *skb);
+struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
+ struct sk_buff_head *);
+void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_tx_info *info, struct sk_buff *skb,
+ unsigned int queue_index);
+void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pDesc,
+ u32 buffer_len, bool bIsPsPoll);
+void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool b_firstseg,
+ bool b_lastseg, struct sk_buff *skb);
+bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
new file mode 100644
index 000000000000..a4b2613d6a8c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -0,0 +1,1035 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+#include <linux/usb.h>
+#include "core.h"
+#include "wifi.h"
+#include "usb.h"
+#include "base.h"
+#include "ps.h"
+
+#define REALTEK_USB_VENQT_READ 0xC0
+#define REALTEK_USB_VENQT_WRITE 0x40
+#define REALTEK_USB_VENQT_CMD_REQ 0x05
+#define REALTEK_USB_VENQT_CMD_IDX 0x00
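+
+/*
+ * REALTEK_USB_VENQT_READ/WRITE are the standard USB control-transfer
+ * bmRequestType values for vendor requests addressed to the device:
+ * 0xC0 is USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE and
+ * 0x40 is USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE.
+ */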
+
+#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
+
+static void usbctrl_async_callback(struct urb *urb)
+{
+ if (urb)
+ kfree(urb->context);
+}
+
+static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
+ u16 value, u16 index, void *pdata,
+ u16 len)
+{
+ int rc;
+ unsigned int pipe;
+ u8 reqtype;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb;
+ struct rtl819x_async_write_data {
+ u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE];
+ struct usb_ctrlrequest dr;
+ } *buf;
+
+ pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+ reqtype = REALTEK_USB_VENQT_WRITE;
+
+ buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ dr = &buf->dr;
+
+ dr->bRequestType = reqtype;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16(value);
+ dr->wIndex = cpu_to_le16(index);
+ dr->wLength = cpu_to_le16(len);
+ memcpy(buf, pdata, len);
+ usb_fill_control_urb(urb, udev, pipe,
+ (unsigned char *)dr, buf, len,
+ usbctrl_async_callback, buf);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc < 0)
+ kfree(buf);
+ usb_free_urb(urb);
+ return rc;
+}
+
+static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
+ u16 value, u16 index, void *pdata,
+ u16 len)
+{
+ unsigned int pipe;
+ int status;
+ u8 reqtype;
+
+ pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
+ reqtype = REALTEK_USB_VENQT_READ;
+
+ status = usb_control_msg(udev, pipe, request, reqtype, value, index,
+ pdata, len, 0); /* max. timeout */
+
+ if (status < 0)
+ printk(KERN_ERR "reg 0x%x, usbctrl_vendorreq TimeOut! "
+ "status:0x%x value=0x%x\n", value, status,
+ *(u32 *)pdata);
+ return status;
+}
+
+static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
+{
+ u8 request;
+ u16 wvalue;
+ u16 index;
+ u32 *data;
+ u32 ret;
+
+ data = kmalloc(sizeof(u32), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ request = REALTEK_USB_VENQT_CMD_REQ;
+ index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+
+ wvalue = (u16)addr;
+ _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
+ ret = *data;
+ kfree(data);
+ return ret;
+}
+
+static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ return (u8)_usb_read_sync(to_usb_device(dev), addr, 1);
+}
+
+static u16 _usb_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ return (u16)_usb_read_sync(to_usb_device(dev), addr, 2);
+}
+
+static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ return _usb_read_sync(to_usb_device(dev), addr, 4);
+}
+
+static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
+ u16 len)
+{
+ u8 request;
+ u16 wvalue;
+ u16 index;
+ u32 data;
+
+ request = REALTEK_USB_VENQT_CMD_REQ;
+ index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+ wvalue = (u16)(addr&0x0000ffff);
+ data = val;
+ _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
+ len);
+}
+
+static void _usb_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ _usb_write_async(to_usb_device(dev), addr, val, 1);
+}
+
+static void _usb_write16_async(struct rtl_priv *rtlpriv, u32 addr, u16 val)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ _usb_write_async(to_usb_device(dev), addr, val, 2);
+}
+
+static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ _usb_write_async(to_usb_device(dev), addr, val, 4);
+}
+
+static int _usb_nbytes_read_write(struct usb_device *udev, bool read, u32 addr,
+ u16 len, u8 *pdata)
+{
+ int status;
+ u8 request;
+ u16 wvalue;
+ u16 index;
+
+ request = REALTEK_USB_VENQT_CMD_REQ;
+ index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+ wvalue = (u16)addr;
+ if (read)
+ status = _usbctrl_vendorreq_sync_read(udev, request, wvalue,
+ index, pdata, len);
+ else
+ status = _usbctrl_vendorreq_async_write(udev, request, wvalue,
+ index, pdata, len);
+ return status;
+}
+
+static int _usb_readN_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len,
+ u8 *pdata)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ return _usb_nbytes_read_write(to_usb_device(dev), true, addr, len,
+ pdata);
+}
+
+static int _usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, u16 len,
+ u8 *pdata)
+{
+ struct device *dev = rtlpriv->io.dev;
+
+ return _usb_nbytes_read_write(to_usb_device(dev), false, addr, len,
+ pdata);
+}
+
+static void _rtl_usb_io_handler_init(struct device *dev,
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->io.dev = dev;
+ mutex_init(&rtlpriv->io.bb_mutex);
+ rtlpriv->io.write8_async = _usb_write8_async;
+ rtlpriv->io.write16_async = _usb_write16_async;
+ rtlpriv->io.write32_async = _usb_write32_async;
+ rtlpriv->io.writeN_async = _usb_writeN_async;
+ rtlpriv->io.read8_sync = _usb_read8_sync;
+ rtlpriv->io.read16_sync = _usb_read16_sync;
+ rtlpriv->io.read32_sync = _usb_read32_sync;
+ rtlpriv->io.readN_sync = _usb_readN_sync;
+}
+
+static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ mutex_destroy(&rtlpriv->io.bb_mutex);
+}
+
+/**
+ *
+ * Default aggregation handler. Do nothing and just return the oldest skb.
+ */
+static struct sk_buff *_none_usb_tx_aggregate_hdl(struct ieee80211_hw *hw,
+ struct sk_buff_head *list)
+{
+ return skb_dequeue(list);
+}
+
+#define IS_HIGH_SPEED_USB(udev) \
+ ((USB_SPEED_HIGH == (udev)->speed) ? true : false)
+
+static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
+{
+ u32 i;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ rtlusb->max_bulk_out_size = IS_HIGH_SPEED_USB(rtlusb->udev)
+ ? USB_HIGH_SPEED_BULK_SIZE
+ : USB_FULL_SPEED_BULK_SIZE;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("USB Max Bulk-out Size=%d\n",
+ rtlusb->max_bulk_out_size));
+
+ for (i = 0; i < __RTL_TXQ_NUM; i++) {
+ u32 ep_num = rtlusb->ep_map.ep_mapping[i];
+ if (!ep_num) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Invalid endpoint map setting!\n"));
+ return -EINVAL;
+ }
+ }
+
+ rtlusb->usb_tx_post_hdl =
+ rtlpriv->cfg->usb_interface_cfg->usb_tx_post_hdl;
+ rtlusb->usb_tx_cleanup =
+ rtlpriv->cfg->usb_interface_cfg->usb_tx_cleanup;
+ rtlusb->usb_tx_aggregate_hdl =
+ (rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl)
+ ? rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl
+ : &_none_usb_tx_aggregate_hdl;
+
+ init_usb_anchor(&rtlusb->tx_submitted);
+ for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
+ skb_queue_head_init(&rtlusb->tx_skb_queue[i]);
+ init_usb_anchor(&rtlusb->tx_pending[i]);
+ }
+ return 0;
+}
+
+static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+ rtlusb->rx_max_size = rtlpriv->cfg->usb_interface_cfg->rx_max_size;
+ rtlusb->rx_urb_num = rtlpriv->cfg->usb_interface_cfg->rx_urb_num;
+ rtlusb->in_ep = rtlpriv->cfg->usb_interface_cfg->in_ep_num;
+ rtlusb->usb_rx_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_hdl;
+ rtlusb->usb_rx_segregate_hdl =
+ rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl;
+
+ printk(KERN_INFO "rtl8192cu: rx_max_size %d, rx_urb_num %d, in_ep %d\n",
+ rtlusb->rx_max_size, rtlusb->rx_urb_num, rtlusb->in_ep);
+ init_usb_anchor(&rtlusb->rx_submitted);
+ return 0;
+}
+
+static int _rtl_usb_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+ int err;
+ u8 epidx;
+ struct usb_interface *usb_intf = rtlusb->intf;
+ u8 epnums = usb_intf->cur_altsetting->desc.bNumEndpoints;
+
+ rtlusb->out_ep_nums = rtlusb->in_ep_nums = 0;
+ for (epidx = 0; epidx < epnums; epidx++) {
+ struct usb_endpoint_descriptor *pep_desc;
+ pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc;
+
+ if (usb_endpoint_dir_in(pep_desc))
+ rtlusb->in_ep_nums++;
+ else if (usb_endpoint_dir_out(pep_desc))
+ rtlusb->out_ep_nums++;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("USB EP(0x%02x), MaxPacketSize=%d ,Interval=%d.\n",
+ pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
+ pep_desc->bInterval));
+ }
+ if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num)
+		return -EINVAL;
+
+ /* usb endpoint mapping */
+ err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw);
+ rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq;
+ _rtl_usb_init_tx(hw);
+ _rtl_usb_init_rx(hw);
+ return err;
+}
+
+static int _rtl_usb_init_sw(struct ieee80211_hw *hw)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ rtlhal->hw = hw;
+ ppsc->inactiveps = false;
+ ppsc->leisure_ps = false;
+ ppsc->fwctrl_lps = false;
+ ppsc->reg_fwctrl_lps = 3;
+ ppsc->reg_max_lps_awakeintvl = 5;
+ ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;
+
+ /* IBSS */
+ mac->beacon_interval = 100;
+
+ /* AMPDU */
+ mac->min_space_cfg = 0;
+ mac->max_mss_density = 0;
+
+ /* set sane AMPDU defaults */
+ mac->current_ampdu_density = 7;
+ mac->current_ampdu_factor = 3;
+
+ /* QOS */
+ rtlusb->acm_method = eAcmWay2_SW;
+
+ /* IRQ */
+ /* HIMR - turn all on */
+ rtlusb->irq_mask[0] = 0xFFFFFFFF;
+ /* HIMR_EX - turn all on */
+ rtlusb->irq_mask[1] = 0xFFFFFFFF;
+ rtlusb->disableHWSM = true;
+ return 0;
+}
+
+#define __RADIO_TAP_SIZE_RSV 32
+
+static void _rtl_rx_completed(struct urb *urb);
+
+static struct sk_buff *_rtl_prep_rx_urb(struct ieee80211_hw *hw,
+ struct rtl_usb *rtlusb,
+ struct urb *urb,
+ gfp_t gfp_mask)
+{
+ struct sk_buff *skb;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ skb = __dev_alloc_skb((rtlusb->rx_max_size + __RADIO_TAP_SIZE_RSV),
+ gfp_mask);
+ if (!skb) {
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Failed to __dev_alloc_skb!!\n"))
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* reserve some space for mac80211's radiotap */
+ skb_reserve(skb, __RADIO_TAP_SIZE_RSV);
+ usb_fill_bulk_urb(urb, rtlusb->udev,
+ usb_rcvbulkpipe(rtlusb->udev, rtlusb->in_ep),
+ skb->data, min(skb_tailroom(skb),
+ (int)rtlusb->rx_max_size),
+ _rtl_rx_completed, skb);
+
+ _rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);
+ return skb;
+}
+
+#undef __RADIO_TAP_SIZE_RSV
+
+static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 *rxdesc = skb->data;
+ struct ieee80211_hdr *hdr;
+ bool unicast = false;
+ __le16 fc;
+ struct ieee80211_rx_status rx_status = {0};
+ struct rtl_stats stats = {
+ .signal = 0,
+ .noise = -98,
+ .rate = 0,
+ };
+
+ skb_pull(skb, RTL_RX_DESC_SIZE);
+ rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
+ skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
+ hdr = (struct ieee80211_hdr *)(skb->data);
+ fc = hdr->frame_control;
+ if (!stats.crc) {
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+ if (is_broadcast_ether_addr(hdr->addr1)) {
+ /*TODO*/;
+ } else if (is_multicast_ether_addr(hdr->addr1)) {
+ /*TODO*/
+ } else {
+ unicast = true;
+ rtlpriv->stats.rxbytesunicast += skb->len;
+ }
+
+ rtl_is_special_data(hw, skb, false);
+
+ if (ieee80211_is_data(fc)) {
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
+
+ if (unicast)
+ rtlpriv->link_info.num_rx_inperiod++;
+ }
+ }
+}
+
+static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 *rxdesc = skb->data;
+ struct ieee80211_hdr *hdr;
+ bool unicast = false;
+ __le16 fc;
+ struct ieee80211_rx_status rx_status = {0};
+ struct rtl_stats stats = {
+ .signal = 0,
+ .noise = -98,
+ .rate = 0,
+ };
+
+ skb_pull(skb, RTL_RX_DESC_SIZE);
+ rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
+ skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
+ hdr = (struct ieee80211_hdr *)(skb->data);
+ fc = hdr->frame_control;
+ if (!stats.crc) {
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+ if (is_broadcast_ether_addr(hdr->addr1)) {
+ /*TODO*/;
+ } else if (is_multicast_ether_addr(hdr->addr1)) {
+ /*TODO*/
+ } else {
+ unicast = true;
+ rtlpriv->stats.rxbytesunicast += skb->len;
+ }
+
+ rtl_is_special_data(hw, skb, false);
+
+ if (ieee80211_is_data(fc)) {
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
+
+ if (unicast)
+ rtlpriv->link_info.num_rx_inperiod++;
+ }
+ if (likely(rtl_action_proc(hw, skb, false))) {
+ struct sk_buff *uskb = NULL;
+ u8 *pdata;
+
+ uskb = dev_alloc_skb(skb->len + 128);
+ memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
+ sizeof(rx_status));
+ pdata = (u8 *)skb_put(uskb, skb->len);
+ memcpy(pdata, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ ieee80211_rx_irqsafe(hw, uskb);
+ } else {
+ dev_kfree_skb_any(skb);
+ }
+ }
+}
+
+static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct sk_buff *_skb;
+ struct sk_buff_head rx_queue;
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ skb_queue_head_init(&rx_queue);
+ if (rtlusb->usb_rx_segregate_hdl)
+ rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
+ WARN_ON(skb_queue_empty(&rx_queue));
+ while (!skb_queue_empty(&rx_queue)) {
+ _skb = skb_dequeue(&rx_queue);
+ _rtl_usb_rx_process_agg(hw, skb);
+ ieee80211_rx_irqsafe(hw, skb);
+ }
+}
+
+static void _rtl_rx_completed(struct urb *_urb)
+{
+ struct sk_buff *skb = (struct sk_buff *)_urb->context;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
+ struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int err = 0;
+
+ if (unlikely(IS_USB_STOP(rtlusb)))
+ goto free;
+
+ if (likely(0 == _urb->status)) {
+ /* If this code were moved to work queue, would CPU
+ * utilization be improved? NOTE: We shall allocate another skb
+ * and reuse the original one.
+ */
+ skb_put(skb, _urb->actual_length);
+
+ if (likely(!rtlusb->usb_rx_segregate_hdl)) {
+ struct sk_buff *_skb;
+ _rtl_usb_rx_process_noagg(hw, skb);
+ _skb = _rtl_prep_rx_urb(hw, rtlusb, _urb, GFP_ATOMIC);
+ if (IS_ERR(_skb)) {
+ err = PTR_ERR(_skb);
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Can't allocate skb for bulk IN!\n"));
+ return;
+ }
+ skb = _skb;
+		} else {
+ /* TO DO */
+ _rtl_rx_pre_process(hw, skb);
+ printk(KERN_ERR "rtlwifi: rx agg not supported\n");
+ }
+ goto resubmit;
+ }
+
+ switch (_urb->status) {
+ /* disconnect */
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ goto free;
+ default:
+ break;
+ }
+
+resubmit:
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ usb_anchor_urb(_urb, &rtlusb->rx_submitted);
+ err = usb_submit_urb(_urb, GFP_ATOMIC);
+ if (unlikely(err)) {
+ usb_unanchor_urb(_urb);
+ goto free;
+ }
+ return;
+
+free:
+ dev_kfree_skb_irq(skb);
+}
+
+static int _rtl_usb_receive(struct ieee80211_hw *hw)
+{
+ struct urb *urb;
+ struct sk_buff *skb;
+ int err;
+ int i;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ WARN_ON(0 == rtlusb->rx_urb_num);
+ /* 1600 == 1514 + max WLAN header + rtk info */
+ WARN_ON(rtlusb->rx_max_size < 1600);
+
+ for (i = 0; i < rtlusb->rx_urb_num; i++) {
+ err = -ENOMEM;
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Failed to alloc URB!!\n"))
+ goto err_out;
+ }
+
+ skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
+ if (IS_ERR(skb)) {
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Failed to prep_rx_urb!!\n"))
+ err = PTR_ERR(skb);
+ goto err_out;
+ }
+
+ usb_anchor_urb(urb, &rtlusb->rx_submitted);
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err)
+ goto err_out;
+ usb_free_urb(urb);
+ }
+ return 0;
+
+err_out:
+ usb_kill_anchored_urbs(&rtlusb->rx_submitted);
+ return err;
+}
+
+static int rtl_usb_start(struct ieee80211_hw *hw)
+{
+ int err;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ err = rtlpriv->cfg->ops->hw_init(hw);
+ rtl_init_rx_config(hw);
+
+ /* Enable software */
+ SET_USB_START(rtlusb);
+	/* This should be done after adapter start and interrupt enable. */
+ set_hal_start(rtlhal);
+
+ /* Start bulk IN */
+ _rtl_usb_receive(hw);
+
+ return err;
+}
+
+/*======================= tx =========================================*/
+static void rtl_usb_cleanup(struct ieee80211_hw *hw)
+{
+ u32 i;
+ struct sk_buff *_skb;
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct ieee80211_tx_info *txinfo;
+
+ SET_USB_STOP(rtlusb);
+
+ /* clean up rx stuff. */
+ usb_kill_anchored_urbs(&rtlusb->rx_submitted);
+
+ /* clean up tx stuff */
+ for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
+ while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) {
+ rtlusb->usb_tx_cleanup(hw, _skb);
+ txinfo = IEEE80211_SKB_CB(_skb);
+ ieee80211_tx_info_clear_status(txinfo);
+ txinfo->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status_irqsafe(hw, _skb);
+ }
+ usb_kill_anchored_urbs(&rtlusb->tx_pending[i]);
+ }
+ usb_kill_anchored_urbs(&rtlusb->tx_submitted);
+}
+
+/**
+ * We may add more members to struct rtl_usb later; do their deinit here.
+ */
+static void rtl_usb_deinit(struct ieee80211_hw *hw)
+{
+ rtl_usb_cleanup(hw);
+}
+
+static void rtl_usb_stop(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+	/* This should be done after adapter start and interrupt enable. */
+ set_hal_stop(rtlhal);
+	/* Disable software */
+ SET_USB_STOP(rtlusb);
+ rtl_usb_deinit(hw);
+ rtlpriv->cfg->ops->hw_disable(hw);
+}
+
+static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb)
+{
+ int err;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ usb_anchor_urb(_urb, &rtlusb->tx_submitted);
+ err = usb_submit_urb(_urb, GFP_ATOMIC);
+ if (err < 0) {
+ struct sk_buff *skb;
+
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Failed to submit urb.\n"));
+ usb_unanchor_urb(_urb);
+ skb = (struct sk_buff *)_urb->context;
+ kfree_skb(skb);
+ }
+ usb_free_urb(_urb);
+}
+
+static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct ieee80211_tx_info *txinfo;
+
+ rtlusb->usb_tx_post_hdl(hw, urb, skb);
+ skb_pull(skb, RTL_TX_HEADER_SIZE);
+ txinfo = IEEE80211_SKB_CB(skb);
+ ieee80211_tx_info_clear_status(txinfo);
+ txinfo->flags |= IEEE80211_TX_STAT_ACK;
+
+ if (urb->status) {
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Urb has error status 0x%X\n", urb->status));
+ goto out;
+ }
+ /* TODO: statistics */
+out:
+ ieee80211_tx_status_irqsafe(hw, skb);
+ return urb->status;
+}
+
+static void _rtl_tx_complete(struct urb *urb)
+{
+ struct sk_buff *skb = (struct sk_buff *)urb->context;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
+ struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
+ int err;
+
+ if (unlikely(IS_USB_STOP(rtlusb)))
+ return;
+ err = _usb_tx_post(hw, urb, skb);
+ if (err) {
+		/* Ignore error and keep issuing other urbs */
+ return;
+ }
+}
+
+static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
+ struct sk_buff *skb, u32 ep_num)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct urb *_urb;
+
+ WARN_ON(NULL == skb);
+ _urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!_urb) {
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("Can't allocate URB for bulk out!\n"));
+ kfree_skb(skb);
+ return NULL;
+ }
+ _rtl_install_trx_info(rtlusb, skb, ep_num);
+ usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev,
+ ep_num), skb->data, skb->len, _rtl_tx_complete, skb);
+ _urb->transfer_flags |= URB_ZERO_PACKET;
+ return _urb;
+}
+
+static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
+ enum rtl_txq qnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ u32 ep_num;
+ struct urb *_urb = NULL;
+ struct sk_buff *_skb = NULL;
+ struct sk_buff_head *skb_list;
+ struct usb_anchor *urb_list;
+
+ WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
+ if (unlikely(IS_USB_STOP(rtlusb))) {
+ RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
+ ("USB device is stopping...\n"));
+ kfree_skb(skb);
+ return;
+ }
+ ep_num = rtlusb->ep_map.ep_mapping[qnum];
+ skb_list = &rtlusb->tx_skb_queue[ep_num];
+ _skb = skb;
+ _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
+ if (unlikely(!_urb)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't allocate urb. Drop skb!\n"));
+ return;
+ }
+ urb_list = &rtlusb->tx_pending[ep_num];
+ _rtl_submit_tx_urb(hw, _urb);
+}
+
+static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
+ u16 hw_queue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtl_tx_desc *pdesc = NULL;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+ __le16 fc = hdr->frame_control;
+ u8 *pda_addr = hdr->addr1;
+ /* ssn */
+ u8 *qc = NULL;
+ u8 tid = 0;
+ u16 seq_number = 0;
+
+ if (ieee80211_is_mgmt(fc))
+ rtl_tx_mgmt_proc(hw, skb);
+ rtl_action_proc(hw, skb, true);
+ if (is_multicast_ether_addr(pda_addr))
+ rtlpriv->stats.txbytesmulticast += skb->len;
+ else if (is_broadcast_ether_addr(pda_addr))
+ rtlpriv->stats.txbytesbroadcast += skb->len;
+ else
+ rtlpriv->stats.txbytesunicast += skb->len;
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) &
+ IEEE80211_SCTL_SEQ) >> 4;
+ seq_number += 1;
+ seq_number <<= 4;
+ }
+ rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb,
+ hw_queue);
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ if (qc)
+ mac->tids[tid].seq_number = seq_number;
+ }
+ if (ieee80211_is_data(fc))
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
+}
+
+static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+ __le16 fc = hdr->frame_control;
+ u16 hw_queue;
+
+ if (unlikely(is_hal_stop(rtlhal)))
+ goto err_free;
+ hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
+ _rtl_usb_tx_preprocess(hw, skb, hw_queue);
+ _rtl_usb_transmit(hw, skb, hw_queue);
+ return NETDEV_TX_OK;
+
+err_free:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ return false;
+}
+
+static struct rtl_intf_ops rtl_usb_ops = {
+ .adapter_start = rtl_usb_start,
+ .adapter_stop = rtl_usb_stop,
+ .adapter_tx = rtl_usb_tx,
+ .waitq_insert = rtl_usb_tx_chk_waitq_insert,
+};
+
+int __devinit rtl_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int err;
+ struct ieee80211_hw *hw = NULL;
+ struct rtl_priv *rtlpriv = NULL;
+ struct usb_device *udev;
+ struct rtl_usb_priv *usb_priv;
+
+ hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
+ sizeof(struct rtl_usb_priv), &rtl_ops);
+ if (!hw) {
+ RT_ASSERT(false, ("%s : ieee80211 alloc failed\n", __func__));
+ return -ENOMEM;
+ }
+ rtlpriv = hw->priv;
+ SET_IEEE80211_DEV(hw, &intf->dev);
+ udev = interface_to_usbdev(intf);
+ usb_get_dev(udev);
+ usb_priv = rtl_usbpriv(hw);
+ memset(usb_priv, 0, sizeof(*usb_priv));
+ usb_priv->dev.intf = intf;
+ usb_priv->dev.udev = udev;
+ usb_set_intfdata(intf, hw);
+ /* init cfg & intf_ops */
+ rtlpriv->rtlhal.interface = INTF_USB;
+ rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info);
+ rtlpriv->intf_ops = &rtl_usb_ops;
+ rtl_dbgp_flag_init(hw);
+ /* Init IO handler */
+ _rtl_usb_io_handler_init(&udev->dev, hw);
+ rtlpriv->cfg->ops->read_chip_version(hw);
+ /*like read eeprom and so on */
+ rtlpriv->cfg->ops->read_eeprom_info(hw);
+ if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't init_sw_vars.\n"));
+ goto error_out;
+ }
+ rtlpriv->cfg->ops->init_sw_leds(hw);
+ err = _rtl_usb_init(hw);
+ err = _rtl_usb_init_sw(hw);
+ /* Init mac80211 sw */
+ err = rtl_init_core(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't allocate sw for mac80211.\n"));
+ goto error_out;
+ }
+
+ /*init rfkill */
+ /* rtl_init_rfkill(hw); */
+
+ err = ieee80211_register_hw(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+ ("Can't register mac80211 hw.\n"));
+ goto error_out;
+ } else {
+ rtlpriv->mac80211.mac80211_registered = 1;
+ }
+ set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
+ return 0;
+error_out:
+ rtl_deinit_core(hw);
+ _rtl_usb_io_handler_release(hw);
+ ieee80211_free_hw(hw);
+ usb_put_dev(udev);
+ return -ENODEV;
+}
+EXPORT_SYMBOL(rtl_usb_probe);
+
+void rtl_usb_disconnect(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ if (unlikely(!rtlpriv))
+ return;
+ /*ieee80211_unregister_hw will call ops_stop */
+ if (rtlmac->mac80211_registered == 1) {
+ ieee80211_unregister_hw(hw);
+ rtlmac->mac80211_registered = 0;
+ } else {
+ rtl_deinit_deferred_work(hw);
+ rtlpriv->intf_ops->adapter_stop(hw);
+ }
+ /*deinit rfkill */
+ /* rtl_deinit_rfkill(hw); */
+ rtl_usb_deinit(hw);
+ rtl_deinit_core(hw);
+ rtlpriv->cfg->ops->deinit_sw_leds(hw);
+ rtlpriv->cfg->ops->deinit_sw_vars(hw);
+ _rtl_usb_io_handler_release(hw);
+ usb_put_dev(rtlusb->udev);
+ usb_set_intfdata(intf, NULL);
+ ieee80211_free_hw(hw);
+}
+EXPORT_SYMBOL(rtl_usb_disconnect);
+
+int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message)
+{
+ return 0;
+}
+EXPORT_SYMBOL(rtl_usb_suspend);
+
+int rtl_usb_resume(struct usb_interface *pusb_intf)
+{
+ return 0;
+}
+EXPORT_SYMBOL(rtl_usb_resume);
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
new file mode 100644
index 000000000000..abadfe918d30
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/usb.h
@@ -0,0 +1,164 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_USB_H__
+#define __RTL_USB_H__
+
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#define RTL_USB_DEVICE(vend, prod, cfg) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .driver_info = (kernel_ulong_t)&(cfg)
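+
+/*
+ * A usage sketch (the IDs and the hal cfg symbol are only illustrative):
+ *
+ *	static struct usb_device_id rtl8192c_usb_ids[] = {
+ *		{RTL_USB_DEVICE(0x0bda, 0x8176, rtl92cu_hal_cfg)},
+ *		{}
+ *	};
+ *	MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids);
+ *
+ * The cfg pointer stored in driver_info is retrieved again in
+ * rtl_usb_probe() through id->driver_info.
+ */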
+
+#define USB_HIGH_SPEED_BULK_SIZE 512
+#define USB_FULL_SPEED_BULK_SIZE 64
+
+
+#define RTL_USB_MAX_TXQ_NUM 4 /* max tx queue */
+#define RTL_USB_MAX_EP_NUM 6 /* max ep number */
+#define RTL_USB_MAX_TX_URBS_NUM 8
+
+enum rtl_txq {
+	/* These definitions must be consistent with the values
+	 * returned by skb_get_queue_mapping()
+ *------------------------------------*/
+ RTL_TXQ_BK,
+ RTL_TXQ_BE,
+ RTL_TXQ_VI,
+ RTL_TXQ_VO,
+ /*------------------------------------*/
+ RTL_TXQ_BCN,
+ RTL_TXQ_MGT,
+ RTL_TXQ_HI,
+
+ /* Must be last */
+ __RTL_TXQ_NUM,
+};
+
+struct rtl_ep_map {
+ u32 ep_mapping[__RTL_TXQ_NUM];
+};
+
+struct _trx_info {
+ struct rtl_usb *rtlusb;
+ u32 ep_num;
+};
+
+static inline void _rtl_install_trx_info(struct rtl_usb *rtlusb,
+ struct sk_buff *skb,
+ u32 ep_num)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ info->rate_driver_data[0] = rtlusb;
+ info->rate_driver_data[1] = (void *)(__kernel_size_t)ep_num;
+}
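+/*
+ * The pointers stashed above are recovered in the URB completion handlers,
+ * e.g.:
+ *
+ *	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ *	struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
+ */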
+
+
+/* Add suspend/resume later */
+enum rtl_usb_state {
+ USB_STATE_STOP = 0,
+ USB_STATE_START = 1,
+};
+
+#define IS_USB_STOP(rtlusb_ptr) (USB_STATE_STOP == (rtlusb_ptr)->state)
+#define IS_USB_START(rtlusb_ptr) (USB_STATE_START == (rtlusb_ptr)->state)
+#define SET_USB_STOP(rtlusb_ptr) \
+ do { \
+ (rtlusb_ptr)->state = USB_STATE_STOP; \
+ } while (0)
+
+#define SET_USB_START(rtlusb_ptr) \
+ do { \
+ (rtlusb_ptr)->state = USB_STATE_START; \
+ } while (0)
+
+struct rtl_usb {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ enum rtl_usb_state state;
+
+ /* Bcn control register setting */
+ u32 reg_bcn_ctrl_val;
+ /* for 88/92cu card disable */
+ u8 disableHWSM;
+ /*QOS & EDCA */
+ enum acm_method acm_method;
+ /* irq . HIMR,HIMR_EX */
+ u32 irq_mask[2];
+ bool irq_enabled;
+
+ u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index);
+
+ /* Tx */
+	u8 out_ep_nums;
+ u8 out_queue_sel;
+ struct rtl_ep_map ep_map;
+
+ u32 max_bulk_out_size;
+ u32 tx_submitted_urbs;
+ struct sk_buff_head tx_skb_queue[RTL_USB_MAX_EP_NUM];
+
+ struct usb_anchor tx_pending[RTL_USB_MAX_EP_NUM];
+ struct usb_anchor tx_submitted;
+
+ struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *,
+ struct sk_buff_head *);
+ int (*usb_tx_post_hdl)(struct ieee80211_hw *,
+ struct urb *, struct sk_buff *);
+ void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *);
+
+ /* Rx */
+	u8 in_ep_nums;
+ u32 in_ep; /* Bulk IN endpoint number */
+ u32 rx_max_size; /* Bulk IN max buffer size */
+ u32 rx_urb_num; /* How many Bulk INs are submitted to host. */
+ struct usb_anchor rx_submitted;
+ void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *,
+ struct sk_buff_head *);
+ void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *);
+};
+
+struct rtl_usb_priv {
+ struct rtl_usb dev;
+ struct rtl_led_ctl ledctl;
+};
+
+#define rtl_usbpriv(hw) (((struct rtl_usb_priv *)(rtl_priv(hw))->priv))
+#define rtl_usbdev(usbpriv) (&((usbpriv)->dev))
+
+
+
+int __devinit rtl_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id);
+void rtl_usb_disconnect(struct usb_interface *intf);
+int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
+int rtl_usb_resume(struct usb_interface *pusb_intf);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
new file mode 100644
index 000000000000..01226f8e70f9
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -0,0 +1,1858 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_WIFI_H__
+#define __RTL_WIFI_H__
+
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/version.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/usb.h>
+#include <net/mac80211.h>
+#include "debug.h"
+
+#define RF_CHANGE_BY_INIT 0
+#define RF_CHANGE_BY_IPS BIT(28)
+#define RF_CHANGE_BY_PS BIT(29)
+#define RF_CHANGE_BY_HW BIT(30)
+#define RF_CHANGE_BY_SW BIT(31)
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_MAC_REG_NUM 4
+
+#define MAX_KEY_LEN 61
+#define KEY_BUF_SIZE 5
+
+/* QoS related. */
+/*aci: 0x00 Best Effort*/
+/*aci: 0x01 Background*/
+/*aci: 0x10 Video*/
+/*aci: 0x11 Voice*/
+/*Max: define total number.*/
+#define AC0_BE 0
+#define AC1_BK 1
+#define AC2_VI 2
+#define AC3_VO 3
+#define AC_MAX 4
+#define QOS_QUEUE_NUM 4
+#define RTL_MAC80211_NUM_QUEUE 5
+
+#define QBSS_LOAD_SIZE 5
+#define MAX_WMMELE_LENGTH 64
+
+/*slot time for 11g. */
+#define RTL_SLOT_TIME_9 9
+#define RTL_SLOT_TIME_20 20
+
+/*related to TCP/IP. */
+/*if_ether.h*/
+#define ETH_P_PAE 0x888E /*Port Access Entity (IEEE 802.1X) */
+#define ETH_P_IP 0x0800 /*Internet Protocol packet */
+#define ETH_P_ARP 0x0806 /*Address Resolution packet */
+#define SNAP_SIZE 6
+#define PROTOC_TYPE_SIZE 2
+
+/*related to 802.11 frames*/
+#define MAC80211_3ADDR_LEN 24
+#define MAC80211_4ADDR_LEN 30
+
+#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel no */
+#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, 4~9, 10~14 = three groups */
+#define MAX_PG_GROUP 13
+#define CHANNEL_GROUP_MAX_2G 3
+#define CHANNEL_GROUP_IDX_5GL 3
+#define CHANNEL_GROUP_IDX_5GM 6
+#define CHANNEL_GROUP_IDX_5GH 9
+#define CHANNEL_GROUP_MAX_5G 9
+#define CHANNEL_MAX_NUMBER_2G 14
+#define AVG_THERMAL_NUM 8
+
+/* for early mode */
+#define EM_HDR_LEN 8
+enum intf_type {
+ INTF_PCI = 0,
+ INTF_USB = 1,
+};
+
+enum radio_path {
+ RF90_PATH_A = 0,
+ RF90_PATH_B = 1,
+ RF90_PATH_C = 2,
+ RF90_PATH_D = 3,
+};
+
+enum rt_eeprom_type {
+ EEPROM_93C46,
+ EEPROM_93C56,
+ EEPROM_BOOT_EFUSE,
+};
+
+enum rtl_status {
+ RTL_STATUS_INTERFACE_START = 0,
+};
+
+enum hardware_type {
+ HARDWARE_TYPE_RTL8192E,
+ HARDWARE_TYPE_RTL8192U,
+ HARDWARE_TYPE_RTL8192SE,
+ HARDWARE_TYPE_RTL8192SU,
+ HARDWARE_TYPE_RTL8192CE,
+ HARDWARE_TYPE_RTL8192CU,
+ HARDWARE_TYPE_RTL8192DE,
+ HARDWARE_TYPE_RTL8192DU,
+ HARDWARE_TYPE_RTL8723E,
+ HARDWARE_TYPE_RTL8723U,
+
+ /* keep it last */
+ HARDWARE_TYPE_NUM
+};
+
+#define IS_HARDWARE_TYPE_8192SU(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SU)
+#define IS_HARDWARE_TYPE_8192SE(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
+#define IS_HARDWARE_TYPE_8192CE(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE)
+#define IS_HARDWARE_TYPE_8192CU(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU)
+#define IS_HARDWARE_TYPE_8192DE(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
+#define IS_HARDWARE_TYPE_8192DU(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DU)
+#define IS_HARDWARE_TYPE_8723E(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8723E)
+#define IS_HARDWARE_TYPE_8723U(rtlhal) \
+ (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
+#define IS_HARDWARE_TYPE_8192S(rtlhal) \
+(IS_HARDWARE_TYPE_8192SE(rtlhal) || IS_HARDWARE_TYPE_8192SU(rtlhal))
+#define IS_HARDWARE_TYPE_8192C(rtlhal) \
+(IS_HARDWARE_TYPE_8192CE(rtlhal) || IS_HARDWARE_TYPE_8192CU(rtlhal))
+#define IS_HARDWARE_TYPE_8192D(rtlhal) \
+(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
+#define IS_HARDWARE_TYPE_8723(rtlhal) \
+(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
+
+enum scan_operation_backup_opt {
+ SCAN_OPT_BACKUP = 0,
+ SCAN_OPT_RESTORE,
+ SCAN_OPT_MAX
+};
+
+/*RF state.*/
+enum rf_pwrstate {
+ ERFON,
+ ERFSLEEP,
+ ERFOFF
+};
+
+struct bb_reg_def {
+ u32 rfintfs;
+ u32 rfintfi;
+ u32 rfintfo;
+ u32 rfintfe;
+ u32 rf3wire_offset;
+ u32 rflssi_select;
+ u32 rftxgain_stage;
+ u32 rfhssi_para1;
+ u32 rfhssi_para2;
+ u32 rfswitch_control;
+ u32 rfagc_control1;
+ u32 rfagc_control2;
+ u32 rfrxiq_imbalance;
+ u32 rfrx_afe;
+ u32 rftxiq_imbalance;
+ u32 rftx_afe;
+ u32 rflssi_readback;
+ u32 rflssi_readbackpi;
+};
+
+enum io_type {
+ IO_CMD_PAUSE_DM_BY_SCAN = 0,
+ IO_CMD_RESUME_DM_BY_SCAN = 1,
+};
+
+enum hw_variables {
+ HW_VAR_ETHER_ADDR,
+ HW_VAR_MULTICAST_REG,
+ HW_VAR_BASIC_RATE,
+ HW_VAR_BSSID,
+ HW_VAR_MEDIA_STATUS,
+ HW_VAR_SECURITY_CONF,
+ HW_VAR_BEACON_INTERVAL,
+ HW_VAR_ATIM_WINDOW,
+ HW_VAR_LISTEN_INTERVAL,
+ HW_VAR_CS_COUNTER,
+ HW_VAR_DEFAULTKEY0,
+ HW_VAR_DEFAULTKEY1,
+ HW_VAR_DEFAULTKEY2,
+ HW_VAR_DEFAULTKEY3,
+ HW_VAR_SIFS,
+ HW_VAR_DIFS,
+ HW_VAR_EIFS,
+ HW_VAR_SLOT_TIME,
+ HW_VAR_ACK_PREAMBLE,
+ HW_VAR_CW_CONFIG,
+ HW_VAR_CW_VALUES,
+ HW_VAR_RATE_FALLBACK_CONTROL,
+ HW_VAR_CONTENTION_WINDOW,
+ HW_VAR_RETRY_COUNT,
+ HW_VAR_TR_SWITCH,
+ HW_VAR_COMMAND,
+ HW_VAR_WPA_CONFIG,
+ HW_VAR_AMPDU_MIN_SPACE,
+ HW_VAR_SHORTGI_DENSITY,
+ HW_VAR_AMPDU_FACTOR,
+ HW_VAR_MCS_RATE_AVAILABLE,
+ HW_VAR_AC_PARAM,
+ HW_VAR_ACM_CTRL,
+ HW_VAR_DIS_Req_Qsize,
+ HW_VAR_CCX_CHNL_LOAD,
+ HW_VAR_CCX_NOISE_HISTOGRAM,
+ HW_VAR_CCX_CLM_NHM,
+ HW_VAR_TxOPLimit,
+ HW_VAR_TURBO_MODE,
+ HW_VAR_RF_STATE,
+ HW_VAR_RF_OFF_BY_HW,
+ HW_VAR_BUS_SPEED,
+ HW_VAR_SET_DEV_POWER,
+
+ HW_VAR_RCR,
+ HW_VAR_RATR_0,
+ HW_VAR_RRSR,
+ HW_VAR_CPU_RST,
+ HW_VAR_CECHK_BSSID,
+ HW_VAR_LBK_MODE,
+ HW_VAR_AES_11N_FIX,
+ HW_VAR_USB_RX_AGGR,
+ HW_VAR_USER_CONTROL_TURBO_MODE,
+ HW_VAR_RETRY_LIMIT,
+ HW_VAR_INIT_TX_RATE,
+ HW_VAR_TX_RATE_REG,
+ HW_VAR_EFUSE_USAGE,
+ HW_VAR_EFUSE_BYTES,
+ HW_VAR_AUTOLOAD_STATUS,
+ HW_VAR_RF_2R_DISABLE,
+ HW_VAR_SET_RPWM,
+ HW_VAR_H2C_FW_PWRMODE,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ HW_VAR_FW_PSMODE_STATUS,
+ HW_VAR_1X1_RECV_COMBINE,
+ HW_VAR_STOP_SEND_BEACON,
+ HW_VAR_TSF_TIMER,
+ HW_VAR_IO_CMD,
+
+ HW_VAR_RF_RECOVERY,
+ HW_VAR_H2C_FW_UPDATE_GTK,
+ HW_VAR_WF_MASK,
+ HW_VAR_WF_CRC,
+ HW_VAR_WF_IS_MAC_ADDR,
+ HW_VAR_H2C_FW_OFFLOAD,
+ HW_VAR_RESET_WFCRC,
+
+ HW_VAR_HANDLE_FW_C2H,
+ HW_VAR_DL_FW_RSVD_PAGE,
+ HW_VAR_AID,
+ HW_VAR_HW_SEQ_ENABLE,
+ HW_VAR_CORRECT_TSF,
+ HW_VAR_BCN_VALID,
+ HW_VAR_FWLPS_RF_ON,
+ HW_VAR_DUAL_TSF_RST,
+ HW_VAR_SWITCH_EPHY_WoWLAN,
+ HW_VAR_INT_MIGRATION,
+ HW_VAR_INT_AC,
+ HW_VAR_RF_TIMING,
+
+ HW_VAR_MRC,
+
+ HW_VAR_MGT_FILTER,
+ HW_VAR_CTRL_FILTER,
+ HW_VAR_DATA_FILTER,
+};
+
+enum _RT_MEDIA_STATUS {
+ RT_MEDIA_DISCONNECT = 0,
+ RT_MEDIA_CONNECT = 1
+};
+
+enum rt_oem_id {
+ RT_CID_DEFAULT = 0,
+ RT_CID_8187_ALPHA0 = 1,
+ RT_CID_8187_SERCOMM_PS = 2,
+ RT_CID_8187_HW_LED = 3,
+ RT_CID_8187_NETGEAR = 4,
+ RT_CID_WHQL = 5,
+ RT_CID_819x_CAMEO = 6,
+ RT_CID_819x_RUNTOP = 7,
+ RT_CID_819x_Senao = 8,
+ RT_CID_TOSHIBA = 9,
+ RT_CID_819x_Netcore = 10,
+ RT_CID_Nettronix = 11,
+ RT_CID_DLINK = 12,
+ RT_CID_PRONET = 13,
+ RT_CID_COREGA = 14,
+ RT_CID_819x_ALPHA = 15,
+ RT_CID_819x_Sitecom = 16,
+ RT_CID_CCX = 17,
+ RT_CID_819x_Lenovo = 18,
+ RT_CID_819x_QMI = 19,
+ RT_CID_819x_Edimax_Belkin = 20,
+ RT_CID_819x_Sercomm_Belkin = 21,
+ RT_CID_819x_CAMEO1 = 22,
+ RT_CID_819x_MSI = 23,
+ RT_CID_819x_Acer = 24,
+ RT_CID_819x_HP = 27,
+ RT_CID_819x_CLEVO = 28,
+ RT_CID_819x_Arcadyan_Belkin = 29,
+ RT_CID_819x_SAMSUNG = 30,
+ RT_CID_819x_WNC_COREGA = 31,
+ RT_CID_819x_Foxcoon = 32,
+ RT_CID_819x_DELL = 33,
+};
+
+enum hw_descs {
+ HW_DESC_OWN,
+ HW_DESC_RXOWN,
+ HW_DESC_TX_NEXTDESC_ADDR,
+ HW_DESC_TXBUFF_ADDR,
+ HW_DESC_RXBUFF_ADDR,
+ HW_DESC_RXPKT_LEN,
+ HW_DESC_RXERO,
+};
+
+enum prime_sc {
+ PRIME_CHNL_OFFSET_DONT_CARE = 0,
+ PRIME_CHNL_OFFSET_LOWER = 1,
+ PRIME_CHNL_OFFSET_UPPER = 2,
+};
+
+enum rf_type {
+ RF_1T1R = 0,
+ RF_1T2R = 1,
+ RF_2T2R = 2,
+ RF_2T2R_GREEN = 3,
+};
+
+enum ht_channel_width {
+ HT_CHANNEL_WIDTH_20 = 0,
+ HT_CHANNEL_WIDTH_20_40 = 1,
+};
+
+/* Ref: 802.11i spec D10.0 7.3.2.25.1
+Cipher Suites Encryption Algorithms */
+enum rt_enc_alg {
+ NO_ENCRYPTION = 0,
+ WEP40_ENCRYPTION = 1,
+ TKIP_ENCRYPTION = 2,
+ RSERVED_ENCRYPTION = 3,
+ AESCCMP_ENCRYPTION = 4,
+ WEP104_ENCRYPTION = 5,
+};
+
+enum rtl_hal_state {
+ _HAL_STATE_STOP = 0,
+ _HAL_STATE_START = 1,
+};
+
+enum rtl_var_map {
+ /*reg map */
+ SYS_ISO_CTRL = 0,
+ SYS_FUNC_EN,
+ SYS_CLK,
+ MAC_RCR_AM,
+ MAC_RCR_AB,
+ MAC_RCR_ACRC32,
+ MAC_RCR_ACF,
+ MAC_RCR_AAP,
+
+ /*efuse map */
+ EFUSE_TEST,
+ EFUSE_CTRL,
+ EFUSE_CLK,
+ EFUSE_CLK_CTRL,
+ EFUSE_PWC_EV12V,
+ EFUSE_FEN_ELDR,
+ EFUSE_LOADER_CLK_EN,
+ EFUSE_ANA8M,
+ EFUSE_HWSET_MAX_SIZE,
+ EFUSE_MAX_SECTION_MAP,
+ EFUSE_REAL_CONTENT_SIZE,
+
+ /*CAM map */
+ RWCAM,
+ WCAMI,
+ RCAMO,
+ CAMDBG,
+ SECR,
+ SEC_CAM_NONE,
+ SEC_CAM_WEP40,
+ SEC_CAM_TKIP,
+ SEC_CAM_AES,
+ SEC_CAM_WEP104,
+
+ /*IMR map */
+ RTL_IMR_BCNDMAINT6, /*Beacon DMA Interrupt 6 */
+ RTL_IMR_BCNDMAINT5, /*Beacon DMA Interrupt 5 */
+ RTL_IMR_BCNDMAINT4, /*Beacon DMA Interrupt 4 */
+ RTL_IMR_BCNDMAINT3, /*Beacon DMA Interrupt 3 */
+ RTL_IMR_BCNDMAINT2, /*Beacon DMA Interrupt 2 */
+ RTL_IMR_BCNDMAINT1, /*Beacon DMA Interrupt 1 */
+ RTL_IMR_BCNDOK8, /*Beacon Queue DMA OK Interrupt 8 */
+ RTL_IMR_BCNDOK7, /*Beacon Queue DMA OK Interrupt 7 */
+ RTL_IMR_BCNDOK6, /*Beacon Queue DMA OK Interrupt 6 */
+ RTL_IMR_BCNDOK5, /*Beacon Queue DMA OK Interrupt 5 */
+ RTL_IMR_BCNDOK4, /*Beacon Queue DMA OK Interrupt 4 */
+ RTL_IMR_BCNDOK3, /*Beacon Queue DMA OK Interrupt 3 */
+ RTL_IMR_BCNDOK2, /*Beacon Queue DMA OK Interrupt 2 */
+ RTL_IMR_BCNDOK1, /*Beacon Queue DMA OK Interrupt 1 */
+ RTL_IMR_TIMEOUT2, /*Timeout interrupt 2 */
+ RTL_IMR_TIMEOUT1, /*Timeout interrupt 1 */
+ RTL_IMR_TXFOVW, /*Transmit FIFO Overflow */
+ RTL_IMR_PSTIMEOUT, /*Power save time out interrupt */
+ RTL_IMR_BcnInt, /*Beacon DMA Interrupt 0 */
+ RTL_IMR_RXFOVW, /*Receive FIFO Overflow */
+ RTL_IMR_RDU, /*Receive Descriptor Unavailable */
+ RTL_IMR_ATIMEND, /*For 92C, ATIM Window End Interrupt */
+ RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrupt */
+ RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */
+ RTL_IMR_COMDOK, /*Command Queue DMA OK Interrupt */
+ RTL_IMR_TBDOK, /*Transmit Beacon OK Interrupt */
+ RTL_IMR_MGNTDOK, /*Management Queue DMA OK Interrupt */
+ RTL_IMR_TBDER, /*For 92C, Transmit Beacon Error Interrupt */
+ RTL_IMR_BKDOK, /*AC_BK DMA OK Interrupt */
+ RTL_IMR_BEDOK, /*AC_BE DMA OK Interrupt */
+ RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */
+ RTL_IMR_VODOK, /*AC_VO DMA Interrupt */
+ RTL_IMR_ROK, /*Receive DMA OK Interrupt */
+ RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt | RTL_IMR_TBDOK |
+ * RTL_IMR_TBDER) */
+
+ /*CCK Rates, TxHT = 0 */
+ RTL_RC_CCK_RATE1M,
+ RTL_RC_CCK_RATE2M,
+ RTL_RC_CCK_RATE5_5M,
+ RTL_RC_CCK_RATE11M,
+
+ /*OFDM Rates, TxHT = 0 */
+ RTL_RC_OFDM_RATE6M,
+ RTL_RC_OFDM_RATE9M,
+ RTL_RC_OFDM_RATE12M,
+ RTL_RC_OFDM_RATE18M,
+ RTL_RC_OFDM_RATE24M,
+ RTL_RC_OFDM_RATE36M,
+ RTL_RC_OFDM_RATE48M,
+ RTL_RC_OFDM_RATE54M,
+
+ RTL_RC_HT_RATEMCS7,
+ RTL_RC_HT_RATEMCS15,
+
+ /*keep it last */
+ RTL_VAR_MAP_MAX,
+};
+
+/*Firmware PS modes for controlling LPS.*/
+enum _fw_ps_mode {
+ FW_PS_ACTIVE_MODE = 0,
+ FW_PS_MIN_MODE = 1,
+ FW_PS_MAX_MODE = 2,
+ FW_PS_DTIM_MODE = 3,
+ FW_PS_VOIP_MODE = 4,
+ FW_PS_UAPSD_WMM_MODE = 5,
+ FW_PS_UAPSD_MODE = 6,
+ FW_PS_IBSS_MODE = 7,
+ FW_PS_WWLAN_MODE = 8,
+ FW_PS_PM_Radio_Off = 9,
+ FW_PS_PM_Card_Disable = 10,
+};
+
+enum rt_psmode {
+ EACTIVE, /*Active/Continuous access. */
+ EMAXPS, /*Max power save mode. */
+ EFASTPS, /*Fast power save mode. */
+ EAUTOPS, /*Auto power save mode. */
+};
+
+/*LED related.*/
+enum led_ctl_mode {
+ LED_CTL_POWER_ON = 1,
+ LED_CTL_LINK = 2,
+ LED_CTL_NO_LINK = 3,
+ LED_CTL_TX = 4,
+ LED_CTL_RX = 5,
+ LED_CTL_SITE_SURVEY = 6,
+ LED_CTL_POWER_OFF = 7,
+ LED_CTL_START_TO_LINK = 8,
+ LED_CTL_START_WPS = 9,
+ LED_CTL_STOP_WPS = 10,
+};
+
+enum rtl_led_pin {
+ LED_PIN_GPIO0,
+ LED_PIN_LED0,
+ LED_PIN_LED1,
+ LED_PIN_LED2
+};
+
+/*QoS related.*/
+/*acm implementation method.*/
+enum acm_method {
+ eAcmWay0_SwAndHw = 0,
+ eAcmWay1_HW = 1,
+ eAcmWay2_SW = 2,
+};
+
+enum macphy_mode {
+ SINGLEMAC_SINGLEPHY = 0,
+ DUALMAC_DUALPHY,
+ DUALMAC_SINGLEPHY,
+};
+
+enum band_type {
+ BAND_ON_2_4G = 0,
+ BAND_ON_5G,
+ BAND_ON_BOTH,
+ BANDMAX
+};
+
+/*aci/aifsn Field.
+Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/
+union aci_aifsn {
+ u8 char_data;
+
+ struct {
+ u8 aifsn:4;
+ u8 acm:1;
+ u8 aci:2;
+ u8 reserved:1;
+ } f; /* Field */
+};
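+
+/*
+ * Worked example, assuming the little-endian bit-field layout this driver
+ * relies on: an AC_VO entry with aifsn = 2, acm = 0 and aci = 3 packs as
+ * char_data = (3 << 5) | (0 << 4) | 2 = 0x62, i.e.
+ *
+ *	union aci_aifsn a = { .f = { .aifsn = 2, .acm = 0, .aci = 3 } };
+ *
+ * then reads back a.char_data == 0x62.
+ */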
+
+/*mlme related.*/
+enum wireless_mode {
+ WIRELESS_MODE_UNKNOWN = 0x00,
+ WIRELESS_MODE_A = 0x01,
+ WIRELESS_MODE_B = 0x02,
+ WIRELESS_MODE_G = 0x04,
+ WIRELESS_MODE_AUTO = 0x08,
+ WIRELESS_MODE_N_24G = 0x10,
+ WIRELESS_MODE_N_5G = 0x20
+};
+
+#define IS_WIRELESS_MODE_A(wirelessmode) \
+ (wirelessmode == WIRELESS_MODE_A)
+#define IS_WIRELESS_MODE_B(wirelessmode) \
+ (wirelessmode == WIRELESS_MODE_B)
+#define IS_WIRELESS_MODE_G(wirelessmode) \
+ (wirelessmode == WIRELESS_MODE_G)
+#define IS_WIRELESS_MODE_N_24G(wirelessmode) \
+ (wirelessmode == WIRELESS_MODE_N_24G)
+#define IS_WIRELESS_MODE_N_5G(wirelessmode) \
+ (wirelessmode == WIRELESS_MODE_N_5G)
+
+enum ratr_table_mode {
+ RATR_INX_WIRELESS_NGB = 0,
+ RATR_INX_WIRELESS_NG = 1,
+ RATR_INX_WIRELESS_NB = 2,
+ RATR_INX_WIRELESS_N = 3,
+ RATR_INX_WIRELESS_GB = 4,
+ RATR_INX_WIRELESS_G = 5,
+ RATR_INX_WIRELESS_B = 6,
+ RATR_INX_WIRELESS_MC = 7,
+ RATR_INX_WIRELESS_A = 8,
+};
+
+enum rtl_link_state {
+ MAC80211_NOLINK = 0,
+ MAC80211_LINKING = 1,
+ MAC80211_LINKED = 2,
+ MAC80211_LINKED_SCANNING = 3,
+};
+
+enum act_category {
+ ACT_CAT_QOS = 1,
+ ACT_CAT_DLS = 2,
+ ACT_CAT_BA = 3,
+ ACT_CAT_HT = 7,
+ ACT_CAT_WMM = 17,
+};
+
+enum ba_action {
+ ACT_ADDBAREQ = 0,
+ ACT_ADDBARSP = 1,
+ ACT_DELBA = 2,
+};
+
+struct octet_string {
+ u8 *octet;
+ u16 length;
+};
+
+struct rtl_hdr_3addr {
+ __le16 frame_ctl;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctl;
+ u8 payload[0];
+} __packed;
+
+struct rtl_info_element {
+ u8 id;
+ u8 len;
+ u8 data[0];
+} __packed;
+
+struct rtl_probe_rsp {
+ struct rtl_hdr_3addr header;
+ u32 time_stamp[2];
+ __le16 beacon_interval;
+ __le16 capability;
+ /*SSID, supported rates, FH params, DS params,
+ CF params, IBSS params, TIM (if beacon), RSN */
+ struct rtl_info_element info_element[0];
+} __packed;
+
+/*LED related.*/
+/*ledpin identifies how this SW LED is implemented.*/
+struct rtl_led {
+ void *hw;
+ enum rtl_led_pin ledpin;
+ bool ledon;
+};
+
+struct rtl_led_ctl {
+ bool led_opendrain;
+ struct rtl_led sw_led0;
+ struct rtl_led sw_led1;
+};
+
+struct rtl_qos_parameters {
+ __le16 cw_min;
+ __le16 cw_max;
+ u8 aifs;
+ u8 flag;
+ __le16 tx_op;
+} __packed;
+
+struct rt_smooth_data {
+ u32 elements[100]; /*array to store values */
+ u32 index; /*index to current array to store */
+ u32 total_num; /*num of valid elements */
+ u32 total_val; /*sum of valid elements */
+};
+
+struct false_alarm_statistics {
+ u32 cnt_parity_fail;
+ u32 cnt_rate_illegal;
+ u32 cnt_crc8_fail;
+ u32 cnt_mcs_fail;
+ u32 cnt_fast_fsync_fail;
+ u32 cnt_sb_search_fail;
+ u32 cnt_ofdm_fail;
+ u32 cnt_cck_fail;
+ u32 cnt_all;
+};
+
+struct init_gain {
+ u8 xaagccore1;
+ u8 xbagccore1;
+ u8 xcagccore1;
+ u8 xdagccore1;
+ u8 cca;
+
+};
+
+struct wireless_stats {
+ unsigned long txbytesunicast;
+ unsigned long txbytesmulticast;
+ unsigned long txbytesbroadcast;
+ unsigned long rxbytesunicast;
+
+ long rx_snr_db[4];
+ /*Correct smoothed signal strength in dBm, only
+ used in the driver to report real power now. */
+ long recv_signal_power;
+ long signal_quality;
+ long last_sigstrength_inpercent;
+
+ u32 rssi_calculate_cnt;
+
+ /*Transformed, in dBm. Beautified signal
+ strength for UI, not correct. */
+ long signal_strength;
+
+ u8 rx_rssi_percentage[4];
+ u8 rx_evm_percentage[2];
+
+ struct rt_smooth_data ui_rssi;
+ struct rt_smooth_data ui_link_quality;
+};
+
+struct rate_adaptive {
+ u8 rate_adaptive_disabled;
+ u8 ratr_state;
+ u16 reserve;
+
+ u32 high_rssi_thresh_for_ra;
+ u32 high2low_rssi_thresh_for_ra;
+ u8 low2high_rssi_thresh_for_ra40m;
+ u32 low_rssi_thresh_for_ra40M;
+ u8 low2high_rssi_thresh_for_ra20m;
+ u32 low_rssi_thresh_for_ra20M;
+ u32 upper_rssi_threshold_ratr;
+ u32 middleupper_rssi_threshold_ratr;
+ u32 middle_rssi_threshold_ratr;
+ u32 middlelow_rssi_threshold_ratr;
+ u32 low_rssi_threshold_ratr;
+ u32 ultralow_rssi_threshold_ratr;
+ u32 low_rssi_threshold_ratr_40m;
+ u32 low_rssi_threshold_ratr_20m;
+ u8 ping_rssi_enable;
+ u32 ping_rssi_ratr;
+ u32 ping_rssi_thresh_for_ra;
+ u32 last_ratr;
+ u8 pre_ratr_state;
+};
+
+struct regd_pair_mapping {
+ u16 reg_dmnenum;
+ u16 reg_5ghz_ctl;
+ u16 reg_2ghz_ctl;
+};
+
+struct rtl_regulatory {
+ char alpha2[2];
+ u16 country_code;
+ u16 max_power_level;
+ u32 tp_scale;
+ u16 current_rd;
+ u16 current_rd_ext;
+ int16_t power_limit;
+ struct regd_pair_mapping *regpair;
+};
+
+struct rtl_rfkill {
+ bool rfkill_state; /*0 is off, 1 is on */
+};
+
+#define IQK_MATRIX_REG_NUM 8
+#define IQK_MATRIX_SETTINGS_NUM (1 + 24 + 21)
+struct iqk_matrix_regs {
+ bool b_iqk_done;
+ long value[1][IQK_MATRIX_REG_NUM];
+};
+
+struct phy_parameters {
+ u16 length;
+ u32 *pdata;
+};
+
+enum hw_param_tab_index {
+ PHY_REG_2T,
+ PHY_REG_1T,
+ PHY_REG_PG,
+ RADIOA_2T,
+ RADIOB_2T,
+ RADIOA_1T,
+ RADIOB_1T,
+ MAC_REG,
+ AGCTAB_2T,
+ AGCTAB_1T,
+ MAX_TAB
+};
+
+struct rtl_phy {
+ struct bb_reg_def phyreg_def[4]; /*Radio A/B/C/D */
+ struct init_gain initgain_backup;
+ enum io_type current_io_type;
+
+ u8 rf_mode;
+ u8 rf_type;
+ u8 current_chan_bw;
+ u8 set_bwmode_inprogress;
+ u8 sw_chnl_inprogress;
+ u8 sw_chnl_stage;
+ u8 sw_chnl_step;
+ u8 current_channel;
+ u8 h2c_box_num;
+ u8 set_io_inprogress;
+ u8 lck_inprogress;
+
+ /* record for power tracking */
+ s32 reg_e94;
+ s32 reg_e9c;
+ s32 reg_ea4;
+ s32 reg_eac;
+ s32 reg_eb4;
+ s32 reg_ebc;
+ s32 reg_ec4;
+ s32 reg_ecc;
+ u8 rfpienable;
+ u8 reserve_0;
+ u16 reserve_1;
+ u32 reg_c04, reg_c08, reg_874;
+ u32 adda_backup[16];
+ u32 iqk_mac_backup[IQK_MAC_REG_NUM];
+ u32 iqk_bb_backup[10];
+
+ /* Dual mac */
+ bool need_iqk;
+ struct iqk_matrix_regs iqk_matrix_regsetting[IQK_MATRIX_SETTINGS_NUM];
+
+ bool rfpi_enable;
+
+ u8 pwrgroup_cnt;
+ u8 cck_high_power;
+ /* MAX_PG_GROUP groups of pwr diff by rates */
+ u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16];
+ u8 default_initialgain[4];
+
+ /* the current Tx power level */
+ u8 cur_cck_txpwridx;
+ u8 cur_ofdm24g_txpwridx;
+
+ u32 rfreg_chnlval[2];
+ bool apk_done;
+ u32 reg_rf3c[2]; /* pathA / pathB */
+
+ u8 framesync;
+ u32 framesync_c34;
+
+ u8 num_total_rfpath;
+ struct phy_parameters hwparam_tables[MAX_TAB];
+ u16 rf_pathmap;
+};
+
+#define MAX_TID_COUNT 9
+#define RTL_AGG_OFF 0
+#define RTL_AGG_ON 1
+#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2
+#define RTL_AGG_EMPTYING_HW_QUEUE_DELBA 3
+
+struct rtl_ht_agg {
+ u16 txq_id;
+ u16 wait_for_ba;
+ u16 start_idx;
+ u64 bitmap;
+ u32 rate_n_flags;
+ u8 agg_state;
+};
+
+struct rtl_tid_data {
+ u16 seq_number;
+ struct rtl_ht_agg agg;
+};
+
+struct rtl_priv;
+struct rtl_io {
+ struct device *dev;
+ struct mutex bb_mutex;
+
+ /*PCI MEM map */
+ unsigned long pci_mem_end; /*shared mem end */
+ unsigned long pci_mem_start; /*shared mem start */
+
+ /*PCI IO map */
+ unsigned long pci_base_addr; /*device I/O address */
+
+ void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
+ void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
+ void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ int (*writeN_async) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
+ u8 *pdata);
+
+ u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
+ u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
+ u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr);
+ int (*readN_sync) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
+ u8 *pdata);
+};
+
+struct rtl_mac {
+ u8 mac_addr[ETH_ALEN];
+ u8 mac80211_registered;
+ u8 beacon_enabled;
+
+ u32 tx_ss_num;
+ u32 rx_ss_num;
+
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
+ enum nl80211_iftype opmode;
+
+ /*Probe Beacon management */
+ struct rtl_tid_data tids[MAX_TID_COUNT];
+ enum rtl_link_state link_state;
+
+ int n_channels;
+ int n_bitrates;
+
+ /*filters */
+ u32 rx_conf;
+ u16 rx_mgt_filter;
+ u16 rx_ctrl_filter;
+ u16 rx_data_filter;
+
+ bool act_scanning;
+ u8 cnt_after_linked;
+
+ /* early mode */
+ /* skb wait queue */
+ struct sk_buff_head skb_waitq[MAX_TID_COUNT];
+ u8 earlymode_threshold;
+
+ /*RDG*/
+ bool rdg_en;
+
+ /*AP*/
+ u8 bssid[6];
+ u32 vendor;
+ u8 mcs[16]; /* 16 bytes mcs for HT rates. */
+ u32 basic_rates; /* b/g rates */
+ u8 ht_enable;
+ u8 sgi_40;
+ u8 sgi_20;
+ u8 bw_40;
+ u8 mode; /* wireless mode */
+ u8 slot_time;
+ u8 short_preamble;
+ u8 use_cts_protect;
+ u8 cur_40_prime_sc;
+ u8 cur_40_prime_sc_bk;
+ u64 tsf;
+ u8 retry_short;
+ u8 retry_long;
+ u16 assoc_id;
+
+ /*IBSS*/
+ int beacon_interval;
+
+ /*AMPDU*/
+ u8 min_space_cfg; /*For Min spacing configurations */
+ u8 max_mss_density;
+ u8 current_ampdu_factor;
+ u8 current_ampdu_density;
+
+ /*QOS & EDCA */
+ struct ieee80211_tx_queue_params edca_param[RTL_MAC80211_NUM_QUEUE];
+ struct rtl_qos_parameters ac[AC_MAX];
+};
+
+struct rtl_hal {
+ struct ieee80211_hw *hw;
+
+ enum intf_type interface;
+ u16 hw_type; /*92c or 92d or 92s and so on */
+ u8 ic_class;
+ u8 oem_id;
+ u32 version; /*version of chip */
+ u8 state; /*stop 0, start 1 */
+
+ /*firmware */
+ u32 fwsize;
+ u8 *pfirmware;
+ u16 fw_version;
+ u16 fw_subversion;
+ bool h2c_setinprogress;
+ u8 last_hmeboxnum;
+ bool fw_ready;
+ /*Reserve page start offset except beacon in TxQ. */
+ u8 fw_rsvdpage_startoffset;
+ u8 h2c_txcmd_seq;
+
+ /* FW Cmd IO related */
+ u16 fwcmd_iomap;
+ u32 fwcmd_ioparam;
+ bool set_fwcmd_inprogress;
+ u8 current_fwcmd_io;
+
+ /**/
+ bool driver_going2unload;
+
+ /*AMPDU init min space*/
+ u8 minspace_cfg; /*For Min spacing configurations */
+
+ /* Dual mac */
+ enum macphy_mode macphymode;
+ enum band_type current_bandtype; /* 0:2.4G, 1:5G */
+ enum band_type current_bandtypebackup;
+ enum band_type bandset;
+ /* dual MAC 0--Mac0 1--Mac1 */
+ u32 interfaceindex;
+ /* just for DualMac S3S4 */
+ u8 macphyctl_reg;
+ bool earlymode_enable;
+ /* Dual mac*/
+ bool during_mac0init_radiob;
+ bool during_mac1init_radioa;
+ bool reloadtxpowerindex;
+ /* True if IMR or IQK has been done
+ for 2.4G while scanning is in progress */
+ bool load_imrandiqk_setting_for2g;
+
+ bool disable_amsdu_8k;
+};
+
+struct rtl_security {
+ /*default 0 */
+ bool use_sw_sec;
+
+ bool being_setkey;
+ bool use_defaultkey;
+ /*Encryption Algorithm for Unicast Packet */
+ enum rt_enc_alg pairwise_enc_algorithm;
+ /*Encryption Algorithm for Broadcast/Multicast */
+ enum rt_enc_alg group_enc_algorithm;
+
+ /*local key buffer: index 0 is for the
+ pairwise key, 1-4 are for group keys. */
+ u8 key_buf[KEY_BUF_SIZE][MAX_KEY_LEN];
+ u8 key_len[KEY_BUF_SIZE];
+
+ /*Pointer to the pairwise key;
+ it always points to key_buf[4] */
+ u8 *pairwise_key;
+};
+
+struct rtl_dm {
+ /*PHY status for Dynamic Management */
+ long entry_min_undecoratedsmoothed_pwdb;
+ long undecorated_smoothed_pwdb; /*out dm */
+ long entry_max_undecoratedsmoothed_pwdb;
+ bool dm_initialgain_enable;
+ bool dynamic_txpower_enable;
+ bool current_turbo_edca;
+ bool is_any_nonbepkts; /*out dm */
+ bool is_cur_rdlstate;
+ bool txpower_trackingInit;
+ bool disable_framebursting;
+ bool cck_inch14;
+ bool txpower_tracking;
+ bool useramask;
+ bool rfpath_rxenable[4];
+ bool inform_fw_driverctrldm;
+ bool current_mrc_switch;
+ u8 txpowercount;
+
+ u8 thermalvalue_rxgain;
+ u8 thermalvalue_iqk;
+ u8 thermalvalue_lck;
+ u8 thermalvalue;
+ u8 last_dtp_lvl;
+ u8 thermalvalue_avg[AVG_THERMAL_NUM];
+ u8 thermalvalue_avg_index;
+ bool done_txpower;
+ u8 dynamic_txhighpower_lvl; /*Tx high power level */
+ u8 dm_flag; /*Indicate each dynamic mechanism's status. */
+ u8 dm_type;
+ u8 txpower_track_control;
+ bool interrupt_migration;
+ bool disable_tx_int;
+ char ofdm_index[2];
+ char cck_index;
+ u8 power_index_backup[6];
+};
+
+#define EFUSE_MAX_LOGICAL_SIZE 256
+
+struct rtl_efuse {
+ bool autoLoad_ok;
+ bool bootfromefuse;
+ u16 max_physical_size;
+
+ u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
+ u16 efuse_usedbytes;
+ u8 efuse_usedpercentage;
+#ifdef EFUSE_REPG_WORKAROUND
+ bool efuse_re_pg_sec1flag;
+ u8 efuse_re_pg_data[8];
+#endif
+
+ u8 autoload_failflag;
+ u8 autoload_status;
+
+ short epromtype;
+ u16 eeprom_vid;
+ u16 eeprom_did;
+ u16 eeprom_svid;
+ u16 eeprom_smid;
+ u8 eeprom_oemid;
+ u16 eeprom_channelplan;
+ u8 eeprom_version;
+ u8 board_type;
+ u8 external_pa;
+
+ u8 dev_addr[6];
+
+ bool txpwr_fromeprom;
+ u8 eeprom_crystalcap;
+ u8 eeprom_tssi[2];
+ u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
+ u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
+ u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
+ u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
+ u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
+ u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX];
+ u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G];
+ u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
+ u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
+
+ u8 internal_pa_5g[2]; /* pathA / pathB */
+ u8 eeprom_c9;
+ u8 eeprom_cc;
+
+ /*For power group */
+ u8 eeprom_pwrgroup[2][3];
+ u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
+ u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
+
+ char txpwr_ht20diff[2][CHANNEL_MAX_NUMBER]; /*HT 20<->40 Pwr diff */
+ /*For HT<->legacy pwr diff*/
+ u8 txpwr_legacyhtdiff[2][CHANNEL_MAX_NUMBER];
+ u8 txpwr_safetyflag; /* Band edge enable flag */
+ u16 eeprom_txpowerdiff;
+ u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */
+ u8 antenna_txpwdiff[3];
+
+ u8 eeprom_regulatory;
+ u8 eeprom_thermalmeter;
+ u8 thermalmeter[2]; /*ThermalMeter, index 0 for RFIC0, 1 for RFIC1 */
+ u16 tssi_13dbm;
+ u8 crystalcap; /* CrystalCap. */
+ u8 delta_iqk;
+ u8 delta_lck;
+
+ u8 legacy_ht_txpowerdiff; /*Legacy to HT rate power diff */
+ bool apk_thermalmeterignore;
+
+ bool b1x1_recvcombine;
+ bool b1ss_support;
+
+ /*channel plan */
+ u8 channel_plan;
+};
+
+struct rtl_ps_ctl {
+ bool pwrdomain_protect;
+ bool set_rfpowerstate_inprogress;
+ bool in_powersavemode;
+ bool rfchange_inprogress;
+ bool swrf_processing;
+ bool hwradiooff;
+
+ /*
+ * just for PCIE ASPM
+ * If it supports ASPM, Offset[560h] = 0x40,
+ * otherwise Offset[560h] = 0x00.
+ * */
+ bool support_aspm;
+ bool support_backdoor;
+
+ /*for LPS */
+ enum rt_psmode dot11_psmode; /*Power save mode configured. */
+ bool swctrl_lps;
+ bool leisure_ps;
+ bool fwctrl_lps;
+ u8 fwctrl_psmode;
+ /*For Fw control LPS mode */
+ u8 reg_fwctrl_lps;
+ /*Record Fw PS mode status. */
+ bool fw_current_inpsmode;
+ u8 reg_max_lps_awakeintvl;
+ bool report_linked;
+
+ /*for IPS */
+ bool inactiveps;
+
+ u32 rfoff_reason;
+
+ /*RF OFF Level */
+ u32 cur_ps_level;
+ u32 reg_rfps_level;
+
+ /*just for PCIE ASPM */
+ u8 const_amdpci_aspm;
+
+ bool pwrdown_mode;
+
+ enum rf_pwrstate inactive_pwrstate;
+ enum rf_pwrstate rfpwr_state; /*cur power state */
+
+ /* for SW LPS*/
+ bool sw_ps_enabled;
+ bool state;
+ bool state_inap;
+ bool multi_buffered;
+ u16 nullfunc_seq;
+ unsigned int dtim_counter;
+ unsigned int sleep_ms;
+ unsigned long last_sleep_jiffies;
+ unsigned long last_awake_jiffies;
+ unsigned long last_delaylps_stamp_jiffies;
+ unsigned long last_dtim;
+ unsigned long last_beacon;
+ unsigned long last_action;
+ unsigned long last_slept;
+};
+
+struct rtl_stats {
+ u32 mac_time[2];
+ s8 rssi;
+ u8 signal;
+ u8 noise;
+ u16 rate; /*in 100 kbps */
+ u8 received_channel;
+ u8 control;
+ u8 mask;
+ u8 freq;
+ u16 len;
+ u64 tsf;
+ u32 beacon_time;
+ u8 nic_type;
+ u16 length;
+ u8 signalquality; /*in 0-100 index. */
+ /*
+ * Real power in dBm for this packet,
+ * no beautification and aggregation.
+ * */
+ s32 recvsignalpower;
+ s8 rxpower; /*in dBm Translate from PWdB */
+ u8 signalstrength; /*in 0-100 index. */
+ u16 hwerror:1;
+ u16 crc:1;
+ u16 icv:1;
+ u16 shortpreamble:1;
+ u16 antenna:1;
+ u16 decrypted:1;
+ u16 wakeup:1;
+ u32 timestamp_low;
+ u32 timestamp_high;
+
+ u8 rx_drvinfo_size;
+ u8 rx_bufshift;
+ bool isampdu;
+ bool isfirst_ampdu;
+ bool rx_is40Mhzpacket;
+ u32 rx_pwdb_all;
+ u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
+ s8 rx_mimo_signalquality[2];
+ bool packet_matchbssid;
+ bool is_cck;
+ bool packet_toself;
+ bool packet_beacon; /*for rssi */
+ char cck_adc_pwdb[4]; /*for rx path selection */
+};
+
+struct rt_link_detect {
+ u32 num_tx_in4period[4];
+ u32 num_rx_in4period[4];
+
+ u32 num_tx_inperiod;
+ u32 num_rx_inperiod;
+
+ bool busytraffic;
+ bool higher_busytraffic;
+ bool higher_busyrxtraffic;
+};
+
+struct rtl_tcb_desc {
+ u8 packet_bw:1;
+ u8 multicast:1;
+ u8 broadcast:1;
+
+ u8 rts_stbc:1;
+ u8 rts_enable:1;
+ u8 cts_enable:1;
+ u8 rts_use_shortpreamble:1;
+ u8 rts_use_shortgi:1;
+ u8 rts_sc:1;
+ u8 rts_bw:1;
+ u8 rts_rate;
+
+ u8 use_shortgi:1;
+ u8 use_shortpreamble:1;
+ u8 use_driver_rate:1;
+ u8 disable_ratefallback:1;
+
+ u8 ratr_index;
+ u8 mac_id;
+ u8 hw_rate;
+
+ u8 last_inipkt:1;
+ u8 cmd_or_init:1;
+ u8 queue_index;
+
+ /* early mode */
+ u8 empkt_num;
+ /* The max value by HW */
+ u32 empkt_len[5];
+};
+
+struct rtl_hal_ops {
+ int (*init_sw_vars) (struct ieee80211_hw *hw);
+ void (*deinit_sw_vars) (struct ieee80211_hw *hw);
+ void (*read_chip_version)(struct ieee80211_hw *hw);
+ void (*read_eeprom_info) (struct ieee80211_hw *hw);
+ void (*interrupt_recognized) (struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb);
+ int (*hw_init) (struct ieee80211_hw *hw);
+ void (*hw_disable) (struct ieee80211_hw *hw);
+ void (*hw_suspend) (struct ieee80211_hw *hw);
+ void (*hw_resume) (struct ieee80211_hw *hw);
+ void (*enable_interrupt) (struct ieee80211_hw *hw);
+ void (*disable_interrupt) (struct ieee80211_hw *hw);
+ int (*set_network_type) (struct ieee80211_hw *hw,
+ enum nl80211_iftype type);
+ void (*set_chk_bssid)(struct ieee80211_hw *hw,
+ bool check_bssid);
+ void (*set_bw_mode) (struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+ u8(*switch_channel) (struct ieee80211_hw *hw);
+ void (*set_qos) (struct ieee80211_hw *hw, int aci);
+ void (*set_bcn_reg) (struct ieee80211_hw *hw);
+ void (*set_bcn_intv) (struct ieee80211_hw *hw);
+ void (*update_interrupt_mask) (struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+ void (*get_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
+ void (*set_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
+ void (*update_rate_table) (struct ieee80211_hw *hw);
+ void (*update_rate_mask) (struct ieee80211_hw *hw, u8 rssi_level);
+ void (*fill_tx_desc) (struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_tx_info *info,
+ struct sk_buff *skb, unsigned int queue_index);
+ void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 * pDesc,
+ u32 buffer_len, bool bIsPsPoll);
+ void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc,
+ bool firstseg, bool lastseg,
+ struct sk_buff *skb);
+ bool (*cmd_send_packet)(struct ieee80211_hw *hw, struct sk_buff *skb);
+ bool (*query_rx_desc) (struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb);
+ void (*set_channel_access) (struct ieee80211_hw *hw);
+ bool (*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid);
+ void (*dm_watchdog) (struct ieee80211_hw *hw);
+ void (*scan_operation_backup) (struct ieee80211_hw *hw, u8 operation);
+ bool (*set_rf_power_state) (struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+ void (*led_control) (struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction);
+ void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+ u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
+ void (*tx_polling) (struct ieee80211_hw *hw, unsigned int hw_queue);
+ void (*enable_hw_sec) (struct ieee80211_hw *hw);
+ void (*set_key) (struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+ void (*init_sw_leds) (struct ieee80211_hw *hw);
+ void (*deinit_sw_leds) (struct ieee80211_hw *hw);
+ u32 (*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+ void (*set_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+ u32 data);
+ u32 (*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask);
+ void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
+ bool (*phy_rf6052_config) (struct ieee80211_hw *hw);
+ void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw,
+ u8 *powerlevel);
+ void (*phy_rf6052_set_ofdm_txpower) (struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+ bool (*config_bb_with_headerfile) (struct ieee80211_hw *hw,
+ u8 configtype);
+ bool (*config_bb_with_pgheaderfile) (struct ieee80211_hw *hw,
+ u8 configtype);
+ void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t);
+ void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw);
+ void (*dm_dynamic_txpower) (struct ieee80211_hw *hw);
+};
+
+struct rtl_intf_ops {
+ /*com */
+ void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+ int (*adapter_start) (struct ieee80211_hw *hw);
+ void (*adapter_stop) (struct ieee80211_hw *hw);
+
+ int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb);
+ int (*reset_trx_ring) (struct ieee80211_hw *hw);
+ bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
+
+ /*pci */
+ void (*disable_aspm) (struct ieee80211_hw *hw);
+ void (*enable_aspm) (struct ieee80211_hw *hw);
+
+ /*usb */
+};
+
+struct rtl_mod_params {
+ /* default: 0 = using hardware encryption */
+ int sw_crypto;
+};
+
+struct rtl_hal_usbint_cfg {
+ /* data - rx */
+ u32 in_ep_num;
+ u32 rx_urb_num;
+ u32 rx_max_size;
+
+ /* op - rx */
+ void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *);
+ void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *,
+ struct sk_buff_head *);
+
+ /* tx */
+ void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *);
+ int (*usb_tx_post_hdl)(struct ieee80211_hw *, struct urb *,
+ struct sk_buff *);
+ struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *,
+ struct sk_buff_head *);
+
+ /* endpoint mapping */
+ int (*usb_endpoint_mapping)(struct ieee80211_hw *hw);
+ u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index);
+};
+
+struct rtl_hal_cfg {
+ u8 bar_id;
+ char *name;
+ char *fw_name;
+ struct rtl_hal_ops *ops;
+ struct rtl_mod_params *mod_params;
+ struct rtl_hal_usbint_cfg *usb_interface_cfg;
+
+ /*this map is used for some registers or vars
+ defined in HAL but used in MAIN */
+ u32 maps[RTL_VAR_MAP_MAX];
+
+};
+
+struct rtl_locks {
+ /* mutex */
+ struct mutex conf_mutex;
+
+ /*spin lock */
+ spinlock_t ips_lock;
+ spinlock_t irq_th_lock;
+ spinlock_t h2c_lock;
+ spinlock_t rf_ps_lock;
+ spinlock_t rf_lock;
+ spinlock_t lps_lock;
+ spinlock_t waitq_lock;
+ spinlock_t tx_urb_lock;
+
+ /*Dual mac*/
+ spinlock_t cck_and_rw_pagea_lock;
+};
+
+struct rtl_works {
+ struct ieee80211_hw *hw;
+
+ /*timer */
+ struct timer_list watchdog_timer;
+
+ /*task */
+ struct tasklet_struct irq_tasklet;
+ struct tasklet_struct irq_prepare_bcn_tasklet;
+
+ /*work queue */
+ struct workqueue_struct *rtl_wq;
+ struct delayed_work watchdog_wq;
+ struct delayed_work ips_nic_off_wq;
+
+ /* For SW LPS */
+ struct delayed_work ps_work;
+ struct delayed_work ps_rfon_wq;
+};
+
+struct rtl_debug {
+ u32 dbgp_type[DBGP_TYPE_MAX];
+ u32 global_debuglevel;
+ u64 global_debugcomponents;
+
+ /* add for proc debug */
+ struct proc_dir_entry *proc_dir;
+ char proc_name[20];
+};
+
+struct rtl_priv {
+ struct rtl_locks locks;
+ struct rtl_works works;
+ struct rtl_mac mac80211;
+ struct rtl_hal rtlhal;
+ struct rtl_regulatory regd;
+ struct rtl_rfkill rfkill;
+ struct rtl_io io;
+ struct rtl_phy phy;
+ struct rtl_dm dm;
+ struct rtl_security sec;
+ struct rtl_efuse efuse;
+
+ struct rtl_ps_ctl psc;
+ struct rate_adaptive ra;
+ struct wireless_stats stats;
+ struct rt_link_detect link_info;
+ struct false_alarm_statistics falsealm_cnt;
+
+ struct rtl_rate_priv *rate_priv;
+
+ struct rtl_debug dbg;
+
+ /*
+ *hal_cfg : for different cards
+ *intf_ops : for different interfaces (usb/pcie)
+ */
+ struct rtl_hal_cfg *cfg;
+ struct rtl_intf_ops *intf_ops;
+
+ /*this var will be set by set_bit,
+ and is used to indicate the status of
+ the interface or hardware */
+ unsigned long status;
+
+ /*This must be the last item so
+ that it points to the data allocated
+ beyond this structure like:
+ rtl_pci_priv or rtl_usb_priv */
+ u8 priv[0];
+};
+
+#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv))
+#define rtl_mac(rtlpriv) (&((rtlpriv)->mac80211))
+#define rtl_hal(rtlpriv) (&((rtlpriv)->rtlhal))
+#define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse))
+#define rtl_psc(rtlpriv) (&((rtlpriv)->psc))
+
+
+/***************************************
+ Bluetooth Co-existence Related
+****************************************/
+
+enum bt_ant_num {
+ ANT_X2 = 0,
+ ANT_X1 = 1,
+};
+
+enum bt_co_type {
+ BT_2WIRE = 0,
+ BT_ISSC_3WIRE = 1,
+ BT_ACCEL = 2,
+ BT_CSR_BC4 = 3,
+ BT_CSR_BC8 = 4,
+ BT_RTL8756 = 5,
+};
+
+enum bt_cur_state {
+ BT_OFF = 0,
+ BT_ON = 1,
+};
+
+enum bt_service_type {
+ BT_SCO = 0,
+ BT_A2DP = 1,
+ BT_HID = 2,
+ BT_HID_IDLE = 3,
+ BT_SCAN = 4,
+ BT_IDLE = 5,
+ BT_OTHER_ACTION = 6,
+ BT_BUSY = 7,
+ BT_OTHERBUSY = 8,
+ BT_PAN = 9,
+};
+
+enum bt_radio_shared {
+ BT_RADIO_SHARED = 0,
+ BT_RADIO_INDIVIDUAL = 1,
+};
+
+struct bt_coexist_info {
+
+ /* EEPROM BT info. */
+ u8 eeprom_bt_coexist;
+ u8 eeprom_bt_type;
+ u8 eeprom_bt_ant_num;
+ u8 eeprom_bt_ant_isolation;
+ u8 eeprom_bt_radio_shared;
+
+ u8 bt_coexistence;
+ u8 bt_ant_num;
+ u8 bt_coexist_type;
+ u8 bt_state;
+ u8 bt_cur_state; /* 0:off, 1:on, see enum bt_cur_state */
+ u8 bt_ant_isolation; /* 0:good, 1:bad */
+ u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */
+ u8 bt_service;
+ u8 bt_radio_shared_type;
+ u8 bt_rfreg_origin_1e;
+ u8 bt_rfreg_origin_1f;
+ u8 bt_rssi_state;
+ u32 ratio_tx;
+ u32 ratio_pri;
+ u32 bt_edca_ul;
+ u32 bt_edca_dl;
+
+ bool b_init_set;
+ bool b_bt_busy_traffic;
+ bool b_bt_traffic_mode_set;
+ bool b_bt_non_traffic_mode_set;
+
+ bool b_fw_coexist_all_off;
+ bool b_sw_coexist_all_off;
+ u32 current_state;
+ u32 previous_state;
+ u8 bt_pre_rssi_state;
+
+ u8 b_reg_bt_iso;
+ u8 b_reg_bt_sco;
+
+};
+
+
+/****************************************
+ mem access macro define start
+ Call the endian-free functions when:
+ 1. Reading/writing packet content.
+ 2. Before writing an integer to IO.
+ 3. After reading an integer from IO.
+****************************************/
+/* Convert little-endian data to host ordering */
+#define EF1BYTE(_val) \
+ ((u8)(_val))
+#define EF2BYTE(_val) \
+ (le16_to_cpu(_val))
+#define EF4BYTE(_val) \
+ (le32_to_cpu(_val))
+
+/* Read le16 data from memory and convert to host ordering */
+#define READEF2BYTE(_ptr) \
+ EF2BYTE(*((u16 *)(_ptr)))
+
+/* Write le16 data to memory in host ordering */
+#define WRITEEF2BYTE(_ptr, _val) \
+ (*((u16 *)(_ptr))) = EF2BYTE(_val)
+
+/* Create a bit mask
+ * Examples:
+ * BIT_LEN_MASK_32(0) => 0x00000000
+ * BIT_LEN_MASK_32(1) => 0x00000001
+ * BIT_LEN_MASK_32(2) => 0x00000003
+ * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
+ */
+#define BIT_LEN_MASK_32(__bitlen) \
+ (0xFFFFFFFF >> (32 - (__bitlen)))
+#define BIT_LEN_MASK_16(__bitlen) \
+ (0xFFFF >> (16 - (__bitlen)))
+#define BIT_LEN_MASK_8(__bitlen) \
+ (0xFF >> (8 - (__bitlen)))
+
+/* Create an offset bit mask
+ * Examples:
+ * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
+ * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
+ */
+#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
+
+/*Description:
+ * Return the 4-byte value, in host byte ordering, read from
+ * a pointer to 4 bytes of little-endian data.
+ */
+#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
+ (EF4BYTE(*((u32 *)(__pstart))))
+#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
+ (EF2BYTE(*((u16 *)(__pstart))))
+#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
+ (EF1BYTE(*((u8 *)(__pstart))))
+
+/* Description:
+ * Clear a subfield (contiguous bits in little-endian layout) of a 4-byte
+ * value and return the result as a 4-byte value in host byte ordering.
+ */
+#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen)) \
+ )
+#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen)) \
+ )
+#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
+ )
+
+/* Description:
+ * Set subfield of little-endian 4-byte value to specified value.
+ */
+#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u8 *)(__pstart)) = EF1BYTE \
+ ( \
+ LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
+ ((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
+ );
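+
+/* Worked example: if the byte at __pstart is initially 0xFF, then
+ * SET_BITS_TO_LE_1BYTE(__pstart, 4, 2, 0x2) first clears bits 4-5
+ * (0xFF & ~0x30 = 0xCF) and then ORs in (0x2 & 0x3) << 4 = 0x20,
+ * leaving 0xEF in memory.
+ */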
+
+/****************************************
+ mem access macro define end
+****************************************/
+
+#define byte(x, n) ((x >> (8 * n)) & 0xff)
+
+#define RTL_WATCH_DOG_TIME 2000
+#define MSECS(t) msecs_to_jiffies(t)
+#define WLAN_FC_GET_VERS(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_VERS)
+#define WLAN_FC_GET_TYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE)
+#define WLAN_FC_MORE_DATA(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_MOREDATA)
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
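+
+/* Worked example: IEEE80211_SCTL_SEQ is 0xfff0, so for a sequence control
+ * field of 0x0123 (fragment number 3) SEQ_TO_SN(0x0123) = 0x0120 >> 4 = 0x12,
+ * and SN_TO_SEQ(0x12) = 0x0120, i.e. the fragment bits are dropped.
+ */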
+
+#define RT_RF_OFF_LEVL_ASPM BIT(0) /*PCI ASPM */
+#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /*PCI clock request */
+#define RT_RF_OFF_LEVL_PCI_D3 BIT(2) /*PCI D3 mode */
+/*NIC halt, re-initialize hw parameters*/
+#define RT_RF_OFF_LEVL_HALT_NIC BIT(3)
+#define RT_RF_OFF_LEVL_FREE_FW BIT(4) /*FW free, re-download the FW */
+#define RT_RF_OFF_LEVL_FW_32K BIT(5) /*FW in 32k */
+/*Always enable ASPM and Clock Req in initialization.*/
+#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6)
+/* Set PS_ASPM_LEVL regardless of RFOFF or SLEEP. */
+#define RT_PS_LEVEL_ASPM BIT(7)
+/*When LPS is on, disable 2R if no packet is received or transmitted.*/
+#define RT_RF_LPS_DISALBE_2R BIT(30)
+#define RT_RF_LPS_LEVEL_ASPM BIT(31) /*LPS with ASPM */
+#define RT_IN_PS_LEVEL(ppsc, _ps_flg) \
+ ((ppsc->cur_ps_level & _ps_flg) ? true : false)
+#define RT_CLEAR_PS_LEVEL(ppsc, _ps_flg) \
+ (ppsc->cur_ps_level &= (~(_ps_flg)))
+#define RT_SET_PS_LEVEL(ppsc, _ps_flg) \
+ (ppsc->cur_ps_level |= _ps_flg)
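+
+/* A typical use of the level helpers above (sketch only): test a level bit
+ * and clear it once the corresponding restore work has been done, e.g.
+ *
+ *	if (RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC))
+ *		RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ */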
+
+#define container_of_dwork_rtl(x, y, z) \
+ container_of(container_of(x, struct delayed_work, work), y, z)
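+
+/* The nested container_of above first maps a work_struct back to its
+ * enclosing delayed_work and then to the structure that embeds it. A work
+ * callback would typically use it as sketched here (the callback name is
+ * illustrative only):
+ *
+ *	static void _rtl_watchdog_wq_callback(struct work_struct *work)
+ *	{
+ *		struct rtl_works *rtlworks =
+ *		    container_of_dwork_rtl(work, struct rtl_works, watchdog_wq);
+ *		struct ieee80211_hw *hw = rtlworks->hw;
+ *		...
+ *	}
+ */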
+
+static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return rtlpriv->io.read8_sync(rtlpriv, addr);
+}
+
+static inline u16 rtl_read_word(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return rtlpriv->io.read16_sync(rtlpriv, addr);
+}
+
+static inline u32 rtl_read_dword(struct rtl_priv *rtlpriv, u32 addr)
+{
+ return rtlpriv->io.read32_sync(rtlpriv, addr);
+}
+
+static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8)
+{
+ rtlpriv->io.write8_async(rtlpriv, addr, val8);
+}
+
+static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16)
+{
+ rtlpriv->io.write16_async(rtlpriv, addr, val16);
+}
+
+static inline void rtl_write_dword(struct rtl_priv *rtlpriv,
+ u32 addr, u32 val32)
+{
+ rtlpriv->io.write32_async(rtlpriv, addr, val32);
+}
+
+static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask)
+{
+ return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_bbreg(hw,
+ regaddr,
+ bitmask);
+}
+
+static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr,
+ u32 bitmask, u32 data)
+{
+ ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_bbreg(hw,
+ regaddr, bitmask,
+ data);
+
+}
+
+static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask)
+{
+ return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_rfreg(hw,
+ rfpath,
+ regaddr,
+ bitmask);
+}
+
+static inline void rtl_set_rfreg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data)
+{
+ ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_rfreg(hw,
+ rfpath, regaddr,
+ bitmask, data);
+}
+
+static inline bool is_hal_stop(struct rtl_hal *rtlhal)
+{
+ return (_HAL_STATE_STOP == rtlhal->state);
+}
+
+static inline void set_hal_start(struct rtl_hal *rtlhal)
+{
+ rtlhal->state = _HAL_STATE_START;
+}
+
+static inline void set_hal_stop(struct rtl_hal *rtlhal)
+{
+ rtlhal->state = _HAL_STATE_STOP;
+}
+
+static inline u8 get_rf_type(struct rtl_phy *rtlphy)
+{
+ return rtlphy->rf_type;
+}
+
+#endif
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/wl1251/acx.c
index 64a0214cfb29..ef8370edace7 100644
--- a/drivers/net/wireless/wl1251/acx.c
+++ b/drivers/net/wireless/wl1251/acx.c
@@ -776,6 +776,31 @@ out:
return ret;
}
+int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
+ u8 depth, enum wl1251_acx_low_rssi_type type)
+{
+ struct acx_low_rssi *rssi;
+ int ret;
+
+ wl1251_debug(DEBUG_ACX, "acx low rssi");
+
+ rssi = kzalloc(sizeof(*rssi), GFP_KERNEL);
+ if (!rssi)
+ return -ENOMEM;
+
+ rssi->threshold = threshold;
+ rssi->weight = weight;
+ rssi->depth = depth;
+ rssi->type = type;
+
+ ret = wl1251_cmd_configure(wl, ACX_LOW_RSSI, rssi, sizeof(*rssi));
+ if (ret < 0)
+ wl1251_warning("failed to set low rssi threshold: %d", ret);
+
+ kfree(rssi);
+ return ret;
+}
+
int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble)
{
struct acx_preamble *acx;
@@ -978,6 +1003,34 @@ out:
return ret;
}
+int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
+ u8 max_consecutive)
+{
+ struct wl1251_acx_bet_enable *acx;
+ int ret;
+
+ wl1251_debug(DEBUG_ACX, "acx bet enable");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->enable = mode;
+ acx->max_consecutive = max_consecutive;
+
+ ret = wl1251_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1251_warning("wl1251 acx bet enable failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifs, u16 txop)
{
diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/wl1251/acx.h
index e54b21a4f8b1..c2ba100f9b1a 100644
--- a/drivers/net/wireless/wl1251/acx.h
+++ b/drivers/net/wireless/wl1251/acx.h
@@ -399,6 +399,49 @@ struct acx_rts_threshold {
u8 pad[2];
} __packed;
+enum wl1251_acx_low_rssi_type {
+ /*
+ * The event is a "Level" indication which keeps triggering
+ * as long as the average RSSI is below the threshold.
+ */
+ WL1251_ACX_LOW_RSSI_TYPE_LEVEL = 0,
+
+ /*
+ * The event is an "Edge" indication which triggers
+ * only when the RSSI threshold is crossed from above.
+ */
+ WL1251_ACX_LOW_RSSI_TYPE_EDGE = 1,
+};
+
+struct acx_low_rssi {
+ struct acx_header header;
+
+ /*
+ * The threshold (in dBm) below (or above after low rssi
+ * indication) which the firmware generates an interrupt to the
+ * host. This parameter is signed.
+ */
+ s8 threshold;
+
+ /*
+ * The weight of the current RSSI sample, before adding the new
+ * sample, that is used to calculate the average RSSI.
+ */
+ u8 weight;
+
+ /*
+ * The number of Beacons/Probe response frames that will be
+ * received before issuing the Low or Regained RSSI event.
+ */
+ u8 depth;
+
+ /*
+ * Configures how the Low RSSI Event is triggered. Refer to
+ * enum wl1251_acx_low_rssi_type for more.
+ */
+ u8 type;
+} __packed;
+
struct acx_beacon_filter_option {
struct acx_header header;
@@ -1164,6 +1207,31 @@ struct wl1251_acx_wr_tbtt_and_dtim {
u8 padding;
} __packed;
+enum wl1251_acx_bet_mode {
+ WL1251_ACX_BET_DISABLE = 0,
+ WL1251_ACX_BET_ENABLE = 1,
+};
+
+struct wl1251_acx_bet_enable {
+ struct acx_header header;
+
+ /*
+ * Specifies if beacon early termination procedure is enabled or
+ * disabled, see enum wl1251_acx_bet_mode.
+ */
+ u8 enable;
+
+ /*
+ * Specifies the maximum number of consecutive beacons that may be
+ * early terminated. After this number is reached at least one full
+ * beacon must be correctly received in FW before beacon ET
+ * resumes. Range 0 - 255.
+ */
+ u8 max_consecutive;
+
+ u8 padding[2];
+} __packed;
+
struct wl1251_acx_ac_cfg {
struct acx_header header;
@@ -1272,10 +1340,10 @@ struct wl1251_acx_tid_cfg {
/* OBSOLETE */
#define WL1251_ACX_INTR_WAKE_ON_HOST BIT(6)
-/* Trace meassge on MBOX #A */
+/* Trace message on MBOX #A */
#define WL1251_ACX_INTR_TRACE_A BIT(7)
-/* Trace meassge on MBOX #B */
+/* Trace message on MBOX #B */
#define WL1251_ACX_INTR_TRACE_B BIT(8)
/* Command processing completion */
@@ -1393,6 +1461,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl);
int wl1251_acx_bcn_dtim_options(struct wl1251 *wl);
int wl1251_acx_aid(struct wl1251 *wl, u16 aid);
int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask);
+int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
+ u8 depth, enum wl1251_acx_low_rssi_type type);
int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble);
int wl1251_acx_cts_protect(struct wl1251 *wl,
enum acx_ctsprotect_type ctsprotect);
@@ -1401,6 +1471,8 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
int wl1251_acx_rate_policies(struct wl1251 *wl);
int wl1251_acx_mem_cfg(struct wl1251 *wl);
int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
+int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
+ u8 max_consecutive);
int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifs, u16 txop);
int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
diff --git a/drivers/net/wireless/wl1251/boot.c b/drivers/net/wireless/wl1251/boot.c
index 61572dfa1f60..d729daf8e841 100644
--- a/drivers/net/wireless/wl1251/boot.c
+++ b/drivers/net/wireless/wl1251/boot.c
@@ -19,7 +19,6 @@
*
*/
-#include <linux/gpio.h>
#include <linux/slab.h>
#include "reg.h"
diff --git a/drivers/net/wireless/wl1251/event.c b/drivers/net/wireless/wl1251/event.c
index 712372e50a87..dfc4579acb06 100644
--- a/drivers/net/wireless/wl1251/event.c
+++ b/drivers/net/wireless/wl1251/event.c
@@ -90,6 +90,24 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
}
}
+ if (wl->vif && wl->rssi_thold) {
+ if (vector & ROAMING_TRIGGER_LOW_RSSI_EVENT_ID) {
+ wl1251_debug(DEBUG_EVENT,
+ "ROAMING_TRIGGER_LOW_RSSI_EVENT");
+ ieee80211_cqm_rssi_notify(wl->vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ GFP_KERNEL);
+ }
+
+ if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) {
+ wl1251_debug(DEBUG_EVENT,
+ "ROAMING_TRIGGER_REGAINED_RSSI_EVENT");
+ ieee80211_cqm_rssi_notify(wl->vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+ GFP_KERNEL);
+ }
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 7a8762553cdc..12c9e635a6d6 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -52,14 +52,14 @@ void wl1251_disable_interrupts(struct wl1251 *wl)
wl->if_ops->disable_irq(wl);
}
-static void wl1251_power_off(struct wl1251 *wl)
+static int wl1251_power_off(struct wl1251 *wl)
{
- wl->set_power(false);
+ return wl->if_ops->power(wl, false);
}
-static void wl1251_power_on(struct wl1251 *wl)
+static int wl1251_power_on(struct wl1251 *wl)
{
- wl->set_power(true);
+ return wl->if_ops->power(wl, true);
}
static int wl1251_fetch_firmware(struct wl1251 *wl)
@@ -152,9 +152,12 @@ static void wl1251_fw_wakeup(struct wl1251 *wl)
static int wl1251_chip_wakeup(struct wl1251 *wl)
{
- int ret = 0;
+ int ret;
+
+ ret = wl1251_power_on(wl);
+ if (ret < 0)
+ return ret;
- wl1251_power_on(wl);
msleep(WL1251_POWER_ON_SLEEP);
wl->if_ops->reset(wl);
@@ -372,7 +375,7 @@ out:
mutex_unlock(&wl->mutex);
}
-static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1251 *wl = hw->priv;
unsigned long flags;
@@ -398,8 +401,6 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
wl->tx_queue_stopped = true;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
-
- return NETDEV_TX_OK;
}
static int wl1251_op_start(struct ieee80211_hw *hw)
@@ -499,6 +500,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
wl->psm = 0;
wl->tx_queue_stopped = false;
wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
+ wl->rssi_thold = 0;
wl->channel = WL1251_DEFAULT_CHANNEL;
wl1251_debugfs_reset(wl);
@@ -956,6 +958,16 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if (changed & BSS_CHANGED_CQM) {
+ ret = wl1251_acx_low_rssi(wl, bss_conf->cqm_rssi_thold,
+ WL1251_DEFAULT_LOW_RSSI_WEIGHT,
+ WL1251_DEFAULT_LOW_RSSI_DEPTH,
+ WL1251_ACX_LOW_RSSI_TYPE_EDGE);
+ if (ret < 0)
+ goto out;
+ wl->rssi_thold = bss_conf->cqm_rssi_thold;
+ }
+
if (changed & BSS_CHANGED_BSSID) {
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
@@ -1036,6 +1048,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON) {
beacon = ieee80211_beacon_get(hw, vif);
+ if (!beacon)
+ goto out_sleep;
+
ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data,
beacon->len);
@@ -1307,9 +1322,11 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_BEACON_FILTER |
- IEEE80211_HW_SUPPORTS_UAPSD;
+ IEEE80211_HW_SUPPORTS_UAPSD |
+ IEEE80211_HW_SUPPORTS_CQM_RSSI;
- wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
@@ -1371,6 +1388,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
wl->psm_requested = false;
wl->tx_queue_stopped = false;
wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
+ wl->rssi_thold = 0;
wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
wl->vif = NULL;
diff --git a/drivers/net/wireless/wl1251/ps.c b/drivers/net/wireless/wl1251/ps.c
index 5ed47c8373d2..9cc514703d2a 100644
--- a/drivers/net/wireless/wl1251/ps.c
+++ b/drivers/net/wireless/wl1251/ps.c
@@ -58,7 +58,6 @@ void wl1251_ps_elp_sleep(struct wl1251 *wl)
unsigned long delay;
if (wl->psm) {
- cancel_delayed_work(&wl->elp_work);
delay = msecs_to_jiffies(ELP_ENTRY_DELAY);
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, delay);
}
@@ -69,6 +68,9 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
unsigned long timeout, start;
u32 elp_reg;
+ if (delayed_work_pending(&wl->elp_work))
+ cancel_delayed_work(&wl->elp_work);
+
if (!wl->elp)
return 0;
@@ -102,38 +104,6 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
return 0;
}
-static int wl1251_ps_set_elp(struct wl1251 *wl, bool enable)
-{
- int ret;
-
- if (enable) {
- wl1251_debug(DEBUG_PSM, "sleep auth psm/elp");
-
- ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
- if (ret < 0)
- return ret;
-
- wl1251_ps_elp_sleep(wl);
- } else {
- wl1251_debug(DEBUG_PSM, "sleep auth cam");
-
- /*
- * When the target is in ELP, we can only
- * access the ELP control register. Thus,
- * we have to wake the target up before
- * changing the power authorization.
- */
-
- wl1251_ps_elp_wakeup(wl);
-
- ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
{
int ret;
@@ -153,11 +123,16 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
if (ret < 0)
return ret;
+ ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_ENABLE,
+ WL1251_DEFAULT_BET_CONSECUTIVE);
+ if (ret < 0)
+ return ret;
+
ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
if (ret < 0)
return ret;
- ret = wl1251_ps_set_elp(wl, true);
+ ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
if (ret < 0)
return ret;
@@ -166,7 +141,14 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
case STATION_ACTIVE_MODE:
default:
wl1251_debug(DEBUG_PSM, "leaving psm");
- ret = wl1251_ps_set_elp(wl, false);
+
+ ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM);
+ if (ret < 0)
+ return ret;
+
+ /* disable BET */
+ ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_DISABLE,
+ WL1251_DEFAULT_BET_CONSECUTIVE);
if (ret < 0)
return ret;
diff --git a/drivers/net/wireless/wl1251/rx.c b/drivers/net/wireless/wl1251/rx.c
index efa53607d5c9..c1b3b3f03da2 100644
--- a/drivers/net/wireless/wl1251/rx.c
+++ b/drivers/net/wireless/wl1251/rx.c
@@ -78,9 +78,10 @@ static void wl1251_rx_status(struct wl1251 *wl,
*/
wl->noise = desc->rssi - desc->snr / 2;
- status->freq = ieee80211_channel_to_frequency(desc->channel);
+ status->freq = ieee80211_channel_to_frequency(desc->channel,
+ status->band);
- status->flag |= RX_FLAG_TSFT;
+ status->flag |= RX_FLAG_MACTIME_MPDU;
if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
@@ -95,8 +96,52 @@ static void wl1251_rx_status(struct wl1251 *wl,
if (unlikely(!(desc->flags & RX_DESC_VALID_FCS)))
status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ switch (desc->rate) {
+ /* skip 1 and 12 Mbps because they have the same value 0x0a */
+ case RATE_2MBPS:
+ status->rate_idx = 1;
+ break;
+ case RATE_5_5MBPS:
+ status->rate_idx = 2;
+ break;
+ case RATE_11MBPS:
+ status->rate_idx = 3;
+ break;
+ case RATE_6MBPS:
+ status->rate_idx = 4;
+ break;
+ case RATE_9MBPS:
+ status->rate_idx = 5;
+ break;
+ case RATE_18MBPS:
+ status->rate_idx = 7;
+ break;
+ case RATE_24MBPS:
+ status->rate_idx = 8;
+ break;
+ case RATE_36MBPS:
+ status->rate_idx = 9;
+ break;
+ case RATE_48MBPS:
+ status->rate_idx = 10;
+ break;
+ case RATE_54MBPS:
+ status->rate_idx = 11;
+ break;
+ }
+
+ /* for 1 and 12 Mbps we have to check the modulation */
+ if (desc->rate == RATE_1MBPS) {
+ if (!(desc->mod_pre & OFDM_RATE_BIT))
+ /* CCK -> RATE_1MBPS */
+ status->rate_idx = 0;
+ else
+ /* OFDM -> RATE_12MBPS */
+ status->rate_idx = 6;
+ }
- /* FIXME: set status->rate_idx */
+ if (desc->mod_pre & SHORT_PREAMBLE_BIT)
+ status->flag |= RX_FLAG_SHORTPRE;
}
static void wl1251_rx_body(struct wl1251 *wl,
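Note: the switch above maps the firmware RX descriptor rate code to a mac80211 rate index; 1 Mbps (CCK) and 12 Mbps (OFDM) share code 0x0a and are disambiguated by the OFDM bit in mod_pre. As a sketch, the chosen rate_idx values are assumed to index a 2.4 GHz bitrate table ordered like this (the actual table is registered elsewhere in the driver):

/* Illustrative only: rate_idx -> bitrate in kbps */
static const int wl1251_rate_idx_to_kbps[] = {
	1000, 2000, 5500, 11000,	/* CCK:  idx 0..3  */
	6000, 9000, 12000, 18000,	/* OFDM: idx 4..7  */
	24000, 36000, 48000, 54000,	/* OFDM: idx 8..11 */
};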
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
index 74ba9ced5393..d550b5e68d3c 100644
--- a/drivers/net/wireless/wl1251/sdio.c
+++ b/drivers/net/wireless/wl1251/sdio.c
@@ -26,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/wl12xx.h>
#include <linux/irq.h>
+#include <linux/pm_runtime.h>
#include "wl1251.h"
@@ -42,8 +43,6 @@ struct wl1251_sdio {
u32 elp_val;
};
-static struct wl12xx_platform_data *wl12xx_board_data;
-
static struct sdio_func *wl_to_func(struct wl1251 *wl)
{
struct wl1251_sdio *wl_sdio = wl->if_priv;
@@ -171,8 +170,42 @@ static void wl1251_disable_line_irq(struct wl1251 *wl)
return disable_irq(wl->irq);
}
-static void wl1251_sdio_set_power(bool enable)
+static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
{
+ struct sdio_func *func = wl_to_func(wl);
+ int ret;
+
+ if (enable) {
+ /*
+ * Power is controlled by runtime PM, but we still call the board
+ * callback in case it wants to do any additional setup, for
+ * example enabling the clock buffer for the module.
+ */
+ if (wl->set_power)
+ wl->set_power(true);
+
+ ret = pm_runtime_get_sync(&func->dev);
+ if (ret < 0)
+ goto out;
+
+ sdio_claim_host(func);
+ sdio_enable_func(func);
+ sdio_release_host(func);
+ } else {
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+
+ ret = pm_runtime_put_sync(&func->dev);
+ if (ret < 0)
+ goto out;
+
+ if (wl->set_power)
+ wl->set_power(false);
+ }
+
+out:
+ return ret;
}
static struct wl1251_if_operations wl1251_sdio_ops = {
@@ -181,30 +214,7 @@ static struct wl1251_if_operations wl1251_sdio_ops = {
.write_elp = wl1251_sdio_write_elp,
.read_elp = wl1251_sdio_read_elp,
.reset = wl1251_sdio_reset,
-};
-
-static int wl1251_platform_probe(struct platform_device *pdev)
-{
- if (pdev->id != -1) {
- wl1251_error("can only handle single device");
- return -ENODEV;
- }
-
- wl12xx_board_data = pdev->dev.platform_data;
- return 0;
-}
-
-/*
- * Dummy platform_driver for passing platform_data to this driver,
- * until we have a way to pass this through SDIO subsystem or
- * some other way.
- */
-static struct platform_driver wl1251_platform_driver = {
- .driver = {
- .name = "wl1251_data",
- .owner = THIS_MODULE,
- },
- .probe = wl1251_platform_probe,
+ .power = wl1251_sdio_set_power,
};
static int wl1251_sdio_probe(struct sdio_func *func,
@@ -214,6 +224,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
struct wl1251 *wl;
struct ieee80211_hw *hw;
struct wl1251_sdio *wl_sdio;
+ const struct wl12xx_platform_data *wl12xx_board_data;
hw = wl1251_alloc_hw();
if (IS_ERR(hw))
@@ -239,9 +250,9 @@ static int wl1251_sdio_probe(struct sdio_func *func,
wl_sdio->func = func;
wl->if_priv = wl_sdio;
wl->if_ops = &wl1251_sdio_ops;
- wl->set_power = wl1251_sdio_set_power;
- if (wl12xx_board_data != NULL) {
+ wl12xx_board_data = wl12xx_get_platform_data();
+ if (!IS_ERR(wl12xx_board_data)) {
wl->set_power = wl12xx_board_data->set_power;
wl->irq = wl12xx_board_data->irq;
wl->use_eeprom = wl12xx_board_data->use_eeprom;
@@ -273,6 +284,10 @@ static int wl1251_sdio_probe(struct sdio_func *func,
goto out_free_irq;
sdio_set_drvdata(func, wl);
+
+ /* Tell PM core that we don't need the card to be powered now */
+ pm_runtime_put_noidle(&func->dev);
+
return ret;
out_free_irq:
@@ -294,6 +309,9 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
struct wl1251 *wl = sdio_get_drvdata(func);
struct wl1251_sdio *wl_sdio = wl->if_priv;
+ /* Undo the runtime PM usage-count decrement done in wl1251_sdio_probe */
+ pm_runtime_get_noresume(&func->dev);
+
if (wl->irq)
free_irq(wl->irq, wl);
kfree(wl_sdio);
@@ -305,23 +323,37 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
sdio_release_host(func);
}
+static int wl1251_suspend(struct device *dev)
+{
+ /*
+ * Tell MMC/SDIO core it's OK to power down the card
+ * (if it isn't already), but not to remove it completely.
+ */
+ return 0;
+}
+
+static int wl1251_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops wl1251_sdio_pm_ops = {
+ .suspend = wl1251_suspend,
+ .resume = wl1251_resume,
+};
+
static struct sdio_driver wl1251_sdio_driver = {
.name = "wl1251_sdio",
.id_table = wl1251_devices,
.probe = wl1251_sdio_probe,
.remove = __devexit_p(wl1251_sdio_remove),
+ .drv.pm = &wl1251_sdio_pm_ops,
};
static int __init wl1251_sdio_init(void)
{
int err;
- err = platform_driver_register(&wl1251_platform_driver);
- if (err) {
- wl1251_error("failed to register platform driver: %d", err);
- return err;
- }
-
err = sdio_register_driver(&wl1251_sdio_driver);
if (err)
wl1251_error("failed to register sdio driver: %d", err);
@@ -331,7 +363,6 @@ static int __init wl1251_sdio_init(void)
static void __exit wl1251_sdio_exit(void)
{
sdio_unregister_driver(&wl1251_sdio_driver);
- platform_driver_unregister(&wl1251_platform_driver);
wl1251_notice("unloaded");
}
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
index 88fa8e69d0d1..ac872b38960f 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -215,12 +215,21 @@ static void wl1251_spi_disable_irq(struct wl1251 *wl)
return disable_irq(wl->irq);
}
+static int wl1251_spi_set_power(struct wl1251 *wl, bool enable)
+{
+ if (wl->set_power)
+ wl->set_power(enable);
+
+ return 0;
+}
+
static const struct wl1251_if_operations wl1251_spi_ops = {
.read = wl1251_spi_read,
.write = wl1251_spi_write,
.reset = wl1251_spi_reset_wake,
.enable_irq = wl1251_spi_enable_irq,
.disable_irq = wl1251_spi_disable_irq,
+ .power = wl1251_spi_set_power,
};
static int __devinit wl1251_spi_probe(struct spi_device *spi)
diff --git a/drivers/net/wireless/wl1251/tx.c b/drivers/net/wireless/wl1251/tx.c
index 554b4f9a3d3e..28121c590a2b 100644
--- a/drivers/net/wireless/wl1251/tx.c
+++ b/drivers/net/wireless/wl1251/tx.c
@@ -213,16 +213,30 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
wl1251_debug(DEBUG_TX, "skb offset %d", offset);
/* check whether the current skb can be used */
- if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
- unsigned char *src = skb->data;
+ if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
+ struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
+ GFP_KERNEL);
+
+ if (unlikely(newskb == NULL)) {
+ wl1251_error("Can't allocate skb!");
+ return -EINVAL;
+ }
- /* align the buffer on a 4-byte boundary */
+ tx_hdr = (struct tx_double_buffer_desc *) newskb->data;
+
+ dev_kfree_skb_any(skb);
+ wl->tx_frames[tx_hdr->id] = skb = newskb;
+
+ offset = (4 - (long)skb->data) & 0x03;
+ wl1251_debug(DEBUG_TX, "new skb offset %d", offset);
+ }
+
+ /* align the buffer on a 4-byte boundary */
+ if (offset) {
+ unsigned char *src = skb->data;
skb_reserve(skb, offset);
memmove(skb->data, src, skb->len);
tx_hdr = (struct tx_double_buffer_desc *) skb->data;
- } else {
- wl1251_info("No handler, fixme!");
- return -EINVAL;
}
}
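Note: the TX path above needs the double-buffer descriptor to start on a 4-byte boundary; a cloned skb, or one without tailroom, is first copied with three spare bytes, then the payload is shifted forward by the computed offset. A standalone sketch of the same alignment arithmetic, using a plain byte buffer instead of an skb:

#include <stdint.h>
#include <string.h>

/* Bytes 'p' must be advanced to reach the next 4-byte boundary
 * (0 if already aligned) - same expression as the driver's
 * (4 - (long)skb->data) & 0x03. */
static size_t align4_offset(const void *p)
{
	return (4 - (uintptr_t)p) & 0x03;
}

/* Shift 'len' bytes of payload forward inside a buffer that has at
 * least 3 bytes of slack, so the payload starts 4-byte aligned. */
static uint8_t *align4_payload(uint8_t *buf, size_t len)
{
	size_t off = align4_offset(buf);

	if (off)
		memmove(buf + off, buf, len);
	return buf + off;
}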
@@ -368,7 +382,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
{
struct ieee80211_tx_info *info;
struct sk_buff *skb;
- int hdrlen, ret;
+ int hdrlen;
u8 *frame;
skb = wl->tx_frames[result->id];
@@ -407,40 +421,12 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
ieee80211_tx_status(wl->hw, skb);
wl->tx_frames[result->id] = NULL;
-
- if (wl->tx_queue_stopped) {
- wl1251_debug(DEBUG_TX, "cb: queue was stopped");
-
- skb = skb_dequeue(&wl->tx_queue);
-
- /* The skb can be NULL because tx_work might have been
- scheduled before the queue was stopped making the
- queue empty */
-
- if (skb) {
- ret = wl1251_tx_frame(wl, skb);
- if (ret == -EBUSY) {
- /* firmware buffer is still full */
- wl1251_debug(DEBUG_TX, "cb: fw buffer "
- "still full");
- skb_queue_head(&wl->tx_queue, skb);
- return;
- } else if (ret < 0) {
- dev_kfree_skb(skb);
- return;
- }
- }
-
- wl1251_debug(DEBUG_TX, "cb: waking queues");
- ieee80211_wake_queues(wl->hw);
- wl->tx_queue_stopped = false;
- }
}
/* Called upon reception of a TX complete interrupt */
void wl1251_tx_complete(struct wl1251 *wl)
{
- int i, result_index, num_complete = 0;
+ int i, result_index, num_complete = 0, queue_len;
struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
unsigned long flags;
@@ -471,18 +457,22 @@ void wl1251_tx_complete(struct wl1251 *wl)
}
}
- if (wl->tx_queue_stopped
- &&
- skb_queue_len(&wl->tx_queue) <= WL1251_TX_QUEUE_LOW_WATERMARK){
+ queue_len = skb_queue_len(&wl->tx_queue);
- /* firmware buffer has space, restart queues */
+ if ((num_complete > 0) && (queue_len > 0)) {
+ /* firmware buffer has space, reschedule tx_work */
+ wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ }
+
+ if (wl->tx_queue_stopped &&
+ queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
+ /* tx_queue has space, restart queues */
wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
spin_lock_irqsave(&wl->wl_lock, flags);
ieee80211_wake_queues(wl->hw);
wl->tx_queue_stopped = false;
spin_unlock_irqrestore(&wl->wl_lock, flags);
- ieee80211_queue_work(wl->hw, &wl->tx_work);
-
}
/* Every completed frame needs to be acknowledged */
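Note: the completion handler above implements a low-watermark scheme: tx_work is rescheduled whenever completions freed firmware space while packets are still queued, and the mac80211 queues are only re-awakened once the driver queue drains to WL1251_TX_QUEUE_LOW_WATERMARK. A generic sketch of that flow-control pattern, with hypothetical watermark values:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical watermarks, for illustration only. */
#define TXQ_HIGH_WATERMARK	10
#define TXQ_LOW_WATERMARK	5

struct txq_state {
	size_t queued;
	bool stopped;
};

/* Called when the host enqueues a frame. */
static void txq_enqueue(struct txq_state *q)
{
	if (++q->queued > TXQ_HIGH_WATERMARK)
		q->stopped = true;	/* stop accepting new frames */
}

/* Called when the device reports completed frames. */
static void txq_complete(struct txq_state *q, size_t done)
{
	q->queued = (done < q->queued) ? q->queued - done : 0;
	if (q->stopped && q->queued <= TXQ_LOW_WATERMARK)
		q->stopped = false;	/* wake the upper-layer queues */
}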
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index e113d4c1fb35..bb23cd522b22 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
@@ -256,6 +256,7 @@ struct wl1251_if_operations {
void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len);
void (*read_elp)(struct wl1251 *wl, int addr, u32 *val);
void (*write_elp)(struct wl1251 *wl, int addr, u32 val);
+ int (*power)(struct wl1251 *wl, bool enable);
void (*reset)(struct wl1251 *wl);
void (*enable_irq)(struct wl1251 *wl);
void (*disable_irq)(struct wl1251 *wl);
@@ -369,6 +370,8 @@ struct wl1251 {
/* in dBm */
int power_level;
+ int rssi_thold;
+
struct wl1251_stats stats;
struct wl1251_debugfs debugfs;
@@ -409,6 +412,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
#define WL1251_DEFAULT_CHANNEL 0
+#define WL1251_DEFAULT_BET_CONSECUTIVE 10
+
#define CHIP_ID_1251_PG10 (0x7010101)
#define CHIP_ID_1251_PG11 (0x7020101)
#define CHIP_ID_1251_PG12 (0x7030101)
@@ -418,7 +423,7 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
#define WL1251_FW_NAME "wl1251-fw.bin"
#define WL1251_NVS_NAME "wl1251-nvs.bin"
-#define WL1251_POWER_ON_SLEEP 10 /* in miliseconds */
+#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
#define WL1251_PART_DOWN_MEM_START 0x0
#define WL1251_PART_DOWN_MEM_SIZE 0x16800
@@ -430,4 +435,7 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
#define WL1251_PART_WORK_REG_START REGISTERS_BASE
#define WL1251_PART_WORK_REG_SIZE REGISTERS_WORK_SIZE
+#define WL1251_DEFAULT_LOW_RSSI_WEIGHT 10
+#define WL1251_DEFAULT_LOW_RSSI_DEPTH 10
+
#endif
diff --git a/drivers/net/wireless/wl1251/wl12xx_80211.h b/drivers/net/wireless/wl1251/wl12xx_80211.h
index 184628027213..1417b1445c3d 100644
--- a/drivers/net/wireless/wl1251/wl12xx_80211.h
+++ b/drivers/net/wireless/wl1251/wl12xx_80211.h
@@ -54,7 +54,6 @@
/* This really should be 8, but not for our firmware */
#define MAX_SUPPORTED_RATES 32
-#define COUNTRY_STRING_LEN 3
#define MAX_COUNTRY_TRIPLETS 32
/* Headers */
@@ -98,7 +97,7 @@ struct country_triplet {
struct wl12xx_ie_country {
struct wl12xx_ie_header header;
- u8 country_string[COUNTRY_STRING_LEN];
+ u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
} __packed;
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index b447559f1db5..692ebff38fc8 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -1,46 +1,68 @@
-menuconfig WL12XX
+menuconfig WL12XX_MENU
tristate "TI wl12xx driver support"
depends on MAC80211 && EXPERIMENTAL
---help---
- This will enable TI wl12xx driver support. The drivers make
- use of the mac80211 stack.
+ This will enable TI wl12xx driver support for the following chips:
+ wl1271 and wl1273.
+ The drivers make use of the mac80211 stack.
-config WL1271
- tristate "TI wl1271 support"
- depends on WL12XX && GENERIC_HARDIRQS
+config WL12XX
+ tristate "TI wl12xx support"
+ depends on WL12XX_MENU && GENERIC_HARDIRQS
depends on INET
select FW_LOADER
select CRC7
---help---
- This module adds support for wireless adapters based on the
- TI wl1271 chipset.
+ This module adds support for wireless adapters based on TI wl1271 and
+ TI wl1273 chipsets. This module does *not* include support for wl1251.
+ For wl1251 support, use the separate wl1251 driver instead.
- If you choose to build a module, it'll be called wl1271. Say N if
+ If you choose to build a module, it will be called wl12xx. Say N if
unsure.
-config WL1271_SPI
- tristate "TI wl1271 SPI support"
- depends on WL1271 && SPI_MASTER
+config WL12XX_HT
+ bool "TI wl12xx 802.11 HT support (EXPERIMENTAL)"
+ depends on WL12XX && EXPERIMENTAL
+ default n
+ ---help---
+ This will enable 802.11 HT support in the wl12xx module.
+
+ This option is temporary because the HT code is incomplete and
+ still being tested.
+
+config WL12XX_SPI
+ tristate "TI wl12xx SPI support"
+ depends on WL12XX && SPI_MASTER
---help---
This module adds support for the SPI interface of adapters using
- TI wl1271 chipset. Select this if your platform is using
+ TI wl12xx chipsets. Select this if your platform is using
the SPI bus.
- If you choose to build a module, it'll be called wl1251_spi.
+ If you choose to build a module, it'll be called wl12xx_spi.
Say N if unsure.
-config WL1271_SDIO
- tristate "TI wl1271 SDIO support"
- depends on WL1271 && MMC
+config WL12XX_SDIO
+ tristate "TI wl12xx SDIO support"
+ depends on WL12XX && MMC
---help---
This module adds support for the SDIO interface of adapters using
- TI wl1271 chipset. Select this if your platform is using
+ TI wl12xx chipsets. Select this if your platform is using
the SDIO bus.
- If you choose to build a module, it'll be called
- wl1271_sdio. Say N if unsure.
+ If you choose to build a module, it'll be called wl12xx_sdio.
+ Say N if unsure.
+
+config WL12XX_SDIO_TEST
+ tristate "TI wl12xx SDIO testing support"
+ depends on WL12XX && MMC && WL12XX_SDIO
+ default n
+ ---help---
+ This module adds support for SDIO bus testing with the
+ TI wl12xx chipsets. You probably don't want this unless you are
+ testing a new hardware platform. Select this if you want to test the
+ SDIO bus which is connected to the wl12xx chip.
config WL12XX_PLATFORM_DATA
bool
- depends on WL1271_SDIO != n
+ depends on WL12XX_SDIO != n || WL1251_SDIO != n
default y
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 3a807444b2af..521c0414e52e 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -1,12 +1,16 @@
-wl1271-objs = wl1271_main.o wl1271_cmd.o wl1271_io.o \
- wl1271_event.o wl1271_tx.o wl1271_rx.o \
- wl1271_ps.o wl1271_acx.o wl1271_boot.o \
- wl1271_init.o wl1271_debugfs.o wl1271_scan.o
-
-wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o
-obj-$(CONFIG_WL1271) += wl1271.o
-obj-$(CONFIG_WL1271_SPI) += wl1271_spi.o
-obj-$(CONFIG_WL1271_SDIO) += wl1271_sdio.o
+wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
+ boot.o init.o debugfs.o scan.o
+
+wl12xx_spi-objs = spi.o
+wl12xx_sdio-objs = sdio.o
+wl12xx_sdio_test-objs = sdio_test.o
+
+wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
+obj-$(CONFIG_WL12XX) += wl12xx.o
+obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o
+obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o
+
+obj-$(CONFIG_WL12XX_SDIO_TEST) += wl12xx_sdio_test.o
# small builtin driver bit
obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/acx.c
index 618993405262..a3db755ceeda 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -21,7 +21,7 @@
*
*/
-#include "wl1271_acx.h"
+#include "acx.h"
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -29,10 +29,10 @@
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include "wl1271.h"
+#include "wl12xx.h"
#include "wl12xx_80211.h"
-#include "wl1271_reg.h"
-#include "wl1271_ps.h"
+#include "reg.h"
+#include "ps.h"
int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
{
@@ -751,10 +751,10 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
return 0;
}
-int wl1271_acx_rate_policies(struct wl1271 *wl)
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
{
- struct acx_rate_policy *acx;
- struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
+ struct acx_sta_rate_policy *acx;
+ struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
int idx = 0;
int ret = 0;
@@ -783,6 +783,10 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
+ wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
+ acx->rate_class[ACX_TX_BASIC_RATE].enabled_rates,
+ acx->rate_class[ACX_TX_AP_FULL_RATE].enabled_rates);
+
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("Setting of rate policies failed: %d", ret);
@@ -794,6 +798,38 @@ out:
return ret;
}
+int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
+ u8 idx)
+{
+ struct acx_ap_rate_policy *acx;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx ap rate policy");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->rate_policy.enabled_rates = cpu_to_le32(c->enabled_rates);
+ acx->rate_policy.short_retry_limit = c->short_retry_limit;
+ acx->rate_policy.long_retry_limit = c->long_retry_limit;
+ acx->rate_policy.aflags = c->aflags;
+
+ acx->rate_policy_idx = cpu_to_le32(idx);
+
+ ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("Setting of ap rate policy failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifsn, u16 txop)
{
@@ -862,7 +898,7 @@ out:
return ret;
}
-int wl1271_acx_frag_threshold(struct wl1271 *wl)
+int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold)
{
struct acx_frag_threshold *acx;
int ret = 0;
@@ -876,7 +912,7 @@ int wl1271_acx_frag_threshold(struct wl1271 *wl)
goto out;
}
- acx->frag_threshold = cpu_to_le16(wl->conf.tx.frag_threshold);
+ acx->frag_threshold = cpu_to_le16(frag_threshold);
ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("Setting of frag threshold failed: %d", ret);
@@ -915,9 +951,9 @@ out:
return ret;
}
-int wl1271_acx_mem_cfg(struct wl1271 *wl)
+int wl1271_acx_ap_mem_cfg(struct wl1271 *wl)
{
- struct wl1271_acx_config_memory *mem_conf;
+ struct wl1271_acx_ap_config_memory *mem_conf;
int ret;
wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
@@ -929,10 +965,10 @@ int wl1271_acx_mem_cfg(struct wl1271 *wl)
}
/* memory config */
- mem_conf->num_stations = DEFAULT_NUM_STATIONS;
- mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS;
- mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS;
- mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES;
+ mem_conf->num_stations = wl->conf.mem.num_stations;
+ mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num;
+ mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num;
+ mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles;
mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
@@ -947,13 +983,45 @@ out:
return ret;
}
-int wl1271_acx_init_mem_config(struct wl1271 *wl)
+int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
{
+ struct wl1271_acx_sta_config_memory *mem_conf;
int ret;
- ret = wl1271_acx_mem_cfg(wl);
- if (ret < 0)
- return ret;
+ wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
+
+ mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
+ if (!mem_conf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* memory config */
+ mem_conf->num_stations = wl->conf.mem.num_stations;
+ mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num;
+ mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num;
+ mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles;
+ mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
+ mem_conf->dyn_mem_enable = wl->conf.mem.dynamic_memory;
+ mem_conf->tx_free_req = wl->conf.mem.min_req_tx_blocks;
+ mem_conf->rx_free_req = wl->conf.mem.min_req_rx_blocks;
+ mem_conf->tx_min = wl->conf.mem.tx_min;
+
+ ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
+ sizeof(*mem_conf));
+ if (ret < 0) {
+ wl1271_warning("wl1271 mem config failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(mem_conf);
+ return ret;
+}
+
+int wl1271_acx_init_mem_config(struct wl1271 *wl)
+{
+ int ret;
wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map),
GFP_KERNEL);
@@ -1041,7 +1109,7 @@ out:
return ret;
}
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, __be32 address)
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
{
struct wl1271_acx_arp_filter *acx;
int ret;
@@ -1057,7 +1125,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, __be32 address)
acx->version = ACX_IPV4_VERSION;
acx->enable = enable;
- if (enable == true)
+ if (enable)
memcpy(acx->address, &address, ACX_IPV4_ADDR_SIZE);
ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER,
@@ -1226,6 +1294,175 @@ out:
return ret;
}
+int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation)
+{
+ struct wl1271_acx_ht_capabilities *acx;
+ u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ int ret = 0;
+ u32 ht_capabilites = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx ht capabilities setting");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Allow HT operation? */
+ if (allow_ht_operation) {
+ ht_capabilites =
+ WL1271_ACX_FW_CAP_HT_OPERATION;
+ if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
+ ht_capabilites |=
+ WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ ht_capabilites |=
+ WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
+ if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
+ ht_capabilites |=
+ WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
+
+ /* get data from A-MPDU parameters field */
+ acx->ampdu_max_length = ht_cap->ampdu_factor;
+ acx->ampdu_min_spacing = ht_cap->ampdu_density;
+ }
+
+ memcpy(acx->mac_address, mac_address, ETH_ALEN);
+ acx->ht_capabilites = cpu_to_le32(ht_capabilites);
+
+ ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ht capabilities setting failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_set_ht_information(struct wl1271 *wl,
+ u16 ht_operation_mode)
+{
+ struct wl1271_acx_ht_information *acx;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx ht information setting");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->ht_protection =
+ (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
+ acx->rifs_mode = 0;
+ acx->gf_protection =
+ !!(ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ acx->ht_tx_burst_limit = 0;
+ acx->dual_cts_protection = 0;
+
+ ret = wl1271_cmd_configure(wl, ACX_HT_BSS_OPERATION, acx, sizeof(*acx));
+
+ if (ret < 0) {
+ wl1271_warning("acx ht information setting failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+/* Configure BA session initiator/receiver parameters setting in the FW. */
+int wl1271_acx_set_ba_session(struct wl1271 *wl,
+ enum ieee80211_back_parties direction,
+ u8 tid_index, u8 policy)
+{
+ struct wl1271_acx_ba_session_policy *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx ba session setting");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* ANY role */
+ acx->role_id = 0xff;
+ acx->tid = tid_index;
+ acx->enable = policy;
+ acx->ba_direction = direction;
+
+ switch (direction) {
+ case WLAN_BACK_INITIATOR:
+ acx->win_size = wl->conf.ht.tx_ba_win_size;
+ acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
+ break;
+ case WLAN_BACK_RECIPIENT:
+ acx->win_size = RX_BA_WIN_SIZE;
+ acx->inactivity_timeout = 0;
+ break;
+ default:
+ wl1271_error("Incorrect acx command id=%x\n", direction);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wl1271_cmd_configure(wl,
+ ACX_BA_SESSION_POLICY_CFG,
+ acx,
+ sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ba session setting failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+/* setup BA session receiver setting in the FW. */
+int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
+ bool enable)
+{
+ struct wl1271_acx_ba_receiver_setup *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx ba receiver session setting");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Single link for now */
+ acx->link_id = 1;
+ acx->tid = tid_index;
+ acx->enable = enable;
+ acx->win_size = 0;
+ acx->ssn = ssn;
+
+ ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
+ sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ba receiver session failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
{
struct wl1271_acx_fw_tsf_information *tsf_info;
@@ -1251,3 +1488,82 @@ out:
kfree(tsf_info);
return ret;
}
+
+int wl1271_acx_max_tx_retry(struct wl1271 *wl)
+{
+ struct wl1271_acx_max_tx_retry *acx = NULL;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx max tx retry");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+
+ acx->max_tx_retry = cpu_to_le16(wl->conf.tx.ap_max_tx_retries);
+
+ ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx max tx retry failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1271_acx_config_ps(struct wl1271 *wl)
+{
+ struct wl1271_acx_config_ps *config_ps;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx config ps");
+
+ config_ps = kzalloc(sizeof(*config_ps), GFP_KERNEL);
+ if (!config_ps) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
+ config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
+ config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
+
+ ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
+ sizeof(*config_ps));
+
+ if (ret < 0) {
+ wl1271_warning("acx config ps failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(config_ps);
+ return ret;
+}
+
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
+{
+ struct wl1271_acx_inconnection_sta *acx = NULL;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx set inconnaction sta %pM", addr);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+
+ memcpy(acx->addr, addr, ETH_ALEN);
+
+ ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
+ acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx set inconnaction sta failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/acx.h
index ebb341d36e8c..dd19b01d807b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -22,11 +22,11 @@
*
*/
-#ifndef __WL1271_ACX_H__
-#define __WL1271_ACX_H__
+#ifndef __ACX_H__
+#define __ACX_H__
-#include "wl1271.h"
-#include "wl1271_cmd.h"
+#include "wl12xx.h"
+#include "cmd.h"
/*************************************************************************
@@ -47,9 +47,9 @@
#define WL1271_ACX_INTR_HW_AVAILABLE BIT(5)
/* The MISC bit is used for aggregation of RX, TxComplete and TX rate update */
#define WL1271_ACX_INTR_DATA BIT(6)
-/* Trace meassge on MBOX #A */
+/* Trace message on MBOX #A */
#define WL1271_ACX_INTR_TRACE_A BIT(7)
-/* Trace meassge on MBOX #B */
+/* Trace message on MBOX #B */
#define WL1271_ACX_INTR_TRACE_B BIT(8)
#define WL1271_ACX_INTR_ALL 0xFFFFFFFF
@@ -61,7 +61,8 @@
WL1271_ACX_INTR_HW_AVAILABLE | \
WL1271_ACX_INTR_DATA)
-#define WL1271_INTR_MASK (WL1271_ACX_INTR_EVENT_A | \
+#define WL1271_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
+ WL1271_ACX_INTR_EVENT_A | \
WL1271_ACX_INTR_EVENT_B | \
WL1271_ACX_INTR_HW_AVAILABLE | \
WL1271_ACX_INTR_DATA)
@@ -132,7 +133,6 @@ enum {
#define DEFAULT_UCAST_PRIORITY 0
#define DEFAULT_RX_Q_PRIORITY 0
-#define DEFAULT_NUM_STATIONS 1
#define DEFAULT_RXQ_PRIORITY 0 /* low 0 .. 15 high */
#define DEFAULT_RXQ_TYPE 0x07 /* All frames, Data/Ctrl/Mgmt */
#define TRACE_BUFFER_MAX_SIZE 256
@@ -746,13 +746,23 @@ struct acx_rate_class {
#define ACX_TX_BASIC_RATE 0
#define ACX_TX_AP_FULL_RATE 1
#define ACX_TX_RATE_POLICY_CNT 2
-struct acx_rate_policy {
+struct acx_sta_rate_policy {
struct acx_header header;
__le32 rate_class_cnt;
struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
} __packed;
+
+#define ACX_TX_AP_MODE_MGMT_RATE 4
+#define ACX_TX_AP_MODE_BCST_RATE 5
+struct acx_ap_rate_policy {
+ struct acx_header header;
+
+ __le32 rate_policy_idx;
+ struct acx_rate_class rate_policy;
+} __packed;
+
struct acx_ac_cfg {
struct acx_header header;
u8 ac;
@@ -786,12 +796,9 @@ struct acx_tx_config_options {
__le16 tx_compl_threshold; /* number of packets */
} __packed;
-#define ACX_RX_MEM_BLOCKS 70
-#define ACX_TX_MIN_MEM_BLOCKS 40
#define ACX_TX_DESCRIPTORS 32
-#define ACX_NUM_SSID_PROFILES 1
-struct wl1271_acx_config_memory {
+struct wl1271_acx_ap_config_memory {
struct acx_header header;
u8 rx_mem_block_num;
@@ -801,6 +808,20 @@ struct wl1271_acx_config_memory {
__le32 total_tx_descriptors;
} __packed;
+struct wl1271_acx_sta_config_memory {
+ struct acx_header header;
+
+ u8 rx_mem_block_num;
+ u8 tx_min_mem_block_num;
+ u8 num_stations;
+ u8 num_ssid_profiles;
+ __le32 total_tx_descriptors;
+ u8 dyn_mem_enable;
+ u8 tx_free_req;
+ u8 rx_free_req;
+ u8 tx_min;
+} __packed;
+
struct wl1271_acx_mem_map {
struct acx_header header;
@@ -867,10 +888,15 @@ struct wl1271_acx_bet_enable {
#define ACX_IPV4_VERSION 4
#define ACX_IPV6_VERSION 6
#define ACX_IPV4_ADDR_SIZE 4
+
+/* bitmap of enabled arp_filter features */
+#define ACX_ARP_FILTER_ARP_FILTERING BIT(0)
+#define ACX_ARP_FILTER_AUTO_ARP BIT(1)
+
struct wl1271_acx_arp_filter {
struct acx_header header;
u8 version; /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */
- u8 enable; /* 1 to enable ARP filtering, 0 to disable */
+ u8 enable; /* bitmap of enabled ARP filtering features */
u8 padding[2];
u8 address[16]; /* The configured device IP address - all ARP
requests directed to this IP address will pass
@@ -964,6 +990,140 @@ struct wl1271_acx_rssi_snr_avg_weights {
u8 snr_data;
};
+/*
+ * ACX_PEER_HT_CAP
+ * Configure HT capabilities - declare the capabilities of the peer
+ * we are connected to.
+ */
+struct wl1271_acx_ht_capabilities {
+ struct acx_header header;
+
+ /*
+ * bit 0 - Allow HT Operation
+ * bit 1 - Allow Greenfield format in TX
+ * bit 2 - Allow Short GI in TX
+ * bit 3 - Allow L-SIG TXOP Protection in TX
+ * bit 4 - Allow HT Control fields in TX.
+ * Note, driver will still leave space for HT control in packets
+ * regardless of the value of this field. FW will be responsible
+ * for dropping the HT field from any frame when this bit is set to 0.
+ * bit 5 - Allow RD initiation in TXOP. FW is allowed to initiate RD.
+ * Exact policy setting for this feature is TBD.
+ * Note, this bit can only be set to 1 if bit 3 is set to 1.
+ */
+ __le32 ht_capabilites;
+
+ /*
+ * Indicates to which peer these capabilities apply.
+ * For infrastructure, use ff:ff:ff:ff:ff:ff, which indicates relevance
+ * for all peers.
+ * Only valid for IBSS/DLS operation.
+ */
+ u8 mac_address[ETH_ALEN];
+
+ /*
+ * This is the maximum A-MPDU length supported by the AP. The FW may not
+ * exceed this length when sending A-MPDUs.
+ */
+ u8 ampdu_max_length;
+
+ /* This is the minimal spacing required when sending A-MPDUs to the AP */
+ u8 ampdu_min_spacing;
+} __packed;
+
+/* HT Capabilities FW bit mask mapping */
+#define WL1271_ACX_FW_CAP_HT_OPERATION BIT(0)
+#define WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT BIT(1)
+#define WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS BIT(2)
+#define WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION BIT(3)
+#define WL1271_ACX_FW_CAP_HT_CONTROL_FIELDS BIT(4)
+#define WL1271_ACX_FW_CAP_RD_INITIATION BIT(5)
+
+
+/*
+ * ACX_HT_BSS_OPERATION
+ * Configure HT capabilities - AP rules for behavior in the BSS.
+ */
+struct wl1271_acx_ht_information {
+ struct acx_header header;
+
+ /* Values: 0 - RIFS not allowed, 1 - RIFS allowed */
+ u8 rifs_mode;
+
+ /* Values: 0 - 3 like in spec */
+ u8 ht_protection;
+
+ /* Values: 0 - GF protection not required, 1 - GF protection required */
+ u8 gf_protection;
+
+ /* Values: 0 - TX Burst Limit not required, 1 - TX Burst Limit required */
+ u8 ht_tx_burst_limit;
+
+ /*
+ * Values: 0 - Dual CTS protection not required,
+ * 1 - Dual CTS Protection required
+ * Note: When this value is set to 1, FW will protect every TXOP with an
+ * RTS frame and will not use CTS-to-self regardless of the value of the
+ * ACX_CTS_PROTECTION information element.
+ */
+ u8 dual_cts_protection;
+
+ u8 padding[3];
+} __packed;
+
+#define RX_BA_WIN_SIZE 8
+
+struct wl1271_acx_ba_session_policy {
+ struct acx_header header;
+ /*
+ * Specifies role Id, Range 0-7, 0xFF means ANY role.
+ * Future use. For now this field is irrelevant
+ */
+ u8 role_id;
+ /*
+ * Specifies Link Id, Range 0-31, 0xFF means ANY Link Id.
+ * Not applicable if Role Id is set to ANY.
+ */
+ u8 link_id;
+
+ u8 tid;
+
+ u8 enable;
+
+ /* Window size in number of packets */
+ u16 win_size;
+
+ /*
+ * As initiator: inactivity timeout in time units (TU) of 1024us.
+ * As receiver: reserved.
+ */
+ u16 inactivity_timeout;
+
+ /* Initiator = 1/Receiver = 0 */
+ u8 ba_direction;
+
+ u8 padding[3];
+} __packed;
+
+struct wl1271_acx_ba_receiver_setup {
+ struct acx_header header;
+
+ /* Specifies Link Id, Range 0-31, 0xFF means ANY Link Id */
+ u8 link_id;
+
+ u8 tid;
+
+ u8 enable;
+
+ u8 padding[1];
+
+ /* Window size in number of packets */
+ u16 win_size;
+
+ /* BA session starting sequence number. RANGE 0-FFF */
+ u16 ssn;
+} __packed;
+
struct wl1271_acx_fw_tsf_information {
struct acx_header header;
@@ -975,6 +1135,33 @@ struct wl1271_acx_fw_tsf_information {
u8 padding[3];
} __packed;
+struct wl1271_acx_max_tx_retry {
+ struct acx_header header;
+
+ /*
+ * the number of frame transmission failures before
+ * issuing the aging event.
+ */
+ __le16 max_tx_retry;
+ u8 padding_1[2];
+} __packed;
+
+struct wl1271_acx_config_ps {
+ struct acx_header header;
+
+ u8 exit_retries;
+ u8 enter_retries;
+ u8 padding[2];
+ __le32 null_data_rate;
+} __packed;
+
+struct wl1271_acx_inconnection_sta {
+ struct acx_header header;
+
+ u8 addr[ETH_ALEN];
+ u8 padding1[2];
+} __packed;
+
enum {
ACX_WAKE_UP_CONDITIONS = 0x0002,
ACX_MEM_CFG = 0x0003,
@@ -1026,22 +1213,24 @@ enum {
ACX_RSSI_SNR_WEIGHTS = 0x0052,
ACX_KEEP_ALIVE_MODE = 0x0053,
ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
- ACX_BA_SESSION_RESPONDER_POLICY = 0x0055,
- ACX_BA_SESSION_INITIATOR_POLICY = 0x0056,
+ ACX_BA_SESSION_POLICY_CFG = 0x0055,
+ ACX_BA_SESSION_RX_SETUP = 0x0056,
ACX_PEER_HT_CAP = 0x0057,
ACX_HT_BSS_OPERATION = 0x0058,
ACX_COEX_ACTIVITY = 0x0059,
ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
+ ACX_GEN_FW_CMD = 0x0070,
+ ACX_HOST_IF_CFG_BITMAP = 0x0071,
+ ACX_MAX_TX_FAILURE = 0x0072,
+ ACX_UPDATE_INCONNECTION_STA_LIST = 0x0073,
DOT11_RX_MSDU_LIFE_TIME = 0x1004,
DOT11_CUR_TX_PWR = 0x100D,
DOT11_RX_DOT11_MODE = 0x1012,
DOT11_RTS_THRESHOLD = 0x1013,
DOT11_GROUP_ADDRESS_TBL = 0x1014,
ACX_PM_CONFIG = 0x1016,
-
- MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
-
- MAX_IE = 0xFFFF
+ ACX_CONFIG_PS = 0x1017,
+ ACX_CONFIG_HANGOVER = 0x1018,
};
@@ -1073,26 +1262,42 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
int wl1271_acx_cts_protect(struct wl1271 *wl,
enum acx_ctsprotect_type ctsprotect);
int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_rate_policies(struct wl1271 *wl);
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
+int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
+ u8 idx);
int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifsn, u16 txop);
int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
u8 tsid, u8 ps_scheme, u8 ack_policy,
u32 apsd_conf0, u32 apsd_conf1);
-int wl1271_acx_frag_threshold(struct wl1271 *wl);
+int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold);
int wl1271_acx_tx_config_options(struct wl1271 *wl);
-int wl1271_acx_mem_cfg(struct wl1271 *wl);
+int wl1271_acx_ap_mem_cfg(struct wl1271 *wl);
+int wl1271_acx_sta_mem_cfg(struct wl1271 *wl);
int wl1271_acx_init_mem_config(struct wl1271 *wl);
int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
int wl1271_acx_smart_reflex(struct wl1271 *wl);
int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, __be32 address);
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address);
int wl1271_acx_pm_config(struct wl1271 *wl);
int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
s16 thold, u8 hyst);
int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
+int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
+ struct ieee80211_sta_ht_cap *ht_cap,
+ bool allow_ht_operation);
+int wl1271_acx_set_ht_information(struct wl1271 *wl,
+ u16 ht_operation_mode);
+int wl1271_acx_set_ba_session(struct wl1271 *wl,
+ enum ieee80211_back_parties direction,
+ u8 tid_index, u8 policy);
+int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
+ bool enable);
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
+int wl1271_acx_max_tx_retry(struct wl1271 *wl);
+int wl1271_acx_config_ps(struct wl1271 *wl);
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/boot.c
index b91021242098..6934dffd5174 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -21,14 +21,14 @@
*
*/
-#include <linux/gpio.h>
#include <linux/slab.h>
-#include "wl1271_acx.h"
-#include "wl1271_reg.h"
-#include "wl1271_boot.h"
-#include "wl1271_io.h"
-#include "wl1271_event.h"
+#include "acx.h"
+#include "reg.h"
+#include "boot.h"
+#include "io.h"
+#include "event.h"
+#include "rx.h"
static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
[PART_DOWN] = {
@@ -101,6 +101,22 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
}
+static void wl1271_parse_fw_ver(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
+ &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
+ &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
+ &wl->chip.fw_ver[4]);
+
+ if (ret != 5) {
+ wl1271_warning("fw version incorrect value");
+ memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
+ return;
+ }
+}
+
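Note: wl1271_parse_fw_ver() above splits the firmware version string into five numeric fields after skipping a four-character prefix. A standalone sketch of the same parse, assuming a "Rev "-style prefix (the exact prefix is not shown in this hunk):

#include <stdio.h>
#include <string.h>

#define FW_VER_FIELDS 5

/* Returns 0 on success, -1 if the string does not match. */
static int parse_fw_ver(const char *s, unsigned int ver[FW_VER_FIELDS])
{
	/* skip the 4-byte prefix, e.g. a hypothetical "Rev " marker */
	if (strlen(s) <= 4)
		return -1;

	if (sscanf(s + 4, "%u.%u.%u.%u.%u",
		   &ver[0], &ver[1], &ver[2], &ver[3], &ver[4]) != FW_VER_FIELDS)
		return -1;
	return 0;
}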
static void wl1271_boot_fw_version(struct wl1271 *wl)
{
struct wl1271_static_data static_data;
@@ -108,11 +124,13 @@ static void wl1271_boot_fw_version(struct wl1271 *wl)
wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
false);
- strncpy(wl->chip.fw_ver, static_data.fw_version,
- sizeof(wl->chip.fw_ver));
+ strncpy(wl->chip.fw_ver_str, static_data.fw_version,
+ sizeof(wl->chip.fw_ver_str));
/* make sure the string is NULL-terminated */
- wl->chip.fw_ver[sizeof(wl->chip.fw_ver) - 1] = '\0';
+ wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
+
+ wl1271_parse_fw_ver(wl);
}
static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
@@ -232,7 +250,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
*/
if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
- if (wl->nvs->general_params.dual_mode_select)
+ /* for now 11a is unsupported in AP mode */
+ if (wl->bss_type != BSS_TYPE_AP_BSS &&
+ wl->nvs->general_params.dual_mode_select)
wl->enable_11a = true;
}
@@ -432,6 +452,9 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
PSPOLL_DELIVERY_FAILURE_EVENT_ID |
SOFT_GEMINI_SENSE_EVENT_ID;
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID;
+
ret = wl1271_event_unmask(wl);
if (ret < 0) {
wl1271_error("EVENT mask setting failed");
@@ -465,26 +488,29 @@ static void wl1271_boot_hw_version(struct wl1271 *wl)
fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
wl->hw_pg_ver = (s8)fuse;
+
+ if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
+ wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
}
-int wl1271_boot(struct wl1271 *wl)
+/* uploads NVS and firmware */
+int wl1271_load_firmware(struct wl1271 *wl)
{
int ret = 0;
u32 tmp, clk, pause;
- int ref_clock = wl->ref_clock;
wl1271_boot_hw_version(wl);
- if (ref_clock == 0 || ref_clock == 2 || ref_clock == 4)
+ if (wl->ref_clock == 0 || wl->ref_clock == 2 || wl->ref_clock == 4)
/* ref clk: 19.2/38.4/38.4-XTAL */
clk = 0x3;
- else if (ref_clock == 1 || ref_clock == 3)
+ else if (wl->ref_clock == 1 || wl->ref_clock == 3)
/* ref clk: 26/52 */
clk = 0x5;
else
return -EINVAL;
- if (ref_clock != 0) {
+ if (wl->ref_clock != 0) {
u16 val;
/* Set clock type (open drain) */
val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
@@ -529,8 +555,7 @@ int wl1271_boot(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
- /* 2 */
- clk |= (ref_clock << 1) << 4;
+ clk |= (wl->ref_clock << 1) << 4;
wl1271_write32(wl, DRPW_SCRATCH_START, clk);
wl1271_set_partition(wl, &part_table[PART_WORK]);
@@ -574,6 +599,20 @@ int wl1271_boot(struct wl1271 *wl)
if (ret < 0)
goto out;
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wl1271_load_firmware);
+
+int wl1271_boot(struct wl1271 *wl)
+{
+ int ret;
+
+ /* upload NVS and firmware */
+ ret = wl1271_load_firmware(wl);
+ if (ret)
+ return ret;
+
/* 10.5 start firmware */
ret = wl1271_boot_run_firmware(wl);
if (ret < 0)
@@ -583,8 +622,7 @@ int wl1271_boot(struct wl1271 *wl)
wl1271_boot_enable_interrupts(wl);
/* set the wl1271 default filters */
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+ wl1271_set_default_filters(wl);
wl1271_event_mbox_config(wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/boot.h
index f73b0b15a280..17229b86fc71 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/boot.h
@@ -24,9 +24,10 @@
#ifndef __BOOT_H__
#define __BOOT_H__
-#include "wl1271.h"
+#include "wl12xx.h"
int wl1271_boot(struct wl1271 *wl);
+int wl1271_load_firmware(struct wl1271 *wl);
#define WL1271_NO_SUBBANDS 8
#define WL1271_NO_POWER_LEVELS 4
@@ -58,6 +59,11 @@ struct wl1271_static_data {
#define PG_VER_MASK 0x3c
#define PG_VER_OFFSET 2
+#define PG_MAJOR_VER_MASK 0x3
+#define PG_MAJOR_VER_OFFSET 0x0
+#define PG_MINOR_VER_MASK 0xc
+#define PG_MINOR_VER_OFFSET 0x2
+
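Note: these masks split the PG version byte (read from the fuse register in wl1271_boot_hw_version() above) into major and minor fields; earlier in this patch, chips with a major PG version below 3 get the WL12XX_QUIRK_END_OF_TRANSACTION quirk. A minimal decode sketch using the same masks:

#include <stdint.h>

#define PG_MAJOR_VER_MASK	0x3
#define PG_MAJOR_VER_OFFSET	0x0
#define PG_MINOR_VER_MASK	0xc
#define PG_MINOR_VER_OFFSET	0x2

/* Decode the PG version byte already extracted from the fuse register. */
static void pg_ver_decode(uint8_t pg_ver, unsigned int *major,
			  unsigned int *minor)
{
	*major = (pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET;
	*minor = (pg_ver & PG_MINOR_VER_MASK) >> PG_MINOR_VER_OFFSET;
}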
#define CMD_MBOX_ADDRESS 0x407B4
#define POLARITY_LOW BIT(1)
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 5d3e8485ea4e..f0aa7ab97bf7 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -29,13 +29,14 @@
#include <linux/ieee80211.h>
#include <linux/slab.h>
-#include "wl1271.h"
-#include "wl1271_reg.h"
-#include "wl1271_io.h"
-#include "wl1271_acx.h"
+#include "wl12xx.h"
+#include "reg.h"
+#include "io.h"
+#include "acx.h"
#include "wl12xx_80211.h"
-#include "wl1271_cmd.h"
-#include "wl1271_event.h"
+#include "cmd.h"
+#include "event.h"
+#include "tx.h"
#define WL1271_CMD_FAST_POLL_COUNT 50
@@ -62,6 +63,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
cmd->status = 0;
WARN_ON(len % 4 != 0);
+ WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags));
wl1271_write(wl, wl->cmd_box_addr, buf, len, false);
@@ -221,7 +223,7 @@ int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
* Poll the mailbox event field until any of the bits in the mask is set or a
* timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
*/
-static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
{
u32 events_vector, event;
unsigned long timeout;
@@ -230,7 +232,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
do {
if (time_after(jiffies, timeout)) {
- ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
+ (int)mask);
return -ETIMEDOUT;
}
@@ -248,6 +251,19 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
return 0;
}
+static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+{
+ int ret;
+
+ ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask);
+ if (ret != 0) {
+ ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ return ret;
+ }
+
+ return 0;
+}
+
int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
{
struct wl1271_cmd_join *join;
@@ -271,6 +287,7 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
join->rx_filter_options = cpu_to_le32(wl->rx_filter);
join->bss_type = bss_type;
join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+ join->supported_rate_set = cpu_to_le32(wl->rate_set);
if (wl->band == IEEE80211_BAND_5GHZ)
join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
@@ -288,6 +305,9 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
wl->tx_security_last_seq = 0;
wl->tx_security_seq = 0;
+ wl1271_debug(DEBUG_CMD, "cmd join: basic_rate_set=0x%x, rate_set=0x%x",
+ join->basic_rate_set, join->supported_rate_set);
+
ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd join");
@@ -439,7 +459,7 @@ out:
return ret;
}
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
{
struct wl1271_cmd_ps_params *ps_params = NULL;
int ret = 0;
@@ -453,10 +473,6 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send)
}
ps_params->ps_mode = ps_mode;
- ps_params->send_null_data = send;
- ps_params->retries = wl->conf.conn.psm_entry_nullfunc_retries;
- ps_params->hang_over_period = wl->conf.conn.psm_entry_hangover_period;
- ps_params->null_data_rate = cpu_to_le32(rates);
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
sizeof(*ps_params), 0);
@@ -490,8 +506,8 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
cmd->len = cpu_to_le16(buf_len);
cmd->template_type = template_id;
cmd->enabled_rates = cpu_to_le32(rates);
- cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
- cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
+ cmd->short_retry_limit = wl->conf.tx.tmpl_short_retry_limit;
+ cmd->long_retry_limit = wl->conf.tx.tmpl_long_retry_limit;
cmd->index = index;
if (buf)
@@ -611,6 +627,75 @@ out:
return ret;
}
+struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+ struct sk_buff *skb)
+{
+ int ret;
+
+ if (!skb)
+ skb = ieee80211_ap_probereq_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out;
+
+ wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
+
+ if (wl->band == IEEE80211_BAND_2GHZ)
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
+ skb->data, skb->len, 0,
+ wl->conf.tx.basic_rate);
+ else
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
+ skb->data, skb->len, 0,
+ wl->conf.tx.basic_rate_5);
+
+ if (ret < 0)
+ wl1271_error("Unable to set ap probe request template.");
+
+out:
+ return skb;
+}
+
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
+{
+ int ret;
+ struct wl12xx_arp_rsp_template tmpl;
+ struct ieee80211_hdr_3addr *hdr;
+ struct arphdr *arp_hdr;
+
+ memset(&tmpl, 0, sizeof(tmpl));
+
+ /* mac80211 header */
+ hdr = &tmpl.hdr;
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_DATA |
+ IEEE80211_FCTL_TODS);
+ memcpy(hdr->addr1, wl->vif->bss_conf.bssid, ETH_ALEN);
+ memcpy(hdr->addr2, wl->vif->addr, ETH_ALEN);
+ memset(hdr->addr3, 0xff, ETH_ALEN);
+
+ /* llc layer */
+ memcpy(tmpl.llc_hdr, rfc1042_header, sizeof(rfc1042_header));
+ tmpl.llc_type = cpu_to_be16(ETH_P_ARP);
+
+ /* arp header */
+ arp_hdr = &tmpl.arp_hdr;
+ arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
+ arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
+ arp_hdr->ar_hln = ETH_ALEN;
+ arp_hdr->ar_pln = 4;
+ arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
+
+ /* arp payload */
+ memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
+ tmpl.sender_ip = ip_addr;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP,
+ &tmpl, sizeof(tmpl), 0,
+ wl->basic_rate);
+
+ return ret;
+}
+
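Note: wl1271_cmd_build_arp_rsp() above pre-builds an ARP reply template: an 802.11 data header (ToDS), an RFC 1042 LLC/SNAP header carrying ETH_P_ARP, an Ethernet/IPv4 ARP header and the sender hardware/IP fields. A hedged sketch of the assumed on-air layout; the real struct wl12xx_arp_rsp_template is defined in the driver headers, not in this hunk, so field names and padding here are illustrative assumptions:

#include <stdint.h>

#define ETH_ALEN 6

/* Illustrative layout only. */
struct arp_rsp_template_sketch {
	/* 802.11 data header, ToDS: addr1 = BSSID, addr2 = own MAC */
	uint8_t  ieee80211_hdr[24];

	/* LLC/SNAP (RFC 1042) followed by EtherType = ARP (0x0806) */
	uint8_t  llc_hdr[6];
	uint16_t llc_type;

	/* ARP header: Ethernet/IPv4, opcode = reply (2) */
	uint16_t ar_hrd, ar_pro;
	uint8_t  ar_hln, ar_pln;
	uint16_t ar_op;

	/* Sender fields are filled by the driver; the remaining ARP
	 * fields are presumably completed by the firmware from the
	 * incoming request when it answers on the host's behalf. */
	uint8_t  sender_hw[ETH_ALEN];
	uint32_t sender_ip;
} __attribute__((packed));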
int wl1271_build_qos_null_data(struct wl1271 *wl)
{
struct ieee80211_qos_hdr template;
@@ -633,9 +718,9 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
wl->basic_rate);
}
-int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
+int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id)
{
- struct wl1271_cmd_set_keys *cmd;
+ struct wl1271_cmd_set_sta_keys *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id);
@@ -662,11 +747,42 @@ out:
return ret;
}
-int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id)
+{
+ struct wl1271_cmd_set_ap_keys *cmd;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd set_ap_default_wep_key %d", id);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->hlid = WL1271_AP_BROADCAST_HLID;
+ cmd->key_id = id;
+ cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
+ cmd->key_action = cpu_to_le16(KEY_SET_ID);
+ cmd->key_type = KEY_WEP;
+
+ ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_warning("cmd set_ap_default_wep_key failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+
+ return ret;
+}
+
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16)
{
- struct wl1271_cmd_set_keys *cmd;
+ struct wl1271_cmd_set_sta_keys *cmd;
int ret = 0;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -719,6 +835,67 @@ out:
return ret;
}
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
+{
+ struct wl1271_cmd_set_ap_keys *cmd;
+ int ret = 0;
+ u8 lid_type;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ if (hlid == WL1271_AP_BROADCAST_HLID) {
+ if (key_type == KEY_WEP)
+ lid_type = WEP_DEFAULT_LID_TYPE;
+ else
+ lid_type = BROADCAST_LID_TYPE;
+ } else {
+ lid_type = UNICAST_LID_TYPE;
+ }
+
+ wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d"
+ " hlid: %d", (int)action, (int)id, (int)lid_type,
+ (int)key_type, (int)hlid);
+
+ cmd->lid_key_type = lid_type;
+ cmd->hlid = hlid;
+ cmd->key_action = cpu_to_le16(action);
+ cmd->key_size = key_size;
+ cmd->key_type = key_type;
+ cmd->key_id = id;
+ cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
+ cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
+
+ if (key_type == KEY_TKIP) {
+ /*
+ * We get the key in the following form:
+ * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
+ * but the target is expecting:
+ * TKIP - RX MIC - TX MIC
+ */
+ memcpy(cmd->key, key, 16);
+ memcpy(cmd->key + 16, key + 24, 8);
+ memcpy(cmd->key + 24, key + 16, 8);
+ } else {
+ memcpy(cmd->key, key, key_size);
+ }
+
+ wl1271_dump(DEBUG_CRYPT, "TARGET AP KEY: ", cmd, sizeof(*cmd));
+
+ ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_warning("could not set ap keys");
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
+
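Note: both set_key paths reorder TKIP key material because mac80211 hands the driver TKIP(16) | TX MIC(8) | RX MIC(8) while the firmware expects TKIP | RX MIC | TX MIC. A standalone sketch of that swap:

#include <stdint.h>
#include <string.h>

#define TKIP_KEY_LEN	16
#define TKIP_MIC_LEN	8

/* src: TKIP key | TX MIC | RX MIC (as delivered by mac80211)
 * dst: TKIP key | RX MIC | TX MIC (as expected by the firmware) */
static void tkip_reorder_key(uint8_t dst[32], const uint8_t src[32])
{
	memcpy(dst, src, TKIP_KEY_LEN);
	memcpy(dst + 16, src + 24, TKIP_MIC_LEN);	/* RX MIC */
	memcpy(dst + 24, src + 16, TKIP_MIC_LEN);	/* TX MIC */
}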
int wl1271_cmd_disconnect(struct wl1271 *wl)
{
struct wl1271_cmd_disconnect *cmd;
@@ -781,3 +958,180 @@ out_free:
out:
return ret;
}
+
+int wl1271_cmd_start_bss(struct wl1271 *wl)
+{
+ struct wl1271_cmd_bss_start *cmd;
+ struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd start bss");
+
+ /*
+ * FIXME: We currently do not support hidden SSID. The real SSID
+ * should be fetched from mac80211 first.
+ */
+ if (wl->ssid_len == 0) {
+ wl1271_warning("Hidden SSID currently not supported for AP");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN);
+
+ cmd->aging_period = cpu_to_le16(WL1271_AP_DEF_INACTIV_SEC);
+ cmd->bss_index = WL1271_AP_BSS_INDEX;
+ cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
+ cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;
+ cmd->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+ cmd->beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->dtim_interval = bss_conf->dtim_period;
+ cmd->beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
+ cmd->channel = wl->channel;
+ cmd->ssid_len = wl->ssid_len;
+ cmd->ssid_type = SSID_TYPE_PUBLIC;
+ memcpy(cmd->ssid, wl->ssid, wl->ssid_len);
+
+ switch (wl->band) {
+ case IEEE80211_BAND_2GHZ:
+ cmd->band = RADIO_BAND_2_4GHZ;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ cmd->band = RADIO_BAND_5GHZ;
+ break;
+ default:
+ wl1271_warning("bss start - unknown band: %d", (int)wl->band);
+ cmd->band = RADIO_BAND_2_4GHZ;
+ break;
+ }
+
+ ret = wl1271_cmd_send(wl, CMD_BSS_START, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd start bss");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl1271_cmd_stop_bss(struct wl1271 *wl)
+{
+ struct wl1271_cmd_bss_start *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd stop bss");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->bss_index = WL1271_AP_BSS_INDEX;
+
+ ret = wl1271_cmd_send(wl, CMD_BSS_STOP, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd stop bss");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+{
+ struct wl1271_cmd_add_sta *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd add sta %d", (int)hlid);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* currently we don't support UAPSD */
+ cmd->sp_len = 0;
+
+ memcpy(cmd->addr, sta->addr, ETH_ALEN);
+ cmd->bss_index = WL1271_AP_BSS_INDEX;
+ cmd->aid = sta->aid;
+ cmd->hlid = hlid;
+
+ /*
+ * FIXME: Does STA support QOS? We need to propagate this info from
+ * hostapd. Currently not that important since this is only used for
+ * sending the correct flavor of null-data packet in response to a
+ * trigger.
+ */
+ cmd->wmm = 0;
+
+ cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl,
+ sta->supp_rates[wl->band]));
+
+ wl1271_debug(DEBUG_CMD, "new sta rates: 0x%x", cmd->supported_rates);
+
+ ret = wl1271_cmd_send(wl, CMD_ADD_STA, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd add sta");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
+{
+ struct wl1271_cmd_remove_sta *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd remove sta %d", (int)hlid);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->hlid = hlid;
+ /* We never send a deauth, mac80211 is in charge of this */
+ cmd->reason_opcode = 0;
+ cmd->send_deauth_flag = 0;
+
+ ret = wl1271_cmd_send(wl, CMD_REMOVE_STA, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd remove sta");
+ goto out_free;
+ }
+
+ /*
+ * We are ok with a timeout here. The event is sometimes not sent
+ * due to a firmware bug.
+ */
+ wl1271_cmd_wait_for_event_or_timeout(wl, STA_REMOVE_COMPLETE_EVENT_ID);
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index a0caf4fc37b1..54c12e71417e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -22,10 +22,10 @@
*
*/
-#ifndef __WL1271_CMD_H__
-#define __WL1271_CMD_H__
+#ifndef __CMD_H__
+#define __CMD_H__
-#include "wl1271.h"
+#include "wl12xx.h"
struct acx_header;
@@ -39,7 +39,7 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
@@ -49,14 +49,25 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
int wl1271_cmd_build_probe_req(struct wl1271 *wl,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band);
+struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+ struct sk_buff *skb);
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
int wl1271_build_qos_null_data(struct wl1271 *wl);
int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
-int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
-int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
- u8 key_size, const u8 *key, const u8 *addr,
- u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id);
+int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id);
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, const u8 *addr,
+ u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16);
int wl1271_cmd_disconnect(struct wl1271 *wl);
int wl1271_cmd_set_sta_state(struct wl1271 *wl);
+int wl1271_cmd_start_bss(struct wl1271 *wl);
+int wl1271_cmd_stop_bss(struct wl1271 *wl);
+int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid);
enum wl1271_commands {
CMD_INTERROGATE = 1, /*use this to read information elements*/
@@ -95,6 +106,12 @@ enum wl1271_commands {
CMD_STOP_PERIODIC_SCAN = 51,
CMD_SET_STA_STATE = 52,
+ /* AP mode commands */
+ CMD_BSS_START = 60,
+ CMD_BSS_STOP = 61,
+ CMD_ADD_STA = 62,
+ CMD_REMOVE_STA = 63,
+
NUM_COMMANDS,
MAX_COMMAND_ID = 0xFFFF,
};
@@ -122,6 +139,15 @@ enum cmd_templ {
CMD_TEMPL_CTS, /*
* For CTS-to-self (FastCTS) mechanism
* for BT/WLAN coexistence (SoftGemini). */
+ CMD_TEMPL_ARP_RSP,
+ CMD_TEMPL_LINK_MEASUREMENT_REPORT,
+
+ /* AP-mode specific */
+ CMD_TEMPL_AP_BEACON = 13,
+ CMD_TEMPL_AP_PROBE_RESPONSE,
+ CMD_TEMPL_AP_ARP_RSP,
+ CMD_TEMPL_DEAUTH_AP,
+
CMD_TEMPL_MAX = 0xff
};
@@ -191,6 +217,7 @@ struct wl1271_cmd_join {
* ACK or CTS frames).
*/
__le32 basic_rate_set;
+ __le32 supported_rate_set;
u8 dtim_interval;
/*
* bits 0-2: This bitwise field specifies the type
@@ -253,20 +280,11 @@ struct wl1271_cmd_ps_params {
struct wl1271_cmd_header header;
u8 ps_mode; /* STATION_* */
- u8 send_null_data; /* Do we have to send NULL data packet ? */
- u8 retries; /* Number of retires for the initial NULL data packet */
-
- /*
- * TUs during which the target stays awake after switching
- * to power save mode.
- */
- u8 hang_over_period;
- __le32 null_data_rate;
+ u8 padding[3];
} __packed;
/* HW encryption keys */
#define NUM_ACCESS_CATEGORIES_COPY 4
-#define MAX_KEY_SIZE 32
enum wl1271_cmd_key_action {
KEY_ADD_OR_REPLACE = 1,
@@ -285,7 +303,7 @@ enum wl1271_cmd_key_type {
/* FIXME: Add description for key-types */
-struct wl1271_cmd_set_keys {
+struct wl1271_cmd_set_sta_keys {
struct wl1271_cmd_header header;
/* Ignored for default WEP key */
@@ -314,6 +332,57 @@ struct wl1271_cmd_set_keys {
__le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
} __packed;
+enum wl1271_cmd_lid_key_type {
+ UNICAST_LID_TYPE = 0,
+ BROADCAST_LID_TYPE = 1,
+ WEP_DEFAULT_LID_TYPE = 2
+};
+
+struct wl1271_cmd_set_ap_keys {
+ struct wl1271_cmd_header header;
+
+ /*
+ * Indicates whether the HLID is a unicast key set
+ * or broadcast key set. A special value 0xFF is
+ * used to indicate that the HLID is on WEP-default
+	 * (multi-hlids), of type wl1271_cmd_lid_key_type.
+ */
+ u8 hlid;
+
+ /*
+	 * In a WEP-default network (hlid == 0xFF), used to
+	 * indicate which network (STA/IBSS/AP role) should be
+	 * changed.
+ */
+ u8 lid_key_type;
+
+ /*
+ * Key ID - For TKIP and AES key types, this field
+ * indicates the value that should be inserted into
+ * the KeyID field of frames transmitted using this
+	 * key entry. For broadcast keys the index is used as a
+ * marker for TX/RX key.
+ * For WEP default network (HLID=0xFF), this field
+ * indicates the ID of the key to add or remove.
+ */
+ u8 key_id;
+ u8 reserved_1;
+
+ /* key_action_e */
+ __le16 key_action;
+
+ /* key size in bytes */
+ u8 key_size;
+
+ /* key_type_e */
+ u8 key_type;
+
+ /* This field holds the security key data to add to the STA table */
+ u8 key[MAX_KEY_SIZE];
+ __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
+ __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
+} __packed;
+
struct wl1271_cmd_test_header {
u8 id;
u8 padding[3];
@@ -327,9 +396,6 @@ enum wl1271_channel_tune_bands {
#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0
-#define TEST_CMD_P2G_CAL 0x02
-#define TEST_CMD_CHANNEL_TUNE 0x0d
-#define TEST_CMD_UPDATE_PD_REFERENCE_POINT 0x1d
#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19
#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E
#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26
@@ -375,51 +441,6 @@ struct wl1271_ext_radio_parms_cmd {
u8 padding[3];
} __packed;
-struct wl1271_cmd_cal_channel_tune {
- struct wl1271_cmd_header header;
-
- struct wl1271_cmd_test_header test;
-
- u8 band;
- u8 channel;
-
- __le16 radio_status;
-} __packed;
-
-struct wl1271_cmd_cal_update_ref_point {
- struct wl1271_cmd_header header;
-
- struct wl1271_cmd_test_header test;
-
- __le32 ref_power;
- __le32 ref_detector;
- u8 sub_band;
- u8 padding[3];
-} __packed;
-
-#define MAX_TLV_LENGTH 400
-#define MAX_NVS_VERSION_LENGTH 12
-
-#define WL1271_CAL_P2G_BAND_B_G BIT(0)
-
-struct wl1271_cmd_cal_p2g {
- struct wl1271_cmd_header header;
-
- struct wl1271_cmd_test_header test;
-
- __le16 len;
- u8 buf[MAX_TLV_LENGTH];
- u8 type;
- u8 padding;
-
- __le16 radio_status;
- u8 nvs_version[MAX_NVS_VERSION_LENGTH];
-
- u8 sub_band_mask;
- u8 padding2;
-} __packed;
-
-
/*
* There are three types of disconnections:
*
@@ -456,4 +477,68 @@ struct wl1271_cmd_set_sta_state {
u8 padding[3];
} __packed;
+enum wl1271_ssid_type {
+ SSID_TYPE_PUBLIC = 0,
+ SSID_TYPE_HIDDEN = 1
+};
+
+struct wl1271_cmd_bss_start {
+ struct wl1271_cmd_header header;
+
+ /* wl1271_ssid_type */
+ u8 ssid_type;
+ u8 ssid_len;
+ u8 ssid[IW_ESSID_MAX_SIZE];
+ u8 padding_1[2];
+
+ /* Basic rate set */
+ __le32 basic_rate_set;
+	/* Aging period in seconds */
+ __le16 aging_period;
+
+ /*
+ * This field specifies the time between target beacon
+ * transmission times (TBTTs), in time units (TUs).
+ * Valid values are 1 to 1024.
+ */
+ __le16 beacon_interval;
+ u8 bssid[ETH_ALEN];
+ u8 bss_index;
+ /* Radio band */
+ u8 band;
+ u8 channel;
+ /* The host link id for the AP's global queue */
+ u8 global_hlid;
+ /* The host link id for the AP's broadcast queue */
+ u8 broadcast_hlid;
+ /* DTIM count */
+ u8 dtim_interval;
+ /* Beacon expiry time in ms */
+ u8 beacon_expiry;
+ u8 padding_2[3];
+} __packed;
+
+struct wl1271_cmd_add_sta {
+ struct wl1271_cmd_header header;
+
+ u8 addr[ETH_ALEN];
+ u8 hlid;
+ u8 aid;
+ u8 psd_type[NUM_ACCESS_CATEGORIES_COPY];
+ __le32 supported_rates;
+ u8 bss_index;
+ u8 sp_len;
+ u8 wmm;
+ u8 padding1;
+} __packed;
+
+struct wl1271_cmd_remove_sta {
+ struct wl1271_cmd_header header;
+
+ u8 hlid;
+ u8 reason_opcode;
+ u8 send_deauth_flag;
+ u8 padding1;
+} __packed;
+
#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/conf.h
index 5f78a6cb1433..856a8a2fff4f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -21,8 +21,8 @@
*
*/
-#ifndef __WL1271_CONF_H__
-#define __WL1271_CONF_H__
+#ifndef __CONF_H__
+#define __CONF_H__
enum {
CONF_HW_BIT_RATE_1MBPS = BIT(0),
@@ -496,6 +496,26 @@ struct conf_rx_settings {
CONF_HW_BIT_RATE_2MBPS)
#define CONF_TX_RATE_RETRY_LIMIT 10
+/*
+ * Rates supported for data packets when operating as AP. Note the absence
+ * of the 22Mbps rate. There is a FW limitation of 12 rates so we must drop
+ * one. The rate dropped is not mandatory under any operating mode.
+ */
+#define CONF_TX_AP_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
+ CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS | \
+ CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS | \
+ CONF_HW_BIT_RATE_18MBPS | CONF_HW_BIT_RATE_24MBPS | \
+ CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
+ CONF_HW_BIT_RATE_54MBPS)
+
+/*
+ * Default rates for management traffic when operating in AP mode. This
+ * should be configured according to the basic rate set of the AP
+ */
+#define CONF_TX_AP_DEFAULT_MGMT_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS)
+
struct conf_tx_rate_class {
/*
@@ -636,9 +656,9 @@ struct conf_tx_settings {
/*
* Configuration for rate classes for TX (currently only one
- * rate class supported.)
+ * rate class supported). Used in non-AP mode.
*/
- struct conf_tx_rate_class rc_conf;
+ struct conf_tx_rate_class sta_rc_conf;
/*
* Configuration for access categories for TX rate control.
@@ -647,6 +667,28 @@ struct conf_tx_settings {
struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
/*
+ * Configuration for rate classes in AP-mode. These rate classes
+ * are for the AC TX queues
+ */
+ struct conf_tx_rate_class ap_rc_conf[CONF_TX_MAX_AC_COUNT];
+
+ /*
+ * Management TX rate class for AP-mode.
+ */
+ struct conf_tx_rate_class ap_mgmt_conf;
+
+ /*
+ * Broadcast TX rate class for AP-mode.
+ */
+ struct conf_tx_rate_class ap_bcst_conf;
+
+ /*
+ * AP-mode - allow this number of TX retries to a station before an
+ * event is triggered from FW.
+ */
+ u16 ap_max_tx_retries;
+
+ /*
* Configuration for TID parameters.
*/
u8 tid_conf_count;
@@ -687,6 +729,12 @@ struct conf_tx_settings {
* Range: CONF_HW_BIT_RATE_* bit mask
*/
u32 basic_rate_5;
+
+ /*
+ * TX retry limits for templates
+ */
+ u8 tmpl_short_retry_limit;
+ u8 tmpl_long_retry_limit;
};
enum {
@@ -912,6 +960,14 @@ struct conf_conn_settings {
u8 psm_entry_retries;
/*
+ * Specifies the maximum number of times to try PSM exit if it fails
+	 * (if sending the appropriate null-func message fails).
+ *
+ * Range 0 - 255
+ */
+ u8 psm_exit_retries;
+
+ /*
* Specifies the maximum number of times to try transmit the PSM entry
* null-func frame for each PSM entry attempt
*
@@ -1036,30 +1092,30 @@ struct conf_scan_settings {
/*
* The minimum time to wait on each channel for active scans
*
- * Range: 0 - 65536 tu
+ * Range: u32 tu/1000
*/
- u16 min_dwell_time_active;
+ u32 min_dwell_time_active;
/*
* The maximum time to wait on each channel for active scans
*
- * Range: 0 - 65536 tu
+ * Range: u32 tu/1000
*/
- u16 max_dwell_time_active;
+ u32 max_dwell_time_active;
/*
- * The maximum time to wait on each channel for passive scans
+ * The minimum time to wait on each channel for passive scans
*
- * Range: 0 - 65536 tu
+ * Range: u32 tu/1000
*/
- u16 min_dwell_time_passive;
+ u32 min_dwell_time_passive;
/*
* The maximum time to wait on each channel for passive scans
*
- * Range: 0 - 65536 tu
+ * Range: u32 tu/1000
*/
- u16 max_dwell_time_passive;
+ u32 max_dwell_time_passive;
/*
* Number of probe requests to transmit on each active scan channel
@@ -1090,6 +1146,51 @@ struct conf_rf_settings {
u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
};
+struct conf_ht_setting {
+ u16 tx_ba_win_size;
+ u16 inactivity_timeout;
+};
+
+struct conf_memory_settings {
+ /* Number of stations supported in IBSS mode */
+ u8 num_stations;
+
+ /* Number of ssid profiles used in IBSS mode */
+ u8 ssid_profiles;
+
+ /* Number of memory buffers allocated to rx pool */
+ u8 rx_block_num;
+
+ /* Minimum number of blocks allocated to tx pool */
+ u8 tx_min_block_num;
+
+ /* Disable/Enable dynamic memory */
+ u8 dynamic_memory;
+
+ /*
+	 * Minimum required free tx memory blocks in order to ensure optimum
+	 * performance
+ *
+ * Range: 0-120
+ */
+ u8 min_req_tx_blocks;
+
+ /*
+	 * Minimum required free rx memory blocks in order to ensure optimum
+	 * performance
+ *
+ * Range: 0-120
+ */
+ u8 min_req_rx_blocks;
+
+ /*
+ * Minimum number of mem blocks (free+used) guaranteed for TX
+ *
+ * Range: 0-120
+ */
+ u8 tx_min;
+};
+
struct conf_drv_settings {
struct conf_sg_settings sg;
struct conf_rx_settings rx;
@@ -1100,6 +1201,8 @@ struct conf_drv_settings {
struct conf_roam_trigger_settings roam_trigger;
struct conf_scan_settings scan;
struct conf_rf_settings rf;
+ struct conf_ht_setting ht;
+ struct conf_memory_settings mem;
};
#endif
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
new file mode 100644
index 000000000000..8e75b09723b9
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -0,0 +1,469 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "debugfs.h"
+
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#include "wl12xx.h"
+#include "acx.h"
+#include "ps.h"
+#include "io.h"
+
+/* ms */
+#define WL1271_DEBUGFS_STATS_LIFETIME 1000
+
+/* debugfs macros idea from mac80211 */
+#define DEBUGFS_FORMAT_BUFFER_SIZE 100
+static int wl1271_format_buffer(char __user *userbuf, size_t count,
+ loff_t *ppos, char *fmt, ...)
+{
+ va_list args;
+ char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
+ int res;
+
+ va_start(args, fmt);
+ res = vscnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, res);
+}
+
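+/*
+ * DEBUGFS_READONLY_FILE(name, fmt, value) generates a name##_read() handler
+ * and a matching name##_ops fops struct; e.g. the retry_count file below is
+ * created from wl->stats.retry_count this way.
+ */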
+#define DEBUGFS_READONLY_FILE(name, fmt, value...) \
+static ssize_t name## _read(struct file *file, char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct wl1271 *wl = file->private_data; \
+ return wl1271_format_buffer(userbuf, count, ppos, \
+ fmt "\n", ##value); \
+} \
+ \
+static const struct file_operations name## _ops = { \
+ .read = name## _read, \
+ .open = wl1271_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_ADD(name, parent) \
+ entry = debugfs_create_file(#name, 0400, parent, \
+ wl, &name## _ops); \
+ if (!entry || IS_ERR(entry)) \
+ goto err; \
+
+#define DEBUGFS_FWSTATS_FILE(sub, name, fmt) \
+static ssize_t sub## _ ##name## _read(struct file *file, \
+ char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct wl1271 *wl = file->private_data; \
+ \
+ wl1271_debugfs_update_stats(wl); \
+ \
+ return wl1271_format_buffer(userbuf, count, ppos, fmt "\n", \
+ wl->stats.fw_stats->sub.name); \
+} \
+ \
+static const struct file_operations sub## _ ##name## _ops = { \
+ .read = sub## _ ##name## _read, \
+ .open = wl1271_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_FWSTATS_ADD(sub, name) \
+ DEBUGFS_ADD(sub## _ ##name, stats)
+
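+/*
+ * Fetch fresh statistics from the FW, but at most once every
+ * WL1271_DEBUGFS_STATS_LIFETIME ms; the chip is woken from ELP for the read.
+ */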
+static void wl1271_debugfs_update_stats(struct wl1271 *wl)
+{
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ if (wl->state == WL1271_STATE_ON &&
+ time_after(jiffies, wl->stats.fw_stats_update +
+ msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) {
+ wl1271_acx_statistics(wl, wl->stats.fw_stats);
+ wl->stats.fw_stats_update = jiffies;
+ }
+
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+}
+
+static int wl1271_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
+
+DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
+DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, "%u");
+DEBUGFS_FWSTATS_FILE(rx, hw_stuck, "%u");
+DEBUGFS_FWSTATS_FILE(rx, dropped, "%u");
+DEBUGFS_FWSTATS_FILE(rx, fcs_err, "%u");
+DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, "%u");
+DEBUGFS_FWSTATS_FILE(rx, path_reset, "%u");
+DEBUGFS_FWSTATS_FILE(rx, reset_counter, "%u");
+
+DEBUGFS_FWSTATS_FILE(dma, rx_requested, "%u");
+DEBUGFS_FWSTATS_FILE(dma, rx_errors, "%u");
+DEBUGFS_FWSTATS_FILE(dma, tx_requested, "%u");
+DEBUGFS_FWSTATS_FILE(dma, tx_errors, "%u");
+
+DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, "%u");
+DEBUGFS_FWSTATS_FILE(isr, fiqs, "%u");
+DEBUGFS_FWSTATS_FILE(isr, rx_headers, "%u");
+DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, "%u");
+DEBUGFS_FWSTATS_FILE(isr, rx_rdys, "%u");
+DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
+DEBUGFS_FWSTATS_FILE(isr, tx_procs, "%u");
+DEBUGFS_FWSTATS_FILE(isr, decrypt_done, "%u");
+DEBUGFS_FWSTATS_FILE(isr, dma0_done, "%u");
+DEBUGFS_FWSTATS_FILE(isr, dma1_done, "%u");
+DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, "%u");
+DEBUGFS_FWSTATS_FILE(isr, commands, "%u");
+DEBUGFS_FWSTATS_FILE(isr, rx_procs, "%u");
+DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, "%u");
+DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, "%u");
+DEBUGFS_FWSTATS_FILE(isr, pci_pm, "%u");
+DEBUGFS_FWSTATS_FILE(isr, wakeups, "%u");
+DEBUGFS_FWSTATS_FILE(isr, low_rssi, "%u");
+
+DEBUGFS_FWSTATS_FILE(wep, addr_key_count, "%u");
+DEBUGFS_FWSTATS_FILE(wep, default_key_count, "%u");
+/* skipping wep.reserved */
+DEBUGFS_FWSTATS_FILE(wep, key_not_found, "%u");
+DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, "%u");
+DEBUGFS_FWSTATS_FILE(wep, packets, "%u");
+DEBUGFS_FWSTATS_FILE(wep, interrupt, "%u");
+
+DEBUGFS_FWSTATS_FILE(pwr, ps_enter, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, elp_enter, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, power_save_off, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, enable_ps, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, disable_ps, "%u");
+DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, "%u");
+/* skipping cont_miss_bcns_spread for now */
+DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, "%u");
+
+DEBUGFS_FWSTATS_FILE(mic, rx_pkts, "%u");
+DEBUGFS_FWSTATS_FILE(mic, calc_failure, "%u");
+
+DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, "%u");
+DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, "%u");
+DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, "%u");
+DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, "%u");
+DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, "%u");
+DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, "%u");
+
+DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u");
+DEBUGFS_FWSTATS_FILE(event, calibration, "%u");
+DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u");
+DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u");
+DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
+DEBUGFS_FWSTATS_FILE(event, oom_late, "%u");
+DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u");
+DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u");
+
+DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, "%u");
+DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, "%u");
+DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, "%u");
+DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, "%u");
+DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, "%u");
+DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, "%u");
+DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, "%u");
+
+DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, "%u");
+DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, "%u");
+DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data, "%u");
+DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, "%u");
+DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
+
+DEBUGFS_READONLY_FILE(retry_count, "%u", wl->stats.retry_count);
+DEBUGFS_READONLY_FILE(excessive_retries, "%u",
+ wl->stats.excessive_retries);
+
+static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ u32 queue_len;
+ char buf[20];
+ int res;
+
+ queue_len = wl->tx_queue_count;
+
+ res = scnprintf(buf, sizeof(buf), "%u\n", queue_len);
+ return simple_read_from_buffer(userbuf, count, ppos, buf, res);
+}
+
+static const struct file_operations tx_queue_len_ops = {
+ .read = tx_queue_len_read,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+
+ int res;
+ char buf[10];
+
+ res = scnprintf(buf, sizeof(buf), "%d\n", state);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+}
+
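+/*
+ * Parse a numeric value written from userspace and power the chip on
+ * (non-zero) or off (zero) through its GPIO line.
+ */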
+static ssize_t gpio_power_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ char buf[10];
+ size_t len;
+ unsigned long value;
+ int ret;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len)) {
+ return -EFAULT;
+ }
+ buf[len] = '\0';
+
+ ret = strict_strtoul(buf, 0, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value in gpio_power");
+ return -EINVAL;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ if (value)
+ wl1271_power_on(wl);
+ else
+ wl1271_power_off(wl);
+
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations gpio_power_ops = {
+ .read = gpio_power_read,
+ .write = gpio_power_write,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static int wl1271_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir)
+{
+ int ret = 0;
+ struct dentry *entry, *stats;
+
+ stats = debugfs_create_dir("fw-statistics", rootdir);
+ if (!stats || IS_ERR(stats)) {
+ entry = stats;
+ goto err;
+ }
+
+ DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
+
+ DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
+ DEBUGFS_FWSTATS_ADD(rx, hdr_overflow);
+ DEBUGFS_FWSTATS_ADD(rx, hw_stuck);
+ DEBUGFS_FWSTATS_ADD(rx, dropped);
+ DEBUGFS_FWSTATS_ADD(rx, fcs_err);
+ DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig);
+ DEBUGFS_FWSTATS_ADD(rx, path_reset);
+ DEBUGFS_FWSTATS_ADD(rx, reset_counter);
+
+ DEBUGFS_FWSTATS_ADD(dma, rx_requested);
+ DEBUGFS_FWSTATS_ADD(dma, rx_errors);
+ DEBUGFS_FWSTATS_ADD(dma, tx_requested);
+ DEBUGFS_FWSTATS_ADD(dma, tx_errors);
+
+ DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt);
+ DEBUGFS_FWSTATS_ADD(isr, fiqs);
+ DEBUGFS_FWSTATS_ADD(isr, rx_headers);
+ DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow);
+ DEBUGFS_FWSTATS_ADD(isr, rx_rdys);
+ DEBUGFS_FWSTATS_ADD(isr, irqs);
+ DEBUGFS_FWSTATS_ADD(isr, tx_procs);
+ DEBUGFS_FWSTATS_ADD(isr, decrypt_done);
+ DEBUGFS_FWSTATS_ADD(isr, dma0_done);
+ DEBUGFS_FWSTATS_ADD(isr, dma1_done);
+ DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete);
+ DEBUGFS_FWSTATS_ADD(isr, commands);
+ DEBUGFS_FWSTATS_ADD(isr, rx_procs);
+ DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes);
+ DEBUGFS_FWSTATS_ADD(isr, host_acknowledges);
+ DEBUGFS_FWSTATS_ADD(isr, pci_pm);
+ DEBUGFS_FWSTATS_ADD(isr, wakeups);
+ DEBUGFS_FWSTATS_ADD(isr, low_rssi);
+
+ DEBUGFS_FWSTATS_ADD(wep, addr_key_count);
+ DEBUGFS_FWSTATS_ADD(wep, default_key_count);
+ /* skipping wep.reserved */
+ DEBUGFS_FWSTATS_ADD(wep, key_not_found);
+ DEBUGFS_FWSTATS_ADD(wep, decrypt_fail);
+ DEBUGFS_FWSTATS_ADD(wep, packets);
+ DEBUGFS_FWSTATS_ADD(wep, interrupt);
+
+ DEBUGFS_FWSTATS_ADD(pwr, ps_enter);
+ DEBUGFS_FWSTATS_ADD(pwr, elp_enter);
+ DEBUGFS_FWSTATS_ADD(pwr, missing_bcns);
+ DEBUGFS_FWSTATS_ADD(pwr, wake_on_host);
+ DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp);
+ DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons);
+ DEBUGFS_FWSTATS_ADD(pwr, power_save_off);
+ DEBUGFS_FWSTATS_ADD(pwr, enable_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, disable_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps);
+ /* skipping cont_miss_bcns_spread for now */
+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons);
+
+ DEBUGFS_FWSTATS_ADD(mic, rx_pkts);
+ DEBUGFS_FWSTATS_ADD(mic, calc_failure);
+
+ DEBUGFS_FWSTATS_ADD(aes, encrypt_fail);
+ DEBUGFS_FWSTATS_ADD(aes, decrypt_fail);
+ DEBUGFS_FWSTATS_ADD(aes, encrypt_packets);
+ DEBUGFS_FWSTATS_ADD(aes, decrypt_packets);
+ DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt);
+ DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt);
+
+ DEBUGFS_FWSTATS_ADD(event, heart_beat);
+ DEBUGFS_FWSTATS_ADD(event, calibration);
+ DEBUGFS_FWSTATS_ADD(event, rx_mismatch);
+ DEBUGFS_FWSTATS_ADD(event, rx_mem_empty);
+ DEBUGFS_FWSTATS_ADD(event, rx_pool);
+ DEBUGFS_FWSTATS_ADD(event, oom_late);
+ DEBUGFS_FWSTATS_ADD(event, phy_transmit_error);
+ DEBUGFS_FWSTATS_ADD(event, tx_stuck);
+
+ DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn);
+ DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn);
+ DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_utilization);
+
+ DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop);
+ DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
+
+ DEBUGFS_ADD(tx_queue_len, rootdir);
+ DEBUGFS_ADD(retry_count, rootdir);
+ DEBUGFS_ADD(excessive_retries, rootdir);
+
+ DEBUGFS_ADD(gpio_power, rootdir);
+
+ return 0;
+
+err:
+ if (IS_ERR(entry))
+ ret = PTR_ERR(entry);
+ else
+ ret = -ENOMEM;
+
+ return ret;
+}
+
+void wl1271_debugfs_reset(struct wl1271 *wl)
+{
+ if (!wl->stats.fw_stats)
+ return;
+
+ memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
+ wl->stats.retry_count = 0;
+ wl->stats.excessive_retries = 0;
+}
+
+int wl1271_debugfs_init(struct wl1271 *wl)
+{
+ int ret;
+ struct dentry *rootdir;
+
+ rootdir = debugfs_create_dir(KBUILD_MODNAME,
+ wl->hw->wiphy->debugfsdir);
+
+ if (IS_ERR(rootdir)) {
+ ret = PTR_ERR(rootdir);
+ goto err;
+ }
+
+ wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats),
+ GFP_KERNEL);
+
+ if (!wl->stats.fw_stats) {
+ ret = -ENOMEM;
+ goto err_fw;
+ }
+
+ wl->stats.fw_stats_update = jiffies;
+
+ ret = wl1271_debugfs_add_files(wl, rootdir);
+
+ if (ret < 0)
+ goto err_file;
+
+ return 0;
+
+err_file:
+ kfree(wl->stats.fw_stats);
+ wl->stats.fw_stats = NULL;
+
+err_fw:
+ debugfs_remove_recursive(rootdir);
+
+err:
+ return ret;
+}
+
+void wl1271_debugfs_exit(struct wl1271 *wl)
+{
+ kfree(wl->stats.fw_stats);
+ wl->stats.fw_stats = NULL;
+}
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.h b/drivers/net/wireless/wl12xx/debugfs.h
index 00a45b2669ad..254c5b292cf6 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.h
+++ b/drivers/net/wireless/wl12xx/debugfs.h
@@ -21,10 +21,10 @@
*
*/
-#ifndef WL1271_DEBUGFS_H
-#define WL1271_DEBUGFS_H
+#ifndef __DEBUGFS_H__
+#define __DEBUGFS_H__
-#include "wl1271.h"
+#include "wl12xx.h"
int wl1271_debugfs_init(struct wl1271 *wl);
void wl1271_debugfs_exit(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/event.c
index 7b3f50382963..1b170c5cc595 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -21,12 +21,12 @@
*
*/
-#include "wl1271.h"
-#include "wl1271_reg.h"
-#include "wl1271_io.h"
-#include "wl1271_event.h"
-#include "wl1271_ps.h"
-#include "wl1271_scan.h"
+#include "wl12xx.h"
+#include "reg.h"
+#include "io.h"
+#include "event.h"
+#include "ps.h"
+#include "scan.h"
#include "wl12xx_80211.h"
void wl1271_pspoll_work(struct work_struct *work)
@@ -134,23 +134,7 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
/* go to extremely low power mode */
wl1271_ps_elp_sleep(wl);
- if (ret < 0)
- break;
- break;
- case EVENT_EXIT_POWER_SAVE_FAIL:
- wl1271_debug(DEBUG_PSM, "PSM exit failed");
-
- if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
- wl->psm_entry_retry = 0;
- break;
- }
-
- /* make sure the firmware goes to active mode - the frame to
- be sent next will indicate to the AP, that we are active. */
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, false);
break;
- case EVENT_EXIT_POWER_SAVE_SUCCESS:
default:
break;
}
@@ -188,6 +172,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
int ret;
u32 vector;
bool beacon_loss = false;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
wl1271_event_mbox_dump(mbox);
@@ -220,21 +205,21 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
*
*/
- if (vector & BSS_LOSE_EVENT_ID) {
+ if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
wl1271_info("Beacon loss detected.");
/* indicate to the stack, that beacons have been lost */
beacon_loss = true;
}
- if (vector & PS_REPORT_EVENT_ID) {
+ if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
if (ret < 0)
return ret;
}
- if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
+ if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
wl1271_event_pspoll_delivery_fail(wl);
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/event.h
index e4751667cf5e..0e80886f3031 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -22,8 +22,8 @@
*
*/
-#ifndef __WL1271_EVENT_H__
-#define __WL1271_EVENT_H__
+#ifndef __EVENT_H__
+#define __EVENT_H__
/*
* Mbox events
@@ -59,6 +59,7 @@ enum {
BSS_LOSE_EVENT_ID = BIT(18),
REGAINED_BSS_EVENT_ID = BIT(19),
ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20),
+ STA_REMOVE_COMPLETE_EVENT_ID = BIT(21), /* AP */
SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23),
SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
@@ -74,8 +75,6 @@ enum {
enum {
EVENT_ENTER_POWER_SAVE_FAIL = 0,
EVENT_ENTER_POWER_SAVE_SUCCESS,
- EVENT_EXIT_POWER_SAVE_FAIL,
- EVENT_EXIT_POWER_SAVE_SUCCESS,
};
struct event_debug_report {
@@ -115,7 +114,12 @@ struct event_mailbox {
u8 scheduled_scan_status;
u8 ps_status;
- u8 reserved_5[29];
+ /* AP FW only */
+ u8 hlid_removed;
+ __le16 sta_aging_status;
+ __le16 sta_tx_retry_exceeded;
+
+ u8 reserved_5[24];
} __packed;
int wl1271_event_unmask(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_ini.h b/drivers/net/wireless/wl12xx/ini.h
index 2313047d4015..c330a2583dfd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ini.h
+++ b/drivers/net/wireless/wl12xx/ini.h
@@ -21,8 +21,8 @@
*
*/
-#ifndef __WL1271_INI_H__
-#define __WL1271_INI_H__
+#ifndef __INI_H__
+#define __INI_H__
#define WL1271_INI_MAX_SMART_REFLEX_PARAM 16
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/init.c
index 8044bba70ee7..6072fe457135 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -25,46 +25,26 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include "wl1271_init.h"
+#include "init.h"
#include "wl12xx_80211.h"
-#include "wl1271_acx.h"
-#include "wl1271_cmd.h"
-#include "wl1271_reg.h"
+#include "acx.h"
+#include "cmd.h"
+#include "reg.h"
+#include "tx.h"
-static int wl1271_init_hwenc_config(struct wl1271 *wl)
-{
- int ret;
-
- ret = wl1271_acx_feature_cfg(wl);
- if (ret < 0) {
- wl1271_warning("couldn't set feature config");
- return ret;
- }
-
- ret = wl1271_cmd_set_default_wep_key(wl, wl->default_key);
- if (ret < 0) {
- wl1271_warning("couldn't set default key");
- return ret;
- }
-
- return 0;
-}
-
-int wl1271_init_templates_config(struct wl1271 *wl)
+int wl1271_sta_init_templates_config(struct wl1271 *wl)
{
int ret, i;
- size_t size;
/* send empty templates for fw memory reservation */
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
- sizeof(struct wl12xx_probe_req_template),
+ WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
- size = sizeof(struct wl12xx_probe_req_template);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
- NULL, size, 0,
+ NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0,
WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
@@ -102,6 +82,13 @@ int wl1271_init_templates_config(struct wl1271 *wl)
if (ret < 0)
return ret;
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP, NULL,
+ sizeof
+ (struct wl12xx_arp_rsp_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
WL1271_CMD_TEMPL_MAX_SIZE, i,
@@ -113,6 +100,132 @@ int wl1271_init_templates_config(struct wl1271 *wl)
return 0;
}
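+
+/*
+ * Register the AP deauth template. Only the frame control field is
+ * pre-filled here; the remaining frame fields are left zeroed.
+ */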
+static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
+{
+ struct wl12xx_disconn_template *tmpl;
+ int ret;
+
+ tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+ if (!tmpl) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_DEAUTH);
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
+ tmpl, sizeof(*tmpl), 0,
+ wl1271_tx_min_rate_get(wl));
+
+out:
+ kfree(tmpl);
+ return ret;
+}
+
+static int wl1271_ap_init_null_template(struct wl1271 *wl)
+{
+ struct ieee80211_hdr_3addr *nullfunc;
+ int ret;
+
+ nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL);
+ if (!nullfunc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+
+ /* nullfunc->addr1 is filled by FW */
+
+ memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
+ sizeof(*nullfunc), 0,
+ wl1271_tx_min_rate_get(wl));
+
+out:
+ kfree(nullfunc);
+ return ret;
+}
+
+static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
+{
+ struct ieee80211_qos_hdr *qosnull;
+ int ret;
+
+ qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL);
+ if (!qosnull) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ qosnull->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+
+ /* qosnull->addr1 is filled by FW */
+
+ memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
+ sizeof(*qosnull), 0,
+ wl1271_tx_min_rate_get(wl));
+
+out:
+ kfree(qosnull);
+ return ret;
+}
+
+static int wl1271_ap_init_templates_config(struct wl1271 *wl)
+{
+ int ret;
+
+ /*
+ * Put very large empty placeholders for all templates. These
+ * reserve memory for later.
+ */
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
+ sizeof
+ (struct wl12xx_probe_resp_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
+ sizeof
+ (struct wl12xx_beacon_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
+ sizeof
+ (struct wl12xx_disconn_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
+ sizeof(struct wl12xx_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
+ sizeof
+ (struct wl12xx_qos_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
{
int ret;
@@ -140,10 +253,6 @@ int wl1271_init_phy_config(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
- if (ret < 0)
- return ret;
-
ret = wl1271_acx_service_period_timeout(wl);
if (ret < 0)
return ret;
@@ -208,11 +317,199 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
return 0;
}
+static int wl1271_sta_hw_init(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wl1271_cmd_ext_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+
+ /* PS config */
+ ret = wl1271_acx_config_ps(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_sta_init_templates_config(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize connection monitoring thresholds */
+ ret = wl1271_acx_conn_monit_params(wl, false);
+ if (ret < 0)
+ return ret;
+
+ /* Beacon filtering */
+ ret = wl1271_init_beacon_filter(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Bluetooth WLAN coexistence */
+ ret = wl1271_init_pta(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Beacons and broadcast settings */
+ ret = wl1271_init_beacon_broadcast(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Configure for ELP power saving */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+ if (ret < 0)
+ return ret;
+
+ /* Configure rssi/snr averaging weights */
+ ret = wl1271_acx_rssi_snr_avg_weights(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_sta_rate_policies(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_sta_mem_cfg(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
+{
+ int ret, i;
+
+ ret = wl1271_cmd_set_sta_default_wep_key(wl, wl->default_key);
+ if (ret < 0) {
+ wl1271_warning("couldn't set default key");
+ return ret;
+ }
+
+ /* disable all keep-alive templates */
+ for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+ ret = wl1271_acx_keep_alive_config(wl, i,
+ ACX_KEEP_ALIVE_TPL_INVALID);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* disable the keep-alive feature */
+ ret = wl1271_acx_keep_alive_mode(wl, false);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl1271_ap_hw_init(struct wl1271 *wl)
+{
+ int ret, i;
+
+ ret = wl1271_ap_init_templates_config(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Configure for power always on */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ return ret;
+
+ /* Configure initial TX rate classes */
+ for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
+ ret = wl1271_acx_ap_rate_policy(wl,
+ &wl->conf.tx.ap_rc_conf[i], i);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = wl1271_acx_ap_rate_policy(wl,
+ &wl->conf.tx.ap_mgmt_conf,
+ ACX_TX_AP_MODE_MGMT_RATE);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_ap_rate_policy(wl,
+ &wl->conf.tx.ap_bcst_conf,
+ ACX_TX_AP_MODE_BCST_RATE);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_max_tx_retry(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_ap_mem_cfg(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wl1271_ap_init_deauth_template(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_ap_init_null_template(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_ap_init_qos_null_template(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void wl1271_check_ba_support(struct wl1271 *wl)
+{
+	/* validate FW cost ver x.x.x.50-60.x */
+ if ((wl->chip.fw_ver[3] >= WL12XX_BA_SUPPORT_FW_COST_VER2_START) &&
+ (wl->chip.fw_ver[3] < WL12XX_BA_SUPPORT_FW_COST_VER2_END)) {
+ wl->ba_support = true;
+ return;
+ }
+
+ wl->ba_support = false;
+}
+
+static int wl1271_set_ba_policies(struct wl1271 *wl)
+{
+ u8 tid_index;
+ int ret = 0;
+
+ /* Reset the BA RX indicators */
+ wl->ba_rx_bitmap = 0;
+
+	/* validate that the FW supports BA */
+ wl1271_check_ba_support(wl);
+
+ if (wl->ba_support)
+ /* 802.11n initiator BA session setting */
+ for (tid_index = 0; tid_index < CONF_TX_MAX_TID_COUNT;
+ ++tid_index) {
+ ret = wl1271_acx_set_ba_session(wl, WLAN_BACK_INITIATOR,
+ tid_index, true);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
int wl1271_hw_init(struct wl1271 *wl)
{
struct conf_tx_ac_category *conf_ac;
struct conf_tx_tid *conf_tid;
int ret, i;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
ret = wl1271_cmd_general_parms(wl);
if (ret < 0)
@@ -222,12 +519,12 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_cmd_ext_radio_parms(wl);
- if (ret < 0)
- return ret;
+ /* Mode specific init */
+ if (is_ap)
+ ret = wl1271_ap_hw_init(wl);
+ else
+ ret = wl1271_sta_hw_init(wl);
- /* Template settings */
- ret = wl1271_init_templates_config(wl);
if (ret < 0)
return ret;
@@ -254,16 +551,6 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl, false);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Beacon filtering */
- ret = wl1271_init_beacon_filter(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Configure TX patch complete interrupt behavior */
ret = wl1271_acx_tx_config_options(wl);
if (ret < 0)
@@ -274,23 +561,13 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Bluetooth WLAN coexistence */
- ret = wl1271_init_pta(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Energy detection */
ret = wl1271_init_energy_detection(wl);
if (ret < 0)
goto out_free_memmap;
- /* Beacons and boradcast settings */
- ret = wl1271_init_beacon_broadcast(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Default fragmentation threshold */
- ret = wl1271_acx_frag_threshold(wl);
+ ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
if (ret < 0)
goto out_free_memmap;
@@ -316,23 +593,13 @@ int wl1271_hw_init(struct wl1271 *wl)
goto out_free_memmap;
}
- /* Configure TX rate classes */
- ret = wl1271_acx_rate_policies(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
- /* Configure for ELP power saving */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
- if (ret < 0)
- goto out_free_memmap;
-
/* Configure HW encryption */
- ret = wl1271_init_hwenc_config(wl);
+ ret = wl1271_acx_feature_cfg(wl);
if (ret < 0)
goto out_free_memmap;
@@ -341,21 +608,17 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* disable all keep-alive templates */
- for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
- ret = wl1271_acx_keep_alive_config(wl, i,
- ACX_KEEP_ALIVE_TPL_INVALID);
- if (ret < 0)
- goto out_free_memmap;
- }
+ /* Mode specific init - post mem init */
+ if (is_ap)
+ ret = wl1271_ap_hw_init_post_mem(wl);
+ else
+ ret = wl1271_sta_hw_init_post_mem(wl);
- /* disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, false);
if (ret < 0)
goto out_free_memmap;
- /* Configure rssi/snr averaging weights */
- ret = wl1271_acx_rssi_snr_avg_weights(wl);
+ /* Configure initiator BA sessions policies */
+ ret = wl1271_set_ba_policies(wl);
if (ret < 0)
goto out_free_memmap;
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.h b/drivers/net/wireless/wl12xx/init.h
index bc26f8c53b91..3a8bd3f426d2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -21,13 +21,13 @@
*
*/
-#ifndef __WL1271_INIT_H__
-#define __WL1271_INIT_H__
+#ifndef __INIT_H__
+#define __INIT_H__
-#include "wl1271.h"
+#include "wl12xx.h"
int wl1271_hw_init_power_auth(struct wl1271 *wl);
-int wl1271_init_templates_config(struct wl1271 *wl);
+int wl1271_sta_init_templates_config(struct wl1271 *wl);
int wl1271_init_phy_config(struct wl1271 *wl);
int wl1271_init_pta(struct wl1271 *wl);
int wl1271_init_energy_detection(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.c b/drivers/net/wireless/wl12xx/io.c
index c8759acef131..d557f73e7c19 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.c
+++ b/drivers/net/wireless/wl12xx/io.c
@@ -26,9 +26,9 @@
#include <linux/crc7.h>
#include <linux/spi/spi.h>
-#include "wl1271.h"
+#include "wl12xx.h"
#include "wl12xx_80211.h"
-#include "wl1271_io.h"
+#include "io.h"
#define OCP_CMD_LOOP 32
@@ -113,6 +113,7 @@ int wl1271_set_partition(struct wl1271 *wl,
return 0;
}
+EXPORT_SYMBOL_GPL(wl1271_set_partition);
void wl1271_io_reset(struct wl1271 *wl)
{
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/io.h
index c1f92e65ded0..c1aac8292089 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -22,10 +22,10 @@
*
*/
-#ifndef __WL1271_IO_H__
-#define __WL1271_IO_H__
+#ifndef __IO_H__
+#define __IO_H__
-#include "wl1271_reg.h"
+#include "reg.h"
#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
@@ -168,5 +168,6 @@ void wl1271_unregister_hw(struct wl1271 *wl);
int wl1271_init_ieee80211(struct wl1271 *wl);
struct ieee80211_hw *wl1271_alloc_hw(void);
int wl1271_free_hw(struct wl1271 *wl);
+irqreturn_t wl1271_irq(int irq, void *data);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/main.c
index 48a4b9961ae6..8b3c8d196b03 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -31,20 +31,20 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include "wl1271.h"
+#include "wl12xx.h"
#include "wl12xx_80211.h"
-#include "wl1271_reg.h"
-#include "wl1271_io.h"
-#include "wl1271_event.h"
-#include "wl1271_tx.h"
-#include "wl1271_rx.h"
-#include "wl1271_ps.h"
-#include "wl1271_init.h"
-#include "wl1271_debugfs.h"
-#include "wl1271_cmd.h"
-#include "wl1271_boot.h"
-#include "wl1271_testmode.h"
-#include "wl1271_scan.h"
+#include "reg.h"
+#include "io.h"
+#include "event.h"
+#include "tx.h"
+#include "rx.h"
+#include "ps.h"
+#include "init.h"
+#include "debugfs.h"
+#include "cmd.h"
+#include "boot.h"
+#include "testmode.h"
+#include "scan.h"
#define WL1271_BOOT_RETRIES 3
@@ -116,11 +116,11 @@ static struct conf_drv_settings default_conf = {
},
.tx = {
.tx_energy_detection = 0,
- .rc_conf = {
+ .sta_rc_conf = {
.enabled_rates = 0,
.short_retry_limit = 10,
.long_retry_limit = 10,
- .aflags = 0
+ .aflags = 0,
},
.ac_conf_count = 4,
.ac_conf = {
@@ -153,6 +153,45 @@ static struct conf_drv_settings default_conf = {
.tx_op_limit = 1504,
},
},
+ .ap_rc_conf = {
+ [0] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ [1] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ [2] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ [3] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ },
+ .ap_mgmt_conf = {
+ .enabled_rates = CONF_TX_AP_DEFAULT_MGMT_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ .ap_bcst_conf = {
+ .enabled_rates = CONF_HW_BIT_RATE_1MBPS,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ .ap_max_tx_retries = 100,
.tid_conf_count = 4,
.tid_conf = {
[CONF_TX_AC_BE] = {
@@ -193,6 +232,8 @@ static struct conf_drv_settings default_conf = {
.tx_compl_threshold = 4,
.basic_rate = CONF_HW_BIT_RATE_1MBPS,
.basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
+ .tmpl_short_retry_limit = 10,
+ .tmpl_long_retry_limit = 10,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -215,6 +256,7 @@ static struct conf_drv_settings default_conf = {
.bet_enable = CONF_BET_MODE_ENABLE,
.bet_max_consecutive = 10,
.psm_entry_retries = 5,
+ .psm_exit_retries = 255,
.psm_entry_nullfunc_retries = 3,
.psm_entry_hangover_period = 1,
.keep_alive_interval = 55000,
@@ -233,13 +275,13 @@ static struct conf_drv_settings default_conf = {
.avg_weight_rssi_beacon = 20,
.avg_weight_rssi_data = 10,
.avg_weight_snr_beacon = 20,
- .avg_weight_snr_data = 10
+ .avg_weight_snr_data = 10,
},
.scan = {
.min_dwell_time_active = 7500,
.max_dwell_time_active = 30000,
- .min_dwell_time_passive = 30000,
- .max_dwell_time_passive = 60000,
+ .min_dwell_time_passive = 100000,
+ .max_dwell_time_passive = 100000,
.num_probe_reqs = 2,
},
.rf = {
@@ -252,9 +294,24 @@ static struct conf_drv_settings default_conf = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
},
+ .ht = {
+ .tx_ba_win_size = 64,
+ .inactivity_timeout = 10000,
+ },
+ .mem = {
+ .num_stations = 1,
+ .ssid_profiles = 1,
+ .rx_block_num = 70,
+ .tx_min_block_num = 40,
+ .dynamic_memory = 0,
+ .min_req_tx_blocks = 100,
+ .min_req_rx_blocks = 22,
+ .tx_min = 27,
+ }
};
static void __wl1271_op_remove_interface(struct wl1271 *wl);
+static void wl1271_free_ap_keys(struct wl1271 *wl);
static void wl1271_device_release(struct device *dev)
@@ -317,7 +374,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -335,6 +392,28 @@ out:
return NOTIFY_OK;
}
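+
+/*
+ * Regulatory notifier: for 5 GHz channels flagged for radar detection,
+ * force passive scanning and disallow IBSS.
+ */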
+static int wl1271_reg_notify(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_supported_band *band;
+ struct ieee80211_channel *ch;
+ int i;
+
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ for (i = 0; i < band->n_channels; i++) {
+ ch = &band->channels[i];
+ if (ch->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ if (ch->flags & IEEE80211_CHAN_RADAR)
+ ch->flags |= IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN;
+
+ }
+
+ return 0;
+}
+
static void wl1271_conf_init(struct wl1271 *wl)
{
@@ -371,7 +450,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_init_templates_config(wl);
+ ret = wl1271_sta_init_templates_config(wl);
if (ret < 0)
return ret;
@@ -403,8 +482,12 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
+ ret = wl1271_acx_sta_mem_cfg(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
/* Default fragmentation threshold */
- ret = wl1271_acx_frag_threshold(wl);
+ ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
if (ret < 0)
goto out_free_memmap;
@@ -454,14 +537,71 @@ static int wl1271_plt_init(struct wl1271 *wl)
return ret;
}
+static void wl1271_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_blks)
+{
+ bool fw_ps;
+
+ /* only regulate station links */
+ if (hlid < WL1271_AP_STA_HLID_START)
+ return;
+
+ fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+
+ /*
+ * Wake up from high level PS if the STA is asleep with too little
+	 * Wake up from high level PS if the STA is asleep with too few
+ */
+ if (!fw_ps || tx_blks < WL1271_PS_STA_MAX_BLOCKS)
+ wl1271_ps_link_end(wl, hlid);
+
+ /* Start high-level PS if the STA is asleep with enough blocks in FW */
+ else if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
+ wl1271_ps_link_start(wl, hlid, true);
+}
+
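+/*
+ * Compute how many TX blocks the FW freed per link since the last status
+ * read and adjust each station link's host-side PS state accordingly.
+ */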
+static void wl1271_irq_update_links_status(struct wl1271 *wl,
+ struct wl1271_fw_ap_status *status)
+{
+ u32 cur_fw_ps_map;
+ u8 hlid;
+
+ cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
+ if (wl->ap_fw_ps_map != cur_fw_ps_map) {
+ wl1271_debug(DEBUG_PSM,
+ "link ps prev 0x%x cur 0x%x changed 0x%x",
+ wl->ap_fw_ps_map, cur_fw_ps_map,
+ wl->ap_fw_ps_map ^ cur_fw_ps_map);
+
+ wl->ap_fw_ps_map = cur_fw_ps_map;
+ }
+
+ for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
+ u8 cnt = status->tx_lnk_free_blks[hlid] -
+ wl->links[hlid].prev_freed_blks;
+
+ wl->links[hlid].prev_freed_blks =
+ status->tx_lnk_free_blks[hlid];
+ wl->links[hlid].allocated_blks -= cnt;
+
+ wl1271_irq_ps_regulate_link(wl, hlid,
+ wl->links[hlid].allocated_blks);
+ }
+}
+
static void wl1271_fw_status(struct wl1271 *wl,
- struct wl1271_fw_status *status)
+ struct wl1271_fw_full_status *full_status)
{
+ struct wl1271_fw_common_status *status = &full_status->common;
struct timespec ts;
u32 total = 0;
int i;
- wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ wl1271_raw_read(wl, FW_STATUS_ADDR, status,
+ sizeof(struct wl1271_fw_ap_status), false);
+ else
+ wl1271_raw_read(wl, FW_STATUS_ADDR, status,
+ sizeof(struct wl1271_fw_sta_status), false);
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
@@ -481,9 +621,13 @@ static void wl1271_fw_status(struct wl1271 *wl,
total += cnt;
}
- /* if more blocks are available now, schedule some tx work */
- if (total && !skb_queue_empty(&wl->tx_queue))
- ieee80211_queue_work(wl->hw, &wl->tx_work);
+ /* if more blocks are available now, tx work can be scheduled */
+ if (total)
+ clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
+
+ /* for AP update num of allocated TX blocks per link and ps status */
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ wl1271_irq_update_links_status(wl, &full_status->ap);
/* update the host-chipset time offset */
getnstimeofday(&ts);
@@ -491,16 +635,44 @@ static void wl1271_fw_status(struct wl1271 *wl,
(s64)le32_to_cpu(status->fw_localtime);
}
-#define WL1271_IRQ_MAX_LOOPS 10
+static void wl1271_flush_deferred_work(struct wl1271 *wl)
+{
+ struct sk_buff *skb;
+
+ /* Pass all received frames to the network stack */
+ while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
+ ieee80211_rx_ni(wl->hw, skb);
+
+ /* Return sent skbs to the network stack */
+ while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
+ ieee80211_tx_status(wl->hw, skb);
+}
+
+static void wl1271_netstack_work(struct work_struct *work)
+{
+ struct wl1271 *wl =
+ container_of(work, struct wl1271, netstack_work);
+
+ do {
+ wl1271_flush_deferred_work(wl);
+ } while (skb_queue_len(&wl->deferred_rx_queue));
+}
+
+#define WL1271_IRQ_MAX_LOOPS 256
-static void wl1271_irq_work(struct work_struct *work)
+irqreturn_t wl1271_irq(int irq, void *cookie)
{
int ret;
u32 intr;
int loopcount = WL1271_IRQ_MAX_LOOPS;
+ struct wl1271 *wl = (struct wl1271 *)cookie;
+ bool done = false;
+ unsigned int defer_count;
unsigned long flags;
- struct wl1271 *wl =
- container_of(work, struct wl1271, irq_work);
+
+ /* TX might be handled here, avoid redundant work */
+ set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+ cancel_work_sync(&wl->tx_work);
mutex_lock(&wl->mutex);
@@ -509,35 +681,65 @@ static void wl1271_irq_work(struct work_struct *work)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, true);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- spin_lock_irqsave(&wl->wl_lock, flags);
- while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
- clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- loopcount--;
+ while (!done && loopcount--) {
+ /*
+ * In order to avoid a race with the hardirq, clear the flag
+ * before acknowledging the chip. Since the mutex is held,
+ * wl1271_ps_elp_wakeup cannot be called concurrently.
+ */
+ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ smp_mb__after_clear_bit();
wl1271_fw_status(wl, wl->fw_status);
- intr = le32_to_cpu(wl->fw_status->intr);
+ intr = le32_to_cpu(wl->fw_status->common.intr);
+ intr &= WL1271_INTR_MASK;
if (!intr) {
- wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
- spin_lock_irqsave(&wl->wl_lock, flags);
+ done = true;
continue;
}
- intr &= WL1271_INTR_MASK;
+ if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
+ wl1271_error("watchdog interrupt received! "
+ "starting recovery.");
+ ieee80211_queue_work(wl->hw, &wl->recovery_work);
- if (intr & WL1271_ACX_INTR_DATA) {
+ /* restarting the chip. ignore any other interrupt. */
+ goto out;
+ }
+
+ if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
+ wl1271_rx(wl, &wl->fw_status->common);
+
+ /* Check if any tx blocks were freed */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+ wl->tx_queue_count) {
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ /*
+ * In order to avoid starvation of the TX path,
+ * call the work function directly.
+ */
+ wl1271_tx_work_locked(wl);
+ } else {
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ }
+
/* check for tx results */
- if (wl->fw_status->tx_results_counter !=
+ if (wl->fw_status->common.tx_results_counter !=
(wl->tx_results_count & 0xff))
wl1271_tx_complete(wl);
- wl1271_rx(wl, wl->fw_status);
+ /* Make sure the deferred queues don't get too long */
+ defer_count = skb_queue_len(&wl->deferred_tx_queue) +
+ skb_queue_len(&wl->deferred_rx_queue);
+ if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
+ wl1271_flush_deferred_work(wl);
}
if (intr & WL1271_ACX_INTR_EVENT_A) {
@@ -556,28 +758,48 @@ static void wl1271_irq_work(struct work_struct *work)
if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
-
- spin_lock_irqsave(&wl->wl_lock, flags);
}
- if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
- ieee80211_queue_work(wl->hw, &wl->irq_work);
- else
- clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
wl1271_ps_elp_sleep(wl);
out:
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ /* In case TX was not handled here, queue TX work */
+ clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+ if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+ wl->tx_queue_count)
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
mutex_unlock(&wl->mutex);
+
+ return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(wl1271_irq);
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
const struct firmware *fw;
+ const char *fw_name;
int ret;
- ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
+ switch (wl->bss_type) {
+ case BSS_TYPE_AP_BSS:
+ fw_name = WL1271_AP_FW_NAME;
+ break;
+ case BSS_TYPE_IBSS:
+ case BSS_TYPE_STA_BSS:
+ fw_name = WL1271_FW_NAME;
+ break;
+ default:
+ wl1271_error("no compatible firmware for bss_type %d",
+ wl->bss_type);
+ return -EINVAL;
+ }
+
+ wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
+
+ ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
if (ret < 0) {
wl1271_error("could not get firmware: %d", ret);
@@ -591,6 +813,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
goto out;
}
+ vfree(wl->fw);
wl->fw_len = fw->size;
wl->fw = vmalloc(wl->fw_len);
@@ -601,7 +824,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
}
memcpy(wl->fw, fw->data, wl->fw_len);
-
+ wl->fw_bss_type = wl->bss_type;
ret = 0;
out:
@@ -737,7 +960,8 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
goto out;
}
- if (wl->fw == NULL) {
+ /* Make sure the firmware type matches the BSS type */
+ if (wl->fw == NULL || wl->fw_bss_type != wl->bss_type) {
ret = wl1271_fetch_firmware(wl);
if (ret < 0)
goto out;
@@ -770,6 +994,8 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
+ wl->bss_type = BSS_TYPE_STA_BSS;
+
while (retries) {
retries--;
ret = wl1271_chip_wakeup(wl);
@@ -786,11 +1012,10 @@ int wl1271_plt_start(struct wl1271 *wl)
wl->state = WL1271_STATE_PLT;
wl1271_notice("firmware booted in PLT mode (%s)",
- wl->chip.fw_ver);
+ wl->chip.fw_ver_str);
goto out;
irq_disable:
- wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
/* Unlocking the mutex in the middle of handling is
inherently unsafe. In this case we deem it safe to do,
@@ -799,7 +1024,9 @@ irq_disable:
work function will not do anything.) Also, any other
possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
- cancel_work_sync(&wl->irq_work);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
@@ -813,12 +1040,10 @@ out:
return ret;
}
-int wl1271_plt_stop(struct wl1271 *wl)
+int __wl1271_plt_stop(struct wl1271 *wl)
{
int ret = 0;
- mutex_lock(&wl->mutex);
-
wl1271_notice("power down");
if (wl->state != WL1271_STATE_PLT) {
@@ -828,61 +1053,75 @@ int wl1271_plt_stop(struct wl1271 *wl)
goto out;
}
- wl1271_disable_interrupts(wl);
wl1271_power_off(wl);
wl->state = WL1271_STATE_OFF;
wl->rx_counter = 0;
-out:
mutex_unlock(&wl->mutex);
-
- cancel_work_sync(&wl->irq_work);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->recovery_work);
+ mutex_lock(&wl->mutex);
+out:
+ return ret;
+}
+
+int wl1271_plt_stop(struct wl1271 *wl)
+{
+ int ret;
+ mutex_lock(&wl->mutex);
+ ret = __wl1271_plt_stop(wl);
+ mutex_unlock(&wl->mutex);
return ret;
}
-static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = txinfo->control.sta;
unsigned long flags;
+ int q;
+ u8 hlid = 0;
- /* peek into the rates configured in the STA entry */
- spin_lock_irqsave(&wl->wl_lock, flags);
- if (sta && sta->supp_rates[conf->channel->band] != wl->sta_rate_set) {
- wl->sta_rate_set = sta->supp_rates[conf->channel->band];
- set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
- }
- spin_unlock_irqrestore(&wl->wl_lock, flags);
+ q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
- /* queue the packet */
- skb_queue_tail(&wl->tx_queue, skb);
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ hlid = wl1271_tx_get_hlid(skb);
- /*
- * The chip specific setup must run before the first TX packet -
- * before that, the tx_work will not be initialized!
- */
+ spin_lock_irqsave(&wl->wl_lock, flags);
- ieee80211_queue_work(wl->hw, &wl->tx_work);
+ wl->tx_queue_count++;
/*
* The workqueue is slow to process the tx_queue and we need to stop
* the queue here, otherwise the queue will get too long.
*/
- if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
+ if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
-
- spin_lock_irqsave(&wl->wl_lock, flags);
ieee80211_stop_queues(wl->hw);
set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
}
- return NETDEV_TX_OK;
+ /* queue the packet */
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+ skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+ } else {
+ skb_queue_tail(&wl->tx_queue[q], skb);
+ }
+
+ /*
+ * The chip specific setup must run before the first TX packet -
+ * before that, the tx_work will not be initialized!
+ */
+
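+ /* skip queueing tx_work if the FW has no free blocks or the IRQ handler will handle TX */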
+ if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
+ !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
}
static struct notifier_block wl1271_dev_notifier = {
@@ -902,6 +1141,9 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
*
* The MAC address is first known when the corresponding interface
* is added. That is where we will initialize the hardware.
+ *
+ * In addition, we currently have different firmwares for AP and managed
+ * operation. We will know which to boot according to interface type.
*/
return 0;
@@ -919,18 +1161,19 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
struct wiphy *wiphy = hw->wiphy;
int retries = WL1271_BOOT_RETRIES;
int ret = 0;
+ bool booted = false;
wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
vif->type, vif->addr);
mutex_lock(&wl->mutex);
if (wl->vif) {
+ wl1271_debug(DEBUG_MAC80211,
+ "multiple vifs are not supported yet");
ret = -EBUSY;
goto out;
}
- wl->vif = vif;
-
switch (vif->type) {
case NL80211_IFTYPE_STATION:
wl->bss_type = BSS_TYPE_STA_BSS;
@@ -940,6 +1183,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
wl->bss_type = BSS_TYPE_IBSS;
wl->set_bss_type = BSS_TYPE_STA_BSS;
break;
+ case NL80211_IFTYPE_AP:
+ wl->bss_type = BSS_TYPE_AP_BSS;
+ break;
default:
ret = -EOPNOTSUPP;
goto out;
@@ -968,18 +1214,10 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
if (ret < 0)
goto irq_disable;
- wl->state = WL1271_STATE_ON;
- wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
-
- /* update hw/fw version info in wiphy struct */
- wiphy->hw_version = wl->chip.id;
- strncpy(wiphy->fw_version, wl->chip.fw_ver,
- sizeof(wiphy->fw_version));
-
- goto out;
+ booted = true;
+ break;
irq_disable:
- wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
/* Unlocking the mutex in the middle of handling is
inherently unsafe. In this case we deem it safe to do,
@@ -988,14 +1226,39 @@ irq_disable:
work function will not do anything.) Also, any other
possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
- cancel_work_sync(&wl->irq_work);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
}
- wl1271_error("firmware boot failed despite %d retries",
- WL1271_BOOT_RETRIES);
+ if (!booted) {
+ wl1271_error("firmware boot failed despite %d retries",
+ WL1271_BOOT_RETRIES);
+ goto out;
+ }
+
+ wl->vif = vif;
+ wl->state = WL1271_STATE_ON;
+ wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
+
+ /* update hw/fw version info in wiphy struct */
+ wiphy->hw_version = wl->chip.id;
+ strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
+ sizeof(wiphy->fw_version));
+
+ /*
+ * Now we know if 11a is supported (info from the NVS), so disable
+ * 11a channels if not supported
+ */
+ if (!wl->enable_11a)
+ wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
+
+ wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
+ wl->enable_11a ? "" : "not ");
+
out:
mutex_unlock(&wl->mutex);
@@ -1025,17 +1288,18 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
wl->scan.state = WL1271_SCAN_STATE_IDLE;
kfree(wl->scan.scanned_ch);
wl->scan.scanned_ch = NULL;
+ wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
}
wl->state = WL1271_STATE_OFF;
- wl1271_disable_interrupts(wl);
-
mutex_unlock(&wl->mutex);
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
cancel_delayed_work_sync(&wl->scan_complete_work);
- cancel_work_sync(&wl->irq_work);
+ cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->pspoll_work);
cancel_delayed_work_sync(&wl->elp_work);
@@ -1064,10 +1328,13 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
wl->time_offset = 0;
wl->session_counter = 0;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->sta_rate_set = 0;
wl->flags = 0;
wl->vif = NULL;
wl->filters = 0;
+ wl1271_free_ap_keys(wl);
+ memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
+ wl->ap_fw_ps_map = 0;
+ wl->ap_ps_map = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_blocks_freed[i] = 0;
@@ -1088,17 +1355,22 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
struct wl1271 *wl = hw->priv;
mutex_lock(&wl->mutex);
- WARN_ON(wl->vif != vif);
- __wl1271_op_remove_interface(wl);
- mutex_unlock(&wl->mutex);
+ /*
+ * wl->vif can be null here if someone shuts down the interface
+ * just when hardware recovery has been started.
+ */
+ if (wl->vif) {
+ WARN_ON(wl->vif != vif);
+ __wl1271_op_remove_interface(wl);
+ }
+ mutex_unlock(&wl->mutex);
cancel_work_sync(&wl->recovery_work);
}
static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
{
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+ wl1271_set_default_filters(wl);
/* combine requested filters with current filter config */
filters = wl->filters | filters;
@@ -1233,25 +1505,7 @@ static void wl1271_set_band_rate(struct wl1271 *wl)
wl->basic_rate_set = wl->conf.tx.basic_rate_5;
}
-static u32 wl1271_min_rate_get(struct wl1271 *wl)
-{
- int i;
- u32 rate = 0;
-
- if (!wl->basic_rate_set) {
- WARN_ON(1);
- wl->basic_rate_set = wl->conf.tx.basic_rate;
- }
-
- for (i = 0; !rate; i++) {
- if ((wl->basic_rate_set >> i) & 0x1)
- rate = 1 << i;
- }
-
- return rate;
-}
-
-static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
+static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
{
int ret;
@@ -1261,9 +1515,8 @@ static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
if (ret < 0)
goto out;
}
- wl->rate_set = wl1271_min_rate_get(wl);
- wl->sta_rate_set = 0;
- ret = wl1271_acx_rate_policies(wl);
+ wl->rate_set = wl1271_tx_min_rate_get(wl);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
goto out;
ret = wl1271_acx_keep_alive_config(
@@ -1292,14 +1545,17 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
struct wl1271 *wl = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int channel, ret = 0;
+ bool is_ap;
channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s",
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+ " changed 0x%x",
channel,
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
conf->power_level,
- conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
+ conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
+ changed);
/*
* mac80211 will go to idle nearly immediately after transmitting some
@@ -1312,10 +1568,14 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF))
+ if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ ret = -EAGAIN;
goto out;
+ }
+
+ is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -1326,31 +1586,34 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
wl->band = conf->channel->band;
wl->channel = channel;
- /*
- * FIXME: the mac80211 should really provide a fixed rate
- * to use here. for now, just use the smallest possible rate
- * for the band as a fixed rate for association frames and
- * other control messages.
- */
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
- wl1271_set_band_rate(wl);
-
- wl->basic_rate = wl1271_min_rate_get(wl);
- ret = wl1271_acx_rate_policies(wl);
- if (ret < 0)
- wl1271_warning("rate policy for update channel "
- "failed %d", ret);
+ if (!is_ap) {
+ /*
+ * FIXME: the mac80211 should really provide a fixed
+ * rate to use here. for now, just use the smallest
+ * possible rate for the band as a fixed rate for
+ * association frames and other control messages.
+ */
+ if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ wl1271_set_band_rate(wl);
- if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
- ret = wl1271_join(wl, false);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
- wl1271_warning("cmd join to update channel "
+ wl1271_warning("rate policy for channel "
"failed %d", ret);
+
+ if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+ ret = wl1271_join(wl, false);
+ if (ret < 0)
+ wl1271_warning("cmd join on channel "
+ "failed %d", ret);
+ }
}
}
- if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- ret = wl1271_handle_idle(wl, conf->flags & IEEE80211_CONF_IDLE);
+ if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
+ ret = wl1271_sta_handle_idle(wl,
+ conf->flags & IEEE80211_CONF_IDLE);
if (ret < 0)
wl1271_warning("idle mode change failed %d", ret);
}
@@ -1457,7 +1720,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
struct wl1271 *wl = hw->priv;
int ret;
- wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter");
+ wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
+ " total %x", changed, *total);
mutex_lock(&wl->mutex);
@@ -1467,19 +1731,20 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
-
- if (*total & FIF_ALLMULTI)
- ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
- else if (fp)
- ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
- fp->mc_list,
- fp->mc_list_length);
- if (ret < 0)
- goto out_sleep;
+ if (wl->bss_type != BSS_TYPE_AP_BSS) {
+ if (*total & FIF_ALLMULTI)
+ ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
+ else if (fp)
+ ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
+ fp->mc_list,
+ fp->mc_list_length);
+ if (ret < 0)
+ goto out_sleep;
+ }
/* determine, whether supported filter values have changed */
if (changed == 0)
@@ -1502,41 +1767,200 @@ out:
kfree(fp);
}
+static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
+{
+ struct wl1271_ap_key *ap_key;
+ int i;
+
+ wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
+
+ if (key_size > MAX_KEY_SIZE)
+ return -EINVAL;
+
+ /*
+ * Find next free entry in ap_keys. Also check we are not replacing
+ * an existing key.
+ */
+ for (i = 0; i < MAX_NUM_KEYS; i++) {
+ if (wl->recorded_ap_keys[i] == NULL)
+ break;
+
+ if (wl->recorded_ap_keys[i]->id == id) {
+ wl1271_warning("trying to record key replacement");
+ return -EINVAL;
+ }
+ }
+
+ if (i == MAX_NUM_KEYS)
+ return -EBUSY;
+
+ ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
+ if (!ap_key)
+ return -ENOMEM;
+
+ ap_key->id = id;
+ ap_key->key_type = key_type;
+ ap_key->key_size = key_size;
+ memcpy(ap_key->key, key, key_size);
+ ap_key->hlid = hlid;
+ ap_key->tx_seq_32 = tx_seq_32;
+ ap_key->tx_seq_16 = tx_seq_16;
+
+ wl->recorded_ap_keys[i] = ap_key;
+ return 0;
+}
+
+static void wl1271_free_ap_keys(struct wl1271 *wl)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_KEYS; i++) {
+ kfree(wl->recorded_ap_keys[i]);
+ wl->recorded_ap_keys[i] = NULL;
+ }
+}
+
+static int wl1271_ap_init_hwenc(struct wl1271 *wl)
+{
+ int i, ret = 0;
+ struct wl1271_ap_key *key;
+ bool wep_key_added = false;
+
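+ /* configure the keys that were recorded before the AP was started */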
+ for (i = 0; i < MAX_NUM_KEYS; i++) {
+ if (wl->recorded_ap_keys[i] == NULL)
+ break;
+
+ key = wl->recorded_ap_keys[i];
+ ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
+ key->id, key->key_type,
+ key->key_size, key->key,
+ key->hlid, key->tx_seq_32,
+ key->tx_seq_16);
+ if (ret < 0)
+ goto out;
+
+ if (key->key_type == KEY_WEP)
+ wep_key_added = true;
+ }
+
+ if (wep_key_added) {
+ ret = wl1271_cmd_set_ap_default_wep_key(wl, wl->default_key);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ wl1271_free_ap_keys(wl);
+ return ret;
+}
+
+static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u32 tx_seq_32,
+ u16 tx_seq_16, struct ieee80211_sta *sta)
+{
+ int ret;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
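+ /* AP keys are addressed by HLID, STA keys by peer MAC address */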
+ if (is_ap) {
+ struct wl1271_station *wl_sta;
+ u8 hlid;
+
+ if (sta) {
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ hlid = wl_sta->hlid;
+ } else {
+ hlid = WL1271_AP_BROADCAST_HLID;
+ }
+
+ if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ /*
+ * We do not support removing keys after AP shutdown.
+ * Pretend we do to make mac80211 happy.
+ */
+ if (action != KEY_ADD_OR_REPLACE)
+ return 0;
+
+ ret = wl1271_record_ap_key(wl, id,
+ key_type, key_size,
+ key, hlid, tx_seq_32,
+ tx_seq_16);
+ } else {
+ ret = wl1271_cmd_set_ap_key(wl, action,
+ id, key_type, key_size,
+ key, hlid, tx_seq_32,
+ tx_seq_16);
+ }
+
+ if (ret < 0)
+ return ret;
+ } else {
+ const u8 *addr;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ addr = sta ? sta->addr : bcast_addr;
+
+ if (is_zero_ether_addr(addr)) {
+ /* We don't support TX-only encryption */
+ return -EOPNOTSUPP;
+ }
+
+ /* The wl1271 does not allow removing unicast keys - they
+ will be cleared automatically on the next CMD_JOIN. Ignore the
+ request silently, as we don't want mac80211 to emit
+ an error message. */
+ if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
+ return 0;
+
+ ret = wl1271_cmd_set_sta_key(wl, action,
+ id, key_type, key_size,
+ key, addr, tx_seq_32,
+ tx_seq_16);
+ if (ret < 0)
+ return ret;
+
+ /* the default WEP key needs to be configured at least once */
+ if (key_type == KEY_WEP) {
+ ret = wl1271_cmd_set_sta_default_wep_key(wl,
+ wl->default_key);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf)
{
struct wl1271 *wl = hw->priv;
- const u8 *addr;
int ret;
u32 tx_seq_32 = 0;
u16 tx_seq_16 = 0;
u8 key_type;
- static const u8 bcast_addr[ETH_ALEN] =
- { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-
wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
- addr = sta ? sta->addr : bcast_addr;
-
- wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
- wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
+ wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
key_conf->cipher, key_conf->keyidx,
key_conf->keylen, key_conf->flags);
wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
- if (is_zero_ether_addr(addr)) {
- /* We dont support TX only encryption */
- ret = -EOPNOTSUPP;
- goto out;
- }
-
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
@@ -1575,36 +1999,21 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
- ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE,
- key_conf->keyidx, key_type,
- key_conf->keylen, key_conf->key,
- addr, tx_seq_32, tx_seq_16);
+ ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
+ key_conf->keyidx, key_type,
+ key_conf->keylen, key_conf->key,
+ tx_seq_32, tx_seq_16, sta);
if (ret < 0) {
wl1271_error("Could not add or replace key");
goto out_sleep;
}
-
- /* the default WEP key needs to be configured at least once */
- if (key_type == KEY_WEP) {
- ret = wl1271_cmd_set_default_wep_key(wl,
- wl->default_key);
- if (ret < 0)
- goto out_sleep;
- }
break;
case DISABLE_KEY:
- /* The wl1271 does not allow to remove unicast keys - they
- will be cleared automatically on next CMD_JOIN. Ignore the
- request silently, as we dont want the mac80211 to emit
- an error message. */
- if (!is_broadcast_ether_addr(addr))
- break;
-
- ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
- key_conf->keyidx, key_type,
- key_conf->keylen, key_conf->key,
- addr, 0, 0);
+ ret = wl1271_set_key(wl, KEY_REMOVE,
+ key_conf->keyidx, key_type,
+ key_conf->keylen, key_conf->key,
+ 0, 0, sta);
if (ret < 0) {
wl1271_error("Could not remove key");
goto out_sleep;
@@ -1623,7 +2032,6 @@ out_sleep:
out_unlock:
mutex_unlock(&wl->mutex);
-out:
return ret;
}
@@ -1645,7 +2053,17 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ if (wl->state == WL1271_STATE_OFF) {
+ /*
+ * We cannot return -EBUSY here because cfg80211 will expect
+ * a call to ieee80211_scan_completed if we do - in this case
+ * there won't be any call.
+ */
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -1659,6 +2077,34 @@ out:
return ret;
}
+static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct wl1271 *wl = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_acx_frag_threshold(wl, (u16)value);
+ if (ret < 0)
+ wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
+
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct wl1271 *wl = hw->priv;
@@ -1666,10 +2112,12 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF))
+ if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ ret = -EAGAIN;
goto out;
+ }
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -1685,92 +2133,223 @@ out:
return ret;
}
-static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *beacon)
+static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
+ int offset)
{
- u8 *ptr = beacon->data +
- offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ u8 *ptr = skb->data + offset;
/* find the location of the ssid in the beacon */
- while (ptr < beacon->data + beacon->len) {
+ while (ptr < skb->data + skb->len) {
if (ptr[0] == WLAN_EID_SSID) {
wl->ssid_len = ptr[1];
memcpy(wl->ssid, ptr+2, wl->ssid_len);
- return;
+ return 0;
}
- ptr += ptr[1];
+ ptr += (ptr[1] + 2);
}
- wl1271_error("ad-hoc beacon template has no SSID!\n");
+
+ wl1271_error("No SSID in IEs!\n");
+ return -ENOENT;
}
-static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
+static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- enum wl1271_cmd_ps_mode mode;
- struct wl1271 *wl = hw->priv;
- bool do_join = false;
- bool set_assoc = false;
- int ret;
+ int ret = 0;
- wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
+ else
+ ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
+ if (ret < 0) {
+ wl1271_warning("Set slot time failed %d", ret);
+ goto out;
+ }
+ }
- mutex_lock(&wl->mutex);
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ if (bss_conf->use_short_preamble)
+ wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+ else
+ wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+ }
- ret = wl1271_ps_elp_wakeup(wl, false);
- if (ret < 0)
- goto out;
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ if (bss_conf->use_cts_prot)
+ ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+ else
+ ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+ if (ret < 0) {
+ wl1271_warning("Set ctsprotect failed %d", ret);
+ goto out;
+ }
+ }
- if ((changed & BSS_CHANGED_BEACON_INT) &&
- (wl->bss_type == BSS_TYPE_IBSS)) {
- wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
+out:
+ return ret;
+}
+
+static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ int ret = 0;
+
+ if ((changed & BSS_CHANGED_BEACON_INT)) {
+ wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
bss_conf->beacon_int);
wl->beacon_int = bss_conf->beacon_int;
- do_join = true;
}
- if ((changed & BSS_CHANGED_BEACON) &&
- (wl->bss_type == BSS_TYPE_IBSS)) {
- struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+ if ((changed & BSS_CHANGED_BEACON)) {
+ struct ieee80211_hdr *hdr;
+ int ieoffset = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
+ u16 tmpl_id;
- wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated");
+ if (!beacon)
+ goto out;
- if (beacon) {
- struct ieee80211_hdr *hdr;
+ wl1271_debug(DEBUG_MASTER, "beacon updated");
- wl1271_ssid_set(wl, beacon);
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
- beacon->data,
- beacon->len, 0,
- wl1271_min_rate_get(wl));
+ ret = wl1271_ssid_set(wl, beacon, ieoffset);
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out;
+ }
+ tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
+ CMD_TEMPL_BEACON;
+ ret = wl1271_cmd_template_set(wl, tmpl_id,
+ beacon->data,
+ beacon->len, 0,
+ wl1271_tx_min_rate_get(wl));
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out;
+ }
- if (ret < 0) {
- dev_kfree_skb(beacon);
- goto out_sleep;
- }
+ hdr = (struct ieee80211_hdr *) beacon->data;
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_RESP);
+
+ tmpl_id = is_ap ? CMD_TEMPL_AP_PROBE_RESPONSE :
+ CMD_TEMPL_PROBE_RESPONSE;
+ ret = wl1271_cmd_template_set(wl,
+ tmpl_id,
+ beacon->data,
+ beacon->len, 0,
+ wl1271_tx_min_rate_get(wl));
+ dev_kfree_skb(beacon);
+ if (ret < 0)
+ goto out;
+ }
- hdr = (struct ieee80211_hdr *) beacon->data;
- hdr->frame_control = cpu_to_le16(
- IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_PROBE_RESP);
+out:
+ return ret;
+}
- ret = wl1271_cmd_template_set(wl,
- CMD_TEMPL_PROBE_RESPONSE,
- beacon->data,
- beacon->len, 0,
- wl1271_min_rate_get(wl));
- dev_kfree_skb(beacon);
- if (ret < 0)
- goto out_sleep;
+/* AP mode changes */
+static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ int ret = 0;
- /* Need to update the SSID (for filtering etc) */
- do_join = true;
+ if ((changed & BSS_CHANGED_BASIC_RATES)) {
+ u32 rates = bss_conf->basic_rates;
+ struct conf_tx_rate_class mgmt_rc;
+
+ wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ wl1271_debug(DEBUG_AP, "basic rates: 0x%x",
+ wl->basic_rate_set);
+
+ /* update the AP management rate policy with the new rates */
+ mgmt_rc.enabled_rates = wl->basic_rate_set;
+ mgmt_rc.long_retry_limit = 10;
+ mgmt_rc.short_retry_limit = 10;
+ mgmt_rc.aflags = 0;
+ ret = wl1271_acx_ap_rate_policy(wl, &mgmt_rc,
+ ACX_TX_AP_MODE_MGMT_RATE);
+ if (ret < 0) {
+ wl1271_error("AP mgmt policy change failed %d", ret);
+ goto out;
}
}
- if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
- (wl->bss_type == BSS_TYPE_IBSS)) {
+ ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
+ if (ret < 0)
+ goto out;
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
+ if (bss_conf->enable_beacon) {
+ if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ ret = wl1271_cmd_start_bss(wl);
+ if (ret < 0)
+ goto out;
+
+ set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ wl1271_debug(DEBUG_AP, "started AP");
+
+ ret = wl1271_ap_init_hwenc(wl);
+ if (ret < 0)
+ goto out;
+ }
+ } else {
+ if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ ret = wl1271_cmd_stop_bss(wl);
+ if (ret < 0)
+ goto out;
+
+ clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ wl1271_debug(DEBUG_AP, "stopped AP");
+ }
+ }
+ }
+
+ ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ if (ret < 0)
+ goto out;
+out:
+ return;
+}
+
+/* STA/IBSS mode changes */
+static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ bool do_join = false, set_assoc = false;
+ bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ u32 sta_rate_set = 0;
+ int ret;
+ struct ieee80211_sta *sta;
+ bool sta_exists = false;
+ struct ieee80211_sta_ht_cap sta_ht_cap;
+
+ if (is_ibss) {
+ ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
+ changed);
+ if (ret < 0)
+ goto out;
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_INT) && is_ibss)
+ do_join = true;
+
+ /* Need to update the SSID (for filtering etc) */
+ if ((changed & BSS_CHANGED_BEACON) && is_ibss)
+ do_join = true;
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) && is_ibss) {
wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
bss_conf->enable_beacon ? "enabled" : "disabled");
@@ -1781,7 +2360,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
do_join = true;
}
- if (changed & BSS_CHANGED_CQM) {
+ if ((changed & BSS_CHANGED_CQM)) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
enable = true;
@@ -1799,26 +2378,73 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
* and enable the BSSID filter
*/
memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
- memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+ memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+ if (!is_zero_ether_addr(wl->bssid)) {
ret = wl1271_cmd_build_null_data(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
ret = wl1271_build_qos_null_data(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* filter out all packets not from this BSSID */
wl1271_configure_filters(wl, 0);
/* Need to update the BSSID (for filtering etc) */
do_join = true;
+ }
+ }
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (sta) {
+ /* save the supp_rates of the ap */
+ sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
+ if (sta->ht_cap.ht_supported)
+ sta_rate_set |=
+ (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
+ sta_ht_cap = sta->ht_cap;
+ sta_exists = true;
+ }
+ rcu_read_unlock();
+
+ if (sta_exists) {
+ /* handle new association with HT and HT information change */
+ if ((changed & BSS_CHANGED_HT) &&
+ (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
+ ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
+ true);
+ if (ret < 0) {
+ wl1271_warning("Set ht cap true failed %d",
+ ret);
+ goto out;
+ }
+ ret = wl1271_acx_set_ht_information(wl,
+ bss_conf->ht_operation_mode);
+ if (ret < 0) {
+ wl1271_warning("Set ht information failed %d",
+ ret);
+ goto out;
+ }
+ }
+ /* handle new association without HT and disassociation */
+ else if (changed & BSS_CHANGED_ASSOC) {
+ ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
+ false);
+ if (ret < 0) {
+ wl1271_warning("Set ht cap false failed %d",
+ ret);
+ goto out;
+ }
+ }
}
- if (changed & BSS_CHANGED_ASSOC) {
+ if ((changed & BSS_CHANGED_ASSOC)) {
if (bss_conf->assoc) {
u32 rates;
+ int ieoffset;
wl->aid = bss_conf->aid;
set_assoc = true;
@@ -1831,10 +2457,13 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
rates = bss_conf->basic_rates;
wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
rates);
- wl->basic_rate = wl1271_min_rate_get(wl);
- ret = wl1271_acx_rate_policies(wl);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ if (sta_rate_set)
+ wl->rate_set = wl1271_tx_enabled_rates_get(wl,
+ sta_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
/*
* with wl1271, we don't need to update the
@@ -1844,31 +2473,33 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
*/
ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
if (ret < 0)
- goto out_sleep;
+ goto out;
/*
- * The SSID is intentionally set to NULL here - the
- * firmware will set the probe request with a
- * broadcast SSID regardless of what we set in the
- * template.
+ * Get a template for hardware connection maintenance
*/
- ret = wl1271_cmd_build_probe_req(wl, NULL, 0,
- NULL, 0, wl->band);
+ dev_kfree_skb(wl->probereq);
+ wl->probereq = wl1271_cmd_build_ap_probe_req(wl, NULL);
+ ieoffset = offsetof(struct ieee80211_mgmt,
+ u.probe_req.variable);
+ wl1271_ssid_set(wl, wl->probereq, ieoffset);
/* enable the connection monitoring feature */
ret = wl1271_acx_conn_monit_params(wl, true);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* If we want to go in PSM but we're not there yet */
if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ enum wl1271_cmd_ps_mode mode;
+
mode = STATION_POWER_SAVE_MODE;
ret = wl1271_ps_set_mode(wl, mode,
wl->basic_rate,
true);
if (ret < 0)
- goto out_sleep;
+ goto out;
}
} else {
/* use defaults when not associated */
@@ -1876,79 +2507,104 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
wl->aid = 0;
+ /* free probe-request template */
+ dev_kfree_skb(wl->probereq);
+ wl->probereq = NULL;
+
/* re-enable dynamic ps - just in case */
ieee80211_enable_dyn_ps(wl->vif);
/* revert back to minimum rates for the current band */
wl1271_set_band_rate(wl);
- wl->basic_rate = wl1271_min_rate_get(wl);
- ret = wl1271_acx_rate_policies(wl);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* disable connection monitor features */
ret = wl1271_acx_conn_monit_params(wl, false);
/* Disable the keep-alive feature */
ret = wl1271_acx_keep_alive_mode(wl, false);
-
if (ret < 0)
- goto out_sleep;
- }
-
- }
+ goto out;
- if (changed & BSS_CHANGED_ERP_SLOT) {
- if (bss_conf->use_short_slot)
- ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
- else
- ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
- if (ret < 0) {
- wl1271_warning("Set slot time failed %d", ret);
- goto out_sleep;
+ /* restore the bssid filter and go to dummy bssid */
+ wl1271_unjoin(wl);
+ wl1271_dummy_join(wl);
}
}
- if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- if (bss_conf->use_short_preamble)
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
- else
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
- }
-
- if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- if (bss_conf->use_cts_prot)
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
- else
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
- if (ret < 0) {
- wl1271_warning("Set ctsprotect failed %d", ret);
- goto out_sleep;
- }
- }
+ ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ if (ret < 0)
+ goto out;
if (changed & BSS_CHANGED_ARP_FILTER) {
__be32 addr = bss_conf->arp_addr_list[0];
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
- if (bss_conf->arp_addr_cnt == 1 && bss_conf->arp_filter_enabled)
- ret = wl1271_acx_arp_ip_filter(wl, true, addr);
- else
- ret = wl1271_acx_arp_ip_filter(wl, false, addr);
+ if (bss_conf->arp_addr_cnt == 1 &&
+ bss_conf->arp_filter_enabled) {
+ /*
+ * The template should have been configured only upon
+ * association. However, it seems that the correct IP
+ * isn't being set (when sending), so we have to
+ * reconfigure the template upon every IP change.
+ */
+ ret = wl1271_cmd_build_arp_rsp(wl, addr);
+ if (ret < 0) {
+ wl1271_warning("build arp rsp failed: %d", ret);
+ goto out;
+ }
+
+ ret = wl1271_acx_arp_ip_filter(wl,
+ ACX_ARP_FILTER_ARP_FILTERING,
+ addr);
+ } else
+ ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
if (ret < 0)
- goto out_sleep;
+ goto out;
}
if (do_join) {
ret = wl1271_join(wl, set_assoc);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
- goto out_sleep;
+ goto out;
}
}
-out_sleep:
+out:
+ return;
+}
+
+static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ struct wl1271 *wl = hw->priv;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ int ret;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
+ (int)changed);
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ if (is_ap)
+ wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
+ else
+ wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
+
wl1271_ps_elp_sleep(wl);
out:
@@ -1960,37 +2616,66 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
{
struct wl1271 *wl = hw->priv;
u8 ps_scheme;
- int ret;
+ int ret = 0;
mutex_lock(&wl->mutex);
wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
- ret = wl1271_ps_elp_wakeup(wl, false);
- if (ret < 0)
- goto out;
-
- /* the txop is confed in units of 32us by the mac80211, we need us */
- ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
- params->cw_min, params->cw_max,
- params->aifs, params->txop << 5);
- if (ret < 0)
- goto out_sleep;
-
if (params->uapsd)
ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
else
ps_scheme = CONF_PS_SCHEME_LEGACY;
- ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
- CONF_CHANNEL_TYPE_EDCF,
- wl1271_tx_get_queue(queue),
- ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0);
- if (ret < 0)
- goto out_sleep;
+ if (wl->state == WL1271_STATE_OFF) {
+ /*
+ * If the state is off, the parameters will be recorded and
+ * configured on init. This happens in AP-mode.
+ */
+ struct conf_tx_ac_category *conf_ac =
+ &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)];
+ struct conf_tx_tid *conf_tid =
+ &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)];
+
+ conf_ac->ac = wl1271_tx_get_queue(queue);
+ conf_ac->cw_min = (u8)params->cw_min;
+ conf_ac->cw_max = params->cw_max;
+ conf_ac->aifsn = params->aifs;
+ conf_ac->tx_op_limit = params->txop << 5;
+
+ conf_tid->queue_id = wl1271_tx_get_queue(queue);
+ conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF;
+ conf_tid->tsid = wl1271_tx_get_queue(queue);
+ conf_tid->ps_scheme = ps_scheme;
+ conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY;
+ conf_tid->apsd_conf[0] = 0;
+ conf_tid->apsd_conf[1] = 0;
+ } else {
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * mac80211 configures the txop in units of 32us,
+ * but we need it in microseconds
+ */
+ ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+ params->cw_min, params->cw_max,
+ params->aifs, params->txop << 5);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+ CONF_CHANNEL_TYPE_EDCF,
+ wl1271_tx_get_queue(queue),
+ ps_scheme, CONF_ACK_POLICY_LEGACY,
+ 0, 0);
+ if (ret < 0)
+ goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ wl1271_ps_elp_sleep(wl);
+ }
out:
mutex_unlock(&wl->mutex);
@@ -2009,7 +2694,10 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
mutex_lock(&wl->mutex);
- ret = wl1271_ps_elp_wakeup(wl, false);
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -2030,17 +2718,195 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
{
struct wl1271 *wl = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
-
+
if (idx != 0)
return -ENOENT;
-
+
survey->channel = conf->channel;
survey->filled = SURVEY_INFO_NOISE_DBM;
survey->noise = wl->noise;
-
+
return 0;
}
+static int wl1271_allocate_sta(struct wl1271 *wl,
+ struct ieee80211_sta *sta,
+ u8 *hlid)
+{
+ struct wl1271_station *wl_sta;
+ int id;
+
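+ /* reserve the first free HLID slot for the new station */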
+ id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
+ if (id >= AP_MAX_STATIONS) {
+ wl1271_warning("could not allocate HLID - too much stations");
+ return -EBUSY;
+ }
+
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ __set_bit(id, wl->ap_hlid_map);
+ wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
+ *hlid = wl_sta->hlid;
+ memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
+ return 0;
+}
+
+static void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
+{
+ int id = hlid - WL1271_AP_STA_HLID_START;
+
+ if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+ return;
+
+ __clear_bit(id, wl->ap_hlid_map);
+ memset(wl->links[hlid].addr, 0, ETH_ALEN);
+ wl1271_tx_reset_link_queues(wl, hlid);
+ __clear_bit(hlid, &wl->ap_ps_map);
+ __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+}
+
+static int wl1271_op_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wl1271 *wl = hw->priv;
+ int ret = 0;
+ u8 hlid;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS)
+ goto out;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
+
+ ret = wl1271_allocate_sta(wl, sta, &hlid);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out_free_sta;
+
+ ret = wl1271_cmd_add_sta(wl, sta, hlid);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+
+out_free_sta:
+ if (ret < 0)
+ wl1271_free_sta(wl, hlid);
+
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+}
+
+static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl1271_station *wl_sta;
+ int ret = 0, id;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS)
+ goto out;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
+
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
+ if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_cmd_remove_sta(wl, wl_sta->hlid);
+ if (ret < 0)
+ goto out_sleep;
+
+ wl1271_free_sta(wl, wl_sta->hlid);
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+}
+
+int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct wl1271 *wl = hw->priv;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ if (wl->ba_support) {
+ ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn,
+ true);
+ if (!ret)
+ wl->ba_rx_bitmap |= BIT(tid);
+ } else {
+ ret = -ENOTSUPP;
+ }
+ break;
+
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = wl1271_acx_set_ba_receiver_session(wl, tid, 0, false);
+ if (!ret)
+ wl->ba_rx_bitmap &= ~BIT(tid);
+ break;
+
+ /*
+ * The BA initiator session is managed in the FW independently.
+ * Fall through here on purpose for all TX AMPDU actions.
+ */
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = -EINVAL;
+ break;
+
+ default:
+ wl1271_error("Incorrect ampdu action id=%x\n", action);
+ ret = -EINVAL;
+ }
+
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
/* can't be const, mac80211 writes to this */
static struct ieee80211_rate wl1271_rates[] = {
{ .bitrate = 10,
@@ -2084,37 +2950,35 @@ static struct ieee80211_rate wl1271_rates[] = {
.hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
-/*
- * Can't be const, mac80211 writes to this. The order of the channels here
- * is designed to improve scanning.
- */
+/* can't be const, mac80211 writes to this */
static struct ieee80211_channel wl1271_channels[] = {
{ .hw_value = 1, .center_freq = 2412, .max_power = 25 },
- { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
- { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
- { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
- { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
- { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
- { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
- { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
- { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
- { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
{ .hw_value = 2, .center_freq = 2417, .max_power = 25 },
+ { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
+ { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
+ { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
{ .hw_value = 6, .center_freq = 2437, .max_power = 25 },
+ { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
+ { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
+ { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
{ .hw_value = 10, .center_freq = 2457, .max_power = 25 },
+ { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
+ { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
+ { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
+ { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
};
/* mapping to indexes for wl1271_rates */
static const u8 wl1271_rate_to_idx_2ghz[] = {
/* MCS rates are used only with 11n */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
+ 7, /* CONF_HW_RXTX_RATE_MCS7 */
+ 6, /* CONF_HW_RXTX_RATE_MCS6 */
+ 5, /* CONF_HW_RXTX_RATE_MCS5 */
+ 4, /* CONF_HW_RXTX_RATE_MCS4 */
+ 3, /* CONF_HW_RXTX_RATE_MCS3 */
+ 2, /* CONF_HW_RXTX_RATE_MCS2 */
+ 1, /* CONF_HW_RXTX_RATE_MCS1 */
+ 0, /* CONF_HW_RXTX_RATE_MCS0 */
11, /* CONF_HW_RXTX_RATE_54 */
10, /* CONF_HW_RXTX_RATE_48 */
@@ -2134,12 +2998,34 @@ static const u8 wl1271_rate_to_idx_2ghz[] = {
0 /* CONF_HW_RXTX_RATE_1 */
};
+/* 11n STA capabilities */
+#define HW_RX_HIGHEST_RATE 72
+
+#ifdef CONFIG_WL12XX_HT
+#define WL12XX_HT_CAP { \
+ .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20, \
+ .ht_supported = true, \
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, \
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
+ .mcs = { \
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \
+ .rx_highest = cpu_to_le16(HW_RX_HIGHEST_RATE), \
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
+ }, \
+}
+#else
+#define WL12XX_HT_CAP { \
+ .ht_supported = false, \
+}
+#endif
+
/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1271_band_2ghz = {
.channels = wl1271_channels,
.n_channels = ARRAY_SIZE(wl1271_channels),
.bitrates = wl1271_rates,
.n_bitrates = ARRAY_SIZE(wl1271_rates),
+ .ht_cap = WL12XX_HT_CAP,
};
/* 5 GHz data rates for WL1273 */
@@ -2170,66 +3056,55 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
.hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
-/*
- * 5 GHz band channels for WL1273 - can't be const, mac80211 writes to this.
- * The order of the channels here is designed to improve scanning.
- */
+/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
- { .hw_value = 183, .center_freq = 4915},
- { .hw_value = 188, .center_freq = 4940},
+ { .hw_value = 7, .center_freq = 5035},
{ .hw_value = 8, .center_freq = 5040},
- { .hw_value = 34, .center_freq = 5170},
- { .hw_value = 44, .center_freq = 5220},
- { .hw_value = 60, .center_freq = 5300},
- { .hw_value = 112, .center_freq = 5560},
- { .hw_value = 132, .center_freq = 5660},
- { .hw_value = 157, .center_freq = 5785},
- { .hw_value = 184, .center_freq = 4920},
- { .hw_value = 189, .center_freq = 4945},
{ .hw_value = 9, .center_freq = 5045},
- { .hw_value = 36, .center_freq = 5180},
- { .hw_value = 46, .center_freq = 5230},
- { .hw_value = 64, .center_freq = 5320},
- { .hw_value = 116, .center_freq = 5580},
- { .hw_value = 136, .center_freq = 5680},
- { .hw_value = 192, .center_freq = 4960},
{ .hw_value = 11, .center_freq = 5055},
- { .hw_value = 38, .center_freq = 5190},
- { .hw_value = 48, .center_freq = 5240},
- { .hw_value = 100, .center_freq = 5500},
- { .hw_value = 120, .center_freq = 5600},
- { .hw_value = 140, .center_freq = 5700},
- { .hw_value = 185, .center_freq = 4925},
- { .hw_value = 196, .center_freq = 4980},
{ .hw_value = 12, .center_freq = 5060},
- { .hw_value = 40, .center_freq = 5200},
- { .hw_value = 52, .center_freq = 5260},
- { .hw_value = 104, .center_freq = 5520},
- { .hw_value = 124, .center_freq = 5620},
- { .hw_value = 149, .center_freq = 5745},
- { .hw_value = 161, .center_freq = 5805},
- { .hw_value = 187, .center_freq = 4935},
- { .hw_value = 7, .center_freq = 5035},
{ .hw_value = 16, .center_freq = 5080},
+ { .hw_value = 34, .center_freq = 5170},
+ { .hw_value = 36, .center_freq = 5180},
+ { .hw_value = 38, .center_freq = 5190},
+ { .hw_value = 40, .center_freq = 5200},
{ .hw_value = 42, .center_freq = 5210},
+ { .hw_value = 44, .center_freq = 5220},
+ { .hw_value = 46, .center_freq = 5230},
+ { .hw_value = 48, .center_freq = 5240},
+ { .hw_value = 52, .center_freq = 5260},
{ .hw_value = 56, .center_freq = 5280},
+ { .hw_value = 60, .center_freq = 5300},
+ { .hw_value = 64, .center_freq = 5320},
+ { .hw_value = 100, .center_freq = 5500},
+ { .hw_value = 104, .center_freq = 5520},
{ .hw_value = 108, .center_freq = 5540},
+ { .hw_value = 112, .center_freq = 5560},
+ { .hw_value = 116, .center_freq = 5580},
+ { .hw_value = 120, .center_freq = 5600},
+ { .hw_value = 124, .center_freq = 5620},
{ .hw_value = 128, .center_freq = 5640},
+ { .hw_value = 132, .center_freq = 5660},
+ { .hw_value = 136, .center_freq = 5680},
+ { .hw_value = 140, .center_freq = 5700},
+ { .hw_value = 149, .center_freq = 5745},
{ .hw_value = 153, .center_freq = 5765},
+ { .hw_value = 157, .center_freq = 5785},
+ { .hw_value = 161, .center_freq = 5805},
{ .hw_value = 165, .center_freq = 5825},
};
/* mapping to indexes for wl1271_rates_5ghz */
static const u8 wl1271_rate_to_idx_5ghz[] = {
/* MCS rates are used only with 11n */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
- CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
+ 7, /* CONF_HW_RXTX_RATE_MCS7 */
+ 6, /* CONF_HW_RXTX_RATE_MCS6 */
+ 5, /* CONF_HW_RXTX_RATE_MCS5 */
+ 4, /* CONF_HW_RXTX_RATE_MCS4 */
+ 3, /* CONF_HW_RXTX_RATE_MCS3 */
+ 2, /* CONF_HW_RXTX_RATE_MCS2 */
+ 1, /* CONF_HW_RXTX_RATE_MCS1 */
+ 0, /* CONF_HW_RXTX_RATE_MCS0 */
7, /* CONF_HW_RXTX_RATE_54 */
6, /* CONF_HW_RXTX_RATE_48 */
@@ -2254,6 +3129,7 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
.bitrates = wl1271_rates_5ghz,
.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
+ .ht_cap = WL12XX_HT_CAP,
};
static const u8 *wl1271_band_rate_to_idx[] = {
@@ -2273,26 +3149,30 @@ static const struct ieee80211_ops wl1271_ops = {
.set_key = wl1271_op_set_key,
.hw_scan = wl1271_op_hw_scan,
.bss_info_changed = wl1271_op_bss_info_changed,
+ .set_frag_threshold = wl1271_op_set_frag_threshold,
.set_rts_threshold = wl1271_op_set_rts_threshold,
.conf_tx = wl1271_op_conf_tx,
.get_tsf = wl1271_op_get_tsf,
.get_survey = wl1271_op_get_survey,
+ .sta_add = wl1271_op_sta_add,
+ .sta_remove = wl1271_op_sta_remove,
+ .ampdu_action = wl1271_op_ampdu_action,
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
-u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate)
+u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band)
{
u8 idx;
- BUG_ON(wl->band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *));
+ BUG_ON(band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *));
if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) {
wl1271_error("Illegal RX rate from HW: %d", rate);
return 0;
}
- idx = wl1271_band_rate_to_idx[wl->band][rate];
+ idx = wl1271_band_rate_to_idx[band][rate];
if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
wl1271_error("Unsupported RX rate from HW: %d", rate);
return 0;
@@ -2346,7 +3226,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
if (wl->state == WL1271_STATE_OFF)
goto out;
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -2391,6 +3271,18 @@ int wl1271_register_hw(struct wl1271 *wl)
if (wl->mac80211_registered)
return 0;
+ ret = wl1271_fetch_nvs(wl);
+ if (ret == 0) {
+ u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
+
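+ /* reassemble the permanent MAC address from the NVS byte layout */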
+ wl->mac_addr[0] = nvs_ptr[11];
+ wl->mac_addr[1] = nvs_ptr[10];
+ wl->mac_addr[2] = nvs_ptr[6];
+ wl->mac_addr[3] = nvs_ptr[5];
+ wl->mac_addr[4] = nvs_ptr[4];
+ wl->mac_addr[5] = nvs_ptr[3];
+ }
+
SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
ret = ieee80211_register_hw(wl->hw);
@@ -2401,6 +3293,8 @@ int wl1271_register_hw(struct wl1271 *wl)
wl->mac80211_registered = true;
+ wl1271_debugfs_init(wl);
+
register_netdevice_notifier(&wl1271_dev_notifier);
wl1271_notice("loaded");
@@ -2411,6 +3305,9 @@ EXPORT_SYMBOL_GPL(wl1271_register_hw);
void wl1271_unregister_hw(struct wl1271 *wl)
{
+ if (wl->state == WL1271_STATE_PLT)
+ __wl1271_plt_stop(wl);
+
unregister_netdevice_notifier(&wl1271_dev_notifier);
ieee80211_unregister_hw(wl->hw);
wl->mac80211_registered = false;
@@ -2443,22 +3340,49 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_SUPPORTS_UAPSD |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_CONNECTION_MONITOR |
- IEEE80211_HW_SUPPORTS_CQM_RSSI;
+ IEEE80211_HW_SUPPORTS_CQM_RSSI |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_AP_LINK_PS;
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
+ BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
wl->hw->wiphy->max_scan_ssids = 1;
- wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
- wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
+ /*
+ * Maximum length of elements in scanning probe request templates
+ * should be the maximum length possible for a template, without
+ * the IEEE80211 header of the template
+ */
+ wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
+ sizeof(struct ieee80211_header);
+
+ /*
+ * We keep local copies of the band structs because we need to
+ * modify them on a per-device basis.
+ */
+ memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
+ sizeof(wl1271_band_2ghz));
+ memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
+ sizeof(wl1271_band_5ghz));
+
+ wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &wl->bands[IEEE80211_BAND_2GHZ];
+ wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &wl->bands[IEEE80211_BAND_5GHZ];
wl->hw->queues = 4;
wl->hw->max_rates = 1;
+ wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
+
SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+ wl->hw->sta_data_size = sizeof(struct wl1271_station);
+
+ wl->hw->max_rx_aggregation_subframes = 8;
+
return 0;
}
EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
@@ -2470,7 +3394,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
struct ieee80211_hw *hw;
struct platform_device *plat_dev = NULL;
struct wl1271 *wl;
- int i, ret;
+ int i, j, ret;
unsigned int order;
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
@@ -2495,11 +3419,19 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->hw = hw;
wl->plat_dev = plat_dev;
- skb_queue_head_init(&wl->tx_queue);
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ skb_queue_head_init(&wl->tx_queue[i]);
+
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ for (j = 0; j < AP_MAX_LINKS; j++)
+ skb_queue_head_init(&wl->links[j].tx_queue[i]);
+
+ skb_queue_head_init(&wl->deferred_rx_queue);
+ skb_queue_head_init(&wl->deferred_tx_queue);
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
- INIT_WORK(&wl->irq_work, wl1271_irq_work);
+ INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
@@ -2507,20 +3439,27 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
wl->default_key = 0;
wl->rx_counter = 0;
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+ wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->sta_rate_set = 0;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
wl->flags = 0;
wl->sg_enabled = true;
wl->hw_pg_ver = -1;
+ wl->bss_type = MAX_BSS_TYPE;
+ wl->set_bss_type = MAX_BSS_TYPE;
+ wl->fw_bss_type = MAX_BSS_TYPE;
+ wl->last_tx_hlid = 0;
+ wl->ap_ps_map = 0;
+ wl->ap_fw_ps_map = 0;
+ wl->quirks = 0;
+ memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
wl->tx_frames[i] = NULL;
@@ -2532,8 +3471,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
/* Apply default driver configuration. */
wl1271_conf_init(wl);
- wl1271_debugfs_init(wl);
-
order = get_order(WL1271_AGGR_BUFFER_SIZE);
wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
if (!wl->aggr_buf) {
@@ -2610,6 +3547,11 @@ int wl1271_free_hw(struct wl1271 *wl)
}
EXPORT_SYMBOL_GPL(wl1271_free_hw);
+u32 wl12xx_debug_level = DEBUG_NONE;
+EXPORT_SYMBOL_GPL(wl12xx_debug_level);
+module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
+
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/ps.c
index e3c332e2f97c..971f13e792da 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -21,9 +21,10 @@
*
*/
-#include "wl1271_reg.h"
-#include "wl1271_ps.h"
-#include "wl1271_io.h"
+#include "reg.h"
+#include "ps.h"
+#include "io.h"
+#include "tx.h"
#define WL1271_WAKEUP_TIMEOUT 500
@@ -68,7 +69,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
}
}
-int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
+int wl1271_ps_elp_wakeup(struct wl1271 *wl)
{
DECLARE_COMPLETION_ONSTACK(compl);
unsigned long flags;
@@ -86,7 +87,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
* the completion variable in one entity.
*/
spin_lock_irqsave(&wl->wl_lock, flags);
- if (work_pending(&wl->irq_work) || chip_awake)
+ if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
pending = true;
else
wl->elp_compl = &compl;
@@ -139,8 +140,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
return ret;
}
- ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE,
- rates, send);
+ ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
if (ret < 0)
return ret;
@@ -149,7 +149,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
case STATION_ACTIVE_MODE:
default:
wl1271_debug(DEBUG_PSM, "leaving psm");
- ret = wl1271_ps_elp_wakeup(wl, false);
+ ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
return ret;
@@ -163,8 +163,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
if (ret < 0)
return ret;
- ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE,
- rates, send);
+ ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
if (ret < 0)
return ret;
@@ -175,4 +174,81 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
return ret;
}
+static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
+{
+ int i, filtered = 0;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
+ unsigned long flags;
+
+ /* filter all frames currently in the low-level queues for this hlid */
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+ info = IEEE80211_SKB_CB(skb);
+ info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ info->status.rates[0].idx = -1;
+ ieee80211_tx_status(wl->hw, skb);
+ filtered++;
+ }
+ }
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->tx_queue_count -= filtered;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ wl1271_handle_tx_low_watermark(wl);
+}
+
+void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
+{
+ struct ieee80211_sta *sta;
+
+ if (test_bit(hlid, &wl->ap_ps_map))
+ return;
+
+ wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d blks %d "
+ "clean_queues %d", hlid, wl->links[hlid].allocated_blks,
+ clean_queues);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ if (!sta) {
+ wl1271_error("could not find sta %pM for starting ps",
+ wl->links[hlid].addr);
+ rcu_read_unlock();
+ return;
+ }
+ ieee80211_sta_ps_transition_ni(sta, true);
+ rcu_read_unlock();
+
+ /* do we want to filter all frames from this link's queues? */
+ if (clean_queues)
+ wl1271_ps_filter_frames(wl, hlid);
+
+ __set_bit(hlid, &wl->ap_ps_map);
+}
+
+void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
+{
+ struct ieee80211_sta *sta;
+
+ if (!test_bit(hlid, &wl->ap_ps_map))
+ return;
+
+ wl1271_debug(DEBUG_PSM, "end mac80211 PSM on hlid %d", hlid);
+
+ __clear_bit(hlid, &wl->ap_ps_map);
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ if (!sta) {
+ wl1271_error("could not find sta %pM for ending ps",
+ wl->links[hlid].addr);
+ goto end;
+ }
+
+ ieee80211_sta_ps_transition_ni(sta, false);
+end:
+ rcu_read_unlock();
+}
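/*
 * Illustrative sketch (not part of the patch): the ap_ps_map bookkeeping
 * used by wl1271_ps_link_start()/wl1271_ps_link_end() above, reduced to
 * plain bit operations on a 32-bit word.  The hlid numbering and the
 * idempotent start/end pair mirror the driver; everything else is a
 * standalone demonstration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ap_ps_map;	/* one bit per host link id (hlid) */

static bool link_ps_start(unsigned int hlid)
{
	if (ap_ps_map & (1u << hlid))
		return false;		/* link already in PS: nothing to do */
	ap_ps_map |= 1u << hlid;	/* mark the link as power-saving */
	return true;
}

static void link_ps_end(unsigned int hlid)
{
	ap_ps_map &= ~(1u << hlid);	/* link woke up */
}

int main(void)
{
	printf("start hlid 3 -> %d\n", link_ps_start(3));	/* 1: entered PS */
	printf("start hlid 3 -> %d\n", link_ps_start(3));	/* 0: already in PS */
	link_ps_end(3);
	printf("start hlid 3 -> %d\n", link_ps_start(3));	/* 1: entered PS again */
	return 0;
}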
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.h b/drivers/net/wireless/wl12xx/ps.h
index 6ba7b032736f..c41bd0a711bc 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -21,16 +21,18 @@
*
*/
-#ifndef __WL1271_PS_H__
-#define __WL1271_PS_H__
+#ifndef __PS_H__
+#define __PS_H__
-#include "wl1271.h"
-#include "wl1271_acx.h"
+#include "wl12xx.h"
+#include "acx.h"
int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
u32 rates, bool send);
void wl1271_ps_elp_sleep(struct wl1271 *wl);
-int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
+int wl1271_ps_elp_wakeup(struct wl1271 *wl);
void wl1271_elp_work(struct work_struct *work);
+void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
+void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
#endif /* __WL1271_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/reg.h
index 990960771528..990960771528 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/rx.c
index bea133b6e489..919b59f00301 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -23,20 +23,20 @@
#include <linux/gfp.h>
-#include "wl1271.h"
-#include "wl1271_acx.h"
-#include "wl1271_reg.h"
-#include "wl1271_rx.h"
-#include "wl1271_io.h"
+#include "wl12xx.h"
+#include "acx.h"
+#include "reg.h"
+#include "rx.h"
+#include "io.h"
-static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
+static u8 wl1271_rx_get_mem_block(struct wl1271_fw_common_status *status,
u32 drv_rx_counter)
{
return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
RX_MEM_BLOCK_MASK;
}
-static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
+static u32 wl1271_rx_get_buf_size(struct wl1271_fw_common_status *status,
u32 drv_rx_counter)
{
return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
@@ -48,10 +48,24 @@ static void wl1271_rx_status(struct wl1271 *wl,
struct ieee80211_rx_status *status,
u8 beacon)
{
+ enum ieee80211_band desc_band;
+
memset(status, 0, sizeof(struct ieee80211_rx_status));
status->band = wl->band;
- status->rate_idx = wl1271_rate_to_idx(wl, desc->rate);
+
+ if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
+ desc_band = IEEE80211_BAND_2GHZ;
+ else
+ desc_band = IEEE80211_BAND_5GHZ;
+
+ status->rate_idx = wl1271_rate_to_idx(desc->rate, desc_band);
+
+#ifdef CONFIG_WL12XX_HT
+ /* 11n support */
+ if (desc->rate <= CONF_HW_RXTX_RATE_MCS0)
+ status->flag |= RX_FLAG_HT;
+#endif
status->signal = desc->rssi;
@@ -62,7 +76,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
*/
wl->noise = desc->rssi - (desc->snr >> 1);
- status->freq = ieee80211_channel_to_frequency(desc->channel);
+ status->freq = ieee80211_channel_to_frequency(desc->channel, desc_band);
if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
@@ -78,7 +92,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
{
struct wl1271_rx_descriptor *desc;
struct sk_buff *skb;
- u16 *fc;
+ struct ieee80211_hdr *hdr;
u8 *buf;
u8 beacon = 0;
@@ -104,8 +118,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
/* now we pull the descriptor out of the buffer */
skb_pull(skb, sizeof(*desc));
- fc = (u16 *)skb->data;
- if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (ieee80211_is_beacon(hdr->frame_control))
beacon = 1;
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
@@ -115,12 +129,13 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
skb_trim(skb, skb->len - desc->pad_len);
- ieee80211_rx_ni(wl->hw, skb);
+ skb_queue_tail(&wl->deferred_rx_queue, skb);
+ ieee80211_queue_work(wl->hw, &wl->netstack_work);
return 0;
}
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
{
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
u32 buf_size;
@@ -170,16 +185,36 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
while (pkt_offset < buf_size) {
pkt_length = wl1271_rx_get_buf_size(status,
drv_rx_counter);
- if (wl1271_rx_handle_data(wl,
- wl->aggr_buf + pkt_offset,
- pkt_length) < 0)
- break;
+ /*
+ * The handle-data call can only fail in memory-outage conditions;
+ * in that case the received frame is simply dropped.
+ */
+ wl1271_rx_handle_data(wl,
+ wl->aggr_buf + pkt_offset,
+ pkt_length);
wl->rx_counter++;
drv_rx_counter++;
drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
pkt_offset += pkt_length;
}
}
- wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS,
- cpu_to_le32(wl->rx_counter));
+
+ /*
+ * Write the driver's packet counter to the FW. This is only required
+ * for older hardware revisions
+ */
+ if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
+ wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
+}
+
+void wl1271_set_default_filters(struct wl1271 *wl)
+{
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ wl->rx_config = WL1271_DEFAULT_AP_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_AP_RX_FILTER;
+ } else {
+ wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
+ }
}
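/*
 * Illustrative sketch (not part of the patch): why the check
 * "desc->rate <= CONF_HW_RXTX_RATE_MCS0" in wl1271_rx_status() above
 * identifies 11n frames.  The enum ordering below is inferred from the
 * rate-to-index table in the main.c hunk (MCS7 .. MCS0 come first, the
 * legacy rates follow); it is an assumption, not a copy of the driver's
 * conf header.
 */
#include <stdbool.h>
#include <stdio.h>

enum hw_rxtx_rate {
	RATE_MCS7, RATE_MCS6, RATE_MCS5, RATE_MCS4,
	RATE_MCS3, RATE_MCS2, RATE_MCS1, RATE_MCS0,
	RATE_54, RATE_48, RATE_36, RATE_24,	/* legacy rates follow MCS0 */
};

static bool rate_is_ht(enum hw_rxtx_rate rate)
{
	/* every MCS entry sits at or below MCS0 in the enum */
	return rate <= RATE_MCS0;
}

int main(void)
{
	printf("MCS5 is HT: %d\n", rate_is_ht(RATE_MCS5));	/* 1 */
	printf("54M  is HT: %d\n", rate_is_ht(RATE_54));	/* 0 */
	return 0;
}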
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/rx.h
index 13a232333b13..75fabf836491 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -22,18 +22,14 @@
*
*/
-#ifndef __WL1271_RX_H__
-#define __WL1271_RX_H__
+#ifndef __RX_H__
+#define __RX_H__
#include <linux/bitops.h>
#define WL1271_RX_MAX_RSSI -30
#define WL1271_RX_MIN_RSSI -95
-#define WL1271_RX_ALIGN_TO 4
-#define WL1271_RX_ALIGN(len) (((len) + WL1271_RX_ALIGN_TO - 1) & \
- ~(WL1271_RX_ALIGN_TO - 1))
-
#define SHORT_PREAMBLE_BIT BIT(0)
#define OFDM_RATE_BIT BIT(6)
#define PBCC_RATE_BIT BIT(7)
@@ -86,8 +82,9 @@
/*
* RX Descriptor status
*
- * Bits 0-2 - status
- * Bits 3-7 - reserved
+ * Bits 0-2 - error code
+ * Bits 3-5 - process_id tag (AP mode FW)
+ * Bits 6-7 - reserved
*/
#define WL1271_RX_DESC_STATUS_MASK 0x07
@@ -110,12 +107,16 @@ struct wl1271_rx_descriptor {
u8 snr;
__le32 timestamp;
u8 packet_class;
- u8 process_id;
+ union {
+ u8 process_id; /* STA FW */
+ u8 hlid; /* AP FW */
+ } __packed;
u8 pad_len;
u8 reserved;
} __packed;
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
-u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status);
+u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+void wl1271_set_default_filters(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.c b/drivers/net/wireless/wl12xx/scan.c
index 909bb47995b6..420653a2859c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -23,10 +23,11 @@
#include <linux/ieee80211.h>
-#include "wl1271.h"
-#include "wl1271_cmd.h"
-#include "wl1271_scan.h"
-#include "wl1271_acx.h"
+#include "wl12xx.h"
+#include "cmd.h"
+#include "scan.h"
+#include "acx.h"
+#include "ps.h"
void wl1271_scan_complete_work(struct work_struct *work)
{
@@ -40,22 +41,34 @@ void wl1271_scan_complete_work(struct work_struct *work)
mutex_lock(&wl->mutex);
- if (wl->scan.state == WL1271_SCAN_STATE_IDLE) {
- mutex_unlock(&wl->mutex);
- return;
- }
+ if (wl->state == WL1271_STATE_OFF)
+ goto out;
+
+ if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
+ goto out;
wl->scan.state = WL1271_SCAN_STATE_IDLE;
kfree(wl->scan.scanned_ch);
wl->scan.scanned_ch = NULL;
- mutex_unlock(&wl->mutex);
-
+ wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, false);
+ /* restore hardware connection monitoring template */
+ if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (wl1271_ps_elp_wakeup(wl) == 0) {
+ wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+ wl1271_ps_elp_sleep(wl);
+ }
+ }
+
if (wl->scan.failed) {
wl1271_info("Scan completed due to error.");
ieee80211_queue_work(wl->hw, &wl->recovery_work);
}
+
+out:
+ mutex_unlock(&wl->mutex);
+
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.h b/drivers/net/wireless/wl12xx/scan.h
index 6d57127b5e6b..421a750add5a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -21,10 +21,10 @@
*
*/
-#ifndef __WL1271_SCAN_H__
-#define __WL1271_SCAN_H__
+#ifndef __SCAN_H__
+#define __SCAN_H__
-#include "wl1271.h"
+#include "wl12xx.h"
int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req);
diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 784ef3432641..5b9dbeafec06 100644
--- a/drivers/net/wireless/wl12xx/wl1271_sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -28,13 +28,14 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/gpio.h>
#include <linux/wl12xx.h>
#include <linux/pm_runtime.h>
-#include "wl1271.h"
+#include "wl12xx.h"
#include "wl12xx_80211.h"
-#include "wl1271_io.h"
+#include "io.h"
#ifndef SDIO_VENDOR_ID_TI
#define SDIO_VENDOR_ID_TI 0x0097
@@ -60,7 +61,7 @@ static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
return &(wl_to_func(wl)->dev);
}
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_hardirq(int irq, void *cookie)
{
struct wl1271 *wl = cookie;
unsigned long flags;
@@ -69,17 +70,14 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
/* complete the ELP completion */
spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
if (wl->elp_compl) {
complete(wl->elp_compl);
wl->elp_compl = NULL;
}
-
- if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
- ieee80211_queue_work(wl->hw, &wl->irq_work);
- set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
@@ -106,8 +104,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
int ret;
struct sdio_func *func = wl_to_func(wl);
- sdio_claim_host(func);
-
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
@@ -123,8 +119,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
}
- sdio_release_host(func);
-
if (ret)
wl1271_error("sdio read failed (%d)", ret);
}
@@ -135,8 +129,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
int ret;
struct sdio_func *func = wl_to_func(wl);
- sdio_claim_host(func);
-
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
@@ -152,8 +144,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
ret = sdio_memcpy_toio(func, addr, buf, len);
}
- sdio_release_host(func);
-
if (ret)
wl1271_error("sdio write failed (%d)", ret);
}
@@ -163,14 +153,18 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
struct sdio_func *func = wl_to_func(wl);
int ret;
- /* Power up the card */
+ /* Make sure the card will not be powered off by runtime PM */
ret = pm_runtime_get_sync(&func->dev);
if (ret < 0)
goto out;
+ /* Runtime PM might be disabled, so power up the card manually */
+ ret = mmc_power_restore_host(func->card->host);
+ if (ret < 0)
+ goto out;
+
sdio_claim_host(func);
sdio_enable_func(func);
- sdio_release_host(func);
out:
return ret;
@@ -179,12 +173,17 @@ out:
static int wl1271_sdio_power_off(struct wl1271 *wl)
{
struct sdio_func *func = wl_to_func(wl);
+ int ret;
- sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
- /* Power down the card */
+ /* Runtime PM might be disabled, so power off the card manually */
+ ret = mmc_power_save_host(func->card->host);
+ if (ret < 0)
+ return ret;
+
+ /* Let runtime PM know the card is powered off */
return pm_runtime_put_sync(&func->dev);
}
@@ -241,14 +240,14 @@ static int __devinit wl1271_probe(struct sdio_func *func,
wl->irq = wlan_data->irq;
wl->ref_clock = wlan_data->board_ref_clock;
- ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ DRIVER_NAME, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
goto out_free;
}
- set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
disable_irq(wl->irq);
ret = wl1271_init_ieee80211(wl);
@@ -271,7 +270,6 @@ static int __devinit wl1271_probe(struct sdio_func *func,
out_irq:
free_irq(wl->irq, wl);
-
out_free:
wl1271_free_hw(wl);
@@ -345,3 +343,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL1271_AP_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
new file mode 100644
index 000000000000..9fcbd3dd8490
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/sdio_test.c
@@ -0,0 +1,520 @@
+/*
+ * SDIO testing driver for wl12xx
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Roger Quadros <roger.quadros@nokia.com>
+ *
+ * wl12xx read/write routines taken from the main module
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/crc7.h>
+#include <linux/vmalloc.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/card.h>
+#include <linux/gpio.h>
+#include <linux/wl12xx.h>
+#include <linux/kthread.h>
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+
+#include "wl12xx.h"
+#include "io.h"
+#include "boot.h"
+
+#ifndef SDIO_VENDOR_ID_TI
+#define SDIO_VENDOR_ID_TI 0x0097
+#endif
+
+#ifndef SDIO_DEVICE_ID_TI_WL1271
+#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+#endif
+
+static bool rx, tx;
+
+module_param(rx, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rx, "Perform rx test. Default (0). "
+ "This test continuously reads data from the SDIO device.\n");
+
+module_param(tx, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tx, "Perform tx test. Default (0). "
+ "This test continuously writes data to the SDIO device.\n");
+
+struct wl1271_test {
+ struct wl1271 wl;
+ struct task_struct *test_task;
+};
+
+static const struct sdio_device_id wl1271_devices[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
+ {}
+};
+
+static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
+{
+ return wl->if_priv;
+}
+
+static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
+{
+ return &(wl_to_func(wl)->dev);
+}
+
+static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int ret = 0;
+ struct sdio_func *func = wl_to_func(wl);
+
+ if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
+ ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
+ wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
+ addr, ((u8 *)buf)[0]);
+ } else {
+ if (fixed)
+ ret = sdio_readsb(func, buf, addr, len);
+ else
+ ret = sdio_memcpy_fromio(func, buf, addr, len);
+
+ wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
+ addr, len);
+ wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+ }
+
+ if (ret)
+ wl1271_error("sdio read failed (%d)", ret);
+}
+
+static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ int ret = 0;
+ struct sdio_func *func = wl_to_func(wl);
+
+ if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
+ sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
+ wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
+ addr, ((u8 *)buf)[0]);
+ } else {
+ wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
+ addr, len);
+ wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+
+ if (fixed)
+ ret = sdio_writesb(func, addr, buf, len);
+ else
+ ret = sdio_memcpy_toio(func, addr, buf, len);
+ }
+ if (ret)
+ wl1271_error("sdio write failed (%d)", ret);
+
+}
+
+static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
+{
+ struct sdio_func *func = wl_to_func(wl);
+ int ret;
+
+ /* Let the SDIO stack handle wlan_enable control, so we
+ * keep host claimed while wlan is in use to keep wl1271
+ * alive.
+ */
+ if (enable) {
+ /* Power up the card */
+ ret = pm_runtime_get_sync(&func->dev);
+ if (ret < 0)
+ goto out;
+ sdio_claim_host(func);
+ sdio_enable_func(func);
+ sdio_release_host(func);
+ } else {
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+
+ /* Power down the card */
+ ret = pm_runtime_put_sync(&func->dev);
+ }
+
+out:
+ return ret;
+}
+
+static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
+{
+}
+
+static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
+{
+}
+
+
+static struct wl1271_if_operations sdio_ops = {
+ .read = wl1271_sdio_raw_read,
+ .write = wl1271_sdio_raw_write,
+ .power = wl1271_sdio_set_power,
+ .dev = wl1271_sdio_wl_to_dev,
+ .enable_irq = wl1271_sdio_enable_interrupts,
+ .disable_irq = wl1271_sdio_disable_interrupts,
+};
+
+static void wl1271_fw_wakeup(struct wl1271 *wl)
+{
+ u32 elp_reg;
+
+ elp_reg = ELPCTRL_WAKE_UP;
+ wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
+}
+
+static int wl1271_fetch_firmware(struct wl1271 *wl)
+{
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
+
+ if (ret < 0) {
+ wl1271_error("could not get firmware: %d", ret);
+ return ret;
+ }
+
+ if (fw->size % 4) {
+ wl1271_error("firmware size is not multiple of 32 bits: %zu",
+ fw->size);
+ ret = -EILSEQ;
+ goto out;
+ }
+
+ wl->fw_len = fw->size;
+ wl->fw = vmalloc(wl->fw_len);
+
+ if (!wl->fw) {
+ wl1271_error("could not allocate memory for the firmware");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(wl->fw, fw->data, wl->fw_len);
+
+ ret = 0;
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int wl1271_fetch_nvs(struct wl1271 *wl)
+{
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, WL1271_NVS_NAME, wl1271_wl_to_dev(wl));
+
+ if (ret < 0) {
+ wl1271_error("could not get nvs file: %d", ret);
+ return ret;
+ }
+
+ wl->nvs = kmemdup(fw->data, sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+
+ if (!wl->nvs) {
+ wl1271_error("could not allocate memory for the nvs file");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wl->nvs_len = fw->size;
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int wl1271_chip_wakeup(struct wl1271 *wl)
+{
+ struct wl1271_partition_set partition;
+ int ret;
+
+ msleep(WL1271_PRE_POWER_ON_SLEEP);
+ ret = wl1271_power_on(wl);
+ if (ret)
+ return ret;
+
+ msleep(WL1271_POWER_ON_SLEEP);
+
+ /* We don't need a real memory partition here, because we only want
+ * to use the registers at this point. */
+ memset(&partition, 0, sizeof(partition));
+ partition.reg.start = REGISTERS_BASE;
+ partition.reg.size = REGISTERS_DOWN_SIZE;
+ wl1271_set_partition(wl, &partition);
+
+ /* ELP module wake up */
+ wl1271_fw_wakeup(wl);
+
+ /* whal_FwCtrl_BootSm() */
+
+ /* 0. read chip id from CHIP_ID */
+ wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
+
+ /* 1. check if chip id is valid */
+
+ switch (wl->chip.id) {
+ case CHIP_ID_1271_PG10:
+ wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
+ wl->chip.id);
+ break;
+ case CHIP_ID_1271_PG20:
+ wl1271_notice("chip id 0x%x (1271 PG20)",
+ wl->chip.id);
+ break;
+ default:
+ wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
+static struct wl1271_partition_set part_down = {
+ .mem = {
+ .start = 0x00000000,
+ .size = 0x000177c0
+ },
+ .reg = {
+ .start = REGISTERS_BASE,
+ .size = 0x00008800
+ },
+ .mem2 = {
+ .start = 0x00000000,
+ .size = 0x00000000
+ },
+ .mem3 = {
+ .start = 0x00000000,
+ .size = 0x00000000
+ },
+};
+
+static int tester(void *data)
+{
+ struct wl1271 *wl = data;
+ struct sdio_func *func = wl_to_func(wl);
+ struct device *pdev = &func->dev;
+ int ret = 0;
+ bool rx_started = 0;
+ bool tx_started = 0;
+ uint8_t *tx_buf, *rx_buf;
+ int test_size = PAGE_SIZE;
+ u32 addr = 0;
+ struct wl1271_partition_set partition;
+
+ /* We assume chip is powered up and firmware fetched */
+
+ memcpy(&partition, &part_down, sizeof(partition));
+ partition.mem.start = addr;
+ wl1271_set_partition(wl, &partition);
+
+ tx_buf = kmalloc(test_size, GFP_KERNEL);
+ rx_buf = kmalloc(test_size, GFP_KERNEL);
+ if (!tx_buf || !rx_buf) {
+ dev_err(pdev,
+ "Could not allocate memory. Test will not run.\n");
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ memset(tx_buf, 0x5a, test_size);
+
+ /* write something in data area so we can read it back */
+ wl1271_write(wl, addr, tx_buf, test_size, false);
+
+ while (!kthread_should_stop()) {
+ if (rx && !rx_started) {
+ dev_info(pdev, "starting rx test\n");
+ rx_started = 1;
+ } else if (!rx && rx_started) {
+ dev_info(pdev, "stopping rx test\n");
+ rx_started = 0;
+ }
+
+ if (tx && !tx_started) {
+ dev_info(pdev, "starting tx test\n");
+ tx_started = 1;
+ } else if (!tx && tx_started) {
+ dev_info(pdev, "stopping tx test\n");
+ tx_started = 0;
+ }
+
+ if (rx_started)
+ wl1271_read(wl, addr, rx_buf, test_size, false);
+
+ if (tx_started)
+ wl1271_write(wl, addr, tx_buf, test_size, false);
+
+ if (!rx_started && !tx_started)
+ msleep(100);
+ }
+
+free:
+ kfree(tx_buf);
+ kfree(rx_buf);
+ return ret;
+}
+
+static int __devinit wl1271_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ const struct wl12xx_platform_data *wlan_data;
+ struct wl1271 *wl;
+ struct wl1271_test *wl_test;
+ int ret = 0;
+
+ /* wl1271 has 2 sdio functions; we handle just the wlan part */
+ if (func->num != 0x02)
+ return -ENODEV;
+
+ wl_test = kzalloc(sizeof(struct wl1271_test), GFP_KERNEL);
+ if (!wl_test) {
+ dev_err(&func->dev, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ wl = &wl_test->wl;
+
+ wl->if_priv = func;
+ wl->if_ops = &sdio_ops;
+
+ /* Grab access to FN0 for ELP reg. */
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+
+ wlan_data = wl12xx_get_platform_data();
+ if (IS_ERR(wlan_data)) {
+ ret = PTR_ERR(wlan_data);
+ dev_err(&func->dev, "missing wlan platform data: %d\n", ret);
+ goto out_free;
+ }
+
+ wl->irq = wlan_data->irq;
+ wl->ref_clock = wlan_data->board_ref_clock;
+
+ sdio_set_drvdata(func, wl_test);
+
+
+ /* power up the device */
+ ret = wl1271_chip_wakeup(wl);
+ if (ret) {
+ dev_err(&func->dev, "could not wake up chip\n");
+ goto out_free;
+ }
+
+ if (wl->fw == NULL) {
+ ret = wl1271_fetch_firmware(wl);
+ if (ret < 0) {
+ dev_err(&func->dev, "firmware fetch error\n");
+ goto out_off;
+ }
+ }
+
+ /* fetch NVS */
+ if (wl->nvs == NULL) {
+ ret = wl1271_fetch_nvs(wl);
+ if (ret < 0) {
+ dev_err(&func->dev, "NVS fetch error\n");
+ goto out_off;
+ }
+ }
+
+ ret = wl1271_load_firmware(wl);
+ if (ret < 0) {
+ dev_err(&func->dev, "firmware load error: %d\n", ret);
+ goto out_free;
+ }
+
+ dev_info(&func->dev, "initialized\n");
+
+ /* I/O testing will be done in the tester thread */
+
+ wl_test->test_task = kthread_run(tester, wl, "sdio_tester");
+ if (IS_ERR(wl_test->test_task)) {
+ dev_err(&func->dev, "unable to create kernel thread\n");
+ ret = PTR_ERR(wl_test->test_task);
+ goto out_free;
+ }
+
+ return 0;
+
+out_off:
+ /* power off the chip */
+ wl1271_power_off(wl);
+
+out_free:
+ kfree(wl_test);
+ return ret;
+}
+
+static void __devexit wl1271_remove(struct sdio_func *func)
+{
+ struct wl1271_test *wl_test = sdio_get_drvdata(func);
+
+ /* stop the I/O test thread */
+ kthread_stop(wl_test->test_task);
+
+ /* power off the chip */
+ wl1271_power_off(&wl_test->wl);
+
+ vfree(wl_test->wl.fw);
+ wl_test->wl.fw = NULL;
+ kfree(wl_test->wl.nvs);
+ wl_test->wl.nvs = NULL;
+
+ kfree(wl_test);
+}
+
+static struct sdio_driver wl1271_sdio_driver = {
+ .name = "wl12xx_sdio_test",
+ .id_table = wl1271_devices,
+ .probe = wl1271_probe,
+ .remove = __devexit_p(wl1271_remove),
+};
+
+static int __init wl1271_init(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&wl1271_sdio_driver);
+ if (ret < 0)
+ pr_err("failed to register sdio driver: %d\n", ret);
+
+ return ret;
+}
+module_init(wl1271_init);
+
+static void __exit wl1271_exit(void)
+{
+ sdio_unregister_driver(&wl1271_sdio_driver);
+}
+module_exit(wl1271_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Roger Quadros <roger.quadros@nokia.com>");
+
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/spi.c
index ef801680773f..18cf01719ae0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -28,11 +28,11 @@
#include <linux/wl12xx.h>
#include <linux/slab.h>
-#include "wl1271.h"
+#include "wl12xx.h"
#include "wl12xx_80211.h"
-#include "wl1271_io.h"
+#include "io.h"
-#include "wl1271_reg.h"
+#include "reg.h"
#define WSPI_CMD_READ 0x40000000
#define WSPI_CMD_WRITE 0x00000000
@@ -110,9 +110,9 @@ static void wl1271_spi_reset(struct wl1271 *wl)
spi_message_add_tail(&t, &m);
spi_sync(wl_to_spi(wl), &m);
- kfree(cmd);
wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+ kfree(cmd);
}
static void wl1271_spi_init(struct wl1271 *wl)
@@ -320,28 +320,23 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
spi_sync(wl_to_spi(wl), &m);
}
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_hardirq(int irq, void *cookie)
{
- struct wl1271 *wl;
+ struct wl1271 *wl = cookie;
unsigned long flags;
wl1271_debug(DEBUG_IRQ, "IRQ");
- wl = cookie;
-
/* complete the ELP completion */
spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
if (wl->elp_compl) {
complete(wl->elp_compl);
wl->elp_compl = NULL;
}
-
- if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
- ieee80211_queue_work(wl->hw, &wl->irq_work);
- set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
@@ -413,14 +408,14 @@ static int __devinit wl1271_probe(struct spi_device *spi)
goto out_free;
}
- ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
+ ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ DRIVER_NAME, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
goto out_free;
}
- set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
-
disable_irq(wl->irq);
ret = wl1271_init_ieee80211(wl);
@@ -495,4 +490,5 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL1271_AP_FW_NAME);
MODULE_ALIAS("spi:wl1271");
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index a3aa84386c88..e64403b6896d 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -20,13 +20,13 @@
* 02110-1301 USA
*
*/
-#include "wl1271_testmode.h"
+#include "testmode.h"
#include <linux/slab.h>
#include <net/genetlink.h>
-#include "wl1271.h"
-#include "wl1271_acx.h"
+#include "wl12xx.h"
+#include "acx.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
@@ -37,6 +37,7 @@ enum wl1271_tm_commands {
WL1271_TM_CMD_CONFIGURE,
WL1271_TM_CMD_NVS_PUSH,
WL1271_TM_CMD_SET_PLT_MODE,
+ WL1271_TM_CMD_RECOVER,
__WL1271_TM_CMD_AFTER_LAST
};
@@ -248,6 +249,15 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
return ret;
}
+static int wl1271_tm_cmd_recover(struct wl1271 *wl, struct nlattr *tb[])
+{
+ wl1271_debug(DEBUG_TESTMODE, "testmode cmd recover");
+
+ ieee80211_queue_work(wl->hw, &wl->recovery_work);
+
+ return 0;
+}
+
int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
{
struct wl1271 *wl = hw->priv;
@@ -272,6 +282,8 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
return wl1271_tm_cmd_nvs_push(wl, tb);
case WL1271_TM_CMD_SET_PLT_MODE:
return wl1271_tm_cmd_set_plt_mode(wl, tb);
+ case WL1271_TM_CMD_RECOVER:
+ return wl1271_tm_cmd_recover(wl, tb);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.h b/drivers/net/wireless/wl12xx/testmode.h
index c196d28f9d9d..8071654259ea 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.h
+++ b/drivers/net/wireless/wl12xx/testmode.h
@@ -21,8 +21,8 @@
*
*/
-#ifndef __WL1271_TESTMODE_H__
-#define __WL1271_TESTMODE_H__
+#ifndef __TESTMODE_H__
+#define __TESTMODE_H__
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
new file mode 100644
index 000000000000..5e9ef7d53e7e
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -0,0 +1,762 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+
+#include "wl12xx.h"
+#include "io.h"
+#include "reg.h"
+#include "ps.h"
+#include "tx.h"
+
+static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+{
+ int ret;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+ if (is_ap)
+ ret = wl1271_cmd_set_ap_default_wep_key(wl, id);
+ else
+ ret = wl1271_cmd_set_sta_default_wep_key(wl, id);
+
+ if (ret < 0)
+ return ret;
+
+ wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
+ return 0;
+}
+
+static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
+{
+ int id;
+
+ id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS);
+ if (id >= ACX_TX_DESCRIPTORS)
+ return -EBUSY;
+
+ __set_bit(id, wl->tx_frames_map);
+ wl->tx_frames[id] = skb;
+ wl->tx_frames_cnt++;
+ return id;
+}
+
+static void wl1271_free_tx_id(struct wl1271 *wl, int id)
+{
+ if (__test_and_clear_bit(id, wl->tx_frames_map)) {
+ wl->tx_frames[id] = NULL;
+ wl->tx_frames_cnt--;
+ }
+}
+
+static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ /*
+ * Add the station to the known list before transmitting the
+ * authentication response; this way it won't get de-authed by the
+ * FW when the response is transmitted too soon.
+ */
+ hdr = (struct ieee80211_hdr *)(skb->data +
+ sizeof(struct wl1271_tx_hw_descr));
+ if (ieee80211_is_auth(hdr->frame_control))
+ wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+}
+
+static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
+{
+ bool fw_ps;
+ u8 tx_blks;
+
+ /* only regulate station links */
+ if (hlid < WL1271_AP_STA_HLID_START)
+ return;
+
+ fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+ tx_blks = wl->links[hlid].allocated_blks;
+
+ /*
+ * if in FW PS and there is enough data in FW we can put the link
+ * into high-level PS and clean out its TX queues.
+ */
+ if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
+ wl1271_ps_link_start(wl, hlid, true);
+}
+
+u8 wl1271_tx_get_hlid(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
+
+ if (control->control.sta) {
+ struct wl1271_station *wl_sta;
+
+ wl_sta = (struct wl1271_station *)
+ control->control.sta->drv_priv;
+ return wl_sta->hlid;
+ } else {
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ return WL1271_AP_GLOBAL_HLID;
+ else
+ return WL1271_AP_BROADCAST_HLID;
+ }
+}
+
+static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
+ u32 buf_offset, u8 hlid)
+{
+ struct wl1271_tx_hw_descr *desc;
+ u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
+ u32 total_blocks;
+ int id, ret = -EBUSY;
+
+ if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
+ return -EAGAIN;
+
+ /* allocate free identifier for the packet */
+ id = wl1271_alloc_tx_id(wl, skb);
+ if (id < 0)
+ return id;
+
+ /* approximate the number of blocks required for this packet
+ in the firmware */
+ total_blocks = total_len + TX_HW_BLOCK_SIZE - 1;
+ total_blocks = total_blocks / TX_HW_BLOCK_SIZE + TX_HW_BLOCK_SPARE;
+ if (total_blocks <= wl->tx_blocks_available) {
+ desc = (struct wl1271_tx_hw_descr *)skb_push(
+ skb, total_len - skb->len);
+
+ desc->extra_mem_blocks = TX_HW_BLOCK_SPARE;
+ desc->total_mem_blocks = total_blocks;
+ desc->id = id;
+
+ wl->tx_blocks_available -= total_blocks;
+
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ wl->links[hlid].allocated_blks += total_blocks;
+
+ ret = 0;
+
+ wl1271_debug(DEBUG_TX,
+ "tx_allocate: size: %d, blocks: %d, id: %d",
+ total_len, total_blocks, id);
+ } else {
+ wl1271_free_tx_id(wl, id);
+ }
+
+ return ret;
+}
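/*
 * Illustrative sketch (not part of the patch): the block accounting in
 * wl1271_tx_allocate() above is a ceiling division by the 252-byte HW
 * block size plus a fixed spare, written there in two steps.  The
 * constants mirror TX_HW_BLOCK_SIZE and TX_HW_BLOCK_SPARE from tx.h;
 * the sample lengths are arbitrary.
 */
#include <stdio.h>

#define HW_BLOCK_SIZE	252
#define HW_BLOCK_SPARE	2

static unsigned int tx_blocks_needed(unsigned int total_len)
{
	/* DIV_ROUND_UP(total_len, HW_BLOCK_SIZE) plus the spare blocks */
	return (total_len + HW_BLOCK_SIZE - 1) / HW_BLOCK_SIZE + HW_BLOCK_SPARE;
}

int main(void)
{
	printf("blocks for 1528 bytes: %u\n", tx_blocks_needed(1528)); /* 7 + 2 */
	printf("blocks for  252 bytes: %u\n", tx_blocks_needed(252));  /* 1 + 2 */
	printf("blocks for  253 bytes: %u\n", tx_blocks_needed(253));  /* 2 + 2 */
	return 0;
}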
+
+static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
+ u32 extra, struct ieee80211_tx_info *control,
+ u8 hlid)
+{
+ struct timespec ts;
+ struct wl1271_tx_hw_descr *desc;
+ int pad, ac, rate_idx;
+ s64 hosttime;
+ u16 tx_attr;
+
+ desc = (struct wl1271_tx_hw_descr *) skb->data;
+
+ /* relocate space for security header */
+ if (extra) {
+ void *framestart = skb->data + sizeof(*desc);
+ u16 fc = *(u16 *)(framestart + extra);
+ int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
+ memmove(framestart, framestart + extra, hdrlen);
+ }
+
+ /* configure packet life time */
+ getnstimeofday(&ts);
+ hosttime = (timespec_to_ns(&ts) >> 10);
+ desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS)
+ desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
+ else
+ desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
+
+ /* configure the tx attributes */
+ tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
+
+ /* queue (we use the same identifiers for TIDs and ACs) */
+ ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
+ desc->tid = ac;
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS) {
+ desc->aid = hlid;
+
+ /* if the packets are destined for AP (have a STA entry)
+ send them with AP rate policies, otherwise use default
+ basic rates */
+ if (control->control.sta)
+ rate_idx = ACX_TX_AP_FULL_RATE;
+ else
+ rate_idx = ACX_TX_BASIC_RATE;
+ } else {
+ desc->hlid = hlid;
+ switch (hlid) {
+ case WL1271_AP_GLOBAL_HLID:
+ rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
+ break;
+ case WL1271_AP_BROADCAST_HLID:
+ rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+ break;
+ default:
+ rate_idx = ac;
+ break;
+ }
+ }
+
+ tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
+ desc->reserved = 0;
+
+ /* align the length (and store in terms of words) */
+ pad = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
+ desc->length = cpu_to_le16(pad >> 2);
+
+ /* calculate number of padding bytes */
+ pad = pad - skb->len;
+ tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
+
+ desc->tx_attr = cpu_to_le16(tx_attr);
+
+ wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
+ "tx_attr: 0x%x len: %d life: %d mem: %d", pad, desc->hlid,
+ le16_to_cpu(desc->tx_attr), le16_to_cpu(desc->length),
+ le16_to_cpu(desc->life_time), desc->total_mem_blocks);
+}
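/*
 * Illustrative sketch (not part of the patch): the length/padding math
 * in wl1271_tx_fill_hdr() above.  The frame length is rounded up to a
 * 4-byte boundary, stored in the descriptor as a count of 32-bit words,
 * and the number of pad bytes is carried in the tx attributes.
 * WL1271_TX_ALIGN_TO is 4 as in tx.h; the sample lengths are arbitrary.
 */
#include <stdio.h>

#define TX_ALIGN_TO 4

static void fill_length(unsigned int skb_len)
{
	unsigned int aligned = (skb_len + TX_ALIGN_TO - 1) & ~(TX_ALIGN_TO - 1);
	unsigned int desc_length = aligned >> 2;	/* length in words */
	unsigned int pad = aligned - skb_len;		/* padding bytes added */

	printf("len %u -> words %u, pad %u\n", skb_len, desc_length, pad);
}

int main(void)
{
	fill_length(1498);	/* -> words 375, pad 2 */
	fill_length(1500);	/* -> words 375, pad 0 */
	return 0;
}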
+
+/* caller must hold wl->mutex */
+static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
+ u32 buf_offset)
+{
+ struct ieee80211_tx_info *info;
+ u32 extra = 0;
+ int ret = 0;
+ u32 total_len;
+ u8 hlid;
+
+ if (!skb)
+ return -EINVAL;
+
+ info = IEEE80211_SKB_CB(skb);
+
+ if (info->control.hw_key &&
+ info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ extra = WL1271_TKIP_IV_SPACE;
+
+ if (info->control.hw_key) {
+ bool is_wep;
+ u8 idx = info->control.hw_key->hw_key_idx;
+ u32 cipher = info->control.hw_key->cipher;
+
+ is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
+ (cipher == WLAN_CIPHER_SUITE_WEP104);
+
+ if (unlikely(is_wep && wl->default_key != idx)) {
+ ret = wl1271_set_default_wep_key(wl, idx);
+ if (ret < 0)
+ return ret;
+ wl->default_key = idx;
+ }
+ }
+
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ hlid = wl1271_tx_get_hlid(skb);
+ else
+ hlid = TX_HW_DEFAULT_AID;
+
+ ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
+ if (ret < 0)
+ return ret;
+
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ wl1271_tx_ap_update_inconnection_sta(wl, skb);
+ wl1271_tx_regulate_link(wl, hlid);
+ }
+
+ wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
+
+ /*
+ * The length of each packet is stored in terms of words. Thus, we must
+ * pad the skb data to make sure its length is aligned.
+ * The number of padding bytes is computed and set in wl1271_tx_fill_hdr
+ */
+ total_len = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
+ memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
+ memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
+
+ return total_len;
+}
+
+u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
+{
+ struct ieee80211_supported_band *band;
+ u32 enabled_rates = 0;
+ int bit;
+
+ band = wl->hw->wiphy->bands[wl->band];
+ for (bit = 0; bit < band->n_bitrates; bit++) {
+ if (rate_set & 0x1)
+ enabled_rates |= band->bitrates[bit].hw_value;
+ rate_set >>= 1;
+ }
+
+#ifdef CONFIG_WL12XX_HT
+ /* MCS rates indication are on bits 16 - 23 */
+ rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
+
+ for (bit = 0; bit < 8; bit++) {
+ if (rate_set & 0x1)
+ enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
+ rate_set >>= 1;
+ }
+#endif
+
+ return enabled_rates;
+}
+
+void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
+{
+ unsigned long flags;
+
+ if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
+ wl->tx_queue_count <= WL1271_TX_QUEUE_LOW_WATERMARK) {
+ /* firmware buffer has space, restart queues */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ ieee80211_wake_queues(wl->hw);
+ clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ }
+}
+
+static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
+{
+ struct sk_buff *skb = NULL;
+ unsigned long flags;
+
+ skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VO]);
+ if (skb)
+ goto out;
+ skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VI]);
+ if (skb)
+ goto out;
+ skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BE]);
+ if (skb)
+ goto out;
+ skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BK]);
+
+out:
+ if (skb) {
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->tx_queue_count--;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+{
+ struct sk_buff *skb = NULL;
+ unsigned long flags;
+ int i, h, start_hlid;
+
+ /* start from the link after the last one */
+ start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+
+ /* dequeue according to AC, round robin on each link */
+ for (i = 0; i < AP_MAX_LINKS; i++) {
+ h = (start_hlid + i) % AP_MAX_LINKS;
+
+ skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]);
+ if (skb)
+ goto out;
+ skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]);
+ if (skb)
+ goto out;
+ skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]);
+ if (skb)
+ goto out;
+ skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]);
+ if (skb)
+ goto out;
+ }
+
+out:
+ if (skb) {
+ wl->last_tx_hlid = h;
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->tx_queue_count--;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ } else {
+ wl->last_tx_hlid = 0;
+ }
+
+ return skb;
+}
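/*
 * Illustrative sketch (not part of the patch): the per-link round robin
 * used by wl1271_ap_skb_dequeue() above, with the skb queues reduced to
 * pending-packet counters.  The names AP_MAX_LINKS and last_tx_hlid
 * mirror the driver, but the value 8 and the two-link scenario are
 * made up for the demonstration.
 */
#include <stdio.h>

#define AP_MAX_LINKS 8

static unsigned int pending[AP_MAX_LINKS];	/* stand-in for the skb queues */
static unsigned int last_tx_hlid;

static int dequeue_round_robin(void)
{
	/* start from the link after the last one that was served */
	unsigned int start = (last_tx_hlid + 1) % AP_MAX_LINKS;
	unsigned int i, h;

	for (i = 0; i < AP_MAX_LINKS; i++) {
		h = (start + i) % AP_MAX_LINKS;
		if (pending[h]) {
			pending[h]--;
			last_tx_hlid = h;	/* remember where we stopped */
			return h;
		}
	}
	last_tx_hlid = 0;			/* nothing queued anywhere */
	return -1;
}

int main(void)
{
	pending[2] = 2;
	pending[5] = 1;
	for (int n = 0; n < 4; n++)
		printf("served hlid %d\n", dequeue_round_robin());
	/* prints 2, 5, 2, -1: links alternate instead of starving each other */
	return 0;
}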
+
+static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+{
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ return wl1271_ap_skb_dequeue(wl);
+
+ return wl1271_sta_skb_dequeue(wl);
+}
+
+static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
+{
+ unsigned long flags;
+ int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
+
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ u8 hlid = wl1271_tx_get_hlid(skb);
+ skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
+
+ /* make sure we dequeue the same packet next time */
+ wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
+ } else {
+ skb_queue_head(&wl->tx_queue[q], skb);
+ }
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->tx_queue_count++;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+void wl1271_tx_work_locked(struct wl1271 *wl)
+{
+ struct sk_buff *skb;
+ bool woken_up = false;
+ u32 buf_offset = 0;
+ bool sent_packets = false;
+ int ret;
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ while ((skb = wl1271_skb_dequeue(wl))) {
+ if (!woken_up) {
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out_ack;
+ woken_up = true;
+ }
+
+ ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
+ if (ret == -EAGAIN) {
+ /*
+ * Aggregation buffer is full.
+ * Flush buffer and try again.
+ */
+ wl1271_skb_queue_head(wl, skb);
+ wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
+ buf_offset, true);
+ sent_packets = true;
+ buf_offset = 0;
+ continue;
+ } else if (ret == -EBUSY) {
+ /*
+ * Firmware buffer is full.
+ * Queue back last skb, and stop aggregating.
+ */
+ wl1271_skb_queue_head(wl, skb);
+ /* No work left, avoid scheduling redundant tx work */
+ set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
+ goto out_ack;
+ } else if (ret < 0) {
+ dev_kfree_skb(skb);
+ goto out_ack;
+ }
+ buf_offset += ret;
+ wl->tx_packets_count++;
+ }
+
+out_ack:
+ if (buf_offset) {
+ wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
+ buf_offset, true);
+ sent_packets = true;
+ }
+ if (sent_packets) {
+ /*
+ * Interrupt the firmware with the new packets. This is only
+ * required for older hardware revisions
+ */
+ if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
+ wl1271_write32(wl, WL1271_HOST_WR_ACCESS,
+ wl->tx_packets_count);
+
+ wl1271_handle_tx_low_watermark(wl);
+ }
+
+out:
+ if (woken_up)
+ wl1271_ps_elp_sleep(wl);
+}
+
+void wl1271_tx_work(struct work_struct *work)
+{
+ struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
+
+ mutex_lock(&wl->mutex);
+ wl1271_tx_work_locked(wl);
+ mutex_unlock(&wl->mutex);
+}
+
+static void wl1271_tx_complete_packet(struct wl1271 *wl,
+ struct wl1271_tx_hw_res_descr *result)
+{
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+ int id = result->id;
+ int rate = -1;
+ u8 retries = 0;
+
+ /* check for id legality */
+ if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
+ wl1271_warning("TX result illegal id: %d", id);
+ return;
+ }
+
+ skb = wl->tx_frames[id];
+ info = IEEE80211_SKB_CB(skb);
+
+ /* update the TX status info */
+ if (result->status == TX_SUCCESS) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
+ retries = result->ack_failures;
+ } else if (result->status == TX_RETRY_EXCEEDED) {
+ wl->stats.excessive_retries++;
+ retries = result->ack_failures;
+ }
+
+ info->status.rates[0].idx = rate;
+ info->status.rates[0].count = retries;
+ info->status.rates[0].flags = 0;
+ info->status.ack_signal = -1;
+
+ wl->stats.retry_count += result->ack_failures;
+
+ /* update security sequence number */
+ wl->tx_security_seq += (result->lsb_security_sequence_number -
+ wl->tx_security_last_seq);
+ wl->tx_security_last_seq = result->lsb_security_sequence_number;
+
+ /* remove private header from packet */
+ skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
+
+ /* remove TKIP header space if present */
+ if (info->control.hw_key &&
+ info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
+ skb_pull(skb, WL1271_TKIP_IV_SPACE);
+ }
+
+ wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
+ " status 0x%x",
+ result->id, skb, result->ack_failures,
+ result->rate_class_index, result->status);
+
+ /* return the packet to the stack */
+ skb_queue_tail(&wl->deferred_tx_queue, skb);
+ ieee80211_queue_work(wl->hw, &wl->netstack_work);
+ wl1271_free_tx_id(wl, result->id);
+}
+
+/* Called upon reception of a TX complete interrupt */
+void wl1271_tx_complete(struct wl1271 *wl)
+{
+ struct wl1271_acx_mem_map *memmap =
+ (struct wl1271_acx_mem_map *)wl->target_mem_map;
+ u32 count, fw_counter;
+ u32 i;
+
+ /* read the tx results from the chipset */
+ wl1271_read(wl, le32_to_cpu(memmap->tx_result),
+ wl->tx_res_if, sizeof(*wl->tx_res_if), false);
+ fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
+
+ /* write host counter to chipset (to ack) */
+ wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
+ offsetof(struct wl1271_tx_hw_res_if,
+ tx_result_host_counter), fw_counter);
+
+ count = fw_counter - wl->tx_results_count;
+ wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
+
+ /* verify that the result buffer is not getting overrun */
+ if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
+ wl1271_warning("TX result overflow from chipset: %d", count);
+
+ /* process the results */
+ for (i = 0; i < count; i++) {
+ struct wl1271_tx_hw_res_descr *result;
+ u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;
+
+ /* process the packet */
+ result = &(wl->tx_res_if->tx_results_queue[offset]);
+ wl1271_tx_complete_packet(wl, result);
+
+ wl->tx_results_count++;
+ }
+}
+
+void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
+{
+ struct sk_buff *skb;
+ int i, total = 0;
+ unsigned long flags;
+ struct ieee80211_tx_info *info;
+
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+ wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
+ info = IEEE80211_SKB_CB(skb);
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 0;
+ ieee80211_tx_status(wl->hw, skb);
+ total++;
+ }
+ }
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wl->tx_queue_count -= total;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ wl1271_handle_tx_low_watermark(wl);
+}
+
+/* caller must hold wl->mutex */
+void wl1271_tx_reset(struct wl1271 *wl)
+{
+ int i;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
+
+ /* TX failure */
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ for (i = 0; i < AP_MAX_LINKS; i++) {
+ wl1271_tx_reset_link_queues(wl, i);
+ wl->links[i].allocated_blks = 0;
+ wl->links[i].prev_freed_blks = 0;
+ }
+
+ wl->last_tx_hlid = 0;
+ } else {
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
+ wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
+ skb);
+ info = IEEE80211_SKB_CB(skb);
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 0;
+ ieee80211_tx_status(wl->hw, skb);
+ }
+ }
+ }
+
+ wl->tx_queue_count = 0;
+
+ /*
+ * Make sure the driver is at a consistent state, in case this
+ * function is called from a context other than interface removal.
+ */
+ wl1271_handle_tx_low_watermark(wl);
+
+ for (i = 0; i < ACX_TX_DESCRIPTORS; i++) {
+ if (wl->tx_frames[i] == NULL)
+ continue;
+
+ skb = wl->tx_frames[i];
+ wl1271_free_tx_id(wl, i);
+ wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
+
+ /* Remove private headers before passing the skb to mac80211 */
+ info = IEEE80211_SKB_CB(skb);
+ skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
+ if (info->control.hw_key &&
+ info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data,
+ hdrlen);
+ skb_pull(skb, WL1271_TKIP_IV_SPACE);
+ }
+
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 0;
+
+ ieee80211_tx_status(wl->hw, skb);
+ }
+}
+
+#define WL1271_TX_FLUSH_TIMEOUT 500000
+
+/* caller must *NOT* hold wl->mutex */
+void wl1271_tx_flush(struct wl1271 *wl)
+{
+ unsigned long timeout;
+ timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
+
+ while (!time_after(jiffies, timeout)) {
+ mutex_lock(&wl->mutex);
+ wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
+ wl->tx_frames_cnt, wl->tx_queue_count);
+ if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
+ mutex_unlock(&wl->mutex);
+ return;
+ }
+ mutex_unlock(&wl->mutex);
+ msleep(1);
+ }
+
+ wl1271_warning("Unable to flush all TX buffers, timed out.");
+}
+
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl)
+{
+ int i;
+ u32 rate = 0;
+
+ if (!wl->basic_rate_set) {
+ WARN_ON(1);
+ wl->basic_rate_set = wl->conf.tx.basic_rate;
+ }
+
+ for (i = 0; !rate; i++) {
+ if ((wl->basic_rate_set >> i) & 0x1)
+ rate = 1 << i;
+ }
+
+ return rate;
+}
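wl1271_tx_min_rate_get() scans the basic rate set from bit 0 upward and returns the first set bit, i.e. the lowest enabled basic rate. That loop is equivalent to isolating the lowest set bit directly; a standalone sketch with a hypothetical bitmap:

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>

int main(void)
{
	unsigned int basic_rate_set = 0x150;	/* hypothetical rate bitmap */
	/* x & -x keeps only the lowest set bit */
	unsigned int min_rate = basic_rate_set & -basic_rate_set;

	printf("lowest enabled rate bit: 0x%x\n", min_rate);	/* prints 0x10 */
	return 0;
}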
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/tx.h
index d12a129ad11c..02f07fa66e82 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -22,13 +22,14 @@
*
*/
-#ifndef __WL1271_TX_H__
-#define __WL1271_TX_H__
+#ifndef __TX_H__
+#define __TX_H__
#define TX_HW_BLOCK_SPARE 2
#define TX_HW_BLOCK_SIZE 252
#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
+#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000
 /* The chipset reference driver states that the "aid" value 1
* is for infra-BSS, but is still always used */
#define TX_HW_DEFAULT_AID 1
@@ -52,8 +53,6 @@
#define TX_HW_RESULT_QUEUE_LEN_MASK 0xf
#define WL1271_TX_ALIGN_TO 4
-#define WL1271_TX_ALIGN(len) (((len) + WL1271_TX_ALIGN_TO - 1) & \
- ~(WL1271_TX_ALIGN_TO - 1))
#define WL1271_TKIP_IV_SPACE 4
struct wl1271_tx_hw_descr {
@@ -77,8 +76,12 @@ struct wl1271_tx_hw_descr {
u8 id;
/* The packet TID value (as User-Priority) */
u8 tid;
- /* Identifier of the remote STA in IBSS, 1 in infra-BSS */
- u8 aid;
+ union {
+ /* STA - Identifier of the remote STA in IBSS, 1 in infra-BSS */
+ u8 aid;
+ /* AP - host link ID (HLID) */
+ u8 hlid;
+ } __packed;
u8 reserved;
} __packed;
@@ -140,10 +143,15 @@ static inline int wl1271_tx_get_queue(int queue)
}
void wl1271_tx_work(struct work_struct *work);
+void wl1271_tx_work_locked(struct wl1271 *wl);
void wl1271_tx_complete(struct wl1271 *wl);
void wl1271_tx_reset(struct wl1271 *wl);
void wl1271_tx_flush(struct wl1271 *wl);
-u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
+u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl);
+u8 wl1271_tx_get_hlid(struct sk_buff *skb);
+void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
+void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
deleted file mode 100644
index 66c2b90ddfd4..000000000000
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
- * This file is part of wl1271
- *
- * Copyright (C) 2009 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include "wl1271_debugfs.h"
-
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-
-#include "wl1271.h"
-#include "wl1271_acx.h"
-#include "wl1271_ps.h"
-#include "wl1271_io.h"
-
-/* ms */
-#define WL1271_DEBUGFS_STATS_LIFETIME 1000
-
-/* debugfs macros idea from mac80211 */
-
-#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \
-static ssize_t name## _read(struct file *file, char __user *userbuf, \
- size_t count, loff_t *ppos) \
-{ \
- struct wl1271 *wl = file->private_data; \
- char buf[buflen]; \
- int res; \
- \
- res = scnprintf(buf, buflen, fmt "\n", ##value); \
- return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
-} \
- \
-static const struct file_operations name## _ops = { \
- .read = name## _read, \
- .open = wl1271_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_ADD(name, parent) \
- wl->debugfs.name = debugfs_create_file(#name, 0400, parent, \
- wl, &name## _ops); \
- if (IS_ERR(wl->debugfs.name)) { \
- ret = PTR_ERR(wl->debugfs.name); \
- wl->debugfs.name = NULL; \
- goto out; \
- }
-
-#define DEBUGFS_DEL(name) \
- do { \
- debugfs_remove(wl->debugfs.name); \
- wl->debugfs.name = NULL; \
- } while (0)
-
-#define DEBUGFS_FWSTATS_FILE(sub, name, buflen, fmt) \
-static ssize_t sub## _ ##name## _read(struct file *file, \
- char __user *userbuf, \
- size_t count, loff_t *ppos) \
-{ \
- struct wl1271 *wl = file->private_data; \
- char buf[buflen]; \
- int res; \
- \
- wl1271_debugfs_update_stats(wl); \
- \
- res = scnprintf(buf, buflen, fmt "\n", \
- wl->stats.fw_stats->sub.name); \
- return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
-} \
- \
-static const struct file_operations sub## _ ##name## _ops = { \
- .read = sub## _ ##name## _read, \
- .open = wl1271_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_FWSTATS_ADD(sub, name) \
- DEBUGFS_ADD(sub## _ ##name, wl->debugfs.fw_statistics)
-
-#define DEBUGFS_FWSTATS_DEL(sub, name) \
- DEBUGFS_DEL(sub## _ ##name)
-
-static void wl1271_debugfs_update_stats(struct wl1271 *wl)
-{
- int ret;
-
- mutex_lock(&wl->mutex);
-
- ret = wl1271_ps_elp_wakeup(wl, false);
- if (ret < 0)
- goto out;
-
- if (wl->state == WL1271_STATE_ON &&
- time_after(jiffies, wl->stats.fw_stats_update +
- msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) {
- wl1271_acx_statistics(wl, wl->stats.fw_stats);
- wl->stats.fw_stats_update = jiffies;
- }
-
- wl1271_ps_elp_sleep(wl);
-
-out:
- mutex_unlock(&wl->mutex);
-}
-
-static int wl1271_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(rx, out_of_mem, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, hw_stuck, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, dropped, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, fcs_err, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, path_reset, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rx, reset_counter, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(dma, rx_requested, 20, "%u");
-DEBUGFS_FWSTATS_FILE(dma, rx_errors, 20, "%u");
-DEBUGFS_FWSTATS_FILE(dma, tx_requested, 20, "%u");
-DEBUGFS_FWSTATS_FILE(dma, tx_errors, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, fiqs, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_headers, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_rdys, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, irqs, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, tx_procs, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, decrypt_done, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, dma0_done, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, dma1_done, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, commands, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_procs, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, pci_pm, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, wakeups, 20, "%u");
-DEBUGFS_FWSTATS_FILE(isr, low_rssi, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(wep, addr_key_count, 20, "%u");
-DEBUGFS_FWSTATS_FILE(wep, default_key_count, 20, "%u");
-/* skipping wep.reserved */
-DEBUGFS_FWSTATS_FILE(wep, key_not_found, 20, "%u");
-DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, 20, "%u");
-DEBUGFS_FWSTATS_FILE(wep, packets, 20, "%u");
-DEBUGFS_FWSTATS_FILE(wep, interrupt, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(pwr, ps_enter, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, elp_enter, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, power_save_off, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, enable_ps, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, disable_ps, 20, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, 20, "%u");
-/* skipping cont_miss_bcns_spread for now */
-DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(mic, rx_pkts, 20, "%u");
-DEBUGFS_FWSTATS_FILE(mic, calc_failure, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, 20, "%u");
-DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, 20, "%u");
-DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, 20, "%u");
-DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, 20, "%u");
-DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, 20, "%u");
-DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(event, heart_beat, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, calibration, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, rx_mismatch, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, rx_pool, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, oom_late, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, 20, "%u");
-DEBUGFS_FWSTATS_FILE(event, tx_stuck, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, 20, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, 20, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, 20, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, 20, "%u");
-DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, 20, "%u");
-DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, 20, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, 20, "%u");
-
-DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data,
- 20, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, 20, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, 20, "%u");
-
-DEBUGFS_READONLY_FILE(retry_count, 20, "%u", wl->stats.retry_count);
-DEBUGFS_READONLY_FILE(excessive_retries, 20, "%u",
- wl->stats.excessive_retries);
-
-static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct wl1271 *wl = file->private_data;
- u32 queue_len;
- char buf[20];
- int res;
-
- queue_len = skb_queue_len(&wl->tx_queue);
-
- res = scnprintf(buf, sizeof(buf), "%u\n", queue_len);
- return simple_read_from_buffer(userbuf, count, ppos, buf, res);
-}
-
-static const struct file_operations tx_queue_len_ops = {
- .read = tx_queue_len_read,
- .open = wl1271_open_file_generic,
- .llseek = default_llseek,
-};
-
-static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct wl1271 *wl = file->private_data;
- bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
-
- int res;
- char buf[10];
-
- res = scnprintf(buf, sizeof(buf), "%d\n", state);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, res);
-}
-
-static ssize_t gpio_power_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct wl1271 *wl = file->private_data;
- char buf[10];
- size_t len;
- unsigned long value;
- int ret;
-
- mutex_lock(&wl->mutex);
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len)) {
- ret = -EFAULT;
- goto out;
- }
- buf[len] = '\0';
-
- ret = strict_strtoul(buf, 0, &value);
- if (ret < 0) {
- wl1271_warning("illegal value in gpio_power");
- goto out;
- }
-
- if (value)
- wl1271_power_on(wl);
- else
- wl1271_power_off(wl);
-
-out:
- mutex_unlock(&wl->mutex);
- return count;
-}
-
-static const struct file_operations gpio_power_ops = {
- .read = gpio_power_read,
- .write = gpio_power_write,
- .open = wl1271_open_file_generic,
- .llseek = default_llseek,
-};
-
-static void wl1271_debugfs_delete_files(struct wl1271 *wl)
-{
- DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
-
- DEBUGFS_FWSTATS_DEL(rx, out_of_mem);
- DEBUGFS_FWSTATS_DEL(rx, hdr_overflow);
- DEBUGFS_FWSTATS_DEL(rx, hw_stuck);
- DEBUGFS_FWSTATS_DEL(rx, dropped);
- DEBUGFS_FWSTATS_DEL(rx, fcs_err);
- DEBUGFS_FWSTATS_DEL(rx, xfr_hint_trig);
- DEBUGFS_FWSTATS_DEL(rx, path_reset);
- DEBUGFS_FWSTATS_DEL(rx, reset_counter);
-
- DEBUGFS_FWSTATS_DEL(dma, rx_requested);
- DEBUGFS_FWSTATS_DEL(dma, rx_errors);
- DEBUGFS_FWSTATS_DEL(dma, tx_requested);
- DEBUGFS_FWSTATS_DEL(dma, tx_errors);
-
- DEBUGFS_FWSTATS_DEL(isr, cmd_cmplt);
- DEBUGFS_FWSTATS_DEL(isr, fiqs);
- DEBUGFS_FWSTATS_DEL(isr, rx_headers);
- DEBUGFS_FWSTATS_DEL(isr, rx_mem_overflow);
- DEBUGFS_FWSTATS_DEL(isr, rx_rdys);
- DEBUGFS_FWSTATS_DEL(isr, irqs);
- DEBUGFS_FWSTATS_DEL(isr, tx_procs);
- DEBUGFS_FWSTATS_DEL(isr, decrypt_done);
- DEBUGFS_FWSTATS_DEL(isr, dma0_done);
- DEBUGFS_FWSTATS_DEL(isr, dma1_done);
- DEBUGFS_FWSTATS_DEL(isr, tx_exch_complete);
- DEBUGFS_FWSTATS_DEL(isr, commands);
- DEBUGFS_FWSTATS_DEL(isr, rx_procs);
- DEBUGFS_FWSTATS_DEL(isr, hw_pm_mode_changes);
- DEBUGFS_FWSTATS_DEL(isr, host_acknowledges);
- DEBUGFS_FWSTATS_DEL(isr, pci_pm);
- DEBUGFS_FWSTATS_DEL(isr, wakeups);
- DEBUGFS_FWSTATS_DEL(isr, low_rssi);
-
- DEBUGFS_FWSTATS_DEL(wep, addr_key_count);
- DEBUGFS_FWSTATS_DEL(wep, default_key_count);
- /* skipping wep.reserved */
- DEBUGFS_FWSTATS_DEL(wep, key_not_found);
- DEBUGFS_FWSTATS_DEL(wep, decrypt_fail);
- DEBUGFS_FWSTATS_DEL(wep, packets);
- DEBUGFS_FWSTATS_DEL(wep, interrupt);
-
- DEBUGFS_FWSTATS_DEL(pwr, ps_enter);
- DEBUGFS_FWSTATS_DEL(pwr, elp_enter);
- DEBUGFS_FWSTATS_DEL(pwr, missing_bcns);
- DEBUGFS_FWSTATS_DEL(pwr, wake_on_host);
- DEBUGFS_FWSTATS_DEL(pwr, wake_on_timer_exp);
- DEBUGFS_FWSTATS_DEL(pwr, tx_with_ps);
- DEBUGFS_FWSTATS_DEL(pwr, tx_without_ps);
- DEBUGFS_FWSTATS_DEL(pwr, rcvd_beacons);
- DEBUGFS_FWSTATS_DEL(pwr, power_save_off);
- DEBUGFS_FWSTATS_DEL(pwr, enable_ps);
- DEBUGFS_FWSTATS_DEL(pwr, disable_ps);
- DEBUGFS_FWSTATS_DEL(pwr, fix_tsf_ps);
- /* skipping cont_miss_bcns_spread for now */
- DEBUGFS_FWSTATS_DEL(pwr, rcvd_awake_beacons);
-
- DEBUGFS_FWSTATS_DEL(mic, rx_pkts);
- DEBUGFS_FWSTATS_DEL(mic, calc_failure);
-
- DEBUGFS_FWSTATS_DEL(aes, encrypt_fail);
- DEBUGFS_FWSTATS_DEL(aes, decrypt_fail);
- DEBUGFS_FWSTATS_DEL(aes, encrypt_packets);
- DEBUGFS_FWSTATS_DEL(aes, decrypt_packets);
- DEBUGFS_FWSTATS_DEL(aes, encrypt_interrupt);
- DEBUGFS_FWSTATS_DEL(aes, decrypt_interrupt);
-
- DEBUGFS_FWSTATS_DEL(event, heart_beat);
- DEBUGFS_FWSTATS_DEL(event, calibration);
- DEBUGFS_FWSTATS_DEL(event, rx_mismatch);
- DEBUGFS_FWSTATS_DEL(event, rx_mem_empty);
- DEBUGFS_FWSTATS_DEL(event, rx_pool);
- DEBUGFS_FWSTATS_DEL(event, oom_late);
- DEBUGFS_FWSTATS_DEL(event, phy_transmit_error);
- DEBUGFS_FWSTATS_DEL(event, tx_stuck);
-
- DEBUGFS_FWSTATS_DEL(ps, pspoll_timeouts);
- DEBUGFS_FWSTATS_DEL(ps, upsd_timeouts);
- DEBUGFS_FWSTATS_DEL(ps, upsd_max_sptime);
- DEBUGFS_FWSTATS_DEL(ps, upsd_max_apturn);
- DEBUGFS_FWSTATS_DEL(ps, pspoll_max_apturn);
- DEBUGFS_FWSTATS_DEL(ps, pspoll_utilization);
- DEBUGFS_FWSTATS_DEL(ps, upsd_utilization);
-
- DEBUGFS_FWSTATS_DEL(rxpipe, rx_prep_beacon_drop);
- DEBUGFS_FWSTATS_DEL(rxpipe, descr_host_int_trig_rx_data);
- DEBUGFS_FWSTATS_DEL(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
- DEBUGFS_FWSTATS_DEL(rxpipe, missed_beacon_host_int_trig_rx_data);
- DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data);
-
- DEBUGFS_DEL(tx_queue_len);
- DEBUGFS_DEL(retry_count);
- DEBUGFS_DEL(excessive_retries);
-
- DEBUGFS_DEL(gpio_power);
-}
-
-static int wl1271_debugfs_add_files(struct wl1271 *wl)
-{
- int ret = 0;
-
- DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
-
- DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
- DEBUGFS_FWSTATS_ADD(rx, hdr_overflow);
- DEBUGFS_FWSTATS_ADD(rx, hw_stuck);
- DEBUGFS_FWSTATS_ADD(rx, dropped);
- DEBUGFS_FWSTATS_ADD(rx, fcs_err);
- DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig);
- DEBUGFS_FWSTATS_ADD(rx, path_reset);
- DEBUGFS_FWSTATS_ADD(rx, reset_counter);
-
- DEBUGFS_FWSTATS_ADD(dma, rx_requested);
- DEBUGFS_FWSTATS_ADD(dma, rx_errors);
- DEBUGFS_FWSTATS_ADD(dma, tx_requested);
- DEBUGFS_FWSTATS_ADD(dma, tx_errors);
-
- DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt);
- DEBUGFS_FWSTATS_ADD(isr, fiqs);
- DEBUGFS_FWSTATS_ADD(isr, rx_headers);
- DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow);
- DEBUGFS_FWSTATS_ADD(isr, rx_rdys);
- DEBUGFS_FWSTATS_ADD(isr, irqs);
- DEBUGFS_FWSTATS_ADD(isr, tx_procs);
- DEBUGFS_FWSTATS_ADD(isr, decrypt_done);
- DEBUGFS_FWSTATS_ADD(isr, dma0_done);
- DEBUGFS_FWSTATS_ADD(isr, dma1_done);
- DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete);
- DEBUGFS_FWSTATS_ADD(isr, commands);
- DEBUGFS_FWSTATS_ADD(isr, rx_procs);
- DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes);
- DEBUGFS_FWSTATS_ADD(isr, host_acknowledges);
- DEBUGFS_FWSTATS_ADD(isr, pci_pm);
- DEBUGFS_FWSTATS_ADD(isr, wakeups);
- DEBUGFS_FWSTATS_ADD(isr, low_rssi);
-
- DEBUGFS_FWSTATS_ADD(wep, addr_key_count);
- DEBUGFS_FWSTATS_ADD(wep, default_key_count);
- /* skipping wep.reserved */
- DEBUGFS_FWSTATS_ADD(wep, key_not_found);
- DEBUGFS_FWSTATS_ADD(wep, decrypt_fail);
- DEBUGFS_FWSTATS_ADD(wep, packets);
- DEBUGFS_FWSTATS_ADD(wep, interrupt);
-
- DEBUGFS_FWSTATS_ADD(pwr, ps_enter);
- DEBUGFS_FWSTATS_ADD(pwr, elp_enter);
- DEBUGFS_FWSTATS_ADD(pwr, missing_bcns);
- DEBUGFS_FWSTATS_ADD(pwr, wake_on_host);
- DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp);
- DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps);
- DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps);
- DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons);
- DEBUGFS_FWSTATS_ADD(pwr, power_save_off);
- DEBUGFS_FWSTATS_ADD(pwr, enable_ps);
- DEBUGFS_FWSTATS_ADD(pwr, disable_ps);
- DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps);
- /* skipping cont_miss_bcns_spread for now */
- DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons);
-
- DEBUGFS_FWSTATS_ADD(mic, rx_pkts);
- DEBUGFS_FWSTATS_ADD(mic, calc_failure);
-
- DEBUGFS_FWSTATS_ADD(aes, encrypt_fail);
- DEBUGFS_FWSTATS_ADD(aes, decrypt_fail);
- DEBUGFS_FWSTATS_ADD(aes, encrypt_packets);
- DEBUGFS_FWSTATS_ADD(aes, decrypt_packets);
- DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt);
- DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt);
-
- DEBUGFS_FWSTATS_ADD(event, heart_beat);
- DEBUGFS_FWSTATS_ADD(event, calibration);
- DEBUGFS_FWSTATS_ADD(event, rx_mismatch);
- DEBUGFS_FWSTATS_ADD(event, rx_mem_empty);
- DEBUGFS_FWSTATS_ADD(event, rx_pool);
- DEBUGFS_FWSTATS_ADD(event, oom_late);
- DEBUGFS_FWSTATS_ADD(event, phy_transmit_error);
- DEBUGFS_FWSTATS_ADD(event, tx_stuck);
-
- DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts);
- DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts);
- DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime);
- DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn);
- DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn);
- DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization);
- DEBUGFS_FWSTATS_ADD(ps, upsd_utilization);
-
- DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop);
- DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data);
- DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
- DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
- DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
-
- DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir);
- DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
- DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
-
- DEBUGFS_ADD(gpio_power, wl->debugfs.rootdir);
-
-out:
- if (ret < 0)
- wl1271_debugfs_delete_files(wl);
-
- return ret;
-}
-
-void wl1271_debugfs_reset(struct wl1271 *wl)
-{
- memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
- wl->stats.retry_count = 0;
- wl->stats.excessive_retries = 0;
-}
-
-int wl1271_debugfs_init(struct wl1271 *wl)
-{
- int ret;
-
- wl->debugfs.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
-
- if (IS_ERR(wl->debugfs.rootdir)) {
- ret = PTR_ERR(wl->debugfs.rootdir);
- wl->debugfs.rootdir = NULL;
- goto err;
- }
-
- wl->debugfs.fw_statistics = debugfs_create_dir("fw-statistics",
- wl->debugfs.rootdir);
-
- if (IS_ERR(wl->debugfs.fw_statistics)) {
- ret = PTR_ERR(wl->debugfs.fw_statistics);
- wl->debugfs.fw_statistics = NULL;
- goto err_root;
- }
-
- wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats),
- GFP_KERNEL);
-
- if (!wl->stats.fw_stats) {
- ret = -ENOMEM;
- goto err_fw;
- }
-
- wl->stats.fw_stats_update = jiffies;
-
- ret = wl1271_debugfs_add_files(wl);
-
- if (ret < 0)
- goto err_file;
-
- return 0;
-
-err_file:
- kfree(wl->stats.fw_stats);
- wl->stats.fw_stats = NULL;
-
-err_fw:
- debugfs_remove(wl->debugfs.fw_statistics);
- wl->debugfs.fw_statistics = NULL;
-
-err_root:
- debugfs_remove(wl->debugfs.rootdir);
- wl->debugfs.rootdir = NULL;
-
-err:
- return ret;
-}
-
-void wl1271_debugfs_exit(struct wl1271 *wl)
-{
- wl1271_debugfs_delete_files(wl);
-
- kfree(wl->stats.fw_stats);
- wl->stats.fw_stats = NULL;
-
- debugfs_remove(wl->debugfs.fw_statistics);
- wl->debugfs.fw_statistics = NULL;
-
- debugfs_remove(wl->debugfs.rootdir);
- wl->debugfs.rootdir = NULL;
-
-}
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
deleted file mode 100644
index e3dc13c4d01a..000000000000
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * This file is part of wl1271
- *
- * Copyright (C) 2009 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include "wl1271.h"
-#include "wl1271_io.h"
-#include "wl1271_reg.h"
-#include "wl1271_ps.h"
-#include "wl1271_tx.h"
-
-static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb)
-{
- int i;
- for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
- if (wl->tx_frames[i] == NULL) {
- wl->tx_frames[i] = skb;
- wl->tx_frames_cnt++;
- return i;
- }
-
- return -EBUSY;
-}
-
-static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
- u32 buf_offset)
-{
- struct wl1271_tx_hw_descr *desc;
- u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
- u32 total_blocks;
- int id, ret = -EBUSY;
-
- if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
- return -EBUSY;
-
- /* allocate free identifier for the packet */
- id = wl1271_tx_id(wl, skb);
- if (id < 0)
- return id;
-
- /* approximate the number of blocks required for this packet
- in the firmware */
- total_blocks = total_len + TX_HW_BLOCK_SIZE - 1;
- total_blocks = total_blocks / TX_HW_BLOCK_SIZE + TX_HW_BLOCK_SPARE;
- if (total_blocks <= wl->tx_blocks_available) {
- desc = (struct wl1271_tx_hw_descr *)skb_push(
- skb, total_len - skb->len);
-
- desc->extra_mem_blocks = TX_HW_BLOCK_SPARE;
- desc->total_mem_blocks = total_blocks;
- desc->id = id;
-
- wl->tx_blocks_available -= total_blocks;
-
- ret = 0;
-
- wl1271_debug(DEBUG_TX,
- "tx_allocate: size: %d, blocks: %d, id: %d",
- total_len, total_blocks, id);
- } else {
- wl->tx_frames[id] = NULL;
- wl->tx_frames_cnt--;
- }
-
- return ret;
-}
-
-static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
- u32 extra, struct ieee80211_tx_info *control)
-{
- struct timespec ts;
- struct wl1271_tx_hw_descr *desc;
- int pad, ac;
- s64 hosttime;
- u16 tx_attr;
-
- desc = (struct wl1271_tx_hw_descr *) skb->data;
-
- /* relocate space for security header */
- if (extra) {
- void *framestart = skb->data + sizeof(*desc);
- u16 fc = *(u16 *)(framestart + extra);
- int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
- memmove(framestart, framestart + extra, hdrlen);
- }
-
- /* configure packet life time */
- getnstimeofday(&ts);
- hosttime = (timespec_to_ns(&ts) >> 10);
- desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
- desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
-
- /* configure the tx attributes */
- tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
-
- /* queue (we use same identifiers for tid's and ac's */
- ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
- desc->tid = ac;
-
- desc->aid = TX_HW_DEFAULT_AID;
- desc->reserved = 0;
-
- /* align the length (and store in terms of words) */
- pad = WL1271_TX_ALIGN(skb->len);
- desc->length = cpu_to_le16(pad >> 2);
-
- /* calculate number of padding bytes */
- pad = pad - skb->len;
- tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
-
- /* if the packets are destined for AP (have a STA entry) send them
- with AP rate policies, otherwise use default basic rates */
- if (control->control.sta)
- tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
-
- desc->tx_attr = cpu_to_le16(tx_attr);
-
- wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
-}
-
-/* caller must hold wl->mutex */
-static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
- u32 buf_offset)
-{
- struct ieee80211_tx_info *info;
- u32 extra = 0;
- int ret = 0;
- u8 idx;
- u32 total_len;
-
- if (!skb)
- return -EINVAL;
-
- info = IEEE80211_SKB_CB(skb);
-
- if (info->control.hw_key &&
- info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
- extra = WL1271_TKIP_IV_SPACE;
-
- if (info->control.hw_key) {
- idx = info->control.hw_key->hw_key_idx;
-
- /* FIXME: do we have to do this if we're not using WEP? */
- if (unlikely(wl->default_key != idx)) {
- ret = wl1271_cmd_set_default_wep_key(wl, idx);
- if (ret < 0)
- return ret;
- wl->default_key = idx;
- }
- }
-
- ret = wl1271_tx_allocate(wl, skb, extra, buf_offset);
- if (ret < 0)
- return ret;
-
- wl1271_tx_fill_hdr(wl, skb, extra, info);
-
- /*
- * The length of each packet is stored in terms of words. Thus, we must
- * pad the skb data to make sure its length is aligned.
- * The number of padding bytes is computed and set in wl1271_tx_fill_hdr
- */
- total_len = WL1271_TX_ALIGN(skb->len);
- memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
- memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
-
- return total_len;
-}
-
-u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
-{
- struct ieee80211_supported_band *band;
- u32 enabled_rates = 0;
- int bit;
-
- band = wl->hw->wiphy->bands[wl->band];
- for (bit = 0; bit < band->n_bitrates; bit++) {
- if (rate_set & 0x1)
- enabled_rates |= band->bitrates[bit].hw_value;
- rate_set >>= 1;
- }
-
- return enabled_rates;
-}
-
-void wl1271_tx_work(struct work_struct *work)
-{
- struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
- struct sk_buff *skb;
- bool woken_up = false;
- u32 sta_rates = 0;
- u32 buf_offset;
- int ret;
-
- /* check if the rates supported by the AP have changed */
- if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
- &wl->flags))) {
- unsigned long flags;
- spin_lock_irqsave(&wl->wl_lock, flags);
- sta_rates = wl->sta_rate_set;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- }
-
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state == WL1271_STATE_OFF))
- goto out;
-
- /* if rates have changed, re-configure the rate policy */
- if (unlikely(sta_rates)) {
- wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
- wl1271_acx_rate_policies(wl);
- }
-
- /* Prepare the transfer buffer, by aggregating all
- * available packets */
- buf_offset = 0;
- while ((skb = skb_dequeue(&wl->tx_queue))) {
- if (!woken_up) {
- ret = wl1271_ps_elp_wakeup(wl, false);
- if (ret < 0)
- goto out_ack;
- woken_up = true;
- }
-
- ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
- if (ret == -EBUSY) {
- /*
- * Either the firmware buffer is full, or the
- * aggregation buffer is.
- * Queue back last skb, and stop aggregating.
- */
- skb_queue_head(&wl->tx_queue, skb);
- goto out_ack;
- } else if (ret < 0) {
- dev_kfree_skb(skb);
- goto out_ack;
- }
- buf_offset += ret;
- wl->tx_packets_count++;
- }
-
-out_ack:
- if (buf_offset) {
- wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
- buf_offset, true);
- /* interrupt the firmware with the new packets */
- wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
- }
-
-out:
- if (woken_up)
- wl1271_ps_elp_sleep(wl);
-
- mutex_unlock(&wl->mutex);
-}
-
-static void wl1271_tx_complete_packet(struct wl1271 *wl,
- struct wl1271_tx_hw_res_descr *result)
-{
- struct ieee80211_tx_info *info;
- struct sk_buff *skb;
- int id = result->id;
- int rate = -1;
- u8 retries = 0;
-
- /* check for id legality */
- if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
- wl1271_warning("TX result illegal id: %d", id);
- return;
- }
-
- skb = wl->tx_frames[id];
- info = IEEE80211_SKB_CB(skb);
-
- /* update the TX status info */
- if (result->status == TX_SUCCESS) {
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
- info->flags |= IEEE80211_TX_STAT_ACK;
- rate = wl1271_rate_to_idx(wl, result->rate_class_index);
- retries = result->ack_failures;
- } else if (result->status == TX_RETRY_EXCEEDED) {
- wl->stats.excessive_retries++;
- retries = result->ack_failures;
- }
-
- info->status.rates[0].idx = rate;
- info->status.rates[0].count = retries;
- info->status.rates[0].flags = 0;
- info->status.ack_signal = -1;
-
- wl->stats.retry_count += result->ack_failures;
-
- /* update security sequence number */
- wl->tx_security_seq += (result->lsb_security_sequence_number -
- wl->tx_security_last_seq);
- wl->tx_security_last_seq = result->lsb_security_sequence_number;
-
- /* remove private header from packet */
- skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
-
- /* remove TKIP header space if present */
- if (info->control.hw_key &&
- info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
- int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
- skb_pull(skb, WL1271_TKIP_IV_SPACE);
- }
-
- wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
- " status 0x%x",
- result->id, skb, result->ack_failures,
- result->rate_class_index, result->status);
-
- /* return the packet to the stack */
- ieee80211_tx_status(wl->hw, skb);
- wl->tx_frames[result->id] = NULL;
- wl->tx_frames_cnt--;
-}
-
-/* Called upon reception of a TX complete interrupt */
-void wl1271_tx_complete(struct wl1271 *wl)
-{
- struct wl1271_acx_mem_map *memmap =
- (struct wl1271_acx_mem_map *)wl->target_mem_map;
- u32 count, fw_counter;
- u32 i;
-
- /* read the tx results from the chipset */
- wl1271_read(wl, le32_to_cpu(memmap->tx_result),
- wl->tx_res_if, sizeof(*wl->tx_res_if), false);
- fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
-
- /* write host counter to chipset (to ack) */
- wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
- offsetof(struct wl1271_tx_hw_res_if,
- tx_result_host_counter), fw_counter);
-
- count = fw_counter - wl->tx_results_count;
- wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
-
- /* verify that the result buffer is not getting overrun */
- if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
- wl1271_warning("TX result overflow from chipset: %d", count);
-
- /* process the results */
- for (i = 0; i < count; i++) {
- struct wl1271_tx_hw_res_descr *result;
- u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;
-
- /* process the packet */
- result = &(wl->tx_res_if->tx_results_queue[offset]);
- wl1271_tx_complete_packet(wl, result);
-
- wl->tx_results_count++;
- }
-
- if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
- skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
- unsigned long flags;
-
- /* firmware buffer has space, restart queues */
- wl1271_debug(DEBUG_TX, "tx_complete: waking queues");
- spin_lock_irqsave(&wl->wl_lock, flags);
- ieee80211_wake_queues(wl->hw);
- clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- ieee80211_queue_work(wl->hw, &wl->tx_work);
- }
-}
-
-/* caller must hold wl->mutex */
-void wl1271_tx_reset(struct wl1271 *wl)
-{
- int i;
- struct sk_buff *skb;
-
- /* TX failure */
- while ((skb = skb_dequeue(&wl->tx_queue))) {
- wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
- ieee80211_tx_status(wl->hw, skb);
- }
-
- for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
- if (wl->tx_frames[i] != NULL) {
- skb = wl->tx_frames[i];
- wl->tx_frames[i] = NULL;
- wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
- ieee80211_tx_status(wl->hw, skb);
- }
- wl->tx_frames_cnt = 0;
-}
-
-#define WL1271_TX_FLUSH_TIMEOUT 500000
-
-/* caller must *NOT* hold wl->mutex */
-void wl1271_tx_flush(struct wl1271 *wl)
-{
- unsigned long timeout;
- timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
-
- while (!time_after(jiffies, timeout)) {
- mutex_lock(&wl->mutex);
- wl1271_debug(DEBUG_TX, "flushing tx buffer: %d",
- wl->tx_frames_cnt);
- if ((wl->tx_frames_cnt == 0) &&
- skb_queue_empty(&wl->tx_queue)) {
- mutex_unlock(&wl->mutex);
- return;
- }
- mutex_unlock(&wl->mutex);
- msleep(1);
- }
-
- wl1271_warning("Unable to flush all TX buffers, timed out.");
-}
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 8a4cd763e5a2..86be83e25ec5 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -22,8 +22,8 @@
*
*/
-#ifndef __WL1271_H__
-#define __WL1271_H__
+#ifndef __WL12XX_H__
+#define __WL12XX_H__
#include <linux/mutex.h>
#include <linux/completion.h>
@@ -32,12 +32,19 @@
#include <linux/bitops.h>
#include <net/mac80211.h>
-#include "wl1271_conf.h"
-#include "wl1271_ini.h"
+#include "conf.h"
+#include "ini.h"
#define DRIVER_NAME "wl1271"
#define DRIVER_PREFIX DRIVER_NAME ": "
+/*
+ * FW versions that support 11n BA:
+ * versions marked x.x.x.50-60.x
+ */
+#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50
+#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60
+
enum {
DEBUG_NONE = 0,
DEBUG_IRQ = BIT(0),
@@ -57,34 +64,37 @@ enum {
DEBUG_SDIO = BIT(14),
DEBUG_FILTERS = BIT(15),
DEBUG_ADHOC = BIT(16),
+ DEBUG_AP = BIT(17),
+ DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
DEBUG_ALL = ~0,
};
-#define DEBUG_LEVEL (DEBUG_NONE)
+extern u32 wl12xx_debug_level;
#define DEBUG_DUMP_LIMIT 1024
#define wl1271_error(fmt, arg...) \
- printk(KERN_ERR DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
+ pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
#define wl1271_warning(fmt, arg...) \
- printk(KERN_WARNING DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
+ pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
#define wl1271_notice(fmt, arg...) \
- printk(KERN_INFO DRIVER_PREFIX fmt "\n", ##arg)
+ pr_info(DRIVER_PREFIX fmt "\n", ##arg)
#define wl1271_info(fmt, arg...) \
- printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg)
+ pr_info(DRIVER_PREFIX fmt "\n", ##arg)
#define wl1271_debug(level, fmt, arg...) \
do { \
- if (level & DEBUG_LEVEL) \
- printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg); \
+ if (level & wl12xx_debug_level) \
+ pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
} while (0)
+/* TODO: use pr_debug_hex_dump when it becomes available */
#define wl1271_dump(level, prefix, buf, len) \
do { \
- if (level & DEBUG_LEVEL) \
+ if (level & wl12xx_debug_level) \
print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
DUMP_PREFIX_OFFSET, 16, 1, \
buf, \
@@ -94,7 +104,7 @@ enum {
#define wl1271_dump_ascii(level, prefix, buf, len) \
do { \
- if (level & DEBUG_LEVEL) \
+ if (level & wl12xx_debug_level) \
print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
DUMP_PREFIX_OFFSET, 16, 1, \
buf, \
@@ -102,17 +112,28 @@ enum {
true); \
} while (0)
-#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \
+#define WL1271_DEFAULT_STA_RX_CONFIG (CFG_UNI_FILTER_EN | \
CFG_BSSID_FILTER_EN | \
CFG_MC_FILTER_EN)
-#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
+#define WL1271_DEFAULT_STA_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
-#define WL1271_FW_NAME "wl1271-fw.bin"
-#define WL1271_NVS_NAME "wl1271-nvs.bin"
+#define WL1271_DEFAULT_AP_RX_CONFIG 0
+
+#define WL1271_DEFAULT_AP_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PREQ_EN | \
+ CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
+ CFG_RX_CTL_EN | CFG_RX_AUTH_EN | \
+ CFG_RX_ASSOC_EN)
+
+
+
+#define WL1271_FW_NAME "ti-connectivity/wl1271-fw-2.bin"
+#define WL1271_AP_FW_NAME "ti-connectivity/wl1271-fw-ap.bin"
+
+#define WL1271_NVS_NAME "ti-connectivity/wl1271-nvs.bin"
#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
@@ -128,6 +149,25 @@ enum {
#define WL1271_DEFAULT_BEACON_INT 100
#define WL1271_DEFAULT_DTIM_PERIOD 1
+#define WL1271_AP_GLOBAL_HLID 0
+#define WL1271_AP_BROADCAST_HLID 1
+#define WL1271_AP_STA_HLID_START 2
+
+/*
+ * When in AP-mode, we allow (at least) this number of mem-blocks
+ * to be transmitted to FW for a STA in PS-mode. The FW wakes a sleeping
+ * STA only when packets are present in its buffers. We want to queue
+ * enough packets for the driver to transmit all of its buffered data before
+ * the STA goes to sleep again, but we don't want to take too many mem-blocks,
+ * as it might hurt the throughput of active STAs.
+ * The number of blocks (18) is enough for 2 large packets.
+ */
+#define WL1271_PS_STA_MAX_BLOCKS (2 * 9)
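The 2 * 9 figure can be cross-checked against the TX constants in tx.h (252-byte blocks plus 2 spare blocks per packet): a full-size frame of roughly 1560 bytes, an assumed value covering MSDU, 802.11 header and TX descriptor, needs about nine blocks, so two such frames fit. A back-of-the-envelope sketch:

/* Illustrative sketch only; the 1560-byte frame size is an assumption. */
#include <stdio.h>

#define TX_HW_BLOCK_SPARE	2
#define TX_HW_BLOCK_SIZE	252

int main(void)
{
	unsigned int total_len = 1560;	/* hypothetical full-size frame */
	unsigned int blocks = (total_len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE
			      + TX_HW_BLOCK_SPARE;

	printf("blocks per large packet: %u\n", blocks);	/* prints 9 */
	return 0;
}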
+
+#define WL1271_AP_BSS_INDEX 0
+#define WL1271_AP_DEF_INACTIV_SEC 300
+#define WL1271_AP_DEF_BEACON_EXP 20
+
#define ACX_TX_DESCRIPTORS 32
#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
@@ -160,10 +200,13 @@ struct wl1271_partition_set {
struct wl1271;
+#define WL12XX_NUM_FW_VER 5
+
/* FIXME: I'm not sure about this structure name */
struct wl1271_chip {
u32 id;
- char fw_ver[21];
+ char fw_ver_str[ETHTOOL_BUSINFO_LEN];
+ unsigned int fw_ver[WL12XX_NUM_FW_VER];
};
struct wl1271_stats {
@@ -174,113 +217,16 @@ struct wl1271_stats {
unsigned int excessive_retries;
};
-struct wl1271_debugfs {
- struct dentry *rootdir;
- struct dentry *fw_statistics;
-
- struct dentry *tx_internal_desc_overflow;
-
- struct dentry *rx_out_of_mem;
- struct dentry *rx_hdr_overflow;
- struct dentry *rx_hw_stuck;
- struct dentry *rx_dropped;
- struct dentry *rx_fcs_err;
- struct dentry *rx_xfr_hint_trig;
- struct dentry *rx_path_reset;
- struct dentry *rx_reset_counter;
-
- struct dentry *dma_rx_requested;
- struct dentry *dma_rx_errors;
- struct dentry *dma_tx_requested;
- struct dentry *dma_tx_errors;
-
- struct dentry *isr_cmd_cmplt;
- struct dentry *isr_fiqs;
- struct dentry *isr_rx_headers;
- struct dentry *isr_rx_mem_overflow;
- struct dentry *isr_rx_rdys;
- struct dentry *isr_irqs;
- struct dentry *isr_tx_procs;
- struct dentry *isr_decrypt_done;
- struct dentry *isr_dma0_done;
- struct dentry *isr_dma1_done;
- struct dentry *isr_tx_exch_complete;
- struct dentry *isr_commands;
- struct dentry *isr_rx_procs;
- struct dentry *isr_hw_pm_mode_changes;
- struct dentry *isr_host_acknowledges;
- struct dentry *isr_pci_pm;
- struct dentry *isr_wakeups;
- struct dentry *isr_low_rssi;
-
- struct dentry *wep_addr_key_count;
- struct dentry *wep_default_key_count;
- /* skipping wep.reserved */
- struct dentry *wep_key_not_found;
- struct dentry *wep_decrypt_fail;
- struct dentry *wep_packets;
- struct dentry *wep_interrupt;
-
- struct dentry *pwr_ps_enter;
- struct dentry *pwr_elp_enter;
- struct dentry *pwr_missing_bcns;
- struct dentry *pwr_wake_on_host;
- struct dentry *pwr_wake_on_timer_exp;
- struct dentry *pwr_tx_with_ps;
- struct dentry *pwr_tx_without_ps;
- struct dentry *pwr_rcvd_beacons;
- struct dentry *pwr_power_save_off;
- struct dentry *pwr_enable_ps;
- struct dentry *pwr_disable_ps;
- struct dentry *pwr_fix_tsf_ps;
- /* skipping cont_miss_bcns_spread for now */
- struct dentry *pwr_rcvd_awake_beacons;
-
- struct dentry *mic_rx_pkts;
- struct dentry *mic_calc_failure;
-
- struct dentry *aes_encrypt_fail;
- struct dentry *aes_decrypt_fail;
- struct dentry *aes_encrypt_packets;
- struct dentry *aes_decrypt_packets;
- struct dentry *aes_encrypt_interrupt;
- struct dentry *aes_decrypt_interrupt;
-
- struct dentry *event_heart_beat;
- struct dentry *event_calibration;
- struct dentry *event_rx_mismatch;
- struct dentry *event_rx_mem_empty;
- struct dentry *event_rx_pool;
- struct dentry *event_oom_late;
- struct dentry *event_phy_transmit_error;
- struct dentry *event_tx_stuck;
-
- struct dentry *ps_pspoll_timeouts;
- struct dentry *ps_upsd_timeouts;
- struct dentry *ps_upsd_max_sptime;
- struct dentry *ps_upsd_max_apturn;
- struct dentry *ps_pspoll_max_apturn;
- struct dentry *ps_pspoll_utilization;
- struct dentry *ps_upsd_utilization;
-
- struct dentry *rxpipe_rx_prep_beacon_drop;
- struct dentry *rxpipe_descr_host_int_trig_rx_data;
- struct dentry *rxpipe_beacon_buffer_thres_host_int_trig_rx_data;
- struct dentry *rxpipe_missed_beacon_host_int_trig_rx_data;
- struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data;
-
- struct dentry *tx_queue_len;
-
- struct dentry *retry_count;
- struct dentry *excessive_retries;
- struct dentry *gpio_power;
-};
-
#define NUM_TX_QUEUES 4
#define NUM_RX_PKT_DESC 8
-/* FW status registers */
-struct wl1271_fw_status {
+#define AP_MAX_STATIONS 5
+
+/* Broadcast and Global links + links to stations */
+#define AP_MAX_LINKS (AP_MAX_STATIONS + 2)
+
+/* FW status registers common for AP/STA */
+struct wl1271_fw_common_status {
__le32 intr;
u8 fw_rx_counter;
u8 drv_rx_counter;
@@ -289,9 +235,43 @@ struct wl1271_fw_status {
__le32 rx_pkt_descs[NUM_RX_PKT_DESC];
__le32 tx_released_blks[NUM_TX_QUEUES];
__le32 fw_localtime;
- __le32 padding[2];
} __packed;
+/* FW status registers for AP */
+struct wl1271_fw_ap_status {
+ struct wl1271_fw_common_status common;
+
+ /* Next fields valid only in AP FW */
+
+ /*
+ * A bitmap (where each bit represents a single HLID)
+ * to indicate if the station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /* Number of freed MBs per HLID */
+ u8 tx_lnk_free_blks[AP_MAX_LINKS];
+ u8 padding_1[1];
+} __packed;
+
+/* FW status registers for STA */
+struct wl1271_fw_sta_status {
+ struct wl1271_fw_common_status common;
+
+ u8 tx_total;
+ u8 reserved1;
+ __le16 reserved2;
+} __packed;
+
+struct wl1271_fw_full_status {
+ union {
+ struct wl1271_fw_common_status common;
+ struct wl1271_fw_sta_status sta;
+ struct wl1271_fw_ap_status ap;
+ };
+} __packed;
+
+
struct wl1271_rx_mem_pool_addr {
u32 addr;
u32 addr_extra;
@@ -319,6 +299,48 @@ struct wl1271_if_operations {
void (*disable_irq)(struct wl1271 *wl);
};
+#define MAX_NUM_KEYS 14
+#define MAX_KEY_SIZE 32
+
+struct wl1271_ap_key {
+ u8 id;
+ u8 key_type;
+ u8 key_size;
+ u8 key[MAX_KEY_SIZE];
+ u8 hlid;
+ u32 tx_seq_32;
+ u16 tx_seq_16;
+};
+
+enum wl12xx_flags {
+ WL1271_FLAG_STA_ASSOCIATED,
+ WL1271_FLAG_JOINED,
+ WL1271_FLAG_GPIO_POWER,
+ WL1271_FLAG_TX_QUEUE_STOPPED,
+ WL1271_FLAG_TX_PENDING,
+ WL1271_FLAG_IN_ELP,
+ WL1271_FLAG_PSM,
+ WL1271_FLAG_PSM_REQUESTED,
+ WL1271_FLAG_IRQ_RUNNING,
+ WL1271_FLAG_IDLE,
+ WL1271_FLAG_IDLE_REQUESTED,
+ WL1271_FLAG_PSPOLL_FAILURE,
+ WL1271_FLAG_STA_STATE_SENT,
+ WL1271_FLAG_FW_TX_BUSY,
+ WL1271_FLAG_AP_STARTED
+};
+
+struct wl1271_link {
+ /* AP-mode - TX queue per AC in link */
+ struct sk_buff_head tx_queue[NUM_TX_QUEUES];
+
+ /* accounting for allocated / available TX blocks in FW */
+ u8 allocated_blks;
+ u8 prev_freed_blks;
+
+ u8 addr[ETH_ALEN];
+};
+
struct wl1271 {
struct platform_device *plat_dev;
struct ieee80211_hw *hw;
@@ -337,20 +359,6 @@ struct wl1271 {
enum wl1271_state state;
struct mutex mutex;
-#define WL1271_FLAG_STA_RATES_CHANGED (0)
-#define WL1271_FLAG_STA_ASSOCIATED (1)
-#define WL1271_FLAG_JOINED (2)
-#define WL1271_FLAG_GPIO_POWER (3)
-#define WL1271_FLAG_TX_QUEUE_STOPPED (4)
-#define WL1271_FLAG_IN_ELP (5)
-#define WL1271_FLAG_PSM (6)
-#define WL1271_FLAG_PSM_REQUESTED (7)
-#define WL1271_FLAG_IRQ_PENDING (8)
-#define WL1271_FLAG_IRQ_RUNNING (9)
-#define WL1271_FLAG_IDLE (10)
-#define WL1271_FLAG_IDLE_REQUESTED (11)
-#define WL1271_FLAG_PSPOLL_FAILURE (12)
-#define WL1271_FLAG_STA_STATE_SENT (13)
unsigned long flags;
struct wl1271_partition_set part;
@@ -362,6 +370,7 @@ struct wl1271 {
u8 *fw;
size_t fw_len;
+ u8 fw_bss_type;
struct wl1271_nvs_file *nvs;
size_t nvs_len;
@@ -392,11 +401,19 @@ struct wl1271 {
int session_counter;
/* Frames scheduled for transmission, not handled yet */
- struct sk_buff_head tx_queue;
+ struct sk_buff_head tx_queue[NUM_TX_QUEUES];
+ int tx_queue_count;
+
+ /* Frames received, not handled yet by mac80211 */
+ struct sk_buff_head deferred_rx_queue;
+
+ /* Frames sent, not returned yet to mac80211 */
+ struct sk_buff_head deferred_tx_queue;
struct work_struct tx_work;
/* Pending TX frames */
+ unsigned long tx_frames_map[BITS_TO_LONGS(ACX_TX_DESCRIPTORS)];
struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
int tx_frames_cnt;
@@ -413,8 +430,8 @@ struct wl1271 {
/* Intermediate buffer, used for packet aggregation */
u8 *aggr_buf;
- /* The target interrupt mask */
- struct work_struct irq_work;
+ /* Network stack work */
+ struct work_struct netstack_work;
/* Hardware recovery work */
struct work_struct recovery_work;
@@ -429,11 +446,18 @@ struct wl1271 {
struct wl1271_scan scan;
struct delayed_work scan_complete_work;
+ /* probe-req template for the current AP */
+ struct sk_buff *probereq;
+
/* Our association ID */
u16 aid;
- /* currently configured rate set */
- u32 sta_rate_set;
+ /*
+ * currently configured rate set:
+ * bits 0-15 - 802.11abg rates
+ * bits 16-23 - 802.11n MCS index mask
+ * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
+ */
u32 basic_rate_set;
u32 basic_rate;
u32 rate_set;
@@ -468,13 +492,12 @@ struct wl1271 {
int last_rssi_event;
struct wl1271_stats stats;
- struct wl1271_debugfs debugfs;
__le32 buffer_32;
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
- struct wl1271_fw_status *fw_status;
+ struct wl1271_fw_full_status *fw_status;
struct wl1271_tx_hw_res_if *tx_res_if;
struct ieee80211_vif *vif;
@@ -490,6 +513,41 @@ struct wl1271 {
/* Most recently reported noise in dBm */
s8 noise;
+
+ /* map for HLIDs of associated stations - when operating in AP mode */
+ unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
+
+	/* recorded keys for AP-mode - set here before AP startup */
+ struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
+
+ /* bands supported by this instance of wl12xx */
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+ /* RX BA constraint value */
+ bool ba_support;
+ u8 ba_rx_bitmap;
+
+ /*
+ * AP-mode - links indexed by HLID. The global and broadcast links
+ * are always active.
+ */
+ struct wl1271_link links[AP_MAX_LINKS];
+
+ /* the hlid of the link where the last transmitted skb came from */
+ int last_tx_hlid;
+
+ /* AP-mode - a bitmap of links currently in PS mode according to FW */
+ u32 ap_fw_ps_map;
+
+ /* AP-mode - a bitmap of links currently in PS mode in mac80211 */
+ unsigned long ap_ps_map;
+
+ /* Quirks of specific hardware revisions */
+ unsigned int quirks;
+};
+
+struct wl1271_station {
+ u8 hlid;
};
int wl1271_plt_start(struct wl1271 *wl);
@@ -504,9 +562,20 @@ int wl1271_plt_stop(struct wl1271 *wl);
#define WL1271_TX_QUEUE_LOW_WATERMARK 10
#define WL1271_TX_QUEUE_HIGH_WATERMARK 25
+#define WL1271_DEFERRED_QUEUE_LIMIT 64
+
/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
 on in case it has been shut down shortly before */
-#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */
-#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
+#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
+#define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */
+
+/* Macros to handle wl1271.sta_rate_set */
+#define HW_BG_RATES_MASK 0xffff
+#define HW_HT_RATES_OFFSET 16
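These two macros implement the split described in the rate-set comment earlier in this header: bits 0-15 carry the 802.11abg rates and bits 16-23 the single-stream MCS mask. A standalone sketch of unpacking a combined value (the bitmap itself is hypothetical):

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>

#define HW_BG_RATES_MASK	0xffff
#define HW_HT_RATES_OFFSET	16

int main(void)
{
	unsigned int sta_rate_set = 0x7015f;	/* hypothetical combined rate set */
	unsigned int bg_rates = sta_rate_set & HW_BG_RATES_MASK;
	unsigned int mcs_mask = sta_rate_set >> HW_HT_RATES_OFFSET;

	printf("11abg rates: 0x%x, MCS 0-7 mask: 0x%x\n", bg_rates, mcs_mask);
	return 0;
}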
+
+/* Quirks */
+
+/* Each RX/TX transaction requires an end-of-transaction transfer */
+#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index 184628027213..18fe542360f2 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -2,6 +2,7 @@
#define __WL12XX_80211_H__
#include <linux/if_ether.h> /* ETH_ALEN */
+#include <linux/if_arp.h>
/* RATES */
#define IEEE80211_CCK_RATE_1MB 0x02
@@ -54,7 +55,6 @@
/* This really should be 8, but not for our firmware */
#define MAX_SUPPORTED_RATES 32
-#define COUNTRY_STRING_LEN 3
#define MAX_COUNTRY_TRIPLETS 32
/* Headers */
@@ -98,7 +98,7 @@ struct country_triplet {
struct wl12xx_ie_country {
struct wl12xx_ie_header header;
- u8 country_string[COUNTRY_STRING_LEN];
+ u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
} __packed;
@@ -133,11 +133,17 @@ struct wl12xx_qos_null_data_template {
__le16 qos_ctl;
} __packed;
-struct wl12xx_probe_req_template {
- struct ieee80211_header header;
- struct wl12xx_ie_ssid ssid;
- struct wl12xx_ie_rates rates;
- struct wl12xx_ie_rates ext_rates;
+struct wl12xx_arp_rsp_template {
+ struct ieee80211_hdr_3addr hdr;
+
+ u8 llc_hdr[sizeof(rfc1042_header)];
+ __be16 llc_type;
+
+ struct arphdr arp_hdr;
+ u8 sender_hw[ETH_ALEN];
+ __be32 sender_ip;
+ u8 target_hw[ETH_ALEN];
+ __be32 target_ip;
} __packed;
@@ -153,4 +159,9 @@ struct wl12xx_probe_resp_template {
struct wl12xx_ie_country country;
} __packed;
+struct wl12xx_disconn_template {
+ struct ieee80211_header header;
+ __le16 disconn_reason;
+} __packed;
+
#endif
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index ee82df62e646..3e5befe4d03b 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -192,7 +192,7 @@ static inline void wl3501_switch_page(struct wl3501_card *this, u8 page)
}
/*
- * Get Ethernet MAC addresss.
+ * Get Ethernet MAC address.
*
 * WARNING: We switch to FPAGE0 and switch back again.
 * Making sure there is no other WL function being called by the ISR.
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 390d77f762c4..415eec401e2e 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -30,6 +30,7 @@ static struct usb_device_id zd1201_table[] = {
{USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */
{USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */
{USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb adapter */
+ {USB_DEVICE(0x1044, 0x8004)}, /* Gigabyte GN-WLBZ101 */
{USB_DEVICE(0x1044, 0x8005)}, /* GIGABYTE GN-WLBZ201 usb adapter */
{}
};
@@ -1829,7 +1830,7 @@ err_zd:
static void zd1201_disconnect(struct usb_interface *interface)
{
- struct zd1201 *zd=(struct zd1201 *)usb_get_intfdata(interface);
+ struct zd1201 *zd = usb_get_intfdata(interface);
struct hlist_node *node, *node2;
struct zd1201_frag *frag;
diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile
index 1907eafb9b16..5728a918e508 100644
--- a/drivers/net/wireless/zd1211rw/Makefile
+++ b/drivers/net/wireless/zd1211rw/Makefile
@@ -5,7 +5,5 @@ zd1211rw-objs := zd_chip.o zd_mac.o \
zd_rf_al7230b.o zd_rf_uw2453.o \
zd_rf.o zd_usb.o
-ifeq ($(CONFIG_ZD1211RW_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_ZD1211RW_DEBUG) := -DDEBUG
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 87a95bcfee57..a73a305d3cba 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -108,24 +108,17 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
{
int r;
int i;
- zd_addr_t *a16;
- u16 *v16;
+ zd_addr_t a16[USB_MAX_IOREAD32_COUNT * 2];
+ u16 v16[USB_MAX_IOREAD32_COUNT * 2];
unsigned int count16;
if (count > USB_MAX_IOREAD32_COUNT)
return -EINVAL;
- /* Allocate a single memory block for values and addresses. */
- count16 = 2*count;
- a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
- GFP_KERNEL);
- if (!a16) {
- dev_dbg_f(zd_chip_dev(chip),
- "error ENOMEM in allocation of a16\n");
- r = -ENOMEM;
- goto out;
- }
- v16 = (u16 *)(a16 + count16);
+ /* Use stack for values and addresses. */
+ count16 = 2 * count;
+ BUG_ON(count16 * sizeof(zd_addr_t) > sizeof(a16));
+ BUG_ON(count16 * sizeof(u16) > sizeof(v16));
for (i = 0; i < count; i++) {
int j = 2*i;
@@ -138,7 +131,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
if (r) {
dev_dbg_f(zd_chip_dev(chip),
"error: zd_ioread16v_locked. Error number %d\n", r);
- goto out;
+ return r;
}
for (i = 0; i < count; i++) {
@@ -146,18 +139,19 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
values[i] = (v16[j] << 16) | v16[j+1];
}
-out:
- kfree((void *)a16);
- return r;
+ return 0;
}
-int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
- unsigned int count)
+static int _zd_iowrite32v_async_locked(struct zd_chip *chip,
+ const struct zd_ioreq32 *ioreqs,
+ unsigned int count)
{
int i, j, r;
- struct zd_ioreq16 *ioreqs16;
+ struct zd_ioreq16 ioreqs16[USB_MAX_IOWRITE32_COUNT * 2];
unsigned int count16;
+ /* Use stack for values and addresses. */
+
ZD_ASSERT(mutex_is_locked(&chip->mutex));
if (count == 0)
@@ -165,15 +159,8 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
if (count > USB_MAX_IOWRITE32_COUNT)
return -EINVAL;
- /* Allocate a single memory block for values and addresses. */
- count16 = 2*count;
- ioreqs16 = kmalloc(count16 * sizeof(struct zd_ioreq16), GFP_KERNEL);
- if (!ioreqs16) {
- r = -ENOMEM;
- dev_dbg_f(zd_chip_dev(chip),
- "error %d in ioreqs16 allocation\n", r);
- goto out;
- }
+ count16 = 2 * count;
+ BUG_ON(count16 * sizeof(struct zd_ioreq16) > sizeof(ioreqs16));
for (i = 0; i < count; i++) {
j = 2*i;
@@ -184,18 +171,30 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
ioreqs16[j+1].addr = ioreqs[i].addr;
}
- r = zd_usb_iowrite16v(&chip->usb, ioreqs16, count16);
+ r = zd_usb_iowrite16v_async(&chip->usb, ioreqs16, count16);
#ifdef DEBUG
if (r) {
dev_dbg_f(zd_chip_dev(chip),
"error %d in zd_usb_write16v\n", r);
}
#endif /* DEBUG */
-out:
- kfree(ioreqs16);
return r;
}
+int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
+ unsigned int count)
+{
+ int r;
+
+ zd_usb_iowrite16v_async_start(&chip->usb);
+ r = _zd_iowrite32v_async_locked(chip, ioreqs, count);
+ if (r) {
+ zd_usb_iowrite16v_async_end(&chip->usb, 0);
+ return r;
+ }
+ return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
+}
+
int zd_iowrite16a_locked(struct zd_chip *chip,
const struct zd_ioreq16 *ioreqs, unsigned int count)
{
@@ -203,6 +202,8 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
unsigned int i, j, t, max;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
+ zd_usb_iowrite16v_async_start(&chip->usb);
+
for (i = 0; i < count; i += j + t) {
t = 0;
max = count-i;
@@ -215,8 +216,9 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
}
}
- r = zd_usb_iowrite16v(&chip->usb, &ioreqs[i], j);
+ r = zd_usb_iowrite16v_async(&chip->usb, &ioreqs[i], j);
if (r) {
+ zd_usb_iowrite16v_async_end(&chip->usb, 0);
dev_dbg_f(zd_chip_dev(chip),
"error zd_usb_iowrite16v. Error number %d\n",
r);
@@ -224,7 +226,7 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
}
}
- return 0;
+ return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
}
/* Writes a variable number of 32 bit registers. The functions will split
@@ -237,6 +239,8 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
int r;
unsigned int i, j, t, max;
+ zd_usb_iowrite16v_async_start(&chip->usb);
+
for (i = 0; i < count; i += j + t) {
t = 0;
max = count-i;
@@ -249,8 +253,9 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
}
}
- r = _zd_iowrite32v_locked(chip, &ioreqs[i], j);
+ r = _zd_iowrite32v_async_locked(chip, &ioreqs[i], j);
if (r) {
+ zd_usb_iowrite16v_async_end(&chip->usb, 0);
dev_dbg_f(zd_chip_dev(chip),
"error _zd_iowrite32v_locked."
" Error number %d\n", r);
@@ -258,7 +263,7 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
}
}
- return 0;
+ return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
}
int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value)
@@ -369,16 +374,12 @@ error:
return r;
}
-/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
- * CR_MAC_ADDR_P2 must be overwritten
- */
-int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
+static int zd_write_mac_addr_common(struct zd_chip *chip, const u8 *mac_addr,
+ const struct zd_ioreq32 *in_reqs,
+ const char *type)
{
int r;
- struct zd_ioreq32 reqs[2] = {
- [0] = { .addr = CR_MAC_ADDR_P1 },
- [1] = { .addr = CR_MAC_ADDR_P2 },
- };
+ struct zd_ioreq32 reqs[2] = {in_reqs[0], in_reqs[1]};
if (mac_addr) {
reqs[0].value = (mac_addr[3] << 24)
@@ -387,9 +388,9 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
| mac_addr[0];
reqs[1].value = (mac_addr[5] << 8)
| mac_addr[4];
- dev_dbg_f(zd_chip_dev(chip), "mac addr %pM\n", mac_addr);
+ dev_dbg_f(zd_chip_dev(chip), "%s addr %pM\n", type, mac_addr);
} else {
- dev_dbg_f(zd_chip_dev(chip), "set NULL mac\n");
+ dev_dbg_f(zd_chip_dev(chip), "set NULL %s\n", type);
}
mutex_lock(&chip->mutex);
@@ -398,6 +399,29 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
return r;
}
+/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
+ * CR_MAC_ADDR_P2 must be overwritten
+ */
+int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
+{
+ static const struct zd_ioreq32 reqs[2] = {
+ [0] = { .addr = CR_MAC_ADDR_P1 },
+ [1] = { .addr = CR_MAC_ADDR_P2 },
+ };
+
+ return zd_write_mac_addr_common(chip, mac_addr, reqs, "mac");
+}
+
+int zd_write_bssid(struct zd_chip *chip, const u8 *bssid)
+{
+ static const struct zd_ioreq32 reqs[2] = {
+ [0] = { .addr = CR_BSSID_P1 },
+ [1] = { .addr = CR_BSSID_P2 },
+ };
+
+ return zd_write_mac_addr_common(chip, bssid, reqs, "bssid");
+}
+
int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain)
{
int r;
@@ -848,11 +872,12 @@ static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
{
struct zd_ioreq32 reqs[3];
+ u16 b_interval = s->beacon_interval & 0xffff;
- if (s->beacon_interval <= 5)
- s->beacon_interval = 5;
- if (s->pre_tbtt < 4 || s->pre_tbtt >= s->beacon_interval)
- s->pre_tbtt = s->beacon_interval - 1;
+ if (b_interval <= 5)
+ b_interval = 5;
+ if (s->pre_tbtt < 4 || s->pre_tbtt >= b_interval)
+ s->pre_tbtt = b_interval - 1;
if (s->atim_wnd_period >= s->pre_tbtt)
s->atim_wnd_period = s->pre_tbtt - 1;
@@ -861,31 +886,57 @@ static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
reqs[1].addr = CR_PRE_TBTT;
reqs[1].value = s->pre_tbtt;
reqs[2].addr = CR_BCN_INTERVAL;
- reqs[2].value = s->beacon_interval;
+ reqs[2].value = (s->beacon_interval & ~0xffff) | b_interval;
return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
}
-static int set_beacon_interval(struct zd_chip *chip, u32 interval)
+static int set_beacon_interval(struct zd_chip *chip, u16 interval,
+ u8 dtim_period, int type)
{
int r;
struct aw_pt_bi s;
+ u32 b_interval, mode_flag;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
+
+ if (interval > 0) {
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ mode_flag = BCN_MODE_IBSS;
+ break;
+ case NL80211_IFTYPE_AP:
+ mode_flag = BCN_MODE_AP;
+ break;
+ default:
+ mode_flag = 0;
+ break;
+ }
+ } else {
+ dtim_period = 0;
+ mode_flag = 0;
+ }
+
+ b_interval = mode_flag | (dtim_period << 16) | interval;
+
+ r = zd_iowrite32_locked(chip, b_interval, CR_BCN_INTERVAL);
+ if (r)
+ return r;
r = get_aw_pt_bi(chip, &s);
if (r)
return r;
- s.beacon_interval = interval;
return set_aw_pt_bi(chip, &s);
}
-int zd_set_beacon_interval(struct zd_chip *chip, u32 interval)
+int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
+ int type)
{
int r;
mutex_lock(&chip->mutex);
- r = set_beacon_interval(chip, interval);
+ r = set_beacon_interval(chip, interval, dtim_period, type);
mutex_unlock(&chip->mutex);
return r;
}
@@ -904,7 +955,7 @@ static int hw_init(struct zd_chip *chip)
if (r)
return r;
- return set_beacon_interval(chip, 100);
+ return set_beacon_interval(chip, 100, 0, NL80211_IFTYPE_UNSPECIFIED);
}
static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset)
@@ -1406,6 +1457,9 @@ void zd_chip_disable_int(struct zd_chip *chip)
mutex_lock(&chip->mutex);
zd_usb_disable_int(&chip->usb);
mutex_unlock(&chip->mutex);
+
+ /* cancel pending interrupt work */
+ cancel_work_sync(&zd_chip_to_mac(chip)->process_intr);
}
int zd_chip_enable_rxtx(struct zd_chip *chip)
@@ -1415,6 +1469,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
mutex_lock(&chip->mutex);
zd_usb_enable_tx(&chip->usb);
r = zd_usb_enable_rx(&chip->usb);
+ zd_tx_watchdog_enable(&chip->usb);
mutex_unlock(&chip->mutex);
return r;
}
@@ -1422,6 +1477,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
void zd_chip_disable_rxtx(struct zd_chip *chip)
{
mutex_lock(&chip->mutex);
+ zd_tx_watchdog_disable(&chip->usb);
zd_usb_disable_rx(&chip->usb);
zd_usb_disable_tx(&chip->usb);
mutex_unlock(&chip->mutex);
@@ -1448,7 +1504,7 @@ int zd_rfwritev_locked(struct zd_chip *chip,
*/
int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value)
{
- struct zd_ioreq16 ioreqs[] = {
+ const struct zd_ioreq16 ioreqs[] = {
{ CR244, (value >> 16) & 0xff },
{ CR243, (value >> 8) & 0xff },
{ CR242, value & 0xff },
@@ -1475,7 +1531,7 @@ int zd_rfwritev_cr_locked(struct zd_chip *chip,
int zd_chip_set_multicast_hash(struct zd_chip *chip,
struct zd_mc_hash *hash)
{
- struct zd_ioreq32 ioreqs[] = {
+ const struct zd_ioreq32 ioreqs[] = {
{ CR_GROUP_HASH_P1, hash->low },
{ CR_GROUP_HASH_P2, hash->high },
};
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index f8bbf7d302ae..14e4402a6111 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -546,6 +546,7 @@ enum {
#define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \
RX_FILTER_CFEND | RX_FILTER_CFACK)
+#define BCN_MODE_AP 0x1000000
#define BCN_MODE_IBSS 0x2000000
/* Monitor mode sets filter to 0xfffff */
@@ -881,6 +882,7 @@ static inline u8 _zd_chip_get_channel(struct zd_chip *chip)
u8 zd_chip_get_channel(struct zd_chip *chip);
int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain);
int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr);
+int zd_write_bssid(struct zd_chip *chip, const u8 *bssid);
int zd_chip_switch_radio_on(struct zd_chip *chip);
int zd_chip_switch_radio_off(struct zd_chip *chip);
int zd_chip_enable_int(struct zd_chip *chip);
@@ -920,7 +922,8 @@ enum led_status {
int zd_chip_control_leds(struct zd_chip *chip, enum led_status status);
-int zd_set_beacon_interval(struct zd_chip *chip, u32 interval);
+int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
+ int type);
static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
{
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index 6ac597ffd3b9..5463ca9ebc01 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -45,7 +45,7 @@ typedef u16 __nocast zd_addr_t;
#ifdef DEBUG
# define ZD_ASSERT(x) \
do { \
- if (!(x)) { \
+ if (unlikely(!(x))) { \
pr_debug("%s:%d ASSERT %s VIOLATED!\n", \
__FILE__, __LINE__, __stringify(x)); \
dump_stack(); \
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 43307bd42a69..5037c8b2b415 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -138,6 +138,12 @@ static const struct ieee80211_channel zd_channels[] = {
static void housekeeping_init(struct zd_mac *mac);
static void housekeeping_enable(struct zd_mac *mac);
static void housekeeping_disable(struct zd_mac *mac);
+static void beacon_init(struct zd_mac *mac);
+static void beacon_enable(struct zd_mac *mac);
+static void beacon_disable(struct zd_mac *mac);
+static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble);
+static int zd_mac_config_beacon(struct ieee80211_hw *hw,
+ struct sk_buff *beacon);
static int zd_reg2alpha2(u8 regdomain, char *alpha2)
{
@@ -231,6 +237,26 @@ static int set_rx_filter(struct zd_mac *mac)
return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter);
}
+static int set_mac_and_bssid(struct zd_mac *mac)
+{
+ int r;
+
+ if (!mac->vif)
+ return -1;
+
+ r = zd_write_mac_addr(&mac->chip, mac->vif->addr);
+ if (r)
+ return r;
+
+ /* Vendor driver after setting MAC either sets BSSID for AP or
+ * filter for other modes.
+ */
+ if (mac->type != NL80211_IFTYPE_AP)
+ return set_rx_filter(mac);
+ else
+ return zd_write_bssid(&mac->chip, mac->vif->addr);
+}
+
static int set_mc_hash(struct zd_mac *mac)
{
struct zd_mc_hash hash;
@@ -238,7 +264,7 @@ static int set_mc_hash(struct zd_mac *mac)
return zd_chip_set_multicast_hash(&mac->chip, &hash);
}
-static int zd_op_start(struct ieee80211_hw *hw)
+int zd_op_start(struct ieee80211_hw *hw)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct zd_chip *chip = &mac->chip;
@@ -275,6 +301,8 @@ static int zd_op_start(struct ieee80211_hw *hw)
goto disable_rxtx;
housekeeping_enable(mac);
+ beacon_enable(mac);
+ set_bit(ZD_DEVICE_RUNNING, &mac->flags);
return 0;
disable_rxtx:
zd_chip_disable_rxtx(chip);
@@ -286,19 +314,22 @@ out:
return r;
}
-static void zd_op_stop(struct ieee80211_hw *hw)
+void zd_op_stop(struct ieee80211_hw *hw)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct zd_chip *chip = &mac->chip;
struct sk_buff *skb;
struct sk_buff_head *ack_wait_queue = &mac->ack_wait_queue;
+ clear_bit(ZD_DEVICE_RUNNING, &mac->flags);
+
/* The order here deliberately is a little different from the open()
* method, since we need to make sure there is no opportunity for RX
* frames to be processed by mac80211 after we have stopped it.
*/
zd_chip_disable_rxtx(chip);
+ beacon_disable(mac);
housekeeping_disable(mac);
flush_workqueue(zd_workqueue);
@@ -311,6 +342,68 @@ static void zd_op_stop(struct ieee80211_hw *hw)
dev_kfree_skb_any(skb);
}
+int zd_restore_settings(struct zd_mac *mac)
+{
+ struct sk_buff *beacon;
+ struct zd_mc_hash multicast_hash;
+ unsigned int short_preamble;
+ int r, beacon_interval, beacon_period;
+ u8 channel;
+
+ dev_dbg_f(zd_mac_dev(mac), "\n");
+
+ spin_lock_irq(&mac->lock);
+ multicast_hash = mac->multicast_hash;
+ short_preamble = mac->short_preamble;
+ beacon_interval = mac->beacon.interval;
+ beacon_period = mac->beacon.period;
+ channel = mac->channel;
+ spin_unlock_irq(&mac->lock);
+
+ r = set_mac_and_bssid(mac);
+ if (r < 0) {
+ dev_dbg_f(zd_mac_dev(mac), "set_mac_and_bssid failed, %d\n", r);
+ return r;
+ }
+
+ r = zd_chip_set_channel(&mac->chip, channel);
+ if (r < 0) {
+ dev_dbg_f(zd_mac_dev(mac), "zd_chip_set_channel failed, %d\n",
+ r);
+ return r;
+ }
+
+ set_rts_cts(mac, short_preamble);
+
+ r = zd_chip_set_multicast_hash(&mac->chip, &multicast_hash);
+ if (r < 0) {
+ dev_dbg_f(zd_mac_dev(mac),
+ "zd_chip_set_multicast_hash failed, %d\n", r);
+ return r;
+ }
+
+ if (mac->type == NL80211_IFTYPE_MESH_POINT ||
+ mac->type == NL80211_IFTYPE_ADHOC ||
+ mac->type == NL80211_IFTYPE_AP) {
+ if (mac->vif != NULL) {
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ if (beacon) {
+ zd_mac_config_beacon(mac->hw, beacon);
+ kfree_skb(beacon);
+ }
+ }
+
+ zd_set_beacon_interval(&mac->chip, beacon_interval,
+ beacon_period, mac->type);
+
+ spin_lock_irq(&mac->lock);
+ mac->beacon.last_update = jiffies;
+ spin_unlock_irq(&mac->lock);
+ }
+
+ return 0;
+}
+
/**
* zd_mac_tx_status - reports tx status of a packet if required
* @hw - a &struct ieee80211_hw pointer
@@ -574,64 +667,120 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
{
struct zd_mac *mac = zd_hw_mac(hw);
- int r;
+ int r, ret, num_cmds, req_pos = 0;
u32 tmp, j = 0;
/* 4 more bytes for tail CRC */
u32 full_len = beacon->len + 4;
+ unsigned long end_jiffies, message_jiffies;
+ struct zd_ioreq32 *ioreqs;
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 0);
+ /* Alloc memory for full beacon write at once. */
+ num_cmds = 1 + zd_chip_is_zd1211b(&mac->chip) + full_len;
+ ioreqs = kmalloc(num_cmds * sizeof(struct zd_ioreq32), GFP_KERNEL);
+ if (!ioreqs)
+ return -ENOMEM;
+
+ mutex_lock(&mac->chip.mutex);
+
+ r = zd_iowrite32_locked(&mac->chip, 0, CR_BCN_FIFO_SEMAPHORE);
if (r < 0)
- return r;
- r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp);
+ goto out;
+ r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
if (r < 0)
- return r;
+ goto release_sema;
+ end_jiffies = jiffies + HZ / 2; /*~500ms*/
+ message_jiffies = jiffies + HZ / 10; /*~100ms*/
while (tmp & 0x2) {
- r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp);
+ r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
if (r < 0)
- return r;
- if ((++j % 100) == 0) {
- printk(KERN_ERR "CR_BCN_FIFO_SEMAPHORE not ready\n");
- if (j >= 500) {
- printk(KERN_ERR "Giving up beacon config.\n");
- return -ETIMEDOUT;
+ goto release_sema;
+ if (time_is_before_eq_jiffies(message_jiffies)) {
+ message_jiffies = jiffies + HZ / 10;
+ dev_err(zd_mac_dev(mac),
+ "CR_BCN_FIFO_SEMAPHORE not ready\n");
+ if (time_is_before_eq_jiffies(end_jiffies)) {
+ dev_err(zd_mac_dev(mac),
+ "Giving up beacon config.\n");
+ r = -ETIMEDOUT;
+ goto reset_device;
}
}
- msleep(1);
+ msleep(20);
}
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, full_len - 1);
- if (r < 0)
- return r;
+ ioreqs[req_pos].addr = CR_BCN_FIFO;
+ ioreqs[req_pos].value = full_len - 1;
+ req_pos++;
if (zd_chip_is_zd1211b(&mac->chip)) {
- r = zd_iowrite32(&mac->chip, CR_BCN_LENGTH, full_len - 1);
- if (r < 0)
- return r;
+ ioreqs[req_pos].addr = CR_BCN_LENGTH;
+ ioreqs[req_pos].value = full_len - 1;
+ req_pos++;
}
for (j = 0 ; j < beacon->len; j++) {
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO,
- *((u8 *)(beacon->data + j)));
- if (r < 0)
- return r;
+ ioreqs[req_pos].addr = CR_BCN_FIFO;
+ ioreqs[req_pos].value = *((u8 *)(beacon->data + j));
+ req_pos++;
}
for (j = 0; j < 4; j++) {
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, 0x0);
- if (r < 0)
- return r;
+ ioreqs[req_pos].addr = CR_BCN_FIFO;
+ ioreqs[req_pos].value = 0x0;
+ req_pos++;
}
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 1);
- if (r < 0)
- return r;
+ BUG_ON(req_pos != num_cmds);
+
+ r = zd_iowrite32a_locked(&mac->chip, ioreqs, num_cmds);
+
+release_sema:
+ /*
+ * Try very hard to release device beacon semaphore, as otherwise
+ * device/driver can be left in an unusable state.
+ */
+ end_jiffies = jiffies + HZ / 2; /*~500ms*/
+ ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
+ while (ret < 0) {
+ if (time_is_before_eq_jiffies(end_jiffies)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ msleep(20);
+ ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
+ }
+
+ if (ret < 0)
+ dev_err(zd_mac_dev(mac), "Could not release "
+ "CR_BCN_FIFO_SEMAPHORE!\n");
+ if (r < 0 || ret < 0) {
+ if (r >= 0)
+ r = ret;
+ goto out;
+ }
/* 802.11b/g 2.4G CCK 1Mb
* 802.11a, not yet implemented, uses different values (see GPL vendor
* driver)
*/
- return zd_iowrite32(&mac->chip, CR_BCN_PLCP_CFG, 0x00000400 |
- (full_len << 19));
+ r = zd_iowrite32_locked(&mac->chip, 0x00000400 | (full_len << 19),
+ CR_BCN_PLCP_CFG);
+out:
+ mutex_unlock(&mac->chip.mutex);
+ kfree(ioreqs);
+ return r;
+
+reset_device:
+ mutex_unlock(&mac->chip.mutex);
+ kfree(ioreqs);
+
+ /* semaphore stuck, reset device to avoid fw freeze later */
+ dev_warn(zd_mac_dev(mac), "CR_BCN_FIFO_SEMAPHORE stuck, "
+ "reseting device...");
+ usb_queue_reset_device(mac->chip.usb.intf);
+
+ return r;
}
static int fill_ctrlset(struct zd_mac *mac,
@@ -701,7 +850,7 @@ static int fill_ctrlset(struct zd_mac *mac,
* control block of the skbuff will be initialized. If necessary the incoming
* mac80211 queues will be stopped.
*/
-static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -716,11 +865,10 @@ static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
r = zd_usb_tx(&mac->chip.usb, skb);
if (r)
goto fail;
- return 0;
+ return;
fail:
dev_kfree_skb(skb);
- return 0;
}
/**
@@ -779,6 +927,13 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
mac->ack_pending = 1;
mac->ack_signal = stats->signal;
+
+ /* Prevent pending tx-packet on AP-mode */
+ if (mac->type == NL80211_IFTYPE_AP) {
+ skb = __skb_dequeue(q);
+ zd_mac_tx_status(hw, skb, mac->ack_signal, NULL);
+ mac->ack_pending = 0;
+ }
}
spin_unlock_irqrestore(&q->lock, flags);
@@ -882,13 +1037,16 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
mac->type = vif->type;
break;
default:
return -EOPNOTSUPP;
}
- return zd_write_mac_addr(&mac->chip, vif->addr);
+ mac->vif = vif;
+
+ return set_mac_and_bssid(mac);
}
static void zd_op_remove_interface(struct ieee80211_hw *hw,
@@ -896,7 +1054,8 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
{
struct zd_mac *mac = zd_hw_mac(hw);
mac->type = NL80211_IFTYPE_UNSPECIFIED;
- zd_set_beacon_interval(&mac->chip, 0);
+ mac->vif = NULL;
+ zd_set_beacon_interval(&mac->chip, 0, 0, NL80211_IFTYPE_UNSPECIFIED);
zd_write_mac_addr(&mac->chip, NULL);
}
@@ -905,49 +1064,67 @@ static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
struct zd_mac *mac = zd_hw_mac(hw);
struct ieee80211_conf *conf = &hw->conf;
+ spin_lock_irq(&mac->lock);
+ mac->channel = conf->channel->hw_value;
+ spin_unlock_irq(&mac->lock);
+
return zd_chip_set_channel(&mac->chip, conf->channel->hw_value);
}
-static void zd_process_intr(struct work_struct *work)
+static void zd_beacon_done(struct zd_mac *mac)
{
- u16 int_status;
- struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
+ struct sk_buff *skb, *beacon;
- int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4));
- if (int_status & INT_CFG_NEXT_BCN)
- dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");
- else
- dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
-
- zd_chip_enable_hwint(&mac->chip);
-}
+ if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+ return;
+ if (!mac->vif || mac->vif->type != NL80211_IFTYPE_AP)
+ return;
+ /*
+ * Send out buffered broad- and multicast frames.
+ */
+ while (!ieee80211_queue_stopped(mac->hw, 0)) {
+ skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
+ if (!skb)
+ break;
+ zd_op_tx(mac->hw, skb);
+ }
-static void set_multicast_hash_handler(struct work_struct *work)
-{
- struct zd_mac *mac =
- container_of(work, struct zd_mac, set_multicast_hash_work);
- struct zd_mc_hash hash;
+ /*
+ * Fetch next beacon so that tim_count is updated.
+ */
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ if (beacon) {
+ zd_mac_config_beacon(mac->hw, beacon);
+ kfree_skb(beacon);
+ }
spin_lock_irq(&mac->lock);
- hash = mac->multicast_hash;
+ mac->beacon.last_update = jiffies;
spin_unlock_irq(&mac->lock);
-
- zd_chip_set_multicast_hash(&mac->chip, &hash);
}
-static void set_rx_filter_handler(struct work_struct *work)
+static void zd_process_intr(struct work_struct *work)
{
- struct zd_mac *mac =
- container_of(work, struct zd_mac, set_rx_filter_work);
- int r;
+ u16 int_status;
+ unsigned long flags;
+ struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
- dev_dbg_f(zd_mac_dev(mac), "\n");
- r = set_rx_filter(mac);
- if (r)
- dev_err(zd_mac_dev(mac), "set_rx_filter_handler error %d\n", r);
+ spin_lock_irqsave(&mac->lock, flags);
+ int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer + 4));
+ spin_unlock_irqrestore(&mac->lock, flags);
+
+ if (int_status & INT_CFG_NEXT_BCN) {
+ /*dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");*/
+ zd_beacon_done(mac);
+ } else {
+ dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
+ }
+
+ zd_chip_enable_hwint(&mac->chip);
}
+
static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
@@ -979,6 +1156,7 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
};
struct zd_mac *mac = zd_hw_mac(hw);
unsigned long flags;
+ int r;
/* Only deal with supported flags */
changed_flags &= SUPPORTED_FIF_FLAGS;
@@ -1000,11 +1178,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
mac->multicast_hash = hash;
spin_unlock_irqrestore(&mac->lock, flags);
- /* XXX: these can be called here now, can sleep now! */
- queue_work(zd_workqueue, &mac->set_multicast_hash_work);
+ zd_chip_set_multicast_hash(&mac->chip, &hash);
- if (changed_flags & FIF_CONTROL)
- queue_work(zd_workqueue, &mac->set_rx_filter_work);
+ if (changed_flags & FIF_CONTROL) {
+ r = set_rx_filter(mac);
+ if (r)
+ dev_err(zd_mac_dev(mac), "set_rx_filter error %d\n", r);
+ }
/* no handling required for FIF_OTHER_BSS as we don't currently
* do BSSID filtering */
@@ -1016,20 +1196,9 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
* time. */
}
-static void set_rts_cts_work(struct work_struct *work)
+static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble)
{
- struct zd_mac *mac =
- container_of(work, struct zd_mac, set_rts_cts_work);
- unsigned long flags;
- unsigned int short_preamble;
-
mutex_lock(&mac->chip.mutex);
-
- spin_lock_irqsave(&mac->lock, flags);
- mac->updating_rts_rate = 0;
- short_preamble = mac->short_preamble;
- spin_unlock_irqrestore(&mac->lock, flags);
-
zd_chip_set_rts_cts_rate_locked(&mac->chip, short_preamble);
mutex_unlock(&mac->chip.mutex);
}
@@ -1040,33 +1209,42 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
u32 changes)
{
struct zd_mac *mac = zd_hw_mac(hw);
- unsigned long flags;
int associated;
dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
if (mac->type == NL80211_IFTYPE_MESH_POINT ||
- mac->type == NL80211_IFTYPE_ADHOC) {
+ mac->type == NL80211_IFTYPE_ADHOC ||
+ mac->type == NL80211_IFTYPE_AP) {
associated = true;
if (changes & BSS_CHANGED_BEACON) {
struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
if (beacon) {
+ zd_chip_disable_hwint(&mac->chip);
zd_mac_config_beacon(hw, beacon);
+ zd_chip_enable_hwint(&mac->chip);
kfree_skb(beacon);
}
}
if (changes & BSS_CHANGED_BEACON_ENABLED) {
- u32 interval;
+ u16 interval = 0;
+ u8 period = 0;
- if (bss_conf->enable_beacon)
- interval = BCN_MODE_IBSS |
- bss_conf->beacon_int;
- else
- interval = 0;
+ if (bss_conf->enable_beacon) {
+ period = bss_conf->dtim_period;
+ interval = bss_conf->beacon_int;
+ }
- zd_set_beacon_interval(&mac->chip, interval);
+ spin_lock_irq(&mac->lock);
+ mac->beacon.period = period;
+ mac->beacon.interval = interval;
+ mac->beacon.last_update = jiffies;
+ spin_unlock_irq(&mac->lock);
+
+ zd_set_beacon_interval(&mac->chip, interval, period,
+ mac->type);
}
} else
associated = is_valid_ether_addr(bss_conf->bssid);
@@ -1078,15 +1256,11 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
/* TODO: do hardware bssid filtering */
if (changes & BSS_CHANGED_ERP_PREAMBLE) {
- spin_lock_irqsave(&mac->lock, flags);
+ spin_lock_irq(&mac->lock);
mac->short_preamble = bss_conf->use_short_preamble;
- if (!mac->updating_rts_rate) {
- mac->updating_rts_rate = 1;
- /* FIXME: should disable TX here, until work has
- * completed and RTS_CTS reg is updated */
- queue_work(zd_workqueue, &mac->set_rts_cts_work);
- }
- spin_unlock_irqrestore(&mac->lock, flags);
+ spin_unlock_irq(&mac->lock);
+
+ set_rts_cts(mac, bss_conf->use_short_preamble);
}
}
@@ -1138,12 +1312,14 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_UNSPEC;
+ IEEE80211_HW_SIGNAL_UNSPEC |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
hw->max_signal = 100;
hw->queues = 1;
@@ -1160,15 +1336,82 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
zd_chip_init(&mac->chip, hw, intf);
housekeeping_init(mac);
- INIT_WORK(&mac->set_multicast_hash_work, set_multicast_hash_handler);
- INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
- INIT_WORK(&mac->set_rx_filter_work, set_rx_filter_handler);
+ beacon_init(mac);
INIT_WORK(&mac->process_intr, zd_process_intr);
SET_IEEE80211_DEV(hw, &intf->dev);
return hw;
}
+#define BEACON_WATCHDOG_DELAY round_jiffies_relative(HZ)
+
+static void beacon_watchdog_handler(struct work_struct *work)
+{
+ struct zd_mac *mac =
+ container_of(work, struct zd_mac, beacon.watchdog_work.work);
+ struct sk_buff *beacon;
+ unsigned long timeout;
+ int interval, period;
+
+ if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+ goto rearm;
+ if (mac->type != NL80211_IFTYPE_AP || !mac->vif)
+ goto rearm;
+
+ spin_lock_irq(&mac->lock);
+ interval = mac->beacon.interval;
+ period = mac->beacon.period;
+ timeout = mac->beacon.last_update + msecs_to_jiffies(interval) + HZ;
+ spin_unlock_irq(&mac->lock);
+
+ if (interval > 0 && time_is_before_jiffies(timeout)) {
+ dev_dbg_f(zd_mac_dev(mac), "beacon interrupt stalled, "
+ "restarting. "
+ "(interval: %d, dtim: %d)\n",
+ interval, period);
+
+ zd_chip_disable_hwint(&mac->chip);
+
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ if (beacon) {
+ zd_mac_config_beacon(mac->hw, beacon);
+ kfree_skb(beacon);
+ }
+
+ zd_set_beacon_interval(&mac->chip, interval, period, mac->type);
+
+ zd_chip_enable_hwint(&mac->chip);
+
+ spin_lock_irq(&mac->lock);
+ mac->beacon.last_update = jiffies;
+ spin_unlock_irq(&mac->lock);
+ }
+
+rearm:
+ queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
+ BEACON_WATCHDOG_DELAY);
+}
+
+static void beacon_init(struct zd_mac *mac)
+{
+ INIT_DELAYED_WORK(&mac->beacon.watchdog_work, beacon_watchdog_handler);
+}
+
+static void beacon_enable(struct zd_mac *mac)
+{
+ dev_dbg_f(zd_mac_dev(mac), "\n");
+
+ mac->beacon.last_update = jiffies;
+ queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
+ BEACON_WATCHDOG_DELAY);
+}
+
+static void beacon_disable(struct zd_mac *mac)
+{
+ dev_dbg_f(zd_mac_dev(mac), "\n");
+ cancel_delayed_work_sync(&mac->beacon.watchdog_work);
+}
+
#define LINK_LED_WORK_DELAY HZ
static void link_led_handler(struct work_struct *work)
@@ -1179,6 +1422,9 @@ static void link_led_handler(struct work_struct *work)
int is_associated;
int r;
+ if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+ goto requeue;
+
spin_lock_irq(&mac->lock);
is_associated = mac->associated;
spin_unlock_irq(&mac->lock);
@@ -1188,6 +1434,7 @@ static void link_led_handler(struct work_struct *work)
if (r)
dev_dbg_f(zd_mac_dev(mac), "zd_chip_control_leds error %d\n", r);
+requeue:
queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work,
LINK_LED_WORK_DELAY);
}
@@ -1207,7 +1454,6 @@ static void housekeeping_enable(struct zd_mac *mac)
static void housekeeping_disable(struct zd_mac *mac)
{
dev_dbg_f(zd_mac_dev(mac), "\n");
- cancel_rearming_delayed_workqueue(zd_workqueue,
- &mac->housekeeping.link_led_work);
+ cancel_delayed_work_sync(&mac->housekeeping.link_led_work);
zd_chip_control_leds(&mac->chip, ZD_LED_OFF);
}
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index a6d86b996c79..f8c93c3fe755 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -163,6 +163,17 @@ struct housekeeping {
struct delayed_work link_led_work;
};
+struct beacon {
+ struct delayed_work watchdog_work;
+ unsigned long last_update;
+ u16 interval;
+ u8 period;
+};
+
+enum zd_device_flags {
+ ZD_DEVICE_RUNNING,
+};
+
#define ZD_MAC_STATS_BUFFER_SIZE 16
#define ZD_MAC_MAX_ACK_WAITERS 50
@@ -172,17 +183,19 @@ struct zd_mac {
spinlock_t lock;
spinlock_t intr_lock;
struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
struct housekeeping housekeeping;
- struct work_struct set_multicast_hash_work;
+ struct beacon beacon;
struct work_struct set_rts_cts_work;
- struct work_struct set_rx_filter_work;
struct work_struct process_intr;
struct zd_mc_hash multicast_hash;
u8 intr_buffer[USB_MAX_EP_INT_BUFFER];
u8 regdomain;
u8 default_regdomain;
+ u8 channel;
int type;
int associated;
+ unsigned long flags;
struct sk_buff_head ack_wait_queue;
struct ieee80211_channel channels[14];
struct ieee80211_rate rates[12];
@@ -191,9 +204,6 @@ struct zd_mac {
/* Short preamble (used for RTS/CTS) */
unsigned int short_preamble:1;
- /* flags to indicate update in progress */
- unsigned int updating_rts_rate:1;
-
/* whether to pass frames with CRC errors to stack */
unsigned int pass_failed_fcs:1;
@@ -304,6 +314,10 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length);
void zd_mac_tx_failed(struct urb *urb);
void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
+int zd_op_start(struct ieee80211_hw *hw);
+void zd_op_stop(struct ieee80211_hw *hw);
+int zd_restore_settings(struct zd_mac *mac);
+
#ifdef DEBUG
void zd_dump_rx_status(const struct rx_status *status);
#else
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 818e1480ca93..81e80489a052 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -55,6 +55,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x13b1, 0x001e), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x1435, 0x0711), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x14ea, 0xab10), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
@@ -92,6 +93,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x2019, 0xed01), .driver_info = DEVICE_ZD1211B },
/* "Driverless" devices that need ejecting */
{ USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
{ USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
@@ -375,8 +377,10 @@ static inline void handle_regs_int(struct urb *urb)
int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
if (int_num == CR_INTERRUPT) {
struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
+ spin_lock(&mac->lock);
memcpy(&mac->intr_buffer, urb->transfer_buffer,
USB_MAX_EP_INT_BUFFER);
+ spin_unlock(&mac->lock);
schedule_work(&mac->process_intr);
} else if (intr->read_regs_enabled) {
intr->read_regs.length = len = urb->actual_length;
@@ -407,8 +411,10 @@ static void int_urb_complete(struct urb *urb)
case -ENOENT:
case -ECONNRESET:
case -EPIPE:
- goto kfree;
+ dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ return;
default:
+ dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
goto resubmit;
}
@@ -439,12 +445,11 @@ static void int_urb_complete(struct urb *urb)
resubmit:
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r) {
- dev_dbg_f(urb_dev(urb), "resubmit urb %p\n", urb);
- goto kfree;
+ dev_dbg_f(urb_dev(urb), "error: resubmit urb %p err code %d\n",
+ urb, r);
+ /* TODO: add worker to reset intr->urb */
}
return;
-kfree:
- kfree(urb->transfer_buffer);
}
static inline int int_urb_interval(struct usb_device *udev)
@@ -475,9 +480,8 @@ static inline int usb_int_enabled(struct zd_usb *usb)
int zd_usb_enable_int(struct zd_usb *usb)
{
int r;
- struct usb_device *udev;
+ struct usb_device *udev = zd_usb_to_usbdev(usb);
struct zd_usb_interrupt *intr = &usb->intr;
- void *transfer_buffer = NULL;
struct urb *urb;
dev_dbg_f(zd_usb_dev(usb), "\n");
@@ -498,20 +502,21 @@ int zd_usb_enable_int(struct zd_usb *usb)
intr->urb = urb;
spin_unlock_irq(&intr->lock);
- /* TODO: make it a DMA buffer */
r = -ENOMEM;
- transfer_buffer = kmalloc(USB_MAX_EP_INT_BUFFER, GFP_KERNEL);
- if (!transfer_buffer) {
+ intr->buffer = usb_alloc_coherent(udev, USB_MAX_EP_INT_BUFFER,
+ GFP_KERNEL, &intr->buffer_dma);
+ if (!intr->buffer) {
dev_dbg_f(zd_usb_dev(usb),
"couldn't allocate transfer_buffer\n");
goto error_set_urb_null;
}
- udev = zd_usb_to_usbdev(usb);
usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN),
- transfer_buffer, USB_MAX_EP_INT_BUFFER,
+ intr->buffer, USB_MAX_EP_INT_BUFFER,
int_urb_complete, usb,
intr->interval);
+ urb->transfer_dma = intr->buffer_dma;
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb);
r = usb_submit_urb(urb, GFP_KERNEL);
@@ -523,7 +528,8 @@ int zd_usb_enable_int(struct zd_usb *usb)
return 0;
error:
- kfree(transfer_buffer);
+ usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
+ intr->buffer, intr->buffer_dma);
error_set_urb_null:
spin_lock_irq(&intr->lock);
intr->urb = NULL;
@@ -537,8 +543,11 @@ out:
void zd_usb_disable_int(struct zd_usb *usb)
{
unsigned long flags;
+ struct usb_device *udev = zd_usb_to_usbdev(usb);
struct zd_usb_interrupt *intr = &usb->intr;
struct urb *urb;
+ void *buffer;
+ dma_addr_t buffer_dma;
spin_lock_irqsave(&intr->lock, flags);
urb = intr->urb;
@@ -547,11 +556,18 @@ void zd_usb_disable_int(struct zd_usb *usb)
return;
}
intr->urb = NULL;
+ buffer = intr->buffer;
+ buffer_dma = intr->buffer_dma;
+ intr->buffer = NULL;
spin_unlock_irqrestore(&intr->lock, flags);
usb_kill_urb(urb);
dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb);
usb_free_urb(urb);
+
+ if (buffer)
+ usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
+ buffer, buffer_dma);
}
static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
@@ -599,6 +615,7 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
static void rx_urb_complete(struct urb *urb)
{
+ int r;
struct zd_usb *usb;
struct zd_usb_rx *rx;
const u8 *buffer;
@@ -613,6 +630,7 @@ static void rx_urb_complete(struct urb *urb)
case -ENOENT:
case -ECONNRESET:
case -EPIPE:
+ dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
return;
default:
dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
@@ -624,6 +642,8 @@ static void rx_urb_complete(struct urb *urb)
usb = urb->context;
rx = &usb->rx;
+ zd_usb_reset_rx_idle_timer(usb);
+
if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
/* If there is an old first fragment, we don't care. */
dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
@@ -652,7 +672,9 @@ static void rx_urb_complete(struct urb *urb)
}
resubmit:
- usb_submit_urb(urb, GFP_ATOMIC);
+ r = usb_submit_urb(urb, GFP_ATOMIC);
+ if (r)
+ dev_dbg_f(urb_dev(urb), "urb %p resubmit error %d\n", urb, r);
}
static struct urb *alloc_rx_urb(struct zd_usb *usb)
@@ -688,7 +710,7 @@ static void free_rx_urb(struct urb *urb)
usb_free_urb(urb);
}
-int zd_usb_enable_rx(struct zd_usb *usb)
+static int __zd_usb_enable_rx(struct zd_usb *usb)
{
int i, r;
struct zd_usb_rx *rx = &usb->rx;
@@ -740,7 +762,21 @@ error:
return r;
}
-void zd_usb_disable_rx(struct zd_usb *usb)
+int zd_usb_enable_rx(struct zd_usb *usb)
+{
+ int r;
+ struct zd_usb_rx *rx = &usb->rx;
+
+ mutex_lock(&rx->setup_mutex);
+ r = __zd_usb_enable_rx(usb);
+ mutex_unlock(&rx->setup_mutex);
+
+ zd_usb_reset_rx_idle_timer(usb);
+
+ return r;
+}
+
+static void __zd_usb_disable_rx(struct zd_usb *usb)
{
int i;
unsigned long flags;
@@ -767,6 +803,40 @@ void zd_usb_disable_rx(struct zd_usb *usb)
spin_unlock_irqrestore(&rx->lock, flags);
}
+void zd_usb_disable_rx(struct zd_usb *usb)
+{
+ struct zd_usb_rx *rx = &usb->rx;
+
+ mutex_lock(&rx->setup_mutex);
+ __zd_usb_disable_rx(usb);
+ mutex_unlock(&rx->setup_mutex);
+
+ cancel_delayed_work_sync(&rx->idle_work);
+}
+
+static void zd_usb_reset_rx(struct zd_usb *usb)
+{
+ bool do_reset;
+ struct zd_usb_rx *rx = &usb->rx;
+ unsigned long flags;
+
+ mutex_lock(&rx->setup_mutex);
+
+ spin_lock_irqsave(&rx->lock, flags);
+ do_reset = rx->urbs != NULL;
+ spin_unlock_irqrestore(&rx->lock, flags);
+
+ if (do_reset) {
+ __zd_usb_disable_rx(usb);
+ __zd_usb_enable_rx(usb);
+ }
+
+ mutex_unlock(&rx->setup_mutex);
+
+ if (do_reset)
+ zd_usb_reset_rx_idle_timer(usb);
+}
+
/**
* zd_usb_disable_tx - disable transmission
* @usb: the zd1211rw-private USB structure
@@ -777,19 +847,21 @@ void zd_usb_disable_tx(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
unsigned long flags;
- struct list_head *pos, *n;
+
+ atomic_set(&tx->enabled, 0);
+
+ /* kill all submitted tx-urbs */
+ usb_kill_anchored_urbs(&tx->submitted);
spin_lock_irqsave(&tx->lock, flags);
- list_for_each_safe(pos, n, &tx->free_urb_list) {
- list_del(pos);
- usb_free_urb(list_entry(pos, struct urb, urb_list));
- }
- tx->enabled = 0;
+ WARN_ON(!skb_queue_empty(&tx->submitted_skbs));
+ WARN_ON(tx->submitted_urbs != 0);
tx->submitted_urbs = 0;
+ spin_unlock_irqrestore(&tx->lock, flags);
+
/* The stopped state is ignored, relying on ieee80211_wake_queues()
* in a potentially following zd_usb_enable_tx().
*/
- spin_unlock_irqrestore(&tx->lock, flags);
}
/**
@@ -805,63 +877,13 @@ void zd_usb_enable_tx(struct zd_usb *usb)
struct zd_usb_tx *tx = &usb->tx;
spin_lock_irqsave(&tx->lock, flags);
- tx->enabled = 1;
+ atomic_set(&tx->enabled, 1);
tx->submitted_urbs = 0;
ieee80211_wake_queues(zd_usb_to_hw(usb));
tx->stopped = 0;
spin_unlock_irqrestore(&tx->lock, flags);
}
-/**
- * alloc_tx_urb - provides an tx URB
- * @usb: a &struct zd_usb pointer
- *
- * Allocates a new URB. If possible takes the urb from the free list in
- * usb->tx.
- */
-static struct urb *alloc_tx_urb(struct zd_usb *usb)
-{
- struct zd_usb_tx *tx = &usb->tx;
- unsigned long flags;
- struct list_head *entry;
- struct urb *urb;
-
- spin_lock_irqsave(&tx->lock, flags);
- if (list_empty(&tx->free_urb_list)) {
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- goto out;
- }
- entry = tx->free_urb_list.next;
- list_del(entry);
- urb = list_entry(entry, struct urb, urb_list);
-out:
- spin_unlock_irqrestore(&tx->lock, flags);
- return urb;
-}
-
-/**
- * free_tx_urb - frees a used tx URB
- * @usb: a &struct zd_usb pointer
- * @urb: URB to be freed
- *
- * Frees the transmission URB, which means to put it on the free URB
- * list.
- */
-static void free_tx_urb(struct zd_usb *usb, struct urb *urb)
-{
- struct zd_usb_tx *tx = &usb->tx;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
- if (!tx->enabled) {
- usb_free_urb(urb);
- goto out;
- }
- list_add(&urb->urb_list, &tx->free_urb_list);
-out:
- spin_unlock_irqrestore(&tx->lock, flags);
-}
-
static void tx_dec_submitted_urbs(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
@@ -903,6 +925,16 @@ static void tx_urb_complete(struct urb *urb)
struct sk_buff *skb;
struct ieee80211_tx_info *info;
struct zd_usb *usb;
+ struct zd_usb_tx *tx;
+
+ skb = (struct sk_buff *)urb->context;
+ info = IEEE80211_SKB_CB(skb);
+ /*
+ * grab 'usb' pointer before handing off the skb (since
+ * it might be freed by zd_mac_tx_to_dev or mac80211)
+ */
+ usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
+ tx = &usb->tx;
switch (urb->status) {
case 0:
@@ -920,20 +952,16 @@ static void tx_urb_complete(struct urb *urb)
goto resubmit;
}
free_urb:
- skb = (struct sk_buff *)urb->context;
- /*
- * grab 'usb' pointer before handing off the skb (since
- * it might be freed by zd_mac_tx_to_dev or mac80211)
- */
- info = IEEE80211_SKB_CB(skb);
- usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
+ skb_unlink(skb, &usb->tx.submitted_skbs);
zd_mac_tx_to_dev(skb, urb->status);
- free_tx_urb(usb, urb);
+ usb_free_urb(urb);
tx_dec_submitted_urbs(usb);
return;
resubmit:
+ usb_anchor_urb(urb, &tx->submitted);
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r) {
+ usb_unanchor_urb(urb);
dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r);
goto free_urb;
}
@@ -954,10 +982,17 @@ resubmit:
int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
{
int r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct usb_device *udev = zd_usb_to_usbdev(usb);
struct urb *urb;
+ struct zd_usb_tx *tx = &usb->tx;
+
+ if (!atomic_read(&tx->enabled)) {
+ r = -ENOENT;
+ goto out;
+ }
- urb = alloc_tx_urb(usb);
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
r = -ENOMEM;
goto out;
@@ -966,17 +1001,118 @@ int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
skb->data, skb->len, tx_urb_complete, skb);
+ info->rate_driver_data[1] = (void *)jiffies;
+ skb_queue_tail(&tx->submitted_skbs, skb);
+ usb_anchor_urb(urb, &tx->submitted);
+
r = usb_submit_urb(urb, GFP_ATOMIC);
- if (r)
+ if (r) {
+ dev_dbg_f(zd_usb_dev(usb), "error submit urb %p %d\n", urb, r);
+ usb_unanchor_urb(urb);
+ skb_unlink(skb, &tx->submitted_skbs);
goto error;
+ }
tx_inc_submitted_urbs(usb);
return 0;
error:
- free_tx_urb(usb, urb);
+ usb_free_urb(urb);
out:
return r;
}
+static bool zd_tx_timeout(struct zd_usb *usb)
+{
+ struct zd_usb_tx *tx = &usb->tx;
+ struct sk_buff_head *q = &tx->submitted_skbs;
+ struct sk_buff *skb, *skbnext;
+ struct ieee80211_tx_info *info;
+ unsigned long flags, trans_start;
+ bool have_timedout = false;
+
+ spin_lock_irqsave(&q->lock, flags);
+ skb_queue_walk_safe(q, skb, skbnext) {
+ info = IEEE80211_SKB_CB(skb);
+ trans_start = (unsigned long)info->rate_driver_data[1];
+
+ if (time_is_before_jiffies(trans_start + ZD_TX_TIMEOUT)) {
+ have_timedout = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return have_timedout;
+}
+
+static void zd_tx_watchdog_handler(struct work_struct *work)
+{
+ struct zd_usb *usb =
+ container_of(work, struct zd_usb, tx.watchdog_work.work);
+ struct zd_usb_tx *tx = &usb->tx;
+
+ if (!atomic_read(&tx->enabled) || !tx->watchdog_enabled)
+ goto out;
+ if (!zd_tx_timeout(usb))
+ goto out;
+
+ /* TX halted, try reset */
+ dev_warn(zd_usb_dev(usb), "TX-stall detected, reseting device...");
+
+ usb_queue_reset_device(usb->intf);
+
+ /* reset will stop this worker, don't rearm */
+ return;
+out:
+ queue_delayed_work(zd_workqueue, &tx->watchdog_work,
+ ZD_TX_WATCHDOG_INTERVAL);
+}
+
+void zd_tx_watchdog_enable(struct zd_usb *usb)
+{
+ struct zd_usb_tx *tx = &usb->tx;
+
+ if (!tx->watchdog_enabled) {
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+ queue_delayed_work(zd_workqueue, &tx->watchdog_work,
+ ZD_TX_WATCHDOG_INTERVAL);
+ tx->watchdog_enabled = 1;
+ }
+}
+
+void zd_tx_watchdog_disable(struct zd_usb *usb)
+{
+ struct zd_usb_tx *tx = &usb->tx;
+
+ if (tx->watchdog_enabled) {
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+ tx->watchdog_enabled = 0;
+ cancel_delayed_work_sync(&tx->watchdog_work);
+ }
+}
+
+static void zd_rx_idle_timer_handler(struct work_struct *work)
+{
+ struct zd_usb *usb =
+ container_of(work, struct zd_usb, rx.idle_work.work);
+ struct zd_mac *mac = zd_usb_to_mac(usb);
+
+ if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+ return;
+
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+
+ /* 30 seconds since last rx, reset rx */
+ zd_usb_reset_rx(usb);
+}
+
+void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
+{
+ struct zd_usb_rx *rx = &usb->rx;
+
+ cancel_delayed_work(&rx->idle_work);
+ queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
+}
+
static inline void init_usb_interrupt(struct zd_usb *usb)
{
struct zd_usb_interrupt *intr = &usb->intr;
@@ -991,22 +1127,27 @@ static inline void init_usb_rx(struct zd_usb *usb)
{
struct zd_usb_rx *rx = &usb->rx;
spin_lock_init(&rx->lock);
+ mutex_init(&rx->setup_mutex);
if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
rx->usb_packet_size = 512;
} else {
rx->usb_packet_size = 64;
}
ZD_ASSERT(rx->fragment_length == 0);
+ INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
}
static inline void init_usb_tx(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
spin_lock_init(&tx->lock);
- tx->enabled = 0;
+ atomic_set(&tx->enabled, 0);
tx->stopped = 0;
- INIT_LIST_HEAD(&tx->free_urb_list);
+ skb_queue_head_init(&tx->submitted_skbs);
+ init_usb_anchor(&tx->submitted);
tx->submitted_urbs = 0;
+ tx->watchdog_enabled = 0;
+ INIT_DELAYED_WORK(&tx->watchdog_work, zd_tx_watchdog_handler);
}
void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
@@ -1015,6 +1156,7 @@ void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
memset(usb, 0, sizeof(*usb));
usb->intf = usb_get_intf(intf);
usb_set_intfdata(usb->intf, hw);
+ init_usb_anchor(&usb->submitted_cmds);
init_usb_interrupt(usb);
init_usb_tx(usb);
init_usb_rx(usb);
@@ -1238,6 +1380,7 @@ static void disconnect(struct usb_interface *intf)
ieee80211_unregister_hw(hw);
/* Just in case something has gone wrong! */
+ zd_usb_disable_tx(usb);
zd_usb_disable_rx(usb);
zd_usb_disable_int(usb);
@@ -1253,11 +1396,92 @@ static void disconnect(struct usb_interface *intf)
dev_dbg(&intf->dev, "disconnected\n");
}
+static void zd_usb_resume(struct zd_usb *usb)
+{
+ struct zd_mac *mac = zd_usb_to_mac(usb);
+ int r;
+
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+
+ r = zd_op_start(zd_usb_to_hw(usb));
+ if (r < 0) {
+ dev_warn(zd_usb_dev(usb), "Device resume failed "
+ "with error code %d. Retrying...\n", r);
+ if (usb->was_running)
+ set_bit(ZD_DEVICE_RUNNING, &mac->flags);
+ usb_queue_reset_device(usb->intf);
+ return;
+ }
+
+ if (mac->type != NL80211_IFTYPE_UNSPECIFIED) {
+ r = zd_restore_settings(mac);
+ if (r < 0) {
+ dev_dbg(zd_usb_dev(usb),
+ "failed to restore settings, %d\n", r);
+ return;
+ }
+ }
+}
+
+static void zd_usb_stop(struct zd_usb *usb)
+{
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+
+ zd_op_stop(zd_usb_to_hw(usb));
+
+ zd_usb_disable_tx(usb);
+ zd_usb_disable_rx(usb);
+ zd_usb_disable_int(usb);
+
+ usb->initialized = 0;
+}
+
+static int pre_reset(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct zd_mac *mac;
+ struct zd_usb *usb;
+
+ if (!hw || intf->condition != USB_INTERFACE_BOUND)
+ return 0;
+
+ mac = zd_hw_mac(hw);
+ usb = &mac->chip.usb;
+
+ usb->was_running = test_bit(ZD_DEVICE_RUNNING, &mac->flags);
+
+ zd_usb_stop(usb);
+
+ mutex_lock(&mac->chip.mutex);
+ return 0;
+}
+
+static int post_reset(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct zd_mac *mac;
+ struct zd_usb *usb;
+
+ if (!hw || intf->condition != USB_INTERFACE_BOUND)
+ return 0;
+
+ mac = zd_hw_mac(hw);
+ usb = &mac->chip.usb;
+
+ mutex_unlock(&mac->chip.mutex);
+
+ if (usb->was_running)
+ zd_usb_resume(usb);
+ return 0;
+}
+
static struct usb_driver driver = {
.name = KBUILD_MODNAME,
.id_table = usb_ids,
.probe = probe,
.disconnect = disconnect,
+ .pre_reset = pre_reset,
+ .post_reset = post_reset,
};
struct workqueue_struct *zd_workqueue;
@@ -1391,30 +1615,35 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
return -EWOULDBLOCK;
}
if (!usb_int_enabled(usb)) {
- dev_dbg_f(zd_usb_dev(usb),
+ dev_dbg_f(zd_usb_dev(usb),
"error: usb interrupt not enabled\n");
return -EWOULDBLOCK;
}
+ ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+ BUILD_BUG_ON(sizeof(struct usb_req_read_regs) + USB_MAX_IOREAD16_COUNT *
+ sizeof(__le16) > sizeof(usb->req_buf));
+ BUG_ON(sizeof(struct usb_req_read_regs) + count * sizeof(__le16) >
+ sizeof(usb->req_buf));
+
req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16);
- req = kmalloc(req_len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ req = (void *)usb->req_buf;
+
req->id = cpu_to_le16(USB_REQ_READ_REGS);
for (i = 0; i < count; i++)
req->addr[i] = cpu_to_le16((u16)addresses[i]);
udev = zd_usb_to_usbdev(usb);
prepare_read_regs_int(usb);
- r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
- req, req_len, &actual_req_len, 1000 /* ms */);
+ r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
+ req, req_len, &actual_req_len, 50 /* ms */);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
- "error in usb_bulk_msg(). Error number %d\n", r);
+ "error in usb_interrupt_msg(). Error number %d\n", r);
goto error;
}
if (req_len != actual_req_len) {
- dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()\n"
+ dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n"
" req_len %d != actual_req_len %d\n",
req_len, actual_req_len);
r = -EIO;
@@ -1422,7 +1651,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
}
timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(50));
if (!timeout) {
disable_read_regs_int(usb);
dev_dbg_f(zd_usb_dev(usb), "read timed out\n");
@@ -1432,17 +1661,106 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
r = get_results(usb, values, req, count);
error:
- kfree(req);
return r;
}
-int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
- unsigned int count)
+static void iowrite16v_urb_complete(struct urb *urb)
+{
+ struct zd_usb *usb = urb->context;
+
+ if (urb->status && !usb->cmd_error)
+ usb->cmd_error = urb->status;
+}
+
+static int zd_submit_waiting_urb(struct zd_usb *usb, bool last)
+{
+ int r = 0;
+ struct urb *urb = usb->urb_async_waiting;
+
+ if (!urb)
+ return 0;
+
+ usb->urb_async_waiting = NULL;
+
+ if (!last)
+ urb->transfer_flags |= URB_NO_INTERRUPT;
+
+ usb_anchor_urb(urb, &usb->submitted_cmds);
+ r = usb_submit_urb(urb, GFP_KERNEL);
+ if (r) {
+ usb_unanchor_urb(urb);
+ dev_dbg_f(zd_usb_dev(usb),
+ "error in usb_submit_urb(). Error number %d\n", r);
+ goto error;
+ }
+
+ /* fall-through with r == 0 */
+error:
+ usb_free_urb(urb);
+ return r;
+}
+
+void zd_usb_iowrite16v_async_start(struct zd_usb *usb)
+{
+ ZD_ASSERT(usb_anchor_empty(&usb->submitted_cmds));
+ ZD_ASSERT(usb->urb_async_waiting == NULL);
+ ZD_ASSERT(!usb->in_async);
+
+ ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+
+ usb->in_async = 1;
+ usb->cmd_error = 0;
+ usb->urb_async_waiting = NULL;
+}
+
+int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout)
+{
+ int r;
+
+ ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+ ZD_ASSERT(usb->in_async);
+
+ /* Submit last iowrite16v URB */
+ r = zd_submit_waiting_urb(usb, true);
+ if (r) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "error in zd_submit_waiting_usb(). "
+ "Error number %d\n", r);
+
+ usb_kill_anchored_urbs(&usb->submitted_cmds);
+ goto error;
+ }
+
+ if (timeout)
+ timeout = usb_wait_anchor_empty_timeout(&usb->submitted_cmds,
+ timeout);
+ if (!timeout) {
+ usb_kill_anchored_urbs(&usb->submitted_cmds);
+ if (usb->cmd_error == -ENOENT) {
+ dev_dbg_f(zd_usb_dev(usb), "timed out");
+ r = -ETIMEDOUT;
+ goto error;
+ }
+ }
+
+ r = usb->cmd_error;
+error:
+ usb->in_async = 0;
+ return r;
+}
+
+int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+ unsigned int count)
{
int r;
struct usb_device *udev;
struct usb_req_write_regs *req = NULL;
- int i, req_len, actual_req_len;
+ int i, req_len;
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
+
+ ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+ ZD_ASSERT(usb->in_async);
if (count == 0)
return 0;
@@ -1458,11 +1776,23 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
return -EWOULDBLOCK;
}
+ udev = zd_usb_to_usbdev(usb);
+
+ ep = usb_pipe_endpoint(udev, usb_sndintpipe(udev, EP_REGS_OUT));
+ if (!ep)
+ return -ENOENT;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+
req_len = sizeof(struct usb_req_write_regs) +
count * sizeof(struct reg_data);
req = kmalloc(req_len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ if (!req) {
+ r = -ENOMEM;
+ goto error;
+ }
req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
for (i = 0; i < count; i++) {
@@ -1471,29 +1801,44 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
rw->value = cpu_to_le16(ioreqs[i].value);
}
- udev = zd_usb_to_usbdev(usb);
- r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
- req, req_len, &actual_req_len, 1000 /* ms */);
+ usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
+ req, req_len, iowrite16v_urb_complete, usb,
+ ep->desc.bInterval);
+ urb->transfer_flags |= URB_FREE_BUFFER | URB_SHORT_NOT_OK;
+
+ /* Submit previous URB */
+ r = zd_submit_waiting_urb(usb, false);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
- "error in usb_bulk_msg(). Error number %d\n", r);
- goto error;
- }
- if (req_len != actual_req_len) {
- dev_dbg_f(zd_usb_dev(usb),
- "error in usb_bulk_msg()"
- " req_len %d != actual_req_len %d\n",
- req_len, actual_req_len);
- r = -EIO;
+ "error in zd_submit_waiting_usb(). "
+ "Error number %d\n", r);
goto error;
}
- /* FALL-THROUGH with r == 0 */
+ /* Delay submit so that URB_NO_INTERRUPT flag can be set for all URBs
+ * of current batch except for the very last.
+ */
+ usb->urb_async_waiting = urb;
+ return 0;
error:
- kfree(req);
+ usb_free_urb(urb);
return r;
}
+int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+ unsigned int count)
+{
+ int r;
+
+ zd_usb_iowrite16v_async_start(usb);
+ r = zd_usb_iowrite16v_async(usb, ioreqs, count);
+ if (r) {
+ zd_usb_iowrite16v_async_end(usb, 0);
+ return r;
+ }
+ return zd_usb_iowrite16v_async_end(usb, 50 /* ms */);
+}
+
int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
{
int r;
@@ -1535,14 +1880,19 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"error %d: Couldn't read CR203\n", r);
- goto out;
+ return r;
}
bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA);
+ ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+ BUILD_BUG_ON(sizeof(struct usb_req_rfwrite) +
+ USB_MAX_RFWRITE_BIT_COUNT * sizeof(__le16) >
+ sizeof(usb->req_buf));
+ BUG_ON(sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16) >
+ sizeof(usb->req_buf));
+
req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16);
- req = kmalloc(req_len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ req = (void *)usb->req_buf;
req->id = cpu_to_le16(USB_REQ_WRITE_RF);
/* 1: 3683a, but not used in ZYDAS driver */
@@ -1557,15 +1907,15 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
}
udev = zd_usb_to_usbdev(usb);
- r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
- req, req_len, &actual_req_len, 1000 /* ms */);
+ r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
+ req, req_len, &actual_req_len, 50 /* ms */);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
- "error in usb_bulk_msg(). Error number %d\n", r);
+ "error in usb_interrupt_msg(). Error number %d\n", r);
goto out;
}
if (req_len != actual_req_len) {
- dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()"
+ dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()"
" req_len %d != actual_req_len %d\n",
req_len, actual_req_len);
r = -EIO;
@@ -1574,6 +1924,5 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
/* FALL-THROUGH with r == 0 */
out:
- kfree(req);
return r;
}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 1b1655cb7cb4..b3df2c8116cc 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -32,6 +32,10 @@
#define ZD_USB_TX_HIGH 5
#define ZD_USB_TX_LOW 2
+#define ZD_TX_TIMEOUT (HZ * 5)
+#define ZD_TX_WATCHDOG_INTERVAL round_jiffies_relative(HZ)
+#define ZD_RX_IDLE_INTERVAL round_jiffies_relative(30 * HZ)
+
enum devicetype {
DEVICE_ZD1211 = 0,
DEVICE_ZD1211B = 1,
@@ -162,6 +166,8 @@ struct zd_usb_interrupt {
struct read_regs_int read_regs;
spinlock_t lock;
struct urb *urb;
+ void *buffer;
+ dma_addr_t buffer_dma;
int interval;
u8 read_regs_enabled:1;
};
@@ -175,7 +181,9 @@ static inline struct usb_int_regs *get_read_regs(struct zd_usb_interrupt *intr)
struct zd_usb_rx {
spinlock_t lock;
- u8 fragment[2*USB_MAX_RX_SIZE];
+ struct mutex setup_mutex;
+ struct delayed_work idle_work;
+ u8 fragment[2 * USB_MAX_RX_SIZE];
unsigned int fragment_length;
unsigned int usb_packet_size;
struct urb **urbs;
@@ -184,19 +192,21 @@ struct zd_usb_rx {
/**
* struct zd_usb_tx - structure used for transmitting frames
+ * @enabled: atomic enabled flag, indicates whether tx is enabled
* @lock: lock for transmission
- * @free_urb_list: list of free URBs, contains all the URBs, which can be used
+ * @submitted: anchor for URBs sent to device
* @submitted_urbs: atomic integer that counts the URBs having sent to the
* device, which haven't been completed
- * @enabled: enabled flag, indicates whether tx is enabled
* @stopped: indicates whether higher level tx queues are stopped
*/
struct zd_usb_tx {
+ atomic_t enabled;
spinlock_t lock;
- struct list_head free_urb_list;
+ struct delayed_work watchdog_work;
+ struct sk_buff_head submitted_skbs;
+ struct usb_anchor submitted;
int submitted_urbs;
- int enabled;
- int stopped;
+ u8 stopped:1, watchdog_enabled:1;
};
/* Contains the usb parts. The structure doesn't require a lock because intf
@@ -207,7 +217,11 @@ struct zd_usb {
struct zd_usb_rx rx;
struct zd_usb_tx tx;
struct usb_interface *intf;
- u8 is_zd1211b:1, initialized:1;
+ struct usb_anchor submitted_cmds;
+ struct urb *urb_async_waiting;
+ int cmd_error;
+ u8 req_buf[64]; /* zd_usb_iowrite16v needs 62 bytes */
+ u8 is_zd1211b:1, initialized:1, was_running:1, in_async:1;
};
#define zd_usb_dev(usb) (&usb->intf->dev)
@@ -234,12 +248,17 @@ void zd_usb_clear(struct zd_usb *usb);
int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size);
+void zd_tx_watchdog_enable(struct zd_usb *usb);
+void zd_tx_watchdog_disable(struct zd_usb *usb);
+
int zd_usb_enable_int(struct zd_usb *usb);
void zd_usb_disable_int(struct zd_usb *usb);
int zd_usb_enable_rx(struct zd_usb *usb);
void zd_usb_disable_rx(struct zd_usb *usb);
+void zd_usb_reset_rx_idle_timer(struct zd_usb *usb);
+
void zd_usb_enable_tx(struct zd_usb *usb);
void zd_usb_disable_tx(struct zd_usb *usb);
@@ -254,6 +273,10 @@ static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value,
return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1);
}
+void zd_usb_iowrite16v_async_start(struct zd_usb *usb);
+int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout);
+int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
+ unsigned int count);
int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
unsigned int count);
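The three _async declarations above describe a batched register-write interface. A plausible call sequence, inferred only from these prototypes (the helper name, the register contents, the error handling and the timeout unit are all assumptions; the real callers live in the driver's .c files), might look like:

/* Hypothetical usage sketch, based only on the prototypes above. */
static int example_async_batch(struct zd_usb *usb)
{
	static const struct zd_ioreq16 ioreqs[] = {
		{ },	/* real entries would use register addresses from zd_chip.h */
	};
	int r;

	zd_usb_iowrite16v_async_start(usb);
	r = zd_usb_iowrite16v_async(usb, ioreqs, ARRAY_SIZE(ioreqs));
	if (r)
		return r;
	return zd_usb_iowrite16v_async_end(usb, 50 /* timeout; unit depends on the implementation */);
}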
diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile
new file mode 100644
index 000000000000..e346e8125ef5
--- /dev/null
+++ b/drivers/net/xen-netback/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
+
+xen-netback-y := netback.o xenbus.o interface.o
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
new file mode 100644
index 000000000000..5d7bbf2b2ee7
--- /dev/null
+++ b/drivers/net/xen-netback/common.h
@@ -0,0 +1,161 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_NETBACK__COMMON_H__
+#define __XEN_NETBACK__COMMON_H__
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include <xen/interface/io/netif.h>
+#include <xen/interface/grant_table.h>
+#include <xen/grant_table.h>
+#include <xen/xenbus.h>
+
+struct xen_netbk;
+
+struct xenvif {
+ /* Unique identifier for this interface. */
+ domid_t domid;
+ unsigned int handle;
+
+ /* Reference to netback processing backend. */
+ struct xen_netbk *netbk;
+
+ u8 fe_dev_addr[6];
+
+ /* Physical parameters of the comms window. */
+ grant_handle_t tx_shmem_handle;
+ grant_ref_t tx_shmem_ref;
+ grant_handle_t rx_shmem_handle;
+ grant_ref_t rx_shmem_ref;
+ unsigned int irq;
+
+ /* List of frontends to notify after a batch of frames sent. */
+ struct list_head notify_list;
+
+ /* The shared rings and indexes. */
+ struct xen_netif_tx_back_ring tx;
+ struct xen_netif_rx_back_ring rx;
+ struct vm_struct *tx_comms_area;
+ struct vm_struct *rx_comms_area;
+
+ /* Flags that must not be set in dev->features */
+ u32 features_disabled;
+
+ /* Frontend feature information. */
+ u8 can_sg:1;
+ u8 gso:1;
+ u8 gso_prefix:1;
+ u8 csum:1;
+
+ /* Internal feature information. */
+ u8 can_queue:1; /* can queue packets for receiver? */
+
+ /*
+ * Allow xenvif_start_xmit() to peek ahead in the rx request
+ * ring. This is a prediction of what rx_req_cons will be
+ * once all queued skbs are put on the ring.
+ */
+ RING_IDX rx_req_cons_peek;
+
+ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+ unsigned long credit_bytes;
+ unsigned long credit_usec;
+ unsigned long remaining_credit;
+ struct timer_list credit_timeout;
+
+ /* Statistics */
+ unsigned long rx_gso_checksum_fixup;
+
+ /* Miscellaneous private stuff. */
+ struct list_head schedule_list;
+ atomic_t refcnt;
+ struct net_device *dev;
+
+ wait_queue_head_t waiting_to_free;
+};
+
+#define XEN_NETIF_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
+#define XEN_NETIF_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
+
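With 4 KiB pages both macros above evaluate to 256 slots, which matches MAX_PENDING_REQS defined later in netback.c. __RING_SIZE rounds the usable entry count down to a power of two; a standalone illustration of that rounding follows, where the 64-byte ring header and the 12-/8-byte entry sizes are assumptions for the sake of the example (the authoritative values come from xen/interface/io/netif.h and ring.h).

/* Standalone illustration (userspace, not kernel code). */
#include <stdio.h>

static unsigned int round_down_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned int page = 4096, header = 64;	/* assumed sizes, see above */

	printf("tx ring: %u slots\n", round_down_pow2((page - header) / 12));
	printf("rx ring: %u slots\n", round_down_pow2((page - header) / 8));
	return 0;	/* both print 256 with these assumptions */
}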
+struct xenvif *xenvif_alloc(struct device *parent,
+ domid_t domid,
+ unsigned int handle);
+
+int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+ unsigned long rx_ring_ref, unsigned int evtchn);
+void xenvif_disconnect(struct xenvif *vif);
+
+void xenvif_get(struct xenvif *vif);
+void xenvif_put(struct xenvif *vif);
+
+int xenvif_xenbus_init(void);
+
+int xenvif_schedulable(struct xenvif *vif);
+
+int xen_netbk_rx_ring_full(struct xenvif *vif);
+
+int xen_netbk_must_stop_queue(struct xenvif *vif);
+
+/* (Un)Map communication rings. */
+void xen_netbk_unmap_frontend_rings(struct xenvif *vif);
+int xen_netbk_map_frontend_rings(struct xenvif *vif,
+ grant_ref_t tx_ring_ref,
+ grant_ref_t rx_ring_ref);
+
+/* (De)Register a xenvif with the netback backend. */
+void xen_netbk_add_xenvif(struct xenvif *vif);
+void xen_netbk_remove_xenvif(struct xenvif *vif);
+
+/* (De)Schedule backend processing for a xenvif */
+void xen_netbk_schedule_xenvif(struct xenvif *vif);
+void xen_netbk_deschedule_xenvif(struct xenvif *vif);
+
+/* Check for SKBs from frontend and schedule backend processing */
+void xen_netbk_check_rx_xenvif(struct xenvif *vif);
+/* Receive an SKB from the frontend */
+void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb);
+
+/* Queue an SKB for transmission to the frontend */
+void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
+/* Notify xenvif that ring now has space to send an skb to the frontend */
+void xenvif_notify_tx_completion(struct xenvif *vif);
+
+/* Returns number of ring slots required to send an skb to the frontend */
+unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+
+#endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
new file mode 100644
index 000000000000..de569cc19da4
--- /dev/null
+++ b/drivers/net/xen-netback/interface.c
@@ -0,0 +1,424 @@
+/*
+ * Network-device interface management.
+ *
+ * Copyright (c) 2004-2005, Keir Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "common.h"
+
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+
+#include <xen/events.h>
+#include <asm/xen/hypercall.h>
+
+#define XENVIF_QUEUE_LENGTH 32
+
+void xenvif_get(struct xenvif *vif)
+{
+ atomic_inc(&vif->refcnt);
+}
+
+void xenvif_put(struct xenvif *vif)
+{
+ if (atomic_dec_and_test(&vif->refcnt))
+ wake_up(&vif->waiting_to_free);
+}
+
+int xenvif_schedulable(struct xenvif *vif)
+{
+ return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
+}
+
+static int xenvif_rx_schedulable(struct xenvif *vif)
+{
+ return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
+}
+
+static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+{
+ struct xenvif *vif = dev_id;
+
+ if (vif->netbk == NULL)
+ return IRQ_NONE;
+
+ xen_netbk_schedule_xenvif(vif);
+
+ if (xenvif_rx_schedulable(vif))
+ netif_wake_queue(vif->dev);
+
+ return IRQ_HANDLED;
+}
+
+static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xenvif *vif = netdev_priv(dev);
+
+ BUG_ON(skb->dev != dev);
+
+ if (vif->netbk == NULL)
+ goto drop;
+
+ /* Drop the packet if the target domain has no receive buffers. */
+ if (!xenvif_rx_schedulable(vif))
+ goto drop;
+
+ /* Reserve ring slots for the worst-case number of fragments. */
+ vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
+ xenvif_get(vif);
+
+ if (vif->can_queue && xen_netbk_must_stop_queue(vif))
+ netif_stop_queue(dev);
+
+ xen_netbk_queue_tx_skb(vif, skb);
+
+ return NETDEV_TX_OK;
+
+ drop:
+ vif->dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb)
+{
+ netif_rx_ni(skb);
+}
+
+void xenvif_notify_tx_completion(struct xenvif *vif)
+{
+ if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
+ netif_wake_queue(vif->dev);
+}
+
+static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
+{
+ struct xenvif *vif = netdev_priv(dev);
+ return &vif->dev->stats;
+}
+
+static void xenvif_up(struct xenvif *vif)
+{
+ xen_netbk_add_xenvif(vif);
+ enable_irq(vif->irq);
+ xen_netbk_check_rx_xenvif(vif);
+}
+
+static void xenvif_down(struct xenvif *vif)
+{
+ disable_irq(vif->irq);
+ xen_netbk_deschedule_xenvif(vif);
+ xen_netbk_remove_xenvif(vif);
+}
+
+static int xenvif_open(struct net_device *dev)
+{
+ struct xenvif *vif = netdev_priv(dev);
+ if (netif_carrier_ok(dev))
+ xenvif_up(vif);
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int xenvif_close(struct net_device *dev)
+{
+ struct xenvif *vif = netdev_priv(dev);
+ if (netif_carrier_ok(dev))
+ xenvif_down(vif);
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int xenvif_change_mtu(struct net_device *dev, int mtu)
+{
+ struct xenvif *vif = netdev_priv(dev);
+ int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;
+
+ if (mtu > max)
+ return -EINVAL;
+ dev->mtu = mtu;
+ return 0;
+}
+
+static void xenvif_set_features(struct xenvif *vif)
+{
+ struct net_device *dev = vif->dev;
+ u32 features = dev->features;
+
+ if (vif->can_sg)
+ features |= NETIF_F_SG;
+ if (vif->gso || vif->gso_prefix)
+ features |= NETIF_F_TSO;
+ if (vif->csum)
+ features |= NETIF_F_IP_CSUM;
+
+ features &= ~(vif->features_disabled);
+
+ if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN)
+ dev->mtu = ETH_DATA_LEN;
+
+ dev->features = features;
+}
+
+static int xenvif_set_tx_csum(struct net_device *dev, u32 data)
+{
+ struct xenvif *vif = netdev_priv(dev);
+ if (data) {
+ if (!vif->csum)
+ return -EOPNOTSUPP;
+ vif->features_disabled &= ~NETIF_F_IP_CSUM;
+ } else {
+ vif->features_disabled |= NETIF_F_IP_CSUM;
+ }
+
+ xenvif_set_features(vif);
+ return 0;
+}
+
+static int xenvif_set_sg(struct net_device *dev, u32 data)
+{
+ struct xenvif *vif = netdev_priv(dev);
+ if (data) {
+ if (!vif->can_sg)
+ return -EOPNOTSUPP;
+ vif->features_disabled &= ~NETIF_F_SG;
+ } else {
+ vif->features_disabled |= NETIF_F_SG;
+ }
+
+ xenvif_set_features(vif);
+ return 0;
+}
+
+static int xenvif_set_tso(struct net_device *dev, u32 data)
+{
+ struct xenvif *vif = netdev_priv(dev);
+ if (data) {
+ if (!vif->gso && !vif->gso_prefix)
+ return -EOPNOTSUPP;
+ vif->features_disabled &= ~NETIF_F_TSO;
+ } else {
+ vif->features_disabled |= NETIF_F_TSO;
+ }
+
+ xenvif_set_features(vif);
+ return 0;
+}
+
+static const struct xenvif_stat {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} xenvif_stats[] = {
+ {
+ "rx_gso_checksum_fixup",
+ offsetof(struct xenvif, rx_gso_checksum_fixup)
+ },
+};
+
+static int xenvif_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(xenvif_stats);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void xenvif_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 * data)
+{
+ void *vif = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
+ data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
+}
+
+static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ xenvif_stats[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static struct ethtool_ops xenvif_ethtool_ops = {
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = xenvif_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = xenvif_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = xenvif_set_tso,
+ .get_link = ethtool_op_get_link,
+
+ .get_sset_count = xenvif_get_sset_count,
+ .get_ethtool_stats = xenvif_get_ethtool_stats,
+ .get_strings = xenvif_get_strings,
+};
+
+static struct net_device_ops xenvif_netdev_ops = {
+ .ndo_start_xmit = xenvif_start_xmit,
+ .ndo_get_stats = xenvif_get_stats,
+ .ndo_open = xenvif_open,
+ .ndo_stop = xenvif_close,
+ .ndo_change_mtu = xenvif_change_mtu,
+};
+
+struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
+ unsigned int handle)
+{
+ int err;
+ struct net_device *dev;
+ struct xenvif *vif;
+ char name[IFNAMSIZ] = {};
+
+ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
+ dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
+ if (dev == NULL) {
+ pr_warn("Could not allocate netdev\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ SET_NETDEV_DEV(dev, parent);
+
+ vif = netdev_priv(dev);
+ vif->domid = domid;
+ vif->handle = handle;
+ vif->netbk = NULL;
+ vif->can_sg = 1;
+ vif->csum = 1;
+ atomic_set(&vif->refcnt, 1);
+ init_waitqueue_head(&vif->waiting_to_free);
+ vif->dev = dev;
+ INIT_LIST_HEAD(&vif->schedule_list);
+ INIT_LIST_HEAD(&vif->notify_list);
+
+ vif->credit_bytes = vif->remaining_credit = ~0UL;
+ vif->credit_usec = 0UL;
+ init_timer(&vif->credit_timeout);
+ /* Initialize 'expires' now: it's used to track the credit window. */
+ vif->credit_timeout.expires = jiffies;
+
+ dev->netdev_ops = &xenvif_netdev_ops;
+ xenvif_set_features(vif);
+ SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
+
+ dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
+
+ /*
+ * Initialise a dummy MAC address. We choose the numerically
+ * largest non-broadcast address to prevent the address getting
+ * stolen by an Ethernet bridge for STP purposes.
+ * (FE:FF:FF:FF:FF:FF)
+ */
+ memset(dev->dev_addr, 0xFF, ETH_ALEN);
+ dev->dev_addr[0] &= ~0x01;
+
+ netif_carrier_off(dev);
+
+ err = register_netdev(dev);
+ if (err) {
+ netdev_warn(dev, "Could not register device: err=%d\n", err);
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+ netdev_dbg(dev, "Successfully created xenvif\n");
+ return vif;
+}
+
+int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
+ unsigned long rx_ring_ref, unsigned int evtchn)
+{
+ int err = -ENOMEM;
+
+ /* Already connected through? */
+ if (vif->irq)
+ return 0;
+
+ xenvif_set_features(vif);
+
+ err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+ if (err < 0)
+ goto err;
+
+ err = bind_interdomain_evtchn_to_irqhandler(
+ vif->domid, evtchn, xenvif_interrupt, 0,
+ vif->dev->name, vif);
+ if (err < 0)
+ goto err_unmap;
+ vif->irq = err;
+ disable_irq(vif->irq);
+
+ xenvif_get(vif);
+
+ rtnl_lock();
+ netif_carrier_on(vif->dev);
+ if (netif_running(vif->dev))
+ xenvif_up(vif);
+ rtnl_unlock();
+
+ return 0;
+err_unmap:
+ xen_netbk_unmap_frontend_rings(vif);
+err:
+ return err;
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+ struct net_device *dev = vif->dev;
+ if (netif_carrier_ok(dev)) {
+ rtnl_lock();
+ netif_carrier_off(dev); /* discard queued packets */
+ if (netif_running(dev))
+ xenvif_down(vif);
+ rtnl_unlock();
+ xenvif_put(vif);
+ }
+
+ atomic_dec(&vif->refcnt);
+ wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+
+ del_timer_sync(&vif->credit_timeout);
+
+ if (vif->irq)
+ unbind_from_irqhandler(vif->irq, vif);
+
+ unregister_netdev(vif->dev);
+
+ xen_netbk_unmap_frontend_rings(vif);
+
+ free_netdev(vif->dev);
+}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
new file mode 100644
index 000000000000..0e4851b8a773
--- /dev/null
+++ b/drivers/net/xen-netback/netback.c
@@ -0,0 +1,1745 @@
+/*
+ * Back-end of the driver for virtual network devices. This portion of the
+ * driver exports a 'unified' network-device interface that can be accessed
+ * by any operating system that implements a compatible front end. A
+ * reference front-end implementation can be found in:
+ * drivers/net/xen-netfront.c
+ *
+ * Copyright (c) 2002-2005, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "common.h"
+
+#include <linux/kthread.h>
+#include <linux/if_vlan.h>
+#include <linux/udp.h>
+
+#include <net/tcp.h>
+
+#include <xen/events.h>
+#include <xen/interface/memory.h>
+
+#include <asm/xen/hypercall.h>
+#include <asm/xen/page.h>
+
+struct pending_tx_info {
+ struct xen_netif_tx_request req;
+ struct xenvif *vif;
+};
+typedef unsigned int pending_ring_idx_t;
+
+struct netbk_rx_meta {
+ int id;
+ int size;
+ int gso_size;
+};
+
+#define MAX_PENDING_REQS 256
+
+#define MAX_BUFFER_OFFSET PAGE_SIZE
+
+/* extra field used in struct page */
+union page_ext {
+ struct {
+#if BITS_PER_LONG < 64
+#define IDX_WIDTH 8
+#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
+ unsigned int group:GROUP_WIDTH;
+ unsigned int idx:IDX_WIDTH;
+#else
+ unsigned int group, idx;
+#endif
+ } e;
+ void *mapping;
+};
+
+struct xen_netbk {
+ wait_queue_head_t wq;
+ struct task_struct *task;
+
+ struct sk_buff_head rx_queue;
+ struct sk_buff_head tx_queue;
+
+ struct timer_list net_timer;
+
+ struct page *mmap_pages[MAX_PENDING_REQS];
+
+ pending_ring_idx_t pending_prod;
+ pending_ring_idx_t pending_cons;
+ struct list_head net_schedule_list;
+
+ /* Protect the net_schedule_list in netif. */
+ spinlock_t net_schedule_list_lock;
+
+ atomic_t netfront_count;
+
+ struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+ struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
+
+ u16 pending_ring[MAX_PENDING_REQS];
+
+ /*
+ * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
+ * head/fragment page uses 2 copy operations because it
+ * straddles two buffers in the frontend.
+ */
+ struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
+ struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
+};
+
+static struct xen_netbk *xen_netbk;
+static int xen_netbk_group_nr;
+
+void xen_netbk_add_xenvif(struct xenvif *vif)
+{
+ int i;
+ int min_netfront_count;
+ int min_group = 0;
+ struct xen_netbk *netbk;
+
+ min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
+ for (i = 0; i < xen_netbk_group_nr; i++) {
+ int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
+ if (netfront_count < min_netfront_count) {
+ min_group = i;
+ min_netfront_count = netfront_count;
+ }
+ }
+
+ netbk = &xen_netbk[min_group];
+
+ vif->netbk = netbk;
+ atomic_inc(&netbk->netfront_count);
+}
+
+void xen_netbk_remove_xenvif(struct xenvif *vif)
+{
+ struct xen_netbk *netbk = vif->netbk;
+ vif->netbk = NULL;
+ atomic_dec(&netbk->netfront_count);
+}
+
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void make_tx_response(struct xenvif *vif,
+ struct xen_netif_tx_request *txp,
+ s8 st);
+static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
+ u16 id,
+ s8 st,
+ u16 offset,
+ u16 size,
+ u16 flags);
+
+static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
+ unsigned int idx)
+{
+ return page_to_pfn(netbk->mmap_pages[idx]);
+}
+
+static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
+ unsigned int idx)
+{
+ return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
+}
+
+/* extra field used in struct page */
+static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
+ unsigned int idx)
+{
+ unsigned int group = netbk - xen_netbk;
+ union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
+
+ BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
+ pg->mapping = ext.mapping;
+}
+
+static int get_page_ext(struct page *pg,
+ unsigned int *pgroup, unsigned int *pidx)
+{
+ union page_ext ext = { .mapping = pg->mapping };
+ struct xen_netbk *netbk;
+ unsigned int group, idx;
+
+ group = ext.e.group - 1;
+
+ if (group < 0 || group >= xen_netbk_group_nr)
+ return 0;
+
+ netbk = &xen_netbk[group];
+
+ idx = ext.e.idx;
+
+ if ((idx < 0) || (idx >= MAX_PENDING_REQS))
+ return 0;
+
+ if (netbk->mmap_pages[idx] != pg)
+ return 0;
+
+ *pgroup = group;
+ *pidx = idx;
+
+ return 1;
+}
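set_page_ext() stashes the owning netback group and pending-ring index in page->mapping, storing the group off by one so that a zero mapping (an untouched page) can never be mistaken for group 0. Below is a standalone userspace demo of that encode/decode, assuming 64-bit pointers as in the #else branch of union page_ext; names are local to the demo.

/* Standalone demo of the page_ext packing scheme (64-bit build assumed). */
#include <assert.h>
#include <stdio.h>

_Static_assert(sizeof(void *) >= 2 * sizeof(unsigned int),
	       "demo assumes pointers can hold two unsigned ints");

union page_ext_demo {
	struct {
		unsigned int group, idx;	/* 64-bit layout from the patch */
	} e;
	void *mapping;
};

int main(void)
{
	/* encode: group stored off by one, exactly as set_page_ext() does */
	union page_ext_demo ext = { .e = { .group = 3 + 1, .idx = 117 } };
	void *stored = ext.mapping;		/* what would land in page->mapping */

	/* decode: the get_page_ext() side */
	union page_ext_demo back = { .mapping = stored };
	assert(back.e.group - 1 == 3);
	assert(back.e.idx == 117);
	printf("group=%u idx=%u\n", back.e.group - 1, back.e.idx);
	return 0;
}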
+
+/*
+ * This is the amount of packet we copy rather than map, so that the
+ * guest can't fiddle with the contents of the headers while we do
+ * packet processing on them (netfilter, routing, etc).
+ */
+#define PKT_PROT_LEN (ETH_HLEN + \
+ VLAN_HLEN + \
+ sizeof(struct iphdr) + MAX_IPOPTLEN + \
+ sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
+
+static inline pending_ring_idx_t pending_index(unsigned i)
+{
+ return i & (MAX_PENDING_REQS-1);
+}
+
+static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
+{
+ return MAX_PENDING_REQS -
+ netbk->pending_prod + netbk->pending_cons;
+}
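pending_prod and pending_cons are free-running counters: because MAX_PENDING_REQS is a power of two, pending_index() reduces them with a simple mask, and nr_pending_reqs() stays correct across wrap thanks to unsigned arithmetic. A standalone demo of that arithmetic, with DEMO_RING_SIZE standing in for MAX_PENDING_REQS:

/* Standalone demo: free-running counters over a power-of-two ring. */
#include <assert.h>

#define DEMO_RING_SIZE 256u	/* must be a power of two */

static unsigned int demo_index(unsigned int i)
{
	return i & (DEMO_RING_SIZE - 1);	/* same as i % DEMO_RING_SIZE */
}

int main(void)
{
	unsigned int prod = 0xfffffff0u;	/* counters are allowed to wrap... */
	unsigned int cons = 0xffffffe0u;	/* ...and sit 16 entries behind prod */

	assert(demo_index(prod) == demo_index(prod + DEMO_RING_SIZE));
	/* unsigned subtraction still yields the distance after wrap */
	assert(prod - cons == 16);
	/* mirrors the nr_pending_reqs() expression: SIZE - prod + cons */
	assert(DEMO_RING_SIZE - prod + cons == 240);
	return 0;
}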
+
+static void xen_netbk_kick_thread(struct xen_netbk *netbk)
+{
+ wake_up(&netbk->wq);
+}
+
+static int max_required_rx_slots(struct xenvif *vif)
+{
+ int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+
+ if (vif->can_sg || vif->gso || vif->gso_prefix)
+ max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
+
+ return max;
+}
+
+int xen_netbk_rx_ring_full(struct xenvif *vif)
+{
+ RING_IDX peek = vif->rx_req_cons_peek;
+ RING_IDX needed = max_required_rx_slots(vif);
+
+ return ((vif->rx.sring->req_prod - peek) < needed) ||
+ ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
+}
+
+int xen_netbk_must_stop_queue(struct xenvif *vif)
+{
+ if (!xen_netbk_rx_ring_full(vif))
+ return 0;
+
+ vif->rx.sring->req_event = vif->rx_req_cons_peek +
+ max_required_rx_slots(vif);
+ mb(); /* request notification /then/ check the queue */
+
+ return xen_netbk_rx_ring_full(vif);
+}
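The ring-full test compares RING_IDX values by unsigned subtraction, so it keeps working when the indices wrap; xen_netbk_must_stop_queue() then re-arms req_event and re-checks after the barrier to close the race with the frontend posting new requests. A standalone demo of the wrap-safe comparison (ring_idx_t is a local stand-in for RING_IDX):

/* Standalone demo: wrap-safe ring index comparison by subtraction. */
#include <assert.h>

typedef unsigned int ring_idx_t;

int main(void)
{
	ring_idx_t req_prod = 5;		/* producer has wrapped past 0xffffffff */
	ring_idx_t peek = 0xfffffffdu;		/* consumer-side peek, just below the wrap */
	ring_idx_t needed = 20;

	/* 5 - 0xfffffffd == 8 in unsigned arithmetic: only 8 slots available */
	assert(req_prod - peek == 8);
	assert(req_prod - peek < needed);	/* ring considered full */
	return 0;
}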
+
+/*
+ * Returns true if we should start a new receive buffer instead of
+ * adding 'size' bytes to a buffer which currently contains 'offset'
+ * bytes.
+ */
+static bool start_new_rx_buffer(int offset, unsigned long size, int head)
+{
+ /* simple case: we have completely filled the current buffer. */
+ if (offset == MAX_BUFFER_OFFSET)
+ return true;
+
+ /*
+ * complex case: start a fresh buffer if the current frag
+ * would overflow the current buffer but only if:
+ * (i) this frag would fit completely in the next buffer
+ * and (ii) there is already some data in the current buffer
+ * and (iii) this is not the head buffer.
+ *
+ * Where:
+ * - (i) stops us splitting a frag into two copies
+ * unless the frag is too large for a single buffer.
+ * - (ii) stops us from leaving a buffer pointlessly empty.
+ * - (iii) stops us leaving the first buffer
+ * empty. Strictly speaking this is already covered
+ * by (ii) but is explicitly checked because
+ * netfront relies on the first buffer being
+ * non-empty and can crash otherwise.
+ *
+ * This means we will effectively linearise small
+ * frags but do not needlessly split large buffers
+ * into multiple copies, tending to give large frags
+ * their own buffers as before.
+ */
+ if ((offset + size > MAX_BUFFER_OFFSET) &&
+ (size <= MAX_BUFFER_OFFSET) && offset && !head)
+ return true;
+
+ return false;
+}
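A standalone restatement of the decision above with a few worked cases, using the same MAX_BUFFER_OFFSET value (PAGE_SIZE, i.e. 4096 here) as the driver:

/* Standalone demo of the start_new_rx_buffer() predicate. */
#include <assert.h>
#include <stdbool.h>

#define DEMO_MAX_BUFFER_OFFSET 4096

static bool demo_start_new_rx_buffer(int offset, unsigned long size, int head)
{
	if (offset == DEMO_MAX_BUFFER_OFFSET)
		return true;
	if ((offset + size > DEMO_MAX_BUFFER_OFFSET) &&
	    (size <= DEMO_MAX_BUFFER_OFFSET) && offset && !head)
		return true;
	return false;
}

int main(void)
{
	/* current buffer already full: always start a new one */
	assert(demo_start_new_rx_buffer(4096, 100, 0));
	/* frag fits in a fresh buffer and current one isn't empty: spill over */
	assert(demo_start_new_rx_buffer(3000, 2000, 0));
	/* same frag, but this is the head buffer: keep filling it */
	assert(!demo_start_new_rx_buffer(3000, 2000, 1));
	/* frag too large for any single buffer: it will be split instead */
	assert(!demo_start_new_rx_buffer(3000, 5000, 0));
	return 0;
}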
+
+/*
+ * Figure out how many ring slots we're going to need to send @skb to
+ * the guest. This function is essentially a dry run of
+ * netbk_gop_frag_copy.
+ */
+unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
+{
+ unsigned int count;
+ int i, copy_off;
+
+ count = DIV_ROUND_UP(
+ offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
+
+ copy_off = skb_headlen(skb) % PAGE_SIZE;
+
+ if (skb_shinfo(skb)->gso_size)
+ count++;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ unsigned long size = skb_shinfo(skb)->frags[i].size;
+ unsigned long bytes;
+ while (size > 0) {
+ BUG_ON(copy_off > MAX_BUFFER_OFFSET);
+
+ if (start_new_rx_buffer(copy_off, size, 0)) {
+ count++;
+ copy_off = 0;
+ }
+
+ bytes = size;
+ if (copy_off + bytes > MAX_BUFFER_OFFSET)
+ bytes = MAX_BUFFER_OFFSET - copy_off;
+
+ copy_off += bytes;
+ size -= bytes;
+ }
+ }
+ return count;
+}
+
+struct netrx_pending_operations {
+ unsigned copy_prod, copy_cons;
+ unsigned meta_prod, meta_cons;
+ struct gnttab_copy *copy;
+ struct netbk_rx_meta *meta;
+ int copy_off;
+ grant_ref_t copy_gref;
+};
+
+static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
+ struct netrx_pending_operations *npo)
+{
+ struct netbk_rx_meta *meta;
+ struct xen_netif_rx_request *req;
+
+ req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+
+ meta = npo->meta + npo->meta_prod++;
+ meta->gso_size = 0;
+ meta->size = 0;
+ meta->id = req->id;
+
+ npo->copy_off = 0;
+ npo->copy_gref = req->gref;
+
+ return meta;
+}
+
+/*
+ * Set up the grant operations for this fragment. If it's a flipping
+ * interface, we also set up the unmap request from here.
+ */
+static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+ struct netrx_pending_operations *npo,
+ struct page *page, unsigned long size,
+ unsigned long offset, int *head)
+{
+ struct gnttab_copy *copy_gop;
+ struct netbk_rx_meta *meta;
+ /*
+ * These variables are used iff get_page_ext returns true,
+ * in which case they are guaranteed to be initialized.
+ */
+ unsigned int uninitialized_var(group), uninitialized_var(idx);
+ int foreign = get_page_ext(page, &group, &idx);
+ unsigned long bytes;
+
+ /* Data must not cross a page boundary. */
+ BUG_ON(size + offset > PAGE_SIZE);
+
+ meta = npo->meta + npo->meta_prod - 1;
+
+ while (size > 0) {
+ BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
+
+ if (start_new_rx_buffer(npo->copy_off, size, *head)) {
+ /*
+ * Netfront requires there to be some data in the head
+ * buffer.
+ */
+ BUG_ON(*head);
+
+ meta = get_next_rx_buffer(vif, npo);
+ }
+
+ bytes = size;
+ if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
+ bytes = MAX_BUFFER_OFFSET - npo->copy_off;
+
+ copy_gop = npo->copy + npo->copy_prod++;
+ copy_gop->flags = GNTCOPY_dest_gref;
+ if (foreign) {
+ struct xen_netbk *netbk = &xen_netbk[group];
+ struct pending_tx_info *src_pend;
+
+ src_pend = &netbk->pending_tx_info[idx];
+
+ copy_gop->source.domid = src_pend->vif->domid;
+ copy_gop->source.u.ref = src_pend->req.gref;
+ copy_gop->flags |= GNTCOPY_source_gref;
+ } else {
+ void *vaddr = page_address(page);
+ copy_gop->source.domid = DOMID_SELF;
+ copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
+ }
+ copy_gop->source.offset = offset;
+ copy_gop->dest.domid = vif->domid;
+
+ copy_gop->dest.offset = npo->copy_off;
+ copy_gop->dest.u.ref = npo->copy_gref;
+ copy_gop->len = bytes;
+
+ npo->copy_off += bytes;
+ meta->size += bytes;
+
+ offset += bytes;
+ size -= bytes;
+
+ /* Leave a gap for the GSO descriptor. */
+ if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
+ vif->rx.req_cons++;
+
+ *head = 0; /* There must be something in this buffer now. */
+
+ }
+}
+
+/*
+ * Prepare an SKB to be transmitted to the frontend.
+ *
+ * This function is responsible for allocating grant operations, meta
+ * structures, etc.
+ *
+ * It returns the number of meta structures consumed. The number of
+ * ring slots used is always equal to the number of meta slots used
+ * plus the number of GSO descriptors used. Currently, we use either
+ * zero GSO descriptors (for non-GSO packets) or one descriptor (for
+ * frontend-side LRO).
+ */
+static int netbk_gop_skb(struct sk_buff *skb,
+ struct netrx_pending_operations *npo)
+{
+ struct xenvif *vif = netdev_priv(skb->dev);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int i;
+ struct xen_netif_rx_request *req;
+ struct netbk_rx_meta *meta;
+ unsigned char *data;
+ int head = 1;
+ int old_meta_prod;
+
+ old_meta_prod = npo->meta_prod;
+
+ /* Set up a GSO prefix descriptor, if necessary */
+ if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
+ req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+ meta = npo->meta + npo->meta_prod++;
+ meta->gso_size = skb_shinfo(skb)->gso_size;
+ meta->size = 0;
+ meta->id = req->id;
+ }
+
+ req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
+ meta = npo->meta + npo->meta_prod++;
+
+ if (!vif->gso_prefix)
+ meta->gso_size = skb_shinfo(skb)->gso_size;
+ else
+ meta->gso_size = 0;
+
+ meta->size = 0;
+ meta->id = req->id;
+ npo->copy_off = 0;
+ npo->copy_gref = req->gref;
+
+ data = skb->data;
+ while (data < skb_tail_pointer(skb)) {
+ unsigned int offset = offset_in_page(data);
+ unsigned int len = PAGE_SIZE - offset;
+
+ if (data + len > skb_tail_pointer(skb))
+ len = skb_tail_pointer(skb) - data;
+
+ netbk_gop_frag_copy(vif, skb, npo,
+ virt_to_page(data), len, offset, &head);
+ data += len;
+ }
+
+ for (i = 0; i < nr_frags; i++) {
+ netbk_gop_frag_copy(vif, skb, npo,
+ skb_shinfo(skb)->frags[i].page,
+ skb_shinfo(skb)->frags[i].size,
+ skb_shinfo(skb)->frags[i].page_offset,
+ &head);
+ }
+
+ return npo->meta_prod - old_meta_prod;
+}
+
+/*
+ * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
+ * used to set up the operations on the top of
+ * netrx_pending_operations, which have since been done. Check that
+ * they didn't give any errors and advance over them.
+ */
+static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
+ struct netrx_pending_operations *npo)
+{
+ struct gnttab_copy *copy_op;
+ int status = XEN_NETIF_RSP_OKAY;
+ int i;
+
+ for (i = 0; i < nr_meta_slots; i++) {
+ copy_op = npo->copy + npo->copy_cons++;
+ if (copy_op->status != GNTST_okay) {
+ netdev_dbg(vif->dev,
+ "Bad status %d from copy to DOM%d.\n",
+ copy_op->status, vif->domid);
+ status = XEN_NETIF_RSP_ERROR;
+ }
+ }
+
+ return status;
+}
+
+static void netbk_add_frag_responses(struct xenvif *vif, int status,
+ struct netbk_rx_meta *meta,
+ int nr_meta_slots)
+{
+ int i;
+ unsigned long offset;
+
+ /* No fragments used */
+ if (nr_meta_slots <= 1)
+ return;
+
+ nr_meta_slots--;
+
+ for (i = 0; i < nr_meta_slots; i++) {
+ int flags;
+ if (i == nr_meta_slots - 1)
+ flags = 0;
+ else
+ flags = XEN_NETRXF_more_data;
+
+ offset = 0;
+ make_rx_response(vif, meta[i].id, status, offset,
+ meta[i].size, flags);
+ }
+}
+
+struct skb_cb_overlay {
+ int meta_slots_used;
+};
+
+static void xen_netbk_rx_action(struct xen_netbk *netbk)
+{
+ struct xenvif *vif = NULL, *tmp;
+ s8 status;
+ u16 irq, flags;
+ struct xen_netif_rx_response *resp;
+ struct sk_buff_head rxq;
+ struct sk_buff *skb;
+ LIST_HEAD(notify);
+ int ret;
+ int nr_frags;
+ int count;
+ unsigned long offset;
+ struct skb_cb_overlay *sco;
+
+ struct netrx_pending_operations npo = {
+ .copy = netbk->grant_copy_op,
+ .meta = netbk->meta,
+ };
+
+ skb_queue_head_init(&rxq);
+
+ count = 0;
+
+ while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
+ vif = netdev_priv(skb->dev);
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ sco = (struct skb_cb_overlay *)skb->cb;
+ sco->meta_slots_used = netbk_gop_skb(skb, &npo);
+
+ count += nr_frags + 1;
+
+ __skb_queue_tail(&rxq, skb);
+
+ /* Filled the batch queue? */
+ if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
+ break;
+ }
+
+ BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
+
+ if (!npo.copy_prod)
+ return;
+
+ BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op,
+ npo.copy_prod);
+ BUG_ON(ret != 0);
+
+ while ((skb = __skb_dequeue(&rxq)) != NULL) {
+ sco = (struct skb_cb_overlay *)skb->cb;
+
+ vif = netdev_priv(skb->dev);
+
+ if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+ resp = RING_GET_RESPONSE(&vif->rx,
+ vif->rx.rsp_prod_pvt++);
+
+ resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
+
+ resp->offset = netbk->meta[npo.meta_cons].gso_size;
+ resp->id = netbk->meta[npo.meta_cons].id;
+ resp->status = sco->meta_slots_used;
+
+ npo.meta_cons++;
+ sco->meta_slots_used--;
+ }
+
+
+ vif->dev->stats.tx_bytes += skb->len;
+ vif->dev->stats.tx_packets++;
+
+ status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
+
+ if (sco->meta_slots_used == 1)
+ flags = 0;
+ else
+ flags = XEN_NETRXF_more_data;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
+ flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
+ else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ /* remote but checksummed. */
+ flags |= XEN_NETRXF_data_validated;
+
+ offset = 0;
+ resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
+ status, offset,
+ netbk->meta[npo.meta_cons].size,
+ flags);
+
+ if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+ struct xen_netif_extra_info *gso =
+ (struct xen_netif_extra_info *)
+ RING_GET_RESPONSE(&vif->rx,
+ vif->rx.rsp_prod_pvt++);
+
+ resp->flags |= XEN_NETRXF_extra_info;
+
+ gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
+ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+ gso->u.gso.pad = 0;
+ gso->u.gso.features = 0;
+
+ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ gso->flags = 0;
+ }
+
+ netbk_add_frag_responses(vif, status,
+ netbk->meta + npo.meta_cons + 1,
+ sco->meta_slots_used);
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
+ irq = vif->irq;
+ if (ret && list_empty(&vif->notify_list))
+ list_add_tail(&vif->notify_list, &notify);
+
+ xenvif_notify_tx_completion(vif);
+
+ xenvif_put(vif);
+ npo.meta_cons += sco->meta_slots_used;
+ dev_kfree_skb(skb);
+ }
+
+ list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
+ notify_remote_via_irq(vif->irq);
+ list_del_init(&vif->notify_list);
+ }
+
+ /* More work to do? */
+ if (!skb_queue_empty(&netbk->rx_queue) &&
+ !timer_pending(&netbk->net_timer))
+ xen_netbk_kick_thread(netbk);
+}
+
+void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
+{
+ struct xen_netbk *netbk = vif->netbk;
+
+ skb_queue_tail(&netbk->rx_queue, skb);
+
+ xen_netbk_kick_thread(netbk);
+}
+
+static void xen_netbk_alarm(unsigned long data)
+{
+ struct xen_netbk *netbk = (struct xen_netbk *)data;
+ xen_netbk_kick_thread(netbk);
+}
+
+static int __on_net_schedule_list(struct xenvif *vif)
+{
+ return !list_empty(&vif->schedule_list);
+}
+
+/* Must be called with net_schedule_list_lock held */
+static void remove_from_net_schedule_list(struct xenvif *vif)
+{
+ if (likely(__on_net_schedule_list(vif))) {
+ list_del_init(&vif->schedule_list);
+ xenvif_put(vif);
+ }
+}
+
+static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
+{
+ struct xenvif *vif = NULL;
+
+ spin_lock_irq(&netbk->net_schedule_list_lock);
+ if (list_empty(&netbk->net_schedule_list))
+ goto out;
+
+ vif = list_first_entry(&netbk->net_schedule_list,
+ struct xenvif, schedule_list);
+ if (!vif)
+ goto out;
+
+ xenvif_get(vif);
+
+ remove_from_net_schedule_list(vif);
+out:
+ spin_unlock_irq(&netbk->net_schedule_list_lock);
+ return vif;
+}
+
+void xen_netbk_schedule_xenvif(struct xenvif *vif)
+{
+ unsigned long flags;
+ struct xen_netbk *netbk = vif->netbk;
+
+ if (__on_net_schedule_list(vif))
+ goto kick;
+
+ spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
+ if (!__on_net_schedule_list(vif) &&
+ likely(xenvif_schedulable(vif))) {
+ list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
+ xenvif_get(vif);
+ }
+ spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
+
+kick:
+ smp_mb();
+ if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
+ !list_empty(&netbk->net_schedule_list))
+ xen_netbk_kick_thread(netbk);
+}
+
+void xen_netbk_deschedule_xenvif(struct xenvif *vif)
+{
+ struct xen_netbk *netbk = vif->netbk;
+ spin_lock_irq(&netbk->net_schedule_list_lock);
+ remove_from_net_schedule_list(vif);
+ spin_unlock_irq(&netbk->net_schedule_list_lock);
+}
+
+void xen_netbk_check_rx_xenvif(struct xenvif *vif)
+{
+ int more_to_do;
+
+ RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
+
+ if (more_to_do)
+ xen_netbk_schedule_xenvif(vif);
+}
+
+static void tx_add_credit(struct xenvif *vif)
+{
+ unsigned long max_burst, max_credit;
+
+ /*
+ * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
+ * Otherwise the interface can seize up due to insufficient credit.
+ */
+ max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
+ max_burst = min(max_burst, 131072UL);
+ max_burst = max(max_burst, vif->credit_bytes);
+
+ /* Take care that adding a new chunk of credit doesn't wrap to zero. */
+ max_credit = vif->remaining_credit + vif->credit_bytes;
+ if (max_credit < vif->remaining_credit)
+ max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
+
+ vif->remaining_credit = min(max_credit, max_burst);
+}
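Replenished credit is capped by a burst limit (the larger of one interval's allowance and the next request's size, itself clamped to 128 kB) and guarded against wrapping. A standalone demo of that arithmetic, where next_packet stands in for the head-of-ring request size that tx_add_credit() reads:

/* Standalone demo of the tx_add_credit() arithmetic. */
#include <assert.h>
#include <limits.h>

int main(void)
{
	unsigned long credit_bytes = 100000;	/* per-interval allowance */
	unsigned long remaining = 40000;	/* left over from the last interval */
	unsigned long next_packet = 60000;	/* size of the queued request */

	unsigned long max_burst = next_packet;
	if (max_burst > 131072UL)
		max_burst = 131072UL;		/* jumbo-frame ceiling */
	if (max_burst < credit_bytes)
		max_burst = credit_bytes;	/* at least one interval's worth */

	unsigned long max_credit = remaining + credit_bytes;
	if (max_credit < remaining)		/* wrapped past ULONG_MAX */
		max_credit = ULONG_MAX;

	unsigned long new_remaining =
		max_credit < max_burst ? max_credit : max_burst;
	assert(new_remaining == 100000);	/* clamped to the burst limit */
	return 0;
}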
+
+static void tx_credit_callback(unsigned long data)
+{
+ struct xenvif *vif = (struct xenvif *)data;
+ tx_add_credit(vif);
+ xen_netbk_check_rx_xenvif(vif);
+}
+
+static void netbk_tx_err(struct xenvif *vif,
+ struct xen_netif_tx_request *txp, RING_IDX end)
+{
+ RING_IDX cons = vif->tx.req_cons;
+
+ do {
+ make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+ if (cons >= end)
+ break;
+ txp = RING_GET_REQUEST(&vif->tx, cons++);
+ } while (1);
+ vif->tx.req_cons = cons;
+ xen_netbk_check_rx_xenvif(vif);
+ xenvif_put(vif);
+}
+
+static int netbk_count_requests(struct xenvif *vif,
+ struct xen_netif_tx_request *first,
+ struct xen_netif_tx_request *txp,
+ int work_to_do)
+{
+ RING_IDX cons = vif->tx.req_cons;
+ int frags = 0;
+
+ if (!(first->flags & XEN_NETTXF_more_data))
+ return 0;
+
+ do {
+ if (frags >= work_to_do) {
+ netdev_dbg(vif->dev, "Need more frags\n");
+ return -frags;
+ }
+
+ if (unlikely(frags >= MAX_SKB_FRAGS)) {
+ netdev_dbg(vif->dev, "Too many frags\n");
+ return -frags;
+ }
+
+ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+ sizeof(*txp));
+ if (txp->size > first->size) {
+ netdev_dbg(vif->dev, "Frags galore\n");
+ return -frags;
+ }
+
+ first->size -= txp->size;
+ frags++;
+
+ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+ netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+ txp->offset, txp->size);
+ return -frags;
+ }
+ } while ((txp++)->flags & XEN_NETTXF_more_data);
+ return frags;
+}
+
+static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
+ struct sk_buff *skb,
+ unsigned long pending_idx)
+{
+ struct page *page;
+ page = alloc_page(GFP_KERNEL|__GFP_COLD);
+ if (!page)
+ return NULL;
+ set_page_ext(page, netbk, pending_idx);
+ netbk->mmap_pages[pending_idx] = page;
+ return page;
+}
+
+static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
+ struct xenvif *vif,
+ struct sk_buff *skb,
+ struct xen_netif_tx_request *txp,
+ struct gnttab_copy *gop)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ skb_frag_t *frags = shinfo->frags;
+ unsigned long pending_idx = *((u16 *)skb->data);
+ int i, start;
+
+ /* Skip first skb fragment if it is on same page as header fragment. */
+ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+
+ for (i = start; i < shinfo->nr_frags; i++, txp++) {
+ struct page *page;
+ pending_ring_idx_t index;
+ struct pending_tx_info *pending_tx_info =
+ netbk->pending_tx_info;
+
+ index = pending_index(netbk->pending_cons++);
+ pending_idx = netbk->pending_ring[index];
+ page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+ if (!page)
+ return NULL;
+
+ netbk->mmap_pages[pending_idx] = page;
+
+ gop->source.u.ref = txp->gref;
+ gop->source.domid = vif->domid;
+ gop->source.offset = txp->offset;
+
+ gop->dest.u.gmfn = virt_to_mfn(page_address(page));
+ gop->dest.domid = DOMID_SELF;
+ gop->dest.offset = txp->offset;
+
+ gop->len = txp->size;
+ gop->flags = GNTCOPY_source_gref;
+
+ gop++;
+
+ memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
+ xenvif_get(vif);
+ pending_tx_info[pending_idx].vif = vif;
+ frags[i].page = (void *)pending_idx;
+ }
+
+ return gop;
+}
+
+static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+ struct sk_buff *skb,
+ struct gnttab_copy **gopp)
+{
+ struct gnttab_copy *gop = *gopp;
+ int pending_idx = *((u16 *)skb->data);
+ struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
+ struct xenvif *vif = pending_tx_info[pending_idx].vif;
+ struct xen_netif_tx_request *txp;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int nr_frags = shinfo->nr_frags;
+ int i, err, start;
+
+ /* Check status of header. */
+ err = gop->status;
+ if (unlikely(err)) {
+ pending_ring_idx_t index;
+ index = pending_index(netbk->pending_prod++);
+ txp = &pending_tx_info[pending_idx].req;
+ make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+ netbk->pending_ring[index] = pending_idx;
+ xenvif_put(vif);
+ }
+
+ /* Skip first skb fragment if it is on same page as header fragment. */
+ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+
+ for (i = start; i < nr_frags; i++) {
+ int j, newerr;
+ pending_ring_idx_t index;
+
+ pending_idx = (unsigned long)shinfo->frags[i].page;
+
+ /* Check error status: if okay then remember grant handle. */
+ newerr = (++gop)->status;
+ if (likely(!newerr)) {
+ /* Had a previous error? Invalidate this fragment. */
+ if (unlikely(err))
+ xen_netbk_idx_release(netbk, pending_idx);
+ continue;
+ }
+
+ /* Error on this fragment: respond to client with an error. */
+ txp = &netbk->pending_tx_info[pending_idx].req;
+ make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+ index = pending_index(netbk->pending_prod++);
+ netbk->pending_ring[index] = pending_idx;
+ xenvif_put(vif);
+
+ /* Not the first error? Preceding frags already invalidated. */
+ if (err)
+ continue;
+
+ /* First error: invalidate header and preceding fragments. */
+ pending_idx = *((u16 *)skb->data);
+ xen_netbk_idx_release(netbk, pending_idx);
+ for (j = start; j < i; j++) {
+ pending_idx = (unsigned long)shinfo->frags[j].page;
+ xen_netbk_idx_release(netbk, pending_idx);
+ }
+
+ /* Remember the error: invalidate all subsequent fragments. */
+ err = newerr;
+ }
+
+ *gopp = gop + 1;
+ return err;
+}
+
+static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int nr_frags = shinfo->nr_frags;
+ int i;
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = shinfo->frags + i;
+ struct xen_netif_tx_request *txp;
+ unsigned long pending_idx;
+
+ pending_idx = (unsigned long)frag->page;
+
+ txp = &netbk->pending_tx_info[pending_idx].req;
+ frag->page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
+ frag->size = txp->size;
+ frag->page_offset = txp->offset;
+
+ skb->len += txp->size;
+ skb->data_len += txp->size;
+ skb->truesize += txp->size;
+
+ /* Take an extra reference to offset xen_netbk_idx_release */
+ get_page(netbk->mmap_pages[pending_idx]);
+ xen_netbk_idx_release(netbk, pending_idx);
+ }
+}
+
+static int xen_netbk_get_extras(struct xenvif *vif,
+ struct xen_netif_extra_info *extras,
+ int work_to_do)
+{
+ struct xen_netif_extra_info extra;
+ RING_IDX cons = vif->tx.req_cons;
+
+ do {
+ if (unlikely(work_to_do-- <= 0)) {
+ netdev_dbg(vif->dev, "Missing extra info\n");
+ return -EBADR;
+ }
+
+ memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
+ sizeof(extra));
+ if (unlikely(!extra.type ||
+ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ vif->tx.req_cons = ++cons;
+ netdev_dbg(vif->dev,
+ "Invalid extra type: %d\n", extra.type);
+ return -EINVAL;
+ }
+
+ memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
+ vif->tx.req_cons = ++cons;
+ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
+
+ return work_to_do;
+}
+
+static int netbk_set_skb_gso(struct xenvif *vif,
+ struct sk_buff *skb,
+ struct xen_netif_extra_info *gso)
+{
+ if (!gso->u.gso.size) {
+ netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+ return -EINVAL;
+ }
+
+ /* Currently only TCPv4 S.O. is supported. */
+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+ netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+ return -EINVAL;
+ }
+
+ skb_shinfo(skb)->gso_size = gso->u.gso.size;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+ /* Header must be checked, and gso_segs computed. */
+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+ skb_shinfo(skb)->gso_segs = 0;
+
+ return 0;
+}
+
+static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ unsigned char *th;
+ int err = -EPROTO;
+ int recalculate_partial_csum = 0;
+
+ /*
+ * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+ * peers can fail to set NETRXF_csum_blank when sending a GSO
+ * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+ * recalculate the partial checksum.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+ vif->rx_gso_checksum_fixup++;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ recalculate_partial_csum = 1;
+ }
+
+ /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ goto out;
+
+ iph = (void *)skb->data;
+ th = skb->data + 4 * iph->ihl;
+ if (th >= skb_tail_pointer(skb))
+ goto out;
+
+ skb->csum_start = th - skb->head;
+ switch (iph->protocol) {
+ case IPPROTO_TCP:
+ skb->csum_offset = offsetof(struct tcphdr, check);
+
+ if (recalculate_partial_csum) {
+ struct tcphdr *tcph = (struct tcphdr *)th;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - iph->ihl*4,
+ IPPROTO_TCP, 0);
+ }
+ break;
+ case IPPROTO_UDP:
+ skb->csum_offset = offsetof(struct udphdr, check);
+
+ if (recalculate_partial_csum) {
+ struct udphdr *udph = (struct udphdr *)th;
+ udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - iph->ihl*4,
+ IPPROTO_UDP, 0);
+ }
+ break;
+ default:
+ if (net_ratelimit())
+ netdev_err(vif->dev,
+ "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
+ iph->protocol);
+ goto out;
+ }
+
+ if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
+ goto out;
+
+ err = 0;
+
+out:
+ return err;
+}
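checksum_setup() only records csum_start/csum_offset and, for the fixup case, re-seeds the TCP/UDP checksum field with the pseudo-header sum; the 16-bit one's-complement sum over the rest of the packet is finished later by the stack or the NIC, which is what CHECKSUM_PARTIAL means. As background, a standalone version of that fold follows, using the RFC 1071 worked example rather than any kernel csum helper.

/* Standalone demo of the 16-bit one's-complement checksum (RFC 1071). */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t csum_fold(const uint8_t *data, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)data[i] << 8 | data[i + 1];
	if (len & 1)
		sum += (uint32_t)data[len - 1] << 8;	/* pad the odd byte */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries */
	return (uint16_t)~sum;
}

int main(void)
{
	/* RFC 1071 worked example: 00 01 f2 03 f4 f5 f6 f7 -> checksum 0x220d */
	const uint8_t buf[] = { 0x00, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6, 0xf7 };

	assert(csum_fold(buf, sizeof(buf), 0) == 0x220d);
	return 0;
}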
+
+static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+{
+ unsigned long now = jiffies;
+ unsigned long next_credit =
+ vif->credit_timeout.expires +
+ msecs_to_jiffies(vif->credit_usec / 1000);
+
+ /* Timer could already be pending in rare cases. */
+ if (timer_pending(&vif->credit_timeout))
+ return true;
+
+ /* Passed the point where we can replenish credit? */
+ if (time_after_eq(now, next_credit)) {
+ vif->credit_timeout.expires = now;
+ tx_add_credit(vif);
+ }
+
+ /* Still too big to send right now? Set a callback. */
+ if (size > vif->remaining_credit) {
+ vif->credit_timeout.data =
+ (unsigned long)vif;
+ vif->credit_timeout.function =
+ tx_credit_callback;
+ mod_timer(&vif->credit_timeout,
+ next_credit);
+
+ return true;
+ }
+
+ return false;
+}
+
+static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+{
+ struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
+ struct sk_buff *skb;
+ int ret;
+
+ while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+ !list_empty(&netbk->net_schedule_list)) {
+ struct xenvif *vif;
+ struct xen_netif_tx_request txreq;
+ struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
+ struct page *page;
+ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
+ u16 pending_idx;
+ RING_IDX idx;
+ int work_to_do;
+ unsigned int data_len;
+ pending_ring_idx_t index;
+
+ /* Get a netif from the list with work to do. */
+ vif = poll_net_schedule_list(netbk);
+ if (!vif)
+ continue;
+
+ RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
+ if (!work_to_do) {
+ xenvif_put(vif);
+ continue;
+ }
+
+ idx = vif->tx.req_cons;
+ rmb(); /* Ensure that we see the request before we copy it. */
+ memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
+
+ /* Credit-based scheduling. */
+ if (txreq.size > vif->remaining_credit &&
+ tx_credit_exceeded(vif, txreq.size)) {
+ xenvif_put(vif);
+ continue;
+ }
+
+ vif->remaining_credit -= txreq.size;
+
+ work_to_do--;
+ vif->tx.req_cons = ++idx;
+
+ memset(extras, 0, sizeof(extras));
+ if (txreq.flags & XEN_NETTXF_extra_info) {
+ work_to_do = xen_netbk_get_extras(vif, extras,
+ work_to_do);
+ idx = vif->tx.req_cons;
+ if (unlikely(work_to_do < 0)) {
+ netbk_tx_err(vif, &txreq, idx);
+ continue;
+ }
+ }
+
+ ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+ if (unlikely(ret < 0)) {
+ netbk_tx_err(vif, &txreq, idx - ret);
+ continue;
+ }
+ idx += ret;
+
+ if (unlikely(txreq.size < ETH_HLEN)) {
+ netdev_dbg(vif->dev,
+ "Bad packet size: %d\n", txreq.size);
+ netbk_tx_err(vif, &txreq, idx);
+ continue;
+ }
+
+ /* No crossing a page as the payload mustn't fragment. */
+ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+ netdev_dbg(vif->dev,
+ "txreq.offset: %x, size: %u, end: %lu\n",
+ txreq.offset, txreq.size,
+ (txreq.offset&~PAGE_MASK) + txreq.size);
+ netbk_tx_err(vif, &txreq, idx);
+ continue;
+ }
+
+ index = pending_index(netbk->pending_cons);
+ pending_idx = netbk->pending_ring[index];
+
+ data_len = (txreq.size > PKT_PROT_LEN &&
+ ret < MAX_SKB_FRAGS) ?
+ PKT_PROT_LEN : txreq.size;
+
+ skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(skb == NULL)) {
+ netdev_dbg(vif->dev,
+ "Can't allocate a skb in start_xmit.\n");
+ netbk_tx_err(vif, &txreq, idx);
+ break;
+ }
+
+ /* Packets passed to netif_rx() must have some headroom. */
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
+ struct xen_netif_extra_info *gso;
+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+ if (netbk_set_skb_gso(vif, skb, gso)) {
+ kfree_skb(skb);
+ netbk_tx_err(vif, &txreq, idx);
+ continue;
+ }
+ }
+
+ /* XXX could copy straight to head */
+ page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+ if (!page) {
+ kfree_skb(skb);
+ netbk_tx_err(vif, &txreq, idx);
+ continue;
+ }
+
+ netbk->mmap_pages[pending_idx] = page;
+
+ gop->source.u.ref = txreq.gref;
+ gop->source.domid = vif->domid;
+ gop->source.offset = txreq.offset;
+
+ gop->dest.u.gmfn = virt_to_mfn(page_address(page));
+ gop->dest.domid = DOMID_SELF;
+ gop->dest.offset = txreq.offset;
+
+ gop->len = txreq.size;
+ gop->flags = GNTCOPY_source_gref;
+
+ gop++;
+
+ memcpy(&netbk->pending_tx_info[pending_idx].req,
+ &txreq, sizeof(txreq));
+ netbk->pending_tx_info[pending_idx].vif = vif;
+ *((u16 *)skb->data) = pending_idx;
+
+ __skb_put(skb, data_len);
+
+ skb_shinfo(skb)->nr_frags = ret;
+ if (data_len < txreq.size) {
+ skb_shinfo(skb)->nr_frags++;
+ skb_shinfo(skb)->frags[0].page =
+ (void *)(unsigned long)pending_idx;
+ } else {
+ /* Discriminate from any valid pending_idx value. */
+ skb_shinfo(skb)->frags[0].page = (void *)~0UL;
+ }
+
+ __skb_queue_tail(&netbk->tx_queue, skb);
+
+ netbk->pending_cons++;
+
+ request_gop = xen_netbk_get_requests(netbk, vif,
+ skb, txfrags, gop);
+ if (request_gop == NULL) {
+ kfree_skb(skb);
+ netbk_tx_err(vif, &txreq, idx);
+ continue;
+ }
+ gop = request_gop;
+
+ vif->tx.req_cons = idx;
+ xen_netbk_check_rx_xenvif(vif);
+
+ if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
+ break;
+ }
+
+ return gop - netbk->tx_copy_ops;
+}
+
+static void xen_netbk_tx_submit(struct xen_netbk *netbk)
+{
+ struct gnttab_copy *gop = netbk->tx_copy_ops;
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
+ struct xen_netif_tx_request *txp;
+ struct xenvif *vif;
+ u16 pending_idx;
+ unsigned data_len;
+
+ pending_idx = *((u16 *)skb->data);
+ vif = netbk->pending_tx_info[pending_idx].vif;
+ txp = &netbk->pending_tx_info[pending_idx].req;
+
+ /* Check the remap error code. */
+ if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
+ netdev_dbg(vif->dev, "netback grant failed.\n");
+ skb_shinfo(skb)->nr_frags = 0;
+ kfree_skb(skb);
+ continue;
+ }
+
+ data_len = skb->len;
+ memcpy(skb->data,
+ (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
+ data_len);
+ if (data_len < txp->size) {
+ /* Append the packet payload as a fragment. */
+ txp->offset += data_len;
+ txp->size -= data_len;
+ } else {
+ /* Schedule a response immediately. */
+ xen_netbk_idx_release(netbk, pending_idx);
+ }
+
+ if (txp->flags & XEN_NETTXF_csum_blank)
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ else if (txp->flags & XEN_NETTXF_data_validated)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ xen_netbk_fill_frags(netbk, skb);
+
+ /*
+ * If the initial fragment was < PKT_PROT_LEN then
+ * pull through some bytes from the other fragments to
+ * increase the linear region to PKT_PROT_LEN bytes.
+ */
+ if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
+ int target = min_t(int, skb->len, PKT_PROT_LEN);
+ __pskb_pull_tail(skb, target - skb_headlen(skb));
+ }
+
+ skb->dev = vif->dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (checksum_setup(vif, skb)) {
+ netdev_dbg(vif->dev,
+ "Can't setup checksum in net_tx_action\n");
+ kfree_skb(skb);
+ continue;
+ }
+
+ vif->dev->stats.rx_bytes += skb->len;
+ vif->dev->stats.rx_packets++;
+
+ xenvif_receive_skb(vif, skb);
+ }
+}
+
+/* Called after netfront has transmitted */
+static void xen_netbk_tx_action(struct xen_netbk *netbk)
+{
+ unsigned nr_gops;
+ int ret;
+
+ nr_gops = xen_netbk_tx_build_gops(netbk);
+
+ if (nr_gops == 0)
+ return;
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
+ netbk->tx_copy_ops, nr_gops);
+ BUG_ON(ret);
+
+ xen_netbk_tx_submit(netbk);
+
+}
+
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+{
+ struct xenvif *vif;
+ struct pending_tx_info *pending_tx_info;
+ pending_ring_idx_t index;
+
+ /* Already complete? */
+ if (netbk->mmap_pages[pending_idx] == NULL)
+ return;
+
+ pending_tx_info = &netbk->pending_tx_info[pending_idx];
+
+ vif = pending_tx_info->vif;
+
+ make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+
+ index = pending_index(netbk->pending_prod++);
+ netbk->pending_ring[index] = pending_idx;
+
+ xenvif_put(vif);
+
+ netbk->mmap_pages[pending_idx]->mapping = 0;
+ put_page(netbk->mmap_pages[pending_idx]);
+ netbk->mmap_pages[pending_idx] = NULL;
+}
+
+static void make_tx_response(struct xenvif *vif,
+ struct xen_netif_tx_request *txp,
+ s8 st)
+{
+ RING_IDX i = vif->tx.rsp_prod_pvt;
+ struct xen_netif_tx_response *resp;
+ int notify;
+
+ resp = RING_GET_RESPONSE(&vif->tx, i);
+ resp->id = txp->id;
+ resp->status = st;
+
+ if (txp->flags & XEN_NETTXF_extra_info)
+ RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+
+ vif->tx.rsp_prod_pvt = ++i;
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
+ if (notify)
+ notify_remote_via_irq(vif->irq);
+}
+
+static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
+ u16 id,
+ s8 st,
+ u16 offset,
+ u16 size,
+ u16 flags)
+{
+ RING_IDX i = vif->rx.rsp_prod_pvt;
+ struct xen_netif_rx_response *resp;
+
+ resp = RING_GET_RESPONSE(&vif->rx, i);
+ resp->offset = offset;
+ resp->flags = flags;
+ resp->id = id;
+ resp->status = (s16)size;
+ if (st < 0)
+ resp->status = (s16)st;
+
+ vif->rx.rsp_prod_pvt = ++i;
+
+ return resp;
+}
+
+static inline int rx_work_todo(struct xen_netbk *netbk)
+{
+ return !skb_queue_empty(&netbk->rx_queue);
+}
+
+static inline int tx_work_todo(struct xen_netbk *netbk)
+{
+ if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+ !list_empty(&netbk->net_schedule_list))
+ return 1;
+
+ return 0;
+}
+
+static int xen_netbk_kthread(void *data)
+{
+ struct xen_netbk *netbk = data;
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(netbk->wq,
+ rx_work_todo(netbk) ||
+ tx_work_todo(netbk) ||
+ kthread_should_stop());
+ cond_resched();
+
+ if (kthread_should_stop())
+ break;
+
+ if (rx_work_todo(netbk))
+ xen_netbk_rx_action(netbk);
+
+ if (tx_work_todo(netbk))
+ xen_netbk_tx_action(netbk);
+ }
+
+ return 0;
+}
+
+void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
+{
+ struct gnttab_unmap_grant_ref op;
+
+ if (vif->tx.sring) {
+ gnttab_set_unmap_op(&op, (unsigned long)vif->tx_comms_area->addr,
+ GNTMAP_host_map, vif->tx_shmem_handle);
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+ BUG();
+ }
+
+ if (vif->rx.sring) {
+ gnttab_set_unmap_op(&op, (unsigned long)vif->rx_comms_area->addr,
+ GNTMAP_host_map, vif->rx_shmem_handle);
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+ BUG();
+ }
+ if (vif->rx_comms_area)
+ free_vm_area(vif->rx_comms_area);
+ if (vif->tx_comms_area)
+ free_vm_area(vif->tx_comms_area);
+}
+
+int xen_netbk_map_frontend_rings(struct xenvif *vif,
+ grant_ref_t tx_ring_ref,
+ grant_ref_t rx_ring_ref)
+{
+ struct gnttab_map_grant_ref op;
+ struct xen_netif_tx_sring *txs;
+ struct xen_netif_rx_sring *rxs;
+
+ int err = -ENOMEM;
+
+ vif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
+ if (vif->tx_comms_area == NULL)
+ goto err;
+
+ vif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
+ if (vif->rx_comms_area == NULL)
+ goto err;
+
+ gnttab_set_map_op(&op, (unsigned long)vif->tx_comms_area->addr,
+ GNTMAP_host_map, tx_ring_ref, vif->domid);
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+ BUG();
+
+ if (op.status) {
+ netdev_warn(vif->dev,
+ "failed to map tx ring. err=%d status=%d\n",
+ err, op.status);
+ err = op.status;
+ goto err;
+ }
+
+ vif->tx_shmem_ref = tx_ring_ref;
+ vif->tx_shmem_handle = op.handle;
+
+ txs = (struct xen_netif_tx_sring *)vif->tx_comms_area->addr;
+ BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
+
+ gnttab_set_map_op(&op, (unsigned long)vif->rx_comms_area->addr,
+ GNTMAP_host_map, rx_ring_ref, vif->domid);
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+ BUG();
+
+ if (op.status) {
+ netdev_warn(vif->dev,
+ "failed to map rx ring. err=%d status=%d\n",
+ err, op.status);
+ err = op.status;
+ goto err;
+ }
+
+ vif->rx_shmem_ref = rx_ring_ref;
+ vif->rx_shmem_handle = op.handle;
+ vif->rx_req_cons_peek = 0;
+
+ rxs = (struct xen_netif_rx_sring *)vif->rx_comms_area->addr;
+ BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
+
+ return 0;
+
+err:
+ xen_netbk_unmap_frontend_rings(vif);
+ return err;
+}
+
+static int __init netback_init(void)
+{
+ int i;
+ int rc = 0;
+ int group;
+
+ if (!xen_pv_domain())
+ return -ENODEV;
+
+ xen_netbk_group_nr = num_online_cpus();
+ xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
+ if (!xen_netbk) {
+ printk(KERN_ALERT "%s: out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ for (group = 0; group < xen_netbk_group_nr; group++) {
+ struct xen_netbk *netbk = &xen_netbk[group];
+ skb_queue_head_init(&netbk->rx_queue);
+ skb_queue_head_init(&netbk->tx_queue);
+
+ init_timer(&netbk->net_timer);
+ netbk->net_timer.data = (unsigned long)netbk;
+ netbk->net_timer.function = xen_netbk_alarm;
+
+ netbk->pending_cons = 0;
+ netbk->pending_prod = MAX_PENDING_REQS;
+ for (i = 0; i < MAX_PENDING_REQS; i++)
+ netbk->pending_ring[i] = i;
+
+ init_waitqueue_head(&netbk->wq);
+ netbk->task = kthread_create(xen_netbk_kthread,
+ (void *)netbk,
+ "netback/%u", group);
+
+ if (IS_ERR(netbk->task)) {
+			printk(KERN_ALERT "kthread_create() failed at netback\n");
+ del_timer(&netbk->net_timer);
+ rc = PTR_ERR(netbk->task);
+ goto failed_init;
+ }
+
+ kthread_bind(netbk->task, group);
+
+ INIT_LIST_HEAD(&netbk->net_schedule_list);
+
+ spin_lock_init(&netbk->net_schedule_list_lock);
+
+ atomic_set(&netbk->netfront_count, 0);
+
+ wake_up_process(netbk->task);
+ }
+
+ rc = xenvif_xenbus_init();
+ if (rc)
+ goto failed_init;
+
+ return 0;
+
+failed_init:
+ while (--group >= 0) {
+ struct xen_netbk *netbk = &xen_netbk[group];
+ for (i = 0; i < MAX_PENDING_REQS; i++) {
+ if (netbk->mmap_pages[i])
+ __free_page(netbk->mmap_pages[i]);
+ }
+ del_timer(&netbk->net_timer);
+ kthread_stop(netbk->task);
+ }
+ vfree(xen_netbk);
+ return rc;
+}
+
+module_init(netback_init);
+
+MODULE_LICENSE("Dual BSD/GPL");
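The pending-slot bookkeeping above (xen_netbk_idx_release() pushing a freed index back onto pending_ring[], tx_work_todo() requiring at least MAX_SKB_FRAGS free slots before scheduling more work) follows the usual power-of-two producer/consumer ring pattern. The sketch below illustrates that pattern in isolation; the demo_* names and the ring size are hypothetical, not the driver's own helpers.

#include <stdio.h>

#define DEMO_RING_SIZE 8u			/* must be a power of two */

static unsigned demo_ring[DEMO_RING_SIZE];
static unsigned demo_prod = DEMO_RING_SIZE;	/* free-running indices */
static unsigned demo_cons;

/* Wrap a free-running index onto the ring, as pending_index() does. */
static unsigned demo_index(unsigned idx)
{
	return idx & (DEMO_RING_SIZE - 1);
}

/* Slots currently handed out, in the style of nr_pending_reqs(). */
static unsigned demo_in_use(void)
{
	return DEMO_RING_SIZE - demo_prod + demo_cons;
}

/* Take the next free slot id; the caller has checked demo_in_use(). */
static unsigned demo_get(void)
{
	return demo_ring[demo_index(demo_cons++)];
}

/* Return a slot id to the ring, as xen_netbk_idx_release() does. */
static void demo_put(unsigned id)
{
	demo_ring[demo_index(demo_prod++)] = id;
}

int main(void)
{
	unsigned i, id;

	for (i = 0; i < DEMO_RING_SIZE; i++)	/* seed with the identity map */
		demo_ring[i] = i;

	id = demo_get();
	printf("got slot %u, %u in use\n", id, demo_in_use());
	demo_put(id);
	printf("released, %u in use\n", demo_in_use());
	return 0;
}
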
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
new file mode 100644
index 000000000000..22b8c3505991
--- /dev/null
+++ b/drivers/net/xen-netback/xenbus.c
@@ -0,0 +1,490 @@
+/*
+ * Xenbus code for netif backend
+ *
+ * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
+ * Copyright (C) 2005 XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include "common.h"
+
+struct backend_info {
+ struct xenbus_device *dev;
+ struct xenvif *vif;
+ enum xenbus_state frontend_state;
+ struct xenbus_watch hotplug_status_watch;
+	u8 have_hotplug_status_watch:1;
+};
+
+static int connect_rings(struct backend_info *);
+static void connect(struct backend_info *);
+static void backend_create_xenvif(struct backend_info *be);
+static void unregister_hotplug_status_watch(struct backend_info *be);
+
+static int netback_remove(struct xenbus_device *dev)
+{
+ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+ unregister_hotplug_status_watch(be);
+ if (be->vif) {
+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
+ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
+ xenvif_disconnect(be->vif);
+ be->vif = NULL;
+ }
+ kfree(be);
+ dev_set_drvdata(&dev->dev, NULL);
+ return 0;
+}
+
+
+/**
+ * Entry point to this code when a new device is created. Allocate the basic
+ * structures and switch to InitWait.
+ */
+static int netback_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ const char *message;
+ struct xenbus_transaction xbt;
+ int err;
+ int sg;
+ struct backend_info *be = kzalloc(sizeof(struct backend_info),
+ GFP_KERNEL);
+ if (!be) {
+ xenbus_dev_fatal(dev, -ENOMEM,
+ "allocating backend structure");
+ return -ENOMEM;
+ }
+
+ be->dev = dev;
+ dev_set_drvdata(&dev->dev, be);
+
+ sg = 1;
+
+ do {
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "starting transaction");
+ goto fail;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
+ if (err) {
+ message = "writing feature-sg";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
+ "%d", sg);
+ if (err) {
+ message = "writing feature-gso-tcpv4";
+ goto abort_transaction;
+ }
+
+ /* We support rx-copy path. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-rx-copy", "%d", 1);
+ if (err) {
+ message = "writing feature-rx-copy";
+ goto abort_transaction;
+ }
+
+ /*
+ * We don't support rx-flip path (except old guests who don't
+ * grok this feature flag).
+ */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-rx-flip", "%d", 0);
+ if (err) {
+ message = "writing feature-rx-flip";
+ goto abort_transaction;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ } while (err == -EAGAIN);
+
+ if (err) {
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto fail;
+ }
+
+ err = xenbus_switch_state(dev, XenbusStateInitWait);
+ if (err)
+ goto fail;
+
+ /* This kicks hotplug scripts, so do it immediately. */
+ backend_create_xenvif(be);
+
+ return 0;
+
+abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, err, "%s", message);
+fail:
+	pr_debug("failed\n");
+ netback_remove(dev);
+ return err;
+}
+
+
+/*
+ * Handle the creation of the hotplug script environment. We add the script
+ * and vif variables to the environment, for the benefit of the vif-* hotplug
+ * scripts.
+ */
+static int netback_uevent(struct xenbus_device *xdev,
+ struct kobj_uevent_env *env)
+{
+ struct backend_info *be = dev_get_drvdata(&xdev->dev);
+ char *val;
+
+ val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
+ if (IS_ERR(val)) {
+ int err = PTR_ERR(val);
+ xenbus_dev_fatal(xdev, err, "reading script");
+ return err;
+ } else {
+ if (add_uevent_var(env, "script=%s", val)) {
+ kfree(val);
+ return -ENOMEM;
+ }
+ kfree(val);
+ }
+
+ if (!be || !be->vif)
+ return 0;
+
+ return add_uevent_var(env, "vif=%s", be->vif->dev->name);
+}
+
+
+static void backend_create_xenvif(struct backend_info *be)
+{
+ int err;
+ long handle;
+ struct xenbus_device *dev = be->dev;
+
+ if (be->vif != NULL)
+ return;
+
+ err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
+ if (err != 1) {
+ xenbus_dev_fatal(dev, err, "reading handle");
+ return;
+ }
+
+ be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
+ if (IS_ERR(be->vif)) {
+ err = PTR_ERR(be->vif);
+ be->vif = NULL;
+ xenbus_dev_fatal(dev, err, "creating interface");
+ return;
+ }
+
+ kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
+}
+
+
+static void disconnect_backend(struct xenbus_device *dev)
+{
+ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+ if (be->vif) {
+ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
+ xenvif_disconnect(be->vif);
+ be->vif = NULL;
+ }
+}
+
+/**
+ * Callback received when the frontend's state changes.
+ */
+static void frontend_changed(struct xenbus_device *dev,
+ enum xenbus_state frontend_state)
+{
+ struct backend_info *be = dev_get_drvdata(&dev->dev);
+
+	pr_debug("frontend state %s\n", xenbus_strstate(frontend_state));
+
+ be->frontend_state = frontend_state;
+
+ switch (frontend_state) {
+ case XenbusStateInitialising:
+ if (dev->state == XenbusStateClosed) {
+ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
+ __func__, dev->nodename);
+ xenbus_switch_state(dev, XenbusStateInitWait);
+ }
+ break;
+
+ case XenbusStateInitialised:
+ break;
+
+ case XenbusStateConnected:
+ if (dev->state == XenbusStateConnected)
+ break;
+ backend_create_xenvif(be);
+ if (be->vif)
+ connect(be);
+ break;
+
+ case XenbusStateClosing:
+ if (be->vif)
+ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
+ disconnect_backend(dev);
+ xenbus_switch_state(dev, XenbusStateClosing);
+ break;
+
+ case XenbusStateClosed:
+ xenbus_switch_state(dev, XenbusStateClosed);
+ if (xenbus_dev_is_online(dev))
+ break;
+ /* fall through if not online */
+ case XenbusStateUnknown:
+ device_unregister(&dev->dev);
+ break;
+
+ default:
+ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
+ frontend_state);
+ break;
+ }
+}
+
+
+static void xen_net_read_rate(struct xenbus_device *dev,
+ unsigned long *bytes, unsigned long *usec)
+{
+ char *s, *e;
+ unsigned long b, u;
+ char *ratestr;
+
+ /* Default to unlimited bandwidth. */
+ *bytes = ~0UL;
+ *usec = 0;
+
+ ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
+ if (IS_ERR(ratestr))
+ return;
+
+ s = ratestr;
+ b = simple_strtoul(s, &e, 10);
+ if ((s == e) || (*e != ','))
+ goto fail;
+
+ s = e + 1;
+ u = simple_strtoul(s, &e, 10);
+ if ((s == e) || (*e != '\0'))
+ goto fail;
+
+ *bytes = b;
+ *usec = u;
+
+ kfree(ratestr);
+ return;
+
+ fail:
+ pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
+ kfree(ratestr);
+}
+
+static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
+{
+ char *s, *e, *macstr;
+ int i;
+
+ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
+ if (IS_ERR(macstr))
+ return PTR_ERR(macstr);
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac[i] = simple_strtoul(s, &e, 16);
+ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
+ kfree(macstr);
+ return -ENOENT;
+ }
+ s = e+1;
+ }
+
+ kfree(macstr);
+ return 0;
+}
+
+static void unregister_hotplug_status_watch(struct backend_info *be)
+{
+ if (be->have_hotplug_status_watch) {
+ unregister_xenbus_watch(&be->hotplug_status_watch);
+ kfree(be->hotplug_status_watch.node);
+ }
+ be->have_hotplug_status_watch = 0;
+}
+
+static void hotplug_status_changed(struct xenbus_watch *watch,
+ const char **vec,
+ unsigned int vec_size)
+{
+ struct backend_info *be = container_of(watch,
+ struct backend_info,
+ hotplug_status_watch);
+ char *str;
+ unsigned int len;
+
+ str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
+ if (IS_ERR(str))
+ return;
+ if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
+ xenbus_switch_state(be->dev, XenbusStateConnected);
+ /* Not interested in this watch anymore. */
+ unregister_hotplug_status_watch(be);
+ }
+ kfree(str);
+}
+
+static void connect(struct backend_info *be)
+{
+ int err;
+ struct xenbus_device *dev = be->dev;
+
+ err = connect_rings(be);
+ if (err)
+ return;
+
+ err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
+ return;
+ }
+
+ xen_net_read_rate(dev, &be->vif->credit_bytes,
+ &be->vif->credit_usec);
+ be->vif->remaining_credit = be->vif->credit_bytes;
+
+ unregister_hotplug_status_watch(be);
+ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
+ hotplug_status_changed,
+ "%s/%s", dev->nodename, "hotplug-status");
+ if (err) {
+ /* Switch now, since we can't do a watch. */
+ xenbus_switch_state(dev, XenbusStateConnected);
+ } else {
+ be->have_hotplug_status_watch = 1;
+ }
+
+ netif_wake_queue(be->vif->dev);
+}
+
+
+static int connect_rings(struct backend_info *be)
+{
+ struct xenvif *vif = be->vif;
+ struct xenbus_device *dev = be->dev;
+ unsigned long tx_ring_ref, rx_ring_ref;
+ unsigned int evtchn, rx_copy;
+ int err;
+ int val;
+
+ err = xenbus_gather(XBT_NIL, dev->otherend,
+ "tx-ring-ref", "%lu", &tx_ring_ref,
+ "rx-ring-ref", "%lu", &rx_ring_ref,
+ "event-channel", "%u", &evtchn, NULL);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "reading %s/ring-ref and event-channel",
+ dev->otherend);
+ return err;
+ }
+
+ err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
+ &rx_copy);
+ if (err == -ENOENT) {
+ err = 0;
+ rx_copy = 0;
+ }
+ if (err < 0) {
+ xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
+ dev->otherend);
+ return err;
+ }
+ if (!rx_copy)
+ return -EOPNOTSUPP;
+
+ if (vif->dev->tx_queue_len != 0) {
+ if (xenbus_scanf(XBT_NIL, dev->otherend,
+ "feature-rx-notify", "%d", &val) < 0)
+ val = 0;
+ if (val)
+ vif->can_queue = 1;
+ else
+ /* Must be non-zero for pfifo_fast to work. */
+ vif->dev->tx_queue_len = 1;
+ }
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
+ "%d", &val) < 0)
+ val = 0;
+ vif->can_sg = !!val;
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
+ "%d", &val) < 0)
+ val = 0;
+ vif->gso = !!val;
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
+ "%d", &val) < 0)
+ val = 0;
+ vif->gso_prefix = !!val;
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
+ "%d", &val) < 0)
+ val = 0;
+ vif->csum = !val;
+
+ /* Map the shared frame, irq etc. */
+ err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref, evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "mapping shared-frames %lu/%lu port %u",
+ tx_ring_ref, rx_ring_ref, evtchn);
+ return err;
+ }
+ return 0;
+}
+
+
+/* ** Driver Registration ** */
+
+
+static const struct xenbus_device_id netback_ids[] = {
+ { "vif" },
+ { "" }
+};
+
+
+static struct xenbus_driver netback = {
+ .name = "vif",
+ .owner = THIS_MODULE,
+ .ids = netback_ids,
+ .probe = netback_probe,
+ .remove = netback_remove,
+ .uevent = netback_uevent,
+ .otherend_changed = frontend_changed,
+};
+
+int xenvif_xenbus_init(void)
+{
+ return xenbus_register_backend(&netback);
+}
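xen_net_read_rate() above expects the xenstore "rate" node to carry two comma-separated decimal fields, <bytes>,<usec>, and silently falls back to unlimited bandwidth when the string does not parse. A user-space sketch of the same parse, assuming the hypothetical helper name demo_parse_rate():

#include <stdio.h>
#include <stdlib.h>

/* Parse "<bytes>,<usec>" as xen_net_read_rate() does; 0 on success. */
static int demo_parse_rate(const char *s, unsigned long *bytes,
			   unsigned long *usec)
{
	char *e;
	unsigned long b, u;

	b = strtoul(s, &e, 10);
	if (e == s || *e != ',')
		return -1;

	s = e + 1;
	u = strtoul(s, &e, 10);
	if (e == s || *e != '\0')
		return -1;

	*bytes = b;
	*usec = u;
	return 0;
}

int main(void)
{
	unsigned long bytes = ~0UL, usec = 0;	/* default: unlimited */

	if (demo_parse_rate("10000,50", &bytes, &usec))
		fprintf(stderr, "unparseable rate, traffic unlimited\n");
	else
		printf("credit: %lu bytes per %lu usec\n", bytes, usec);
	return 0;
}
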
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 458bb57914a3..5c8d9c385be0 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -66,8 +66,8 @@ struct netfront_cb {
#define GRANT_INVALID_REF 0
-#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
-#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
+#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
+#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
struct netfront_info {
@@ -120,6 +120,9 @@ struct netfront_info {
unsigned long rx_pfn_array[NET_RX_RING_SIZE];
struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+ /* Statistics */
+ unsigned long rx_gso_checksum_fixup;
};
struct netfront_rx_info {
@@ -356,7 +359,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
struct xen_netif_tx_response *txrsp;
txrsp = RING_GET_RESPONSE(&np->tx, cons);
- if (txrsp->status == NETIF_RSP_NULL)
+ if (txrsp->status == XEN_NETIF_RSP_NULL)
continue;
id = txrsp->id;
@@ -413,7 +416,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
larger than a page), split it into page-sized chunks. */
while (len > PAGE_SIZE - offset) {
tx->size = PAGE_SIZE - offset;
- tx->flags |= NETTXF_more_data;
+ tx->flags |= XEN_NETTXF_more_data;
len -= tx->size;
data += tx->size;
offset = 0;
@@ -439,7 +442,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
- tx->flags |= NETTXF_more_data;
+ tx->flags |= XEN_NETTXF_more_data;
id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
np->tx_skbs[id].skb = skb_get(skb);
@@ -488,7 +491,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!netif_carrier_ok(dev) ||
(frags > 1 && !xennet_can_sg(dev)) ||
- netif_needs_gso(dev, skb))) {
+ netif_needs_gso(skb, netif_skb_features(skb)))) {
spin_unlock_irq(&np->tx_lock);
goto drop;
}
@@ -514,10 +517,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx->flags = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL)
/* local packet? */
- tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
+ tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
/* remote but checksummed. */
- tx->flags |= NETTXF_data_validated;
+ tx->flags |= XEN_NETTXF_data_validated;
if (skb_shinfo(skb)->gso_size) {
struct xen_netif_extra_info *gso;
@@ -528,7 +531,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (extra)
extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
else
- tx->flags |= NETTXF_extra_info;
+ tx->flags |= XEN_NETTXF_extra_info;
gso->u.gso.size = skb_shinfo(skb)->gso_size;
gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
@@ -648,7 +651,7 @@ static int xennet_get_responses(struct netfront_info *np,
int err = 0;
unsigned long ret;
- if (rx->flags & NETRXF_extra_info) {
+ if (rx->flags & XEN_NETRXF_extra_info) {
err = xennet_get_extras(np, extras, rp);
cons = np->rx.rsp_cons;
}
@@ -685,7 +688,7 @@ static int xennet_get_responses(struct netfront_info *np,
__skb_queue_tail(list, skb);
next:
- if (!(rx->flags & NETRXF_more_data))
+ if (!(rx->flags & XEN_NETRXF_more_data))
break;
if (cons + frags == rp) {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
return cons;
}
-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
struct iphdr *iph;
unsigned char *th;
int err = -EPROTO;
+ int recalculate_partial_csum = 0;
+
+ /*
+ * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+ * peers can fail to set NETRXF_csum_blank when sending a GSO
+ * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+ * recalculate the partial checksum.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+ struct netfront_info *np = netdev_priv(dev);
+ np->rx_gso_checksum_fixup++;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ recalculate_partial_csum = 1;
+ }
+
+ /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
if (skb->protocol != htons(ETH_P_IP))
goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
switch (iph->protocol) {
case IPPROTO_TCP:
skb->csum_offset = offsetof(struct tcphdr, check);
+
+ if (recalculate_partial_csum) {
+ struct tcphdr *tcph = (struct tcphdr *)th;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - iph->ihl*4,
+ IPPROTO_TCP, 0);
+ }
break;
case IPPROTO_UDP:
skb->csum_offset = offsetof(struct udphdr, check);
+
+ if (recalculate_partial_csum) {
+ struct udphdr *udph = (struct udphdr *)th;
+ udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - iph->ihl*4,
+ IPPROTO_UDP, 0);
+ }
break;
default:
if (net_ratelimit())
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
/* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb_checksum_setup(skb)) {
- kfree_skb(skb);
- packets_dropped++;
- dev->stats.rx_errors++;
- continue;
- }
+ if (checksum_setup(dev, skb)) {
+ kfree_skb(skb);
+ packets_dropped++;
+ dev->stats.rx_errors++;
+ continue;
}
dev->stats.rx_packets++;
@@ -950,9 +983,9 @@ err:
skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
skb->len += skb->data_len;
- if (rx->flags & NETRXF_csum_blank)
+ if (rx->flags & XEN_NETRXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
- else if (rx->flags & NETRXF_data_validated)
+ else if (rx->flags & XEN_NETRXF_data_validated)
skb->ip_summed = CHECKSUM_UNNECESSARY;
__skb_queue_tail(&rxq, skb);
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
}
}
+static const struct xennet_stat {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} xennet_stats[] = {
+ {
+ "rx_gso_checksum_fixup",
+ offsetof(struct netfront_info, rx_gso_checksum_fixup)
+ },
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(xennet_stats);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+				     struct ethtool_stats *stats, u64 *data)
+{
+ void *np = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+ data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ xennet_stats[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
static const struct ethtool_ops xennet_ethtool_ops =
{
.set_tx_csum = ethtool_op_set_tx_csum,
.set_sg = xennet_set_sg,
.set_tso = xennet_set_tso,
.get_link = ethtool_op_get_link,
+
+ .get_sset_count = xennet_get_sset_count,
+ .get_ethtool_stats = xennet_get_ethtool_stats,
+ .get_strings = xennet_get_strings,
};
#ifdef CONFIG_SYSFS
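The new xennet_stats[] table pairs each ethtool string with an offsetof() into the private structure, so xennet_get_ethtool_stats() and xennet_get_strings() can walk every counter generically instead of open-coding each one. A self-contained sketch of the same name-plus-offset pattern, using a hypothetical demo_priv structure in place of netfront_info:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical private structure standing in for struct netfront_info. */
struct demo_priv {
	unsigned long rx_gso_checksum_fixup;
	unsigned long rx_dropped;
};

/* Each entry records where in the private struct the counter lives. */
static const struct demo_stat {
	const char *name;
	size_t offset;
} demo_stats[] = {
	{ "rx_gso_checksum_fixup",
	  offsetof(struct demo_priv, rx_gso_checksum_fixup) },
	{ "rx_dropped", offsetof(struct demo_priv, rx_dropped) },
};

int main(void)
{
	struct demo_priv priv = { .rx_gso_checksum_fixup = 3, .rx_dropped = 1 };
	char *base = (char *)&priv;
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++)
		printf("%s: %lu\n", demo_stats[i].name,
		       *(unsigned long *)(base + demo_stats[i].offset));
	return 0;
}
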
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 14f0955eca68..2642af4ee491 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -24,6 +24,7 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/phy.h>
#define DRIVER_NAME "xilinx_emaclite"
@@ -515,7 +516,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
*/
static int xemaclite_set_mac_address(struct net_device *dev, void *address)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
struct sockaddr *addr = address;
if (netif_running(dev))
@@ -534,7 +535,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
*/
static void xemaclite_tx_timeout(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
unsigned long flags;
dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
@@ -578,7 +579,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
*/
static void xemaclite_tx_handler(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
dev->stats.tx_packets++;
if (lp->deferred_skb) {
@@ -605,7 +606,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
*/
static void xemaclite_rx_handler(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
struct sk_buff *skb;
unsigned int align;
u32 len;
@@ -661,7 +662,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
{
bool tx_complete = 0;
struct net_device *dev = dev_id;
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
void __iomem *base_addr = lp->base_addr;
u32 tx_status;
@@ -918,7 +919,7 @@ void xemaclite_adjust_link(struct net_device *ndev)
*/
static int xemaclite_open(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
int retval;
/* Just to be safe, stop the device first */
@@ -987,7 +988,7 @@ static int xemaclite_open(struct net_device *dev)
*/
static int xemaclite_close(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
netif_stop_queue(dev);
xemaclite_disable_interrupts(lp);
@@ -1001,21 +1002,6 @@ static int xemaclite_close(struct net_device *dev)
}
/**
- * xemaclite_get_stats - Get the stats for the net_device
- * @dev: Pointer to the network device
- *
- * This function returns the address of the 'net_device_stats' structure for the
- * given network device. This structure holds usage statistics for the network
- * device.
- *
- * Return: Pointer to the net_device_stats structure.
- */
-static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
-{
- return &dev->stats;
-}
-
-/**
* xemaclite_send - Transmit a frame
* @orig_skb: Pointer to the socket buffer to be transmitted
* @dev: Pointer to the network device
@@ -1031,7 +1017,7 @@ static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
*/
static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct net_local *lp = netdev_priv(dev);
struct sk_buff *new_skb;
unsigned int len;
unsigned long flags;
@@ -1068,7 +1054,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
static void xemaclite_remove_ndev(struct net_device *ndev)
{
if (ndev) {
- struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+ struct net_local *lp = netdev_priv(ndev);
if (lp->base_addr)
iounmap((void __iomem __force *) (lp->base_addr));
@@ -1115,8 +1101,7 @@ static struct net_device_ops xemaclite_netdev_ops;
* Return: 0, if the driver is bound to the Emaclite device, or
* a negative error if there is failure.
*/
-static int __devinit xemaclite_of_probe(struct platform_device *ofdev,
- const struct of_device_id *match)
+static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
{
struct resource r_irq; /* Interrupt resources */
struct resource r_mem; /* IO mem resources */
@@ -1245,7 +1230,7 @@ static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
struct device *dev = &of_dev->dev;
struct net_device *ndev = dev_get_drvdata(dev);
- struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+ struct net_local *lp = netdev_priv(ndev);
/* Un-register the mii_bus, if configured */
if (lp->has_mdio) {
@@ -1285,7 +1270,6 @@ static struct net_device_ops xemaclite_netdev_ops = {
.ndo_start_xmit = xemaclite_send,
.ndo_set_mac_address = xemaclite_set_mac_address,
.ndo_tx_timeout = xemaclite_tx_timeout,
- .ndo_get_stats = xemaclite_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = xemaclite_poll_controller,
#endif
@@ -1303,7 +1287,7 @@ static struct of_device_id xemaclite_of_match[] __devinitdata = {
};
MODULE_DEVICE_TABLE(of, xemaclite_of_match);
-static struct of_platform_driver xemaclite_of_driver = {
+static struct platform_driver xemaclite_of_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
@@ -1321,7 +1305,7 @@ static struct of_platform_driver xemaclite_of_driver = {
static int __init xemaclite_init(void)
{
/* No kernel boot options used, we just need to register the driver */
- return of_register_platform_driver(&xemaclite_of_driver);
+ return platform_driver_register(&xemaclite_of_driver);
}
/**
@@ -1329,7 +1313,7 @@ static int __init xemaclite_init(void)
*/
static void __exit xemaclite_cleanup(void)
{
- of_unregister_platform_driver(&xemaclite_of_driver);
+ platform_driver_unregister(&xemaclite_of_driver);
}
module_init(xemaclite_init);
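The xilinx_emaclite hunks above drop the (struct net_local *) casts because netdev_priv() returns void *, which needs no cast in C, and they remove xemaclite_get_stats() because dev_get_stats() already falls back to dev->stats when a driver supplies no ndo_get_stats handler. A minimal kernel-style sketch of the allocation and retrieval pairing, where demo_priv and demo_alloc() are hypothetical names:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct demo_priv {				/* hypothetical private state */
	void __iomem *base_addr;
};

static struct net_device *demo_alloc(void)
{
	struct net_device *ndev = alloc_etherdev(sizeof(struct demo_priv));
	struct demo_priv *lp;

	if (!ndev)
		return NULL;

	/* No cast needed: the private area sits right behind the net_device
	 * allocated by alloc_etherdev(), and netdev_priv() returns void *. */
	lp = netdev_priv(ndev);
	lp->base_addr = NULL;

	return ndev;
}
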
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index cd1b3dcd61db..ec47e22fa186 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -744,7 +744,7 @@ static int yellowfin_init_ring(struct net_device *dev)
}
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
yp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
@@ -1157,7 +1157,7 @@ static int yellowfin_rx(struct net_device *dev)
for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
entry = yp->dirty_rx % RX_RING_SIZE;
if (yp->rx_skbuff[entry] == NULL) {
- struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
if (skb == NULL)
break; /* Better luck next round. */
yp->rx_skbuff[entry] = skb;
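The yellowfin hunks grow each receive allocation by two bytes, giving the driver room for the customary skb_reserve(skb, 2) headroom (which keeps the IP header aligned after the 14-byte Ethernet header) without shrinking the usable buffer below rx_buf_sz. A sketch of that refill pattern, with demo_rx_refill() as a hypothetical helper:

#include <linux/skbuff.h>

/* Allocate a receive buffer with 2 bytes of slack so the IP header that
 * follows the 14-byte Ethernet header lands on an aligned boundary. */
static struct sk_buff *demo_rx_refill(unsigned int rx_buf_sz)
{
	struct sk_buff *skb = dev_alloc_skb(rx_buf_sz + 2);

	if (!skb)
		return NULL;		/* better luck on the next pass */

	skb_reserve(skb, 2);		/* align the IP header */
	return skb;
}
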
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index c3a329204511..ae07b3dfbcc1 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -124,7 +124,7 @@ MODULE_LICENSE("GPL");
#define TX_BUF_SIZE 8192
#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */
-#define TX_TIMEOUT 10
+#define TX_TIMEOUT (HZ/10)
struct znet_private {
int rx_dma, tx_dma;